repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Integral-Technology-Solutions/ConfigNOW | Lib/rfc822.py | 4 | 31273 | """RFC-822 message manipulation class.
XXX This is only a very rough sketch of a full RFC-822 parser;
in particular the tokenizing of addresses does not adhere to all the
quoting rules.
Directions for use:
To create a Message object: first open a file, e.g.:
fp = open(file, 'r')
You can use any other legal way of getting an open file object, e.g. use
sys.stdin or call os.popen().
Then pass the open file object to the Message() constructor:
m = Message(fp)
This class can work with any input object that supports a readline
method. If the input object has seek and tell capability, the
rewindbody method will work; also illegal lines will be pushed back
onto the input stream. If the input object lacks seek but has an
`unread' method that can push back a line of input, Message will use
that to push back illegal lines. Thus this class can be used to parse
messages coming from a buffered stream.
The optional `seekable' argument is provided as a workaround for
certain stdio libraries in which tell() discards buffered data before
discovering that the lseek() system call doesn't work. For maximum
portability, you should set the seekable argument to zero to prevent
that initial tell() when passing in an unseekable object such as
a file object created from a socket object. If it is 1 on entry --
which it is by default -- the tell() method of the open file object is
called once; if this raises an exception, seekable is reset to 0. For
other nonzero values of seekable, this test is not made.
To get the text of a particular header there are several methods:
str = m.getheader(name)
str = m.getrawheader(name)
where name is the name of the header, e.g. 'Subject'.
The difference is that getheader() strips the leading and trailing
whitespace, while getrawheader() doesn't. Both functions retain
embedded whitespace (including newlines) exactly as they are
specified in the header, and leave the case of the text unchanged.
For addresses and address lists there are functions
realname, mailaddress = m.getaddr(name) and
list = m.getaddrlist(name)
where the latter returns a list of (realname, mailaddr) tuples.
There is also a method
time = m.getdate(name)
which parses a Date-like field and returns a time-compatible tuple,
i.e. a tuple such as returned by time.localtime() or accepted by
time.mktime().
See the class definition for lower level access methods.
There are also some utility functions here.
"""
# Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com>
import time
# Public names exported by `from rfc822 import *'.
__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"]
_blanklines = ('\r\n', '\n') # Optimization for islast()
class Message:
    """Represents a single RFC-822-compliant message."""

    def __init__(self, fp, seekable = 1):
        """Initialize the class instance and read the headers."""
        if seekable == 1:
            # Exercise tell() to make sure it works
            # (and then assume seek() works, too)
            try:
                fp.tell()
            except:
                seekable = 0
            else:
                seekable = 1
        self.fp = fp
        self.seekable = seekable
        # File offsets of the header block and the body; only meaningful
        # (non-None) when the underlying stream supports tell()/seek().
        self.startofheaders = None
        self.startofbody = None
        #
        if self.seekable:
            try:
                self.startofheaders = self.fp.tell()
            except IOError:
                self.seekable = 0
        #
        self.readheaders()
        #
        if self.seekable:
            try:
                self.startofbody = self.fp.tell()
            except IOError:
                self.seekable = 0

    def rewindbody(self):
        """Rewind the file to the start of the body (if seekable)."""
        if not self.seekable:
            raise IOError, "unseekable file"
        self.fp.seek(self.startofbody)

    def readheaders(self):
        """Read header lines.
        Read header lines up to the entirely blank line that
        terminates them. The (normally blank) line that ends the
        headers is skipped, but not included in the returned list.
        If a non-header line ends the headers, (which is an error),
        an attempt is made to backspace over it; it is never
        included in the returned list.
        The variable self.status is set to the empty string if all
        went well, otherwise it is an error message.
        The variable self.headers is a completely uninterpreted list
        of lines contained in the header (so printing them will
        reproduce the header exactly as it appears in the file).
        """
        self.dict = {}
        self.unixfrom = ''
        self.headers = list = []
        self.status = ''
        headerseen = ""
        firstline = 1
        startofline = unread = tell = None
        # Prefer an `unread' pushback hook if the stream offers one;
        # otherwise fall back to tell()/seek() when the stream is seekable.
        if hasattr(self.fp, 'unread'):
            unread = self.fp.unread
        elif self.seekable:
            tell = self.fp.tell
        while 1:
            if tell:
                try:
                    startofline = tell()
                except IOError:
                    # tell() stopped working; give up on pushback via seek().
                    startofline = tell = None
                    self.seekable = 0
            line = self.fp.readline()
            if not line:
                self.status = 'EOF in headers'
                break
            # Skip unix From name time lines
            if firstline and line.startswith('From '):
                self.unixfrom = self.unixfrom + line
                continue
            firstline = 0
            if headerseen and line[0] in ' \t':
                # It's a continuation line: append to the raw list and fold
                # it into the dict value for the most recent header.
                list.append(line)
                x = (self.dict[headerseen] + "\n " + line.strip())
                self.dict[headerseen] = x.strip()
                continue
            elif self.iscomment(line):
                # It's a comment. Ignore it.
                continue
            elif self.islast(line):
                # Note! No pushback here! The delimiter line gets eaten.
                break
            headerseen = self.isheader(line)
            if headerseen:
                # It's a legal header line, save it.
                list.append(line)
                self.dict[headerseen] = line[len(headerseen)+1:].strip()
                continue
            else:
                # It's not a header line; throw it back and stop here.
                if not self.dict:
                    self.status = 'No headers'
                else:
                    self.status = 'Non-header line where header expected'
                # Try to undo the read.
                if unread:
                    unread(line)
                elif tell:
                    self.fp.seek(startofline)
                else:
                    self.status = self.status + '; bad seek'
                break

    def isheader(self, line):
        """Determine whether a given line is a legal header.
        This method should return the header name, suitably canonicalized.
        You may override this method in order to use Message parsing
        on tagged data in RFC822-like formats with special header formats.
        """
        i = line.find(':')
        if i > 0:
            # Canonical form is the lowercased name before the colon.
            return line[:i].lower()
        else:
            return None

    def islast(self, line):
        """Determine whether a line is a legal end of RFC-822 headers.
        You may override this method if your application wants
        to bend the rules, e.g. to strip trailing whitespace,
        or to recognize MH template separators ('--------').
        For convenience (e.g. for code reading from sockets) a
        line consisting of \r\n also matches.
        """
        return line in _blanklines

    def iscomment(self, line):
        """Determine whether a line should be skipped entirely.
        You may override this method in order to use Message parsing
        on tagged data in RFC822-like formats that support embedded
        comments or free-text data.
        """
        # Base class never skips anything.
        return None

    def getallmatchingheaders(self, name):
        """Find all header lines matching a given header name.
        Look through the list of headers and find all lines
        matching a given header name (and their continuation
        lines). A list of the lines is returned, without
        interpretation. If the header does not occur, an
        empty list is returned. If the header occurs multiple
        times, all occurrences are returned. Case is not
        important in the header name.
        """
        name = name.lower() + ':'
        n = len(name)
        list = []
        hit = 0
        for line in self.headers:
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                # A new non-continuation header ends the current match.
                hit = 0
            if hit:
                list.append(line)
        return list

    def getfirstmatchingheader(self, name):
        """Get the first header line matching name.
        This is similar to getallmatchingheaders, but it returns
        only the first matching header (and its continuation
        lines).
        """
        name = name.lower() + ':'
        n = len(name)
        list = []
        hit = 0
        for line in self.headers:
            if hit:
                if not line[:1].isspace():
                    # Next real header: stop after the first match.
                    break
            elif line[:n].lower() == name:
                hit = 1
            if hit:
                list.append(line)
        return list

    def getrawheader(self, name):
        """A higher-level interface to getfirstmatchingheader().
        Return a string containing the literal text of the
        header but with the keyword stripped. All leading,
        trailing and embedded whitespace is kept in the
        string, however.
        Return None if the header does not occur.
        """
        list = self.getfirstmatchingheader(name)
        if not list:
            return None
        # Drop "Name:" from the first line, keep continuations verbatim.
        list[0] = list[0][len(name) + 1:]
        return ''.join(list)

    def getheader(self, name, default=None):
        """Get the header value for a name.
        This is the normal interface: it returns a stripped
        version of the header value for a given header name,
        or None if it doesn't exist. This uses the dictionary
        version which finds the *last* such header.
        """
        try:
            return self.dict[name.lower()]
        except KeyError:
            return default
    # Alias kept for mapping-like access.
    get = getheader

    def getheaders(self, name):
        """Get all values for a header.
        This returns a list of values for headers given more than once;
        each value in the result list is stripped in the same way as the
        result of getheader(). If the header is not given, return an
        empty list.
        """
        result = []
        current = ''
        have_header = 0
        for s in self.getallmatchingheaders(name):
            if s[0].isspace():
                # Continuation line: fold into the value being accumulated.
                if current:
                    current = "%s\n %s" % (current, s.strip())
                else:
                    current = s.strip()
            else:
                # Fresh header line: flush the previous value, start anew.
                if have_header:
                    result.append(current)
                current = s[s.find(":") + 1:].strip()
                have_header = 1
        if have_header:
            result.append(current)
        return result

    def getaddr(self, name):
        """Get a single address from a header, as a tuple.
        An example return value:
        ('Guido van Rossum', 'guido@cwi.nl')
        """
        # New, by Ben Escoto
        alist = self.getaddrlist(name)
        if alist:
            return alist[0]
        else:
            return (None, None)

    def getaddrlist(self, name):
        """Get a list of addresses from a header.
        Retrieves a list of addresses from a header, where each address is a
        tuple as returned by getaddr(). Scans all named headers, so it works
        properly with multiple To: or Cc: headers for example.
        """
        raw = []
        for h in self.getallmatchingheaders(name):
            if h[0] in ' \t':
                # Continuation line belongs to the previous header's value.
                raw.append(h)
            else:
                if raw:
                    # Separate values from distinct header lines.
                    raw.append(', ')
                i = h.find(':')
                if i > 0:
                    addr = h[i+1:]
                raw.append(addr)
        alladdrs = ''.join(raw)
        a = AddrlistClass(alladdrs)
        return a.getaddrlist()

    def getdate(self, name):
        """Retrieve a date field from a header.
        Retrieves a date field from the named header, returning
        a tuple compatible with time.mktime().
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate(data)

    def getdate_tz(self, name):
        """Retrieve a date field from a header as a 10-tuple.
        The first 9 elements make up a tuple compatible with
        time.mktime(), and the 10th is the offset of the poster's
        time zone from GMT/UTC.
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate_tz(data)

    # Access as a dictionary (only finds *last* header of each type):

    def __len__(self):
        """Get the number of headers in a message."""
        return len(self.dict)

    def __getitem__(self, name):
        """Get a specific header, as from a dictionary."""
        return self.dict[name.lower()]

    def __setitem__(self, name, value):
        """Set the value of a header.
        Note: This is not a perfect inversion of __getitem__, because
        any changed headers get stuck at the end of the raw-headers list
        rather than where the altered header was.
        """
        del self[name] # Won't fail if it doesn't exist
        self.dict[name.lower()] = value
        text = name + ": " + value
        lines = text.split("\n")
        for line in lines:
            self.headers.append(line + "\n")

    def __delitem__(self, name):
        """Delete all occurrences of a specific header, if it is present."""
        name = name.lower()
        if not self.dict.has_key(name):
            return
        del self.dict[name]
        name = name + ':'
        n = len(name)
        list = []
        hit = 0
        # Collect the indexes of matching lines (and their continuations),
        # then delete from the end so earlier indexes stay valid.
        for i in range(len(self.headers)):
            line = self.headers[i]
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                hit = 0
            if hit:
                list.append(i)
        list.reverse()
        for i in list:
            del self.headers[i]

    def has_key(self, name):
        """Determine whether a message contains the named header."""
        return self.dict.has_key(name.lower())

    def keys(self):
        """Get all of a message's header field names."""
        return self.dict.keys()

    def values(self):
        """Get all of a message's header field values."""
        return self.dict.values()

    def items(self):
        """Get all of a message's headers.
        Returns a list of name, value tuples.
        """
        return self.dict.items()

    def __str__(self):
        # Reproduce the raw header block exactly as read.
        str = ''
        for hdr in self.headers:
            str = str + hdr
        return str
# Utility functions
# -----------------
# XXX Should fix unquote() and quote() to be really conformant.
# XXX The inverses of the parse functions may also be useful.
def unquote(str):
    """Strip one level of surrounding double quotes or angle brackets.

    Strings shorter than two characters are returned unchanged, as is
    any string whose first and last characters do not form a matched
    pair of '"' quotes or '<'/'>' brackets.
    """
    if len(str) > 1:
        quoted = str[0] == '"' and str[-1:] == '"'
        bracketed = str[0] == '<' and str[-1:] == '>'
        if quoted or bracketed:
            return str[1:-1]
    return str
def quote(str):
    """Backslash-escape backslashes and double quotes in a string."""
    escaped = str.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
def parseaddr(address):
    """Parse an address into a (realname, mailaddr) tuple.

    Returns (None, None) when no address can be extracted.
    """
    parsed = AddrlistClass(address).getaddrlist()
    if not parsed:
        return (None, None)
    return parsed[0]
class AddrlistClass:
    """Address parser class by Ben Escoto.
    To understand what this class does, it helps to have a copy of
    RFC-822 in front of you.
    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """

    def __init__(self, field):
        """Initialize a new instance.
        `field' is an unparsed address header field, containing
        one or more addresses.
        """
        self.specials = '()<>@,:;.\"[]'
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        # Any special, whitespace, or line-break character ends an atom.
        self.atomends = self.specials + self.LWS + self.CR
        self.field = field
        # Comments seen while parsing the current address.
        self.commentlist = []

    def gotonext(self):
        """Parse up to the start of the next address."""
        # Skip whitespace/newlines and collect any parenthesized comments.
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos = self.pos + 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else: break

    def getaddrlist(self):
        """Parse all addresses.
        Returns a list containing all of the addresses.
        """
        ad = self.getaddress()
        if ad:
            # Recurse until getaddress() yields nothing more.
            return ad + self.getaddrlist()
        else: return []

    def getaddress(self):
        """Parse the next address.

        Returns a (possibly empty) list of (realname, addrspec) tuples.
        """
        self.commentlist = []
        self.gotonext()

        # Remember where we are so the addrspec case can re-parse from here.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()

        self.gotonext()
        returnlist = []

        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]

        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(' '.join(self.commentlist), addrspec)]

        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []

            fieldlen = len(self.field)
            self.pos = self.pos + 1
            # Accumulate member addresses until the closing ';'.
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos = self.pos + 1
                    break
                returnlist = returnlist + self.getaddress()

        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()

            if self.commentlist:
                returnlist = [(' '.join(plist) + ' (' + \
                        ' '.join(self.commentlist) + ')', routeaddr)]
            else: returnlist = [(' '.join(plist), routeaddr)]

        else:
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Skip a stray special so parsing can make progress.
                self.pos = self.pos + 1

        self.gotonext()
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos = self.pos + 1
        return returnlist

    def getrouteaddr(self):
        """Parse a route address (Return-path value).
        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return

        expectroute = 0
        self.pos = self.pos + 1
        self.gotonext()
        adlist = None
        while self.pos < len(self.field):
            if expectroute:
                # Consume (and discard) the domain of a source route.
                self.getdomain()
                expectroute = 0
            elif self.field[self.pos] == '>':
                self.pos = self.pos + 1
                break
            elif self.field[self.pos] == '@':
                self.pos = self.pos + 1
                expectroute = 1
            elif self.field[self.pos] == ':':
                self.pos = self.pos + 1
                # NOTE(review): expectaddrspec is set but never read.
                expectaddrspec = 1
            else:
                adlist = self.getaddrspec()
                self.pos = self.pos + 1
                break
            self.gotonext()

        return adlist

    def getaddrspec(self):
        """Parse an RFC-822 addr-spec."""
        aslist = []

        self.gotonext()
        # Local part: dot-separated atoms and quoted strings.
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos = self.pos + 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % self.getquote())
            elif self.field[self.pos] in self.atomends:
                break
            else: aslist.append(self.getatom())

        self.gotonext()
        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            # No domain part; return what we have.
            return ''.join(aslist)

        aslist.append('@')
        self.pos = self.pos + 1
        self.gotonext()
        return ''.join(aslist) + self.getdomain()

    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos = self.pos + 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos = self.pos + 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else: sdlist.append(self.getatom())
        return ''.join(sdlist)

    def getdelimited(self, beginchar, endchars, allowcomments = 1):
        """Parse a header fragment delimited by special characters.
        `beginchar' is the start character for the fragment.
        If self is not looking at an instance of `beginchar' then
        getdelimited returns the empty string.
        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.
        If `allowcomments' is non-zero, embedded RFC-822 comments
        are allowed within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''

        slist = ['']
        # `quote' flags that the previous character was a backslash, so the
        # next character is taken literally.
        quote = 0
        self.pos = self.pos + 1
        while self.pos < len(self.field):
            if quote == 1:
                slist.append(self.field[self.pos])
                quote = 0
            elif self.field[self.pos] in endchars:
                self.pos = self.pos + 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
            elif self.field[self.pos] == '\\':
                quote = 1
            else:
                slist.append(self.field[self.pos])
            self.pos = self.pos + 1

        return ''.join(slist)

    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', 0)

    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', 1)

    def getdomainliteral(self):
        """Parse an RFC-822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', 0)

    def getatom(self):
        """Parse an RFC-822 atom."""
        atomlist = ['']

        while self.pos < len(self.field):
            if self.field[self.pos] in self.atomends:
                break
            else: atomlist.append(self.field[self.pos])
            self.pos = self.pos + 1

        return ''.join(atomlist)

    def getphraselist(self):
        """Parse a sequence of RFC-822 phrases.
        A phrase is a sequence of words, which are in turn either
        RFC-822 atoms or quoted-strings. Phrases are canonicalized
        by squeezing all runs of continuous whitespace into one space.
        """
        plist = []

        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos = self.pos + 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.atomends:
                break
            else: plist.append(self.getatom())

        return plist
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC822 addresses."""

    def __init__(self, field):
        """Parse `field' into self.addresslist; an empty/None field
        yields an empty list."""
        AddrlistClass.__init__(self, field)
        if field:
            self.addresslist = self.getaddrlist()
        else:
            self.addresslist = []

    def __len__(self):
        return len(self.addresslist)

    def __str__(self):
        return ", ".join(map(dump_address_pair, self.addresslist))

    def __add__(self, other):
        # Set union
        newaddr = AddressList(None)
        newaddr.addresslist = self.addresslist[:]
        for x in other.addresslist:
            if not x in self.addresslist:
                newaddr.addresslist.append(x)
        return newaddr

    def __iadd__(self, other):
        # Set union, in-place
        for x in other.addresslist:
            if not x in self.addresslist:
                self.addresslist.append(x)
        return self

    def __sub__(self, other):
        # Set difference
        newaddr = AddressList(None)
        for x in self.addresslist:
            if not x in other.addresslist:
                newaddr.addresslist.append(x)
        return newaddr

    def __isub__(self, other):
        # Set difference, in-place
        for x in other.addresslist:
            if x in self.addresslist:
                self.addresslist.remove(x)
        return self

    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]
def dump_address_pair(pair):
    """Dump a (name, address) pair in a canonicalized form.

    With a real name present the result is '"name" <address>';
    otherwise just the bare address.
    """
    realname = pair[0]
    address = pair[1]
    if not realname:
        return address
    return '"' + realname + '" <' + address + '>'
# Parse a date field

# Month names; indexes 1-12 are abbreviations, 13-24 the full names
# (parsedate_tz folds the latter back into 1-12).
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.

# Offsets are in RFC822 numeric (+-HHMM) form, e.g. -400 is -4 hours;
# parsedate_tz converts them to seconds.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }
def parsedate_tz(data):
    """Convert a date string to a 10-element time tuple.

    The first 9 elements are compatible with time.mktime(); the 10th is
    the timezone offset in seconds east of UTC, or None if no usable
    timezone was found.  Accounts for military timezones.  Returns None
    if the string cannot be parsed.
    """
    data = data.split()
    # An initial day name (possibly ending in ',' or '.') is decorative.
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            # A "+zone" suffix is glued onto the time field; split it off.
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Day and month may be swapped ("month day" order); try the other way.
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Full month names occupy indexes 13-24; fold back to 1-12.
        mm = mm - 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # yy actually holds the time; swap with tm.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # yy actually holds the timezone; swap with tz.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    tzoffset = None
    tz = tz.upper()
    # `in' works on both Python 2 and 3; dict.has_key() is Python-2-only.
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        # Floor division keeps the HHMM split correct under true division too.
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    tuple = (yy, mm, dd, thh, tmm, tss, 0, 0, 0, tzoffset)
    return tuple
def parsedate(data):
    """Convert a time string to a 9-element time tuple (no timezone)."""
    parsed = parsedate_tz(data)
    if type(parsed) == type(()):
        # Drop the trailing timezone element.
        return parsed[:9]
    return parsed
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
    tz_offset = data[9]
    if tz_offset is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    # Interpret the first nine fields as local time with DST forced off,
    # then compensate for both the poster's offset and our own.
    local_stamp = time.mktime(data[:8] + (0,))
    return local_stamp - tz_offset - time.timezone
def formatdate(timeval=None):
    """Return a date string in the format preferred for Internet standards.

    Example: Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123.
    `timeval' defaults to the current time.
    """
    if timeval is None:
        timeval = time.time()
    gmt = time.gmtime(timeval)
    return time.strftime('%a, %d %b %Y %H:%M:%S GMT', gmt)
# When used as script, run a small test program.
# The first command line argument must be a filename containing one
# message in RFC-822 format.

if __name__ == '__main__':
    import sys, os
    # Default to the first message of an MH-style inbox under $HOME.
    file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
    if sys.argv[1:]: file = sys.argv[1]
    f = open(file, 'r')
    m = Message(f)
    print 'From:', m.getaddr('from')
    print 'To:', m.getaddrlist('to')
    print 'Subject:', m.getheader('subject')
    print 'Date:', m.getheader('date')
    date = m.getdate_tz('date')
    tz = date[-1]
    date = time.localtime(mktime_tz(date))
    if date:
        print 'ParsedDate:', time.asctime(date),
        # Re-express the zone offset (seconds) as +HHMM[.SS].
        hhmmss = tz
        hhmm, ss = divmod(hhmmss, 60)
        hh, mm = divmod(hhmm, 60)
        print "%+03d%02d" % (hh, mm),
        if ss: print ".%02d" % ss,
        print
    else:
        print 'ParsedDate:', None
    m.rewindbody()
    # Count the body lines left on the stream.
    n = 0
    while f.readline():
        n = n + 1
    print 'Lines:', n
    print '-'*70
    print 'len =', len(m)
    if m.has_key('Date'): print 'Date =', m['Date']
    if m.has_key('X-Nonsense'): pass
    print 'keys =', m.keys()
    print 'values =', m.values()
    print 'items =', m.items()
| mit |
johankaito/fufuka | microblog/old-flask/lib/python2.7/site-packages/coverage/files.py | 209 | 10724 | """File wrangling."""
from coverage.backward import to_string
from coverage.misc import CoverageException
import fnmatch, os, os.path, re, sys
import ntpath, posixpath
class FileLocator(object):
    """Understand how filenames work."""

    def __init__(self):
        # The absolute path to our current directory.
        self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)

        # Cache of results of calling the canonical_filename() method, to
        # avoid duplicating work.
        self.canonical_filename_cache = {}

    def relative_filename(self, filename):
        """Return the relative form of `filename`.
        The filename will be relative to the current directory when the
        `FileLocator` was constructed.
        """
        fnorm = os.path.normcase(filename)
        if fnorm.startswith(self.relative_dir):
            # Strip the directory prefix, keeping the remainder's case.
            filename = filename[len(self.relative_dir):]
        return filename

    def canonical_filename(self, filename):
        """Return a canonical filename for `filename`.
        An absolute path with no redundant components and normalized case.
        """
        if filename not in self.canonical_filename_cache:
            if not os.path.isabs(filename):
                # Resolve a relative name against the current directory,
                # then each sys.path entry, taking the first that exists.
                for path in [os.curdir] + sys.path:
                    if path is None:
                        continue
                    f = os.path.join(path, filename)
                    if os.path.exists(f):
                        filename = f
                        break
            cf = abs_file(filename)
            # NOTE(review): `filename` may have been rebound above, so the
            # cache entry is keyed by the resolved name; lookups with the
            # original relative name will never hit the cache. Confirm
            # whether that is intended.
            self.canonical_filename_cache[filename] = cf
        return self.canonical_filename_cache[filename]

    def get_zip_data(self, filename):
        """Get data from `filename` if it is a zip file path.
        Returns the string data read from the zip file, or None if no zip file
        could be found or `filename` isn't in it. The data returned will be
        an empty string if the file is empty.
        """
        import zipimport
        markers = ['.zip'+os.sep, '.egg'+os.sep]
        for marker in markers:
            if marker in filename:
                # Split "…/thing.zip/inner/path" into archive + member.
                parts = filename.split(marker)
                try:
                    zi = zipimport.zipimporter(parts[0]+marker[:-1])
                except zipimport.ZipImportError:
                    continue
                try:
                    data = zi.get_data(parts[1])
                except IOError:
                    continue
                return to_string(data)
        return None
if sys.platform == 'win32':

    def actual_path(path):
        """Get the actual path of `path`, including the correct case."""
        # Memoized: results per path and per-directory listings are cached
        # on the function object to avoid repeated os.listdir() calls.
        if path in actual_path.cache:
            return actual_path.cache[path]

        head, tail = os.path.split(path)
        if not tail:
            actpath = head
        elif not head:
            actpath = tail
        else:
            # Fix the parent's case first, then find this component's
            # true case in the parent's directory listing.
            head = actual_path(head)
            if head in actual_path.list_cache:
                files = actual_path.list_cache[head]
            else:
                try:
                    files = os.listdir(head)
                except OSError:
                    files = []
                actual_path.list_cache[head] = files
            normtail = os.path.normcase(tail)
            for f in files:
                if os.path.normcase(f) == normtail:
                    tail = f
                    break
            actpath = os.path.join(head, tail)
        actual_path.cache[path] = actpath
        return actpath

    actual_path.cache = {}
    actual_path.list_cache = {}

else:
    def actual_path(filename):
        """The actual path for non-Windows platforms."""
        # Case is significant on these platforms, so nothing to fix.
        return filename
def abs_file(filename):
    """Return the absolute normalized form of `filename`."""
    # Expand "~user" first, then any environment variables.
    expanded = os.path.expandvars(os.path.expanduser(filename))
    # Resolve symlinks, then make absolute, then fix the case.
    resolved = os.path.realpath(expanded)
    absolute = os.path.abspath(resolved)
    return actual_path(absolute)
def isabs_anywhere(filename):
    """Is `filename` an absolute path on any OS?"""
    if ntpath.isabs(filename):
        return True
    return posixpath.isabs(filename)
def prep_patterns(patterns):
    """Prepare the file patterns for use in a `FnmatchMatcher`.

    A pattern that starts with a wildcard is kept as-is; any other
    pattern is made absolute against the current directory.

    If `patterns` is None, an empty list is returned.
    """
    prepped = []
    for pattern in patterns or []:
        if pattern[:1] in ("*", "?"):
            # Leading wildcard: match any prefix, leave it alone.
            prepped.append(pattern)
        else:
            prepped.append(abs_file(pattern))
    return prepped
class TreeMatcher(object):
    """A matcher for files in a tree."""

    def __init__(self, directories):
        # Keep our own copy so later add() calls don't mutate the caller's.
        self.dirs = list(directories)

    def __repr__(self):
        return "<TreeMatcher %r>" % self.dirs

    def info(self):
        """A list of strings for displaying when dumping state."""
        return self.dirs

    def add(self, directory):
        """Add another directory to the list we match for."""
        self.dirs.append(directory)

    def match(self, fpath):
        """Does `fpath` indicate a file in one of our trees?"""
        for directory in self.dirs:
            if not fpath.startswith(directory):
                continue
            if fpath == directory:
                # The path is the directory itself.
                return True
            if fpath[len(directory)] == os.sep:
                # The path is a file somewhere inside the directory.
                return True
        return False
class FnmatchMatcher(object):
    """A matcher for files by filename pattern."""

    def __init__(self, pats):
        # Keep our own copy of the pattern list.
        self.pats = list(pats)

    def __repr__(self):
        return "<FnmatchMatcher %r>" % self.pats

    def info(self):
        """A list of strings for displaying when dumping state."""
        return self.pats

    def match(self, fpath):
        """Does `fpath` match one of our filename patterns?"""
        return any(fnmatch.fnmatch(fpath, pat) for pat in self.pats)
def sep(s):
    """Find the path separator used in this string, or os.sep if none."""
    found = re.search(r"[\\/]", s)
    if found is None:
        # No separator present; fall back to the platform default.
        return os.sep
    return found.group(0)
class PathAliases(object):
    """A collection of aliases for paths.
    When combining data files from remote machines, often the paths to source
    code are different, for example, due to OS differences, or because of
    serialized checkouts on continuous integration machines.
    A `PathAliases` object tracks a list of pattern/result pairs, and can
    map a path through those aliases to produce a unified path.
    `locator` is a FileLocator that is used to canonicalize the results.
    """

    def __init__(self, locator=None):
        # Each entry is (compiled regex, result, pattern_sep, result_sep).
        self.aliases = []
        self.locator = locator

    def add(self, pattern, result):
        """Add the `pattern`/`result` pair to the list of aliases.
        `pattern` is an `fnmatch`-style pattern. `result` is a simple
        string. When mapping paths, if a path starts with a match against
        `pattern`, then that match is replaced with `result`. This models
        isomorphic source trees being rooted at different places on two
        different machines.
        `pattern` can't end with a wildcard component, since that would
        match an entire tree, and not just its root.
        """
        # The pattern can't end with a wildcard component.
        pattern = pattern.rstrip(r"\/")
        if pattern.endswith("*"):
            raise CoverageException("Pattern must not end with wildcards.")
        pattern_sep = sep(pattern)

        # The pattern is meant to match a filepath. Let's make it absolute
        # unless it already is, or is meant to match any prefix.
        if not pattern.startswith('*') and not isabs_anywhere(pattern):
            pattern = abs_file(pattern)
        pattern += pattern_sep

        # Make a regex from the pattern.  fnmatch always adds a \Z or $ to
        # match the whole string, which we don't want.
        regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
        if regex_pat.endswith("$"):
            regex_pat = regex_pat[:-1]
        # We want */a/b.py to match on Windows too, so change slash to match
        # either separator.
        regex_pat = regex_pat.replace(r"\/", r"[\\/]")
        # We want case-insensitive matching, so add that flag.
        regex = re.compile(r"(?i)" + regex_pat)

        # Normalize the result: it must end with a path separator.
        result_sep = sep(result)
        result = result.rstrip(r"\/") + result_sep
        self.aliases.append((regex, result, pattern_sep, result_sep))

    def map(self, path):
        """Map `path` through the aliases.
        `path` is checked against all of the patterns. The first pattern to
        match is used to replace the root of the path with the result root.
        Only one pattern is ever used. If no patterns match, `path` is
        returned unchanged.
        The separator style in the result is made to match that of the result
        in the alias.
        """
        for regex, result, pattern_sep, result_sep in self.aliases:
            m = regex.match(path)
            if m:
                # Swap the matched prefix for the alias result, then make
                # the whole path use the result's separator style.
                new = path.replace(m.group(0), result)
                if pattern_sep != result_sep:
                    new = new.replace(pattern_sep, result_sep)
                if self.locator:
                    new = self.locator.canonical_filename(new)
                return new
        return path
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a
    __init__.py, except for `dirname` itself, which isn't required to have
    one.  The assumption is that `dirname` was specified directly, so the
    user knows best, but subdirectories are checked for a __init__.py to be
    sure we only find the importable files.
    """
    # Must look like a Python module: ends in .py/.pyw and contains none of
    # the punctuation characters that usually mark editor junk.
    good_name = re.compile(r"^[^.#~!$@%^&*()+=,]+\.pyw?$")
    for index, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if index and '__init__.py' not in filenames:
            # Not a package: prune the whole subtree from the walk.
            dirnames[:] = []
            continue
        for name in filenames:
            if good_name.match(name):
                yield os.path.join(dirpath, name)
| apache-2.0 |
agileblaze/OpenStackTwoFactorAuthentication | openstack_dashboard/dashboards/project/stacks/forms.py | 11 | 18300 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
    """Creates attribute dicts for the switchable upload form

    :type prefix: str
    :param prefix: prefix (environment, template) of field
    :type input_type: str
    :param input_type: field type (file, raw, url)
    :type name: str
    :param name: translated text label to display to user
    :rtype: dict
    :return: an attribute set to pass to form build
    """
    data_key = 'data-%ssource-%s' % (prefix, input_type)
    return {
        'class': 'switched',
        'data-switch-on': prefix + 'source',
        data_key: name,
    }
class TemplateForm(forms.SelfHandlingForm):
    """First step of launching a stack: collect template and environment.

    The Heat template may be supplied as an uploaded file, raw text or a
    URL; the optional environment may be supplied as an uploaded file or
    raw text.  ``clean()`` validates the template against the Heat API and
    stores the result for the next step; ``handle()`` chains the request to
    the next view in the workflow.
    """

    class Meta(object):
        name = _('Select Template')
        help_text = _('Select a template to launch a stack.')

    # TODO(jomara) - update URL choice for template & environment files
    # w/ client side download when applicable
    base_choices = [('file', _('File')),
                    ('raw', _('Direct Input'))]
    url_choice = [('url', _('URL'))]
    attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
    template_source = forms.ChoiceField(label=_('Template Source'),
                                        choices=base_choices + url_choice,
                                        widget=forms.Select(attrs=attributes))

    attributes = create_upload_form_attributes(
        'template',
        'file',
        _('Template File'))
    template_upload = forms.FileField(
        label=_('Template File'),
        help_text=_('A local template to upload.'),
        widget=forms.FileInput(attrs=attributes),
        required=False)

    attributes = create_upload_form_attributes(
        'template',
        'url',
        _('Template URL'))
    template_url = forms.URLField(
        label=_('Template URL'),
        help_text=_('An external (HTTP) URL to load the template from.'),
        widget=forms.TextInput(attrs=attributes),
        required=False)

    attributes = create_upload_form_attributes(
        'template',
        'raw',
        _('Template Data'))
    template_data = forms.CharField(
        label=_('Template Data'),
        help_text=_('The raw contents of the template.'),
        widget=forms.widgets.Textarea(attrs=attributes),
        required=False)

    attributes = {'data-slug': 'envsource', 'class': 'switchable'}
    environment_source = forms.ChoiceField(
        label=_('Environment Source'),
        choices=base_choices,
        widget=forms.Select(attrs=attributes),
        required=False)

    attributes = create_upload_form_attributes(
        'env',
        'file',
        _('Environment File'))
    environment_upload = forms.FileField(
        label=_('Environment File'),
        help_text=_('A local environment to upload.'),
        widget=forms.FileInput(attrs=attributes),
        required=False)

    attributes = create_upload_form_attributes(
        'env',
        'raw',
        _('Environment Data'))
    environment_data = forms.CharField(
        label=_('Environment Data'),
        help_text=_('The raw contents of the environment file.'),
        widget=forms.widgets.Textarea(attrs=attributes),
        required=False)

    def __init__(self, *args, **kwargs):
        self.next_view = kwargs.pop('next_view')
        super(TemplateForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Normalize uploads and validate the template via the Heat API."""
        cleaned = super(TemplateForm, self).clean()

        files = self.request.FILES
        self.clean_uploaded_files('template', _('template'), cleaned, files)
        self.clean_uploaded_files('environment', _('environment'), cleaned,
                                  files)

        # Validate the template and get back the params.
        kwargs = {}
        if cleaned['template_data']:
            kwargs['template'] = cleaned['template_data']
        else:
            kwargs['template_url'] = cleaned['template_url']

        if cleaned['environment_data']:
            kwargs['environment'] = cleaned['environment_data']

        try:
            validated = api.heat.template_validate(self.request, **kwargs)
            cleaned['template_validate'] = validated
        except Exception as e:
            # BUG FIX: unicode() does not exist on Python 3.  six.text_type
            # is an alias for unicode on Python 2 and str on Python 3, so
            # behavior is unchanged while the code becomes portable.
            raise forms.ValidationError(six.text_type(e))

        return cleaned

    def clean_uploaded_files(self, prefix, field_label, cleaned, files):
        """Cleans Template & Environment data from form upload.

        Does some of the crunchy bits for processing uploads vs raw
        data depending on what the user specified. Identical process
        for environment data & template data.

        :type prefix: str
        :param prefix: prefix (environment, template) of field
        :type field_label: str
        :param field_label: translated prefix str for messages
        :type cleaned: dict
        :param cleaned: existing cleaned fields from form
        :type files: dict
        :param files: uploaded files from the request
        :rtype: dict
        :return: cleaned dict including environment & template data
        """
        upload_str = prefix + "_upload"
        data_str = prefix + "_data"
        url = cleaned.get(prefix + '_url')
        data = cleaned.get(prefix + '_data')

        has_upload = upload_str in files
        # Uploaded file handler
        if has_upload and not url:
            log_template_name = files[upload_str].name
            LOG.info('got upload %s' % log_template_name)

            tpl = files[upload_str].read()
            # A leading brace suggests JSON; parse it up front so the user
            # gets a clear error instead of a failed API call later.
            if tpl.startswith('{'):
                try:
                    json.loads(tpl)
                except Exception as e:
                    msg = _('There was a problem parsing the'
                            ' %(prefix)s: %(error)s')
                    msg = msg % {'prefix': prefix, 'error': e}
                    raise forms.ValidationError(msg)
            cleaned[data_str] = tpl

        # URL handler
        elif url and (has_upload or data):
            msg = _('Please specify a %s using only one source method.')
            msg = msg % field_label
            raise forms.ValidationError(msg)

        elif prefix == 'template':
            # Check for raw template input - blank environment allowed
            if not url and not data:
                msg = _('You must specify a template via one of the '
                        'available sources.')
                raise forms.ValidationError(msg)

    def create_kwargs(self, data):
        """Package the validated form data for the next workflow view."""
        kwargs = {'parameters': data['template_validate'],
                  'environment_data': data['environment_data'],
                  'template_data': data['template_data'],
                  'template_url': data['template_url']}
        if data.get('stack_id'):
            kwargs['stack_id'] = data['stack_id']
        return kwargs

    def handle(self, request, data):
        kwargs = self.create_kwargs(data)
        # NOTE (gabriel): This is a bit of a hack, essentially rewriting this
        # request so that we can chain it as an input to the next view...
        # but hey, it totally works.
        request.method = 'GET'

        return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
    """Template-selection form for re-launching an existing stack.

    Identical to TemplateForm except for the labels and two extra fields
    that carry the target stack's identity through the workflow.
    """
    class Meta(object):
        name = _('Edit Template')
        help_text = _('Select a new template to re-launch a stack.')
    # Hidden field: id of the stack being updated.
    stack_id = forms.CharField(label=_('Stack ID'),
                               widget=forms.widgets.HiddenInput)
    # Displayed read-only so the user can confirm which stack is affected.
    stack_name = forms.CharField(label=_('Stack Name'),
                                 widget=forms.TextInput(attrs={'readonly':
                                                               'readonly'}))
class PreviewTemplateForm(TemplateForm):
    """Template-selection form for previewing a stack.

    Behaves exactly like TemplateForm; only the name and help text differ.
    """
    class Meta(object):
        name = _('Preview Template')
        help_text = _('Select a new template to preview a stack.')
class CreateStackForm(forms.SelfHandlingForm):
    """Second step of launching a stack: name, options and parameters.

    Beyond the statically declared fields below, one form field per
    template parameter is added dynamically from the Heat validation
    result produced by TemplateForm (see ``_build_parameter_fields``).
    """

    param_prefix = '__param_'

    class Meta(object):
        name = _('Create Stack')

    # Hidden carriers for the data collected by the previous step.
    template_data = forms.CharField(
        widget=forms.widgets.HiddenInput,
        required=False)
    template_url = forms.CharField(
        widget=forms.widgets.HiddenInput,
        required=False)
    environment_data = forms.CharField(
        widget=forms.widgets.HiddenInput,
        required=False)
    parameters = forms.CharField(
        widget=forms.widgets.HiddenInput)
    stack_name = forms.RegexField(
        max_length=255,
        label=_('Stack Name'),
        help_text=_('Name of the stack to create.'),
        regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
        error_messages={'invalid':
                        _('Name must start with a letter and may '
                          'only contain letters, numbers, underscores, '
                          'periods and hyphens.')})
    timeout_mins = forms.IntegerField(
        initial=60,
        label=_('Creation Timeout (minutes)'),
        help_text=_('Stack creation timeout in minutes.'))
    enable_rollback = forms.BooleanField(
        label=_('Rollback On Failure'),
        help_text=_('Enable rollback on create/update failure.'),
        required=False)

    def __init__(self, *args, **kwargs):
        parameters = kwargs.pop('parameters')
        # special case: load template data from API, not passed in params
        if kwargs.get('validate_me'):
            parameters = kwargs.pop('validate_me')
        super(CreateStackForm, self).__init__(*args, **kwargs)
        self._build_parameter_fields(parameters)

    def _build_parameter_fields(self, template_validate):
        """Add one form field per template parameter.

        :param template_validate: result of the Heat template validation
            call, containing 'Description', 'Parameters' and optionally
            'ParameterGroups'.
        """
        self.fields['password'] = forms.CharField(
            label=_('Password for user "%s"') % self.request.user.username,
            help_text=_('This is required for operations to be performed '
                        'throughout the lifecycle of the stack'),
            widget=forms.PasswordInput())

        self.help_text = template_validate['Description']

        params = template_validate.get('Parameters', {})

        if template_validate.get('ParameterGroups'):
            # Honor the ordering requested by the template author.
            params_in_order = []
            for group in template_validate['ParameterGroups']:
                for param in group.get('parameters', []):
                    if param in params:
                        params_in_order.append((param, params[param]))
        else:
            # no parameter groups, simply sorted to make the order fixed
            params_in_order = sorted(params.items())
        for param_key, param in params_in_order:
            field = None
            field_key = self.param_prefix + param_key
            field_args = {
                'initial': param.get('Default', None),
                'label': param.get('Label', param_key),
                'help_text': param.get('Description', ''),
                # A parameter without a default must be supplied by the user.
                'required': param.get('Default', None) is None
            }

            param_type = param.get('Type', None)
            hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))

            if 'CustomConstraint' in param:
                choices = self._populate_custom_choices(
                    param['CustomConstraint'])
                field_args['choices'] = choices
                field = forms.ChoiceField(**field_args)

            elif 'AllowedValues' in param:
                # BUG FIX: map() returns a one-shot iterator on Python 3,
                # which Django's ChoiceField cannot reuse.  A list
                # comprehension is equivalent on Python 2 and safe on both.
                field_args['choices'] = [(x, x)
                                         for x in param['AllowedValues']]
                field = forms.ChoiceField(**field_args)

            elif param_type == 'Json' and 'Default' in param:
                field_args['initial'] = json.dumps(param['Default'])
                field = forms.CharField(**field_args)

            elif param_type in ('CommaDelimitedList', 'String', 'Json'):
                if 'MinLength' in param:
                    field_args['min_length'] = int(param['MinLength'])
                    field_args['required'] = param.get('MinLength', 0) > 0
                if 'MaxLength' in param:
                    field_args['max_length'] = int(param['MaxLength'])
                if hidden:
                    field_args['widget'] = forms.PasswordInput()
                field = forms.CharField(**field_args)

            elif param_type == 'Number':
                if 'MinValue' in param:
                    field_args['min_value'] = int(param['MinValue'])
                if 'MaxValue' in param:
                    field_args['max_value'] = int(param['MaxValue'])
                field = forms.IntegerField(**field_args)

            # heat-api currently returns the boolean type in lowercase
            # (see https://bugs.launchpad.net/heat/+bug/1361448)
            # so for better compatibility both are checked here
            elif param_type in ('Boolean', 'boolean'):
                field = forms.BooleanField(**field_args)

            if field:
                self.fields[field_key] = field

    @sensitive_variables('password')
    def handle(self, request, data):
        """Submit the stack-create call; returns True on success."""
        # Strip the __param_ prefix to recover the raw parameter names.
        prefix_length = len(self.param_prefix)
        params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
                       if k.startswith(self.param_prefix)]
        fields = {
            'stack_name': data.get('stack_name'),
            'timeout_mins': data.get('timeout_mins'),
            'disable_rollback': not(data.get('enable_rollback')),
            'parameters': dict(params_list),
            'password': data.get('password')
        }
        if data.get('template_data'):
            fields['template'] = data.get('template_data')
        else:
            fields['template_url'] = data.get('template_url')

        if data.get('environment_data'):
            fields['environment'] = data.get('environment_data')

        try:
            api.heat.stack_create(self.request, **fields)
            messages.success(request, _("Stack creation started."))
            return True
        except Exception:
            exceptions.handle(request)

    def _populate_custom_choices(self, custom_type):
        """Return choice tuples for a Heat custom constraint type."""
        if custom_type == 'neutron.network':
            return instance_utils.network_field_data(self.request, True)
        if custom_type == 'nova.keypair':
            return instance_utils.keypair_field_data(self.request, True)
        if custom_type == 'glance.image':
            return image_utils.image_field_data(self.request, True)
        if custom_type == 'nova.flavor':
            return instance_utils.flavor_field_data(self.request, True)
        return []
class EditStackForm(CreateStackForm):
    """Update an existing stack's parameters.

    Reuses CreateStackForm's dynamically-built parameter fields but
    submits a stack *update* instead of a create.
    """
    class Meta(object):
        name = _('Update Stack Parameters')
    # Hidden field: id of the stack being updated.
    stack_id = forms.CharField(
        label=_('Stack ID'),
        widget=forms.widgets.HiddenInput)
    # Displayed read-only so the user can confirm which stack is affected.
    stack_name = forms.CharField(
        label=_('Stack Name'),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    @sensitive_variables('password')
    def handle(self, request, data):
        """Submit the stack-update call; returns True on success."""
        # Strip the __param_ prefix to recover the raw parameter names.
        prefix_length = len(self.param_prefix)
        params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
                       if k.startswith(self.param_prefix)]
        stack_id = data.get('stack_id')
        fields = {
            'stack_name': data.get('stack_name'),
            'timeout_mins': data.get('timeout_mins'),
            'disable_rollback': not(data.get('enable_rollback')),
            'parameters': dict(params_list),
            'password': data.get('password')
        }
        # if the user went directly to this form, resubmit the existing
        # template data. otherwise, submit what they had from the first form
        if data.get('template_data'):
            fields['template'] = data.get('template_data')
        elif data.get('template_url'):
            fields['template_url'] = data.get('template_url')
        elif data.get('parameters'):
            fields['template'] = data.get('parameters')
        try:
            api.heat.stack_update(self.request, stack_id=stack_id, **fields)
            messages.success(request, _("Stack update started."))
            return True
        except Exception:
            exceptions.handle(request)
class PreviewStackForm(CreateStackForm):
    """Preview a stack without actually creating it."""
    class Meta(object):
        name = _('Preview Stack Parameters')
    def __init__(self, *args, **kwargs):
        self.next_view = kwargs.pop('next_view')
        # NOTE(review): super(CreateStackForm, ...) deliberately skips
        # CreateStackForm.__init__, so no dynamic parameter fields are
        # built here and the 'parameters' kwarg is not popped -- confirm
        # against the calling view that this is intentional.
        super(CreateStackForm, self).__init__(*args, **kwargs)
    def handle(self, request, data):
        """Request a stack preview and chain the result to the next view."""
        # Strip the __param_ prefix to recover the raw parameter names.
        prefix_length = len(self.param_prefix)
        params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
                       if k.startswith(self.param_prefix)]
        fields = {
            'stack_name': data.get('stack_name'),
            'timeout_mins': data.get('timeout_mins'),
            'disable_rollback': not(data.get('enable_rollback')),
            'parameters': dict(params_list),
        }
        if data.get('template_data'):
            fields['template'] = data.get('template_data')
        else:
            fields['template_url'] = data.get('template_url')
        if data.get('environment_data'):
            fields['environment'] = data.get('environment_data')
        try:
            stack_preview = api.heat.stack_preview(self.request, **fields)
            # Rewrite the request so it can be chained to the preview view.
            request.method = 'GET'
            return self.next_view.as_view()(request,
                                            stack_preview=stack_preview)
        except Exception:
            exceptions.handle(request)
| apache-2.0 |
ckirby/django | django/core/checks/compatibility/django_1_8_0.py | 286 | 1052 | from __future__ import unicode_literals
from django.conf import global_settings, settings
from .. import Tags, Warning, register
@register(Tags.compatibility)
def check_duplicate_template_settings(app_configs, **kwargs):
    """Warn when deprecated TEMPLATE_* settings coexist with TEMPLATES."""
    if not settings.TEMPLATES:
        return []
    deprecated_names = (
        'TEMPLATE_DIRS',
        'ALLOWED_INCLUDE_ROOTS',
        'TEMPLATE_CONTEXT_PROCESSORS',
        'TEMPLATE_DEBUG',
        'TEMPLATE_LOADERS',
        'TEMPLATE_STRING_IF_INVALID',
    )
    # A setting counts as duplicated only if the project changed it from
    # Django's global default.
    overridden = [
        name for name in deprecated_names
        if getattr(settings, name) != getattr(global_settings, name)
    ]
    if not overridden:
        return []
    return [Warning(
        "The standalone TEMPLATE_* settings were deprecated in Django "
        "1.8 and the TEMPLATES dictionary takes precedence. You must "
        "put the values of the following settings into your default "
        "TEMPLATES dict: %s." % ", ".join(overridden),
        id='1_8.W001',
    )]
| bsd-3-clause |
laosunhust/SceCells | tst/googletest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
    """Re-exec upload.py, ensuring the gtest group is on the cc line."""
    # upload.py is expected to live in the same directory as this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    upload_py_path = os.path.join(script_dir, 'upload.py')

    argv = [upload_py_path]
    saw_cc_flag = False
    for arg in sys.argv[1:]:
        if not arg.startswith(CC_FLAG):
            argv.append(arg)
            continue
        # Append the gtest group to an existing cc list if it's missing.
        saw_cc_flag = True
        addresses = [addr for addr in arg[len(CC_FLAG):].split(',') if addr]
        if GTEST_GROUP not in addresses:
            addresses.append(GTEST_GROUP)
        argv.append(CC_FLAG + ','.join(addresses))
    if not saw_cc_flag:
        argv.append(CC_FLAG + GTEST_GROUP)

    # Replace this process with upload.py, passing the adjusted flags.
    os.execv(upload_py_path, argv)

if __name__ == '__main__':
    main()
| mit |
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/rebel/fatigued_rebel_officer_67.py | 2 | 1906 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
    """Register the 'fatigued_rebel_officer_67' elite rebel NPC template."""
    template = MobileTemplate()
    template.setCreatureName('crackdown_rebel_first_lieutenant')
    template.setLevel(67)
    template.setDifficulty(Difficulty.ELITE)
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setDeathblow(False)
    template.setScale(1)
    template.setSocialGroup("rebel")
    template.setAssistRange(6)
    template.setStalker(False)
    template.setFaction("rebel")
    template.setFactionStatus(FactionStatus.Combatant)

    # Visual appearances: one of these .iff models is used per spawn.
    appearances = Vector()
    for iff in (
            'object/mobile/shared_dressed_rebel_first_lieutenant_bothan_male_01.iff',
            'object/mobile/shared_dressed_rebel_first_lieutenant_human_female_01.iff',
            'object/mobile/shared_dressed_rebel_first_lieutenant_human_male_01.iff',
            'object/mobile/shared_dressed_rebel_first_lieutenant_moncal_female_01.iff',
            'object/mobile/shared_dressed_rebel_first_lieutenant_moncal_female_02.iff',
            'object/mobile/shared_dressed_rebel_first_lieutenant_sullustan_male_01.iff'):
        appearances.add(iff)
    template.setTemplates(appearances)

    weapons = Vector()
    weapons.add(WeaponTemplate(
        'object/weapon/ranged/carbine/shared_carbine_e5.iff',
        WeaponType.CARBINE, 1.0, 15, 'energy'))
    template.setWeaponTemplateVector(weapons)

    # No special attacks; the NPC falls back to its default ranged shot.
    attacks = Vector()
    template.setDefaultAttack('rangedShot')
    template.setAttacks(attacks)

    core.spawnService.addMobileTemplate('fatigued_rebel_officer_67', template)
    return
qwertyjune/BethSaidaBible | venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/operations.py | 73 | 2319 | from django.db.backends.mysql.base import DatabaseOperations
from django.contrib.gis.db.backends.adapter import WKTAdapter
from django.contrib.gis.db.backends.base import BaseSpatialOperations
class MySQLOperations(DatabaseOperations, BaseSpatialOperations):
    """GeoDjango spatial operations for the MySQL backend.

    MySQL supports only bounding-box (MBR*) comparisons and has no
    spatial transformation support, so every GeoDjango lookup below is
    mapped onto the corresponding MBR function.
    """
    compiler_module = 'django.contrib.gis.db.backends.mysql.compiler'
    mysql = True
    name = 'mysql'
    select = 'AsText(%s)'
    from_wkb = 'GeomFromWKB'
    from_text = 'GeomFromText'

    Adapter = WKTAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    geometry_functions = {
        'bbcontains': 'MBRContains',  # For consistency w/PostGIS API
        'bboverlaps': 'MBROverlaps',  # .. ..
        'contained': 'MBRWithin',  # .. ..
        'contains': 'MBRContains',
        'disjoint': 'MBRDisjoint',
        'equals': 'MBREqual',
        'exact': 'MBREqual',
        'intersects': 'MBRIntersects',
        'overlaps': 'MBROverlaps',
        'same_as': 'MBREqual',
        'touches': 'MBRTouches',
        'within': 'MBRWithin',
    }

    gis_terms = set(geometry_functions) | set(['isnull'])

    def geo_db_type(self, f):
        # MySQL uses the field's own geometry type name as the column type.
        return f.geom_type

    def get_geom_placeholder(self, value, srid):
        """
        The placeholder here has to include MySQL's WKT constructor. Because
        MySQL does not support spatial transformations, there is no need to
        modify the placeholder based on the contents of the given value.
        """
        if hasattr(value, 'expression'):
            return self.get_expression_column(value)
        return '%s(%%s)' % self.from_text

    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        """Build the SQL fragment (and params) for a spatial lookup."""
        geo_col, db_type = lvalue

        func = self.geometry_functions.get(lookup_type, False)
        if func:
            placeholder = self.get_geom_placeholder(value, field.srid)
            return "%s(%s, %s)" % (func, geo_col, placeholder), []

        # TODO: Is this really necessary? MySQL can't handle NULL geometries
        # in its spatial indexes anyways.
        if lookup_type == 'isnull':
            return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []

        raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
| gpl-3.0 |
GitHublong/hue | desktop/core/ext-py/boto-2.38.0/boto/handler.py | 150 | 2382 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
from boto.compat import StringIO
class XmlHandler(xml.sax.ContentHandler):
    """SAX content handler that drives a tree of boto node objects.

    A stack of (tag name, node) pairs is kept; each node may return a
    child node from startElement() to take over handling of that subtree,
    and gets its accumulated character data back in endElement().
    """

    def __init__(self, root_node, connection):
        self.connection = connection
        self.nodes = [('root', root_node)]
        self.current_text = ''

    def startElement(self, name, attrs):
        self.current_text = ''
        current = self.nodes[-1][1]
        child = current.startElement(name, attrs, self.connection)
        # Only push a new frame when the node delegates to a child handler.
        if child is not None:
            self.nodes.append((name, child))

    def endElement(self, name):
        tag, node = self.nodes[-1]
        node.endElement(name, self.current_text, self.connection)
        if tag == name:
            # The delegated subtree is finished; let the node clean up.
            if hasattr(node, 'endNode'):
                node.endNode(self.connection)
            self.nodes.pop()
        self.current_text = ''

    def characters(self, content):
        self.current_text += content
class XmlHandlerWrapper(object):
    """Convenience wrapper wiring an XmlHandler into a SAX parser.

    External general entities are disabled on the parser as a
    defense against XML external-entity expansion.
    """

    def __init__(self, root_node, connection):
        self.handler = XmlHandler(root_node, connection)
        parser = xml.sax.make_parser()
        parser.setContentHandler(self.handler)
        parser.setFeature(xml.sax.handler.feature_external_ges, 0)
        self.parser = parser

    def parseString(self, content):
        return self.parser.parse(StringIO(content))
| apache-2.0 |
anyc/londonlaw | londonlaw/common/map.py | 1 | 23659 | # London Law -- a networked manhunting board game
# Copyright (C) 2003-2004 Paul Pelzl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, Version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Transport-ticket identifiers used throughout the map data below.  Each
# route lists which of these tickets can be used to travel it; BLACK is the
# wildcard ticket that is valid on every route.
TAXI = "taxi"
BUS = "bus"
UNDERGROUND = "underground"
BLACK = "black"
# To find the routes available at location 100, set routes=locToRoutes[100].
# routes[n][0] is one of the locations that location 100 can connect to,
# and routes[n][1] is a tuple of the methods that can be used to reach
# that location. So for example:
#
# routes = locToRoutes[100]
# possible_moves = []
# for route in routes:
# if (has_taxi() and TAXI in route[1]) or
# (has_bus() and BUS in route[1]) or
# (has_underground() and UNDERGROUND in route[1]) or
# has_black():
# possible_moves.append(route[0])
#
# This routine would create a list of all possible locations that the
# player could move to, assuming has_taxi() and friend are properly
# defined.
# Note for proofreading: the algorithm for creating this map was (roughly) to look
# for first taxi routes (clockwise from the route going upward), then bus routes
# and underground routes.
#
# locToRoutes is generated from this by adding the (invisible) black ticket transports
# to all routes in _locToRoutesPartial.
# Hand-entered route table for the full board, indexed by location number
# (index 0 is unused).  Each entry is a tuple of (destination, transports)
# pairs; only explicit tickets are listed here -- the BLACK wildcard is
# added programmatically below to build the public locToRoutes table.
_locToRoutesPartial = \
( None, # locToRoutes[0] has no value; need to access locToRoutes[1] and higher
( (8, (TAXI,)), (9, (TAXI,)), (58, (BUS,)), (46, (BUS, UNDERGROUND)) ), # 001
( (20, (TAXI,)), (10, (TAXI,)) ),
( (11, (TAXI,)), (12, (TAXI,)), (4, (TAXI,)), (22, (BUS,)), (23, (BUS,)) ),
( (3, (TAXI,)), (13, (TAXI,)) ),
( (15, (TAXI,)), (16, (TAXI,)) ), # 005
( (29, (TAXI,)), (7, (TAXI,)) ),
( (6, (TAXI,)), (17, (TAXI,)), (42, (BUS,)) ),
( (1, (TAXI,)), (19, (TAXI,)), (18, (TAXI,)) ),
( (1, (TAXI,)), (19, (TAXI,)), (20, (TAXI,)) ),
( (2, (TAXI,)), (11, (TAXI,)), (34, (TAXI,)), (21, (TAXI,)) ), # 010
( (3, (TAXI,)), (10, (TAXI,)), (22, (TAXI,)) ),
( (3, (TAXI,)), (23, (TAXI,)) ),
( (4, (TAXI,)), (14, (TAXI, BUS)), (24, (TAXI,)), (23, (TAXI, BUS)), (52, (BUS,)),
(89, (UNDERGROUND,)), (67, (UNDERGROUND,)), (46, (UNDERGROUND,)) ),
( (13, (TAXI, BUS)), (15, (TAXI, BUS)), (25, (TAXI,)) ),
( (5, (TAXI,)), (16, (TAXI,)), (28, (TAXI,)), (26, (TAXI,)), (14, (TAXI, BUS)),
(29, (BUS,)), (41, (BUS,)) ), # 015
( (5, (TAXI,)), (29, (TAXI,)), (28, (TAXI,)), (15, (TAXI,)) ),
( (7, (TAXI,)), (30, (TAXI,)), (29, (TAXI,)) ),
( (8, (TAXI,)), (31, (TAXI,)), (43, (TAXI,)) ),
( (8, (TAXI,)), (9, (TAXI,)), (32, (TAXI,)) ),
( (2, (TAXI,)), (9, (TAXI,)), (33, (TAXI,)) ), # 020
( (10, (TAXI,)), (33, (TAXI,)) ),
( (11, (TAXI,)), (23, (TAXI, BUS)), (35, (TAXI,)), (34, (TAXI, BUS)),
(3, (BUS,)), (65, (BUS,)) ),
( (12, (TAXI,)), (13, (TAXI, BUS)), (37, (TAXI,)), (22, (TAXI, BUS)),
(3, (BUS,)), (67, (BUS,)) ),
( (13, (TAXI,)), (38, (TAXI,)), (37, (TAXI,)) ),
( (14, (TAXI,)), (39, (TAXI,)), (38, (TAXI,)) ), # 025
( (15, (TAXI,)), (27, (TAXI,)), (39, (TAXI,)) ),
( (26, (TAXI,)), (28, (TAXI,)), (40, (TAXI,)) ),
( (15, (TAXI,)), (16, (TAXI,)), (41, (TAXI,)), (27, (TAXI,)) ),
( (6, (TAXI,)), (17, (TAXI,)), (42, (TAXI, BUS)), (41, (TAXI, BUS)), (16, (TAXI,)),
(55, (BUS,)), (15, (BUS,)) ),
( (17, (TAXI,)), (42, (TAXI,)) ), # 030
( (18, (TAXI,)), (44, (TAXI,)), (43, (TAXI,)) ),
( (19, (TAXI,)), (33, (TAXI,)), (45, (TAXI,)), (44, (TAXI,)) ),
( (20, (TAXI,)), (21, (TAXI,)), (46, (TAXI,)), (32, (TAXI,)) ),
( (10, (TAXI,)), (22, (TAXI, BUS)), (48, (TAXI,)), (47, (TAXI,)), (63, (BUS,)),
(46, (BUS,)) ),
( (22, (TAXI,)), (36, (TAXI,)), (65, (TAXI,)), (48, (TAXI,)) ), # 035
( (37, (TAXI,)), (49, (TAXI,)), (35, (TAXI,)) ),
( (23, (TAXI,)), (24, (TAXI,)), (50, (TAXI,)), (36, (TAXI,)) ),
( (24, (TAXI,)), (25, (TAXI,)), (51, (TAXI,)), (50, (TAXI,)) ),
( (26, (TAXI,)), (52, (TAXI,)), (51, (TAXI,)), (25, (TAXI,)) ),
( (27, (TAXI,)), (41, (TAXI,)), (53, (TAXI,)), (52, (TAXI,)) ), # 040
( (28, (TAXI,)), (29, (TAXI, BUS)), (54, (TAXI,)), (40, (TAXI,)),
(15, (BUS,)), (87, (BUS,)), (52, (BUS,)) ),
( (30, (TAXI,)), (56, (TAXI,)), (72, (TAXI, BUS)), (29, (TAXI, BUS)),
(7, (BUS,)) ),
( (18, (TAXI,)), (31, (TAXI,)), (57, (TAXI,)) ),
( (32, (TAXI,)), (58, (TAXI,)), (31, (TAXI,)) ),
( (32, (TAXI,)), (46, (TAXI,)), (60, (TAXI,)), (59, (TAXI,)), # 045
(58, (TAXI,)) ),
( (33, (TAXI,)), (47, (TAXI,)), (61, (TAXI,)), (45, (TAXI,)), (34, (BUS,)),
(78, (BUS,)), (58, (BUS,)), (1, (BUS, UNDERGROUND)), (13, (UNDERGROUND,)),
(79, (UNDERGROUND,)), (74, (UNDERGROUND,)) ),
( (34, (TAXI,)), (62, (TAXI,)), (46, (TAXI,)) ),
( (34, (TAXI,)), (35, (TAXI,)), (63, (TAXI,)), (62, (TAXI,)) ),
( (36, (TAXI,)), (50, (TAXI,)), (66, (TAXI,)) ),
( (37, (TAXI,)), (38, (TAXI,)), (49, (TAXI,)) ), # 050
( (38, (TAXI,)), (39, (TAXI,)), (52, (TAXI,)), (68, (TAXI,)), (67, (TAXI,)) ),
( (39, (TAXI,)), (40, (TAXI,)), (69, (TAXI,)), (51, (TAXI,)), (13, (BUS,)),
(41, (BUS,)), (86, (BUS,)), (67, (BUS,)) ),
( (40, (TAXI,)), (54, (TAXI,)), (69, (TAXI,)) ),
( (41, (TAXI,)), (55, (TAXI,)), (70, (TAXI,)), (53, (TAXI,)) ),
( (71, (TAXI,)), (54, (TAXI,)), (29, (BUS,)), (89, (BUS,)) ), # 055
( (42, (TAXI,)), (91, (TAXI,)) ),
( (43, (TAXI,)), (58, (TAXI,)), (73, (TAXI,)) ),
( (45, (TAXI,)), (59, (TAXI,)), (75, (TAXI,)), (74, (TAXI, BUS)), (57, (TAXI,)),
(44, (TAXI,)), (46, (BUS,)), (77, (BUS,)), (1, (BUS,)) ),
( (45, (TAXI,)), (76, (TAXI,)), (75, (TAXI,)), (58, (TAXI,)) ),
( (45, (TAXI,)), (61, (TAXI,)), (76, (TAXI,)) ), # 060
( (46, (TAXI,)), (62, (TAXI,)), (78, (TAXI,)), (76, (TAXI,)), (60, (TAXI,)) ),
( (47, (TAXI,)), (48, (TAXI,)), (79, (TAXI,)), (61, (TAXI,)) ),
( (48, (TAXI,)), (64, (TAXI,)), (80, (TAXI,)), (79, (TAXI, BUS)),
(34, (BUS,)), (65, (BUS,)), (100, (BUS,)) ),
( (65, (TAXI,)), (81, (TAXI,)), (63, (TAXI,)) ),
( (35, (TAXI,)), (66, (TAXI,)), (82, (TAXI, BUS)), (64, (TAXI,)), # 065
(22, (BUS,)), (67, (BUS,)), (63, (BUS,)) ),
( (49, (TAXI,)), (67, (TAXI,)), (82, (TAXI,)), (65, (TAXI,)) ),
( (51, (TAXI,)), (68, (TAXI,)), (84, (TAXI,)), (66, (TAXI,)), (23, (BUS,)),
(52, (BUS,)), (102, (BUS,)), (82, (BUS,)), (65, (BUS,)),
(13, (UNDERGROUND,)), (89, (UNDERGROUND,)), (111, (UNDERGROUND,)),
(79, (UNDERGROUND,)) ),
( (51, (TAXI,)), (69, (TAXI,)), (85, (TAXI,)), (67, (TAXI,)) ),
( (52, (TAXI,)), (53, (TAXI,)), (86, (TAXI,)), (68, (TAXI,)) ),
( (54, (TAXI,)), (71, (TAXI,)), (87, (TAXI,)) ), # 070
( (55, (TAXI,)), (72, (TAXI,)), (89, (TAXI,)), (70, (TAXI,)) ),
( (42, (TAXI, BUS)), (91, (TAXI,)), (90, (TAXI,)), (71, (TAXI,)),
(107, (BUS,)), (105, (BUS,)) ),
( (57, (TAXI,)), (74, (TAXI,)), (92, (TAXI,)) ),
( (58, (TAXI, BUS)), (75, (TAXI,)), (92, (TAXI,)), (73, (TAXI,)),
(94, (BUS,)), (46, (UNDERGROUND,)) ),
( (58, (TAXI,)), (59, (TAXI,)), (94, (TAXI,)), (74, (TAXI,)) ), # 075
( (59, (TAXI,)), (60, (TAXI,)), (61, (TAXI,)), (77, (TAXI,)) ),
( (78, (TAXI, BUS)), (96, (TAXI,)), (95, (TAXI,)), (76, (TAXI,)),
(124, (BUS,)), (94, (BUS,)), (58, (BUS,)) ),
( (61, (TAXI,)), (79, (TAXI, BUS)), (97, (TAXI,)), (77, (TAXI, BUS)),
(46, (BUS,)) ),
( (62, (TAXI,)), (63, (TAXI, BUS)), (98, (TAXI,)), (78, (TAXI, BUS)),
(46, (UNDERGROUND,)), (67, (UNDERGROUND,)), (111, (UNDERGROUND,)),
(93, (UNDERGROUND,)) ),
( (63, (TAXI,)), (100, (TAXI,)), (99, (TAXI,)) ), # 080
( (64, (TAXI,)), (82, (TAXI,)), (100, (TAXI,)) ),
( (65, (TAXI, BUS)), (66, (TAXI,)), (67, (BUS,)), (101, (TAXI,)), (140, (BUS,)),
(81, (TAXI,)), (100, (BUS,)) ),
( (102, (TAXI,)), (101, (TAXI,)) ),
( (67, (TAXI,)), (85, (TAXI,)) ),
( (68, (TAXI,)), (103, (TAXI,)), (84, (TAXI,)) ), # 085
( (69, (TAXI,)), (52, (BUS,)), (87, (BUS,)), (104, (TAXI,)), (116, (BUS,)),
(103, (TAXI,)), (102, (BUS,)) ),
( (70, (TAXI,)), (41, (BUS,)), (88, (TAXI,)), (105, (BUS,)), (86, (BUS,)) ),
( (89, (TAXI,)), (117, (TAXI,)), (87, (TAXI,)) ),
( (71, (TAXI,)), (55, (BUS,)), (13, (UNDERGROUND,)), (105, (TAXI, BUS)),
(128, (UNDERGROUND,)), (88, (TAXI,)), (140, (UNDERGROUND,)),
(67, (UNDERGROUND,)) ),
( (72, (TAXI,)), (91, (TAXI,)), (105, (TAXI,)) ), # 090
( (56, (TAXI,)), (107, (TAXI,)), (105, (TAXI,)), (90, (TAXI,)), (72, (TAXI,)) ),
( (73, (TAXI,)), (74, (TAXI,)), (93, (TAXI,)) ),
( (92, (TAXI,)), (94, (TAXI, BUS)), (79, (UNDERGROUND,)) ),
( (74, (BUS,)), (75, (TAXI,)), (95, (TAXI,)), (77, (BUS,)), (93, (TAXI, BUS)) ),
( (77, (TAXI,)), (122, (TAXI,)), (94, (TAXI,)) ), # 095
( (77, (TAXI,)), (97, (TAXI,)), (109, (TAXI,)) ),
( (78, (TAXI,)), (98, (TAXI,)), (109, (TAXI,)), (96, (TAXI,)) ),
( (79, (TAXI,)), (99, (TAXI,)), (110, (TAXI,)), (97, (TAXI,)) ),
( (80, (TAXI,)), (112, (TAXI,)), (110, (TAXI,)), (98, (TAXI,)) ),
( (81, (TAXI,)), (82, (BUS,)), (101, (TAXI,)), (113, (TAXI,)), # 100
(112, (TAXI,)), (111, (BUS,)), (80, (TAXI,)), (63, (BUS,)) ),
( (83, (TAXI,)), (114, (TAXI,)), (100, (TAXI,)), (82, (TAXI,)) ),
( (67, (BUS,)), (103, (TAXI,)), (86, (BUS,)), (115, (TAXI,)), (127, (BUS,)),
(83, (TAXI,)) ),
( (85, (TAXI,)), (86, (TAXI,)), (102, (TAXI,)) ),
( (86, (TAXI,)), (116, (TAXI,)) ),
( (90, (TAXI,)), (72, (BUS,)), (91, (TAXI,)), (106, (TAXI,)), # 105
(107, (BUS,)), (108, (TAXI, BUS)), (87, (BUS,)), (89, (TAXI, BUS)) ),
( (107, (TAXI,)), (105, (TAXI,)) ),
( (91, (TAXI,)), (72, (BUS,)), (119, (TAXI,)), (161, (BUS,)),
(106, (TAXI,)), (105, (BUS,)) ),
( (105, (TAXI, BUS)), (119, (TAXI,)), (135, (BUS,)), (117, (TAXI,)),
(116, (BUS,)), (115, (BLACK,)) ),
( (97, (TAXI,)), (110, (TAXI,)), (124, (TAXI,)), (96, (TAXI,)) ),
( (99, (TAXI,)), (111, (TAXI,)), (109, (TAXI,)), (98, (TAXI,)) ), # 110
( (112, (TAXI,)), (100, (BUS,)), (67, (UNDERGROUND,)),
(153, (UNDERGROUND,)), (124, (TAXI, BUS)), (163, (UNDERGROUND,)),
(110, (TAXI,)), (79, (UNDERGROUND,)) ),
( (100, (TAXI,)), (125, (TAXI,)), (111, (TAXI,)), (99, (TAXI,)) ),
( (114, (TAXI,)), (125, (TAXI,)), (100, (TAXI,)) ),
( (101, (TAXI,)), (115, (TAXI,)), (126, (TAXI,)),
(132, (TAXI,)), (131, (TAXI,)), (113, (TAXI,)) ),
( (102, (TAXI,)), (127, (TAXI,)), (126, (TAXI,)), (114, (TAXI,)), # 115
(108, (BLACK,)), (157, (BLACK,)) ),
( (104, (TAXI,)), (86, (BUS,)), (117, (TAXI,)), (108, (BUS,)),
(118, (TAXI,)), (142, (BUS,)), (127, (TAXI, BUS)) ),
( (88, (TAXI,)), (108, (TAXI,)), (129, (TAXI,)), (116, (TAXI,)) ),
( (116, (TAXI,)), (129, (TAXI,)), (142, (TAXI,)), (134, (TAXI,)) ),
( (107, (TAXI,)), (136, (TAXI,)), (108, (TAXI,)) ),
( (121, (TAXI,)), (144, (TAXI,)) ), # 120
( (122, (TAXI,)), (145, (TAXI,)), (120, (TAXI,)) ),
( (95, (TAXI,)), (123, (TAXI, BUS)), (146, (TAXI,)),
(121, (TAXI,)), (144, (BUS,)) ),
( (124, (TAXI, BUS)), (149, (TAXI,)), (165, (BUS,)), (148, (TAXI,)),
(137, (TAXI,)), (144, (BUS,)), (122, (TAXI, BUS)) ),
( (109, (TAXI,)), (111, (TAXI, BUS)), (130, (TAXI,)), (138, (TAXI,)),
(153, (BUS,)), (123, (TAXI, BUS)), (77, (BUS,)) ),
( (113, (TAXI,)), (131, (TAXI,)), (112, (TAXI,)) ), # 125
( (115, (TAXI,)), (127, (TAXI,)), (140, (TAXI,)), (114, (TAXI,)) ),
( (116, (TAXI, BUS)), (134, (TAXI,)), (133, (TAXI, BUS)),
(126, (TAXI,)), (115, (TAXI,)), (102, (BUS,)) ),
( (143, (TAXI,)), (135, (BUS,)), (89, (UNDERGROUND,)), (160, (TAXI,)),
(161, (BUS,)), (188, (TAXI,)), (199, (BUS,)), (172, (TAXI,)),
(187, (BUS,)), (185, (UNDERGROUND,)), (142, (TAXI, BUS)),
(140, (UNDERGROUND,)) ),
( (117, (TAXI,)), (135, (TAXI,)), (143, (TAXI,)), (142, (TAXI,)),
(118, (TAXI,)) ),
( (131, (TAXI,)), (139, (TAXI,)), (124, (TAXI,)) ), # 130
( (114, (TAXI,)), (130, (TAXI,)), (125, (TAXI,)) ),
( (114, (TAXI,)), (140, (TAXI,)) ),
( (127, (TAXI, BUS)), (141, (TAXI,)), (157, (BUS,)), (140, (TAXI, BUS)) ),
( (118, (TAXI,)), (142, (TAXI,)), (141, (TAXI,)), (127, (TAXI,)) ),
( (108, (BUS,)), (136, (TAXI,)), (161, (TAXI, BUS)), (143, (TAXI,)), # 135
(128, (BUS,)), (129, (TAXI,)) ),
( (119, (TAXI,)), (162, (TAXI,)), (135, (TAXI,)) ),
( (123, (TAXI,)), (147, (TAXI,)) ),
( (152, (TAXI,)), (150, (TAXI,)), (124, (TAXI,)) ),
( (130, (TAXI,)), (140, (TAXI,)), (154, (TAXI,)), (153, (TAXI,)) ),
( (132, (TAXI,)), (82, (BUS,)), (126, (TAXI,)), (89, (UNDERGROUND,)), # 140
(133, (TAXI, BUS)), (128, (UNDERGROUND,)), (156, (TAXI, BUS)),
(154, (TAXI, BUS)), (153, (UNDERGROUND,)), (139, (TAXI,)) ),
( (134, (TAXI,)), (142, (TAXI,)), (158, (TAXI,)), (133, (TAXI,)) ),
( (118, (TAXI,)), (116, (BUS,)), (129, (TAXI,)), (143, (TAXI,)),
(128, (TAXI, BUS)), (158, (TAXI,)), (157, (BUS,)), (141, (TAXI,)),
(134, (TAXI,)) ),
( (135, (TAXI,)), (160, (TAXI,)), (128, (TAXI,)), (142, (TAXI,)),
(129, (TAXI,)) ),
( (120, (TAXI,)), (122, (BUS,)), (145, (TAXI,)), (123, (BUS,)),
(163, (BUS,)), (177, (TAXI,)) ),
( (121, (TAXI,)), (146, (TAXI,)), (144, (TAXI,)) ), # 145
( (122, (TAXI,)), (147, (TAXI,)), (163, (TAXI,)), (145, (TAXI,)) ),
( (137, (TAXI,)), (164, (TAXI,)), (146, (TAXI,)) ),
( (123, (TAXI,)), (149, (TAXI,)), (164, (TAXI,)) ),
( (123, (TAXI,)), (150, (TAXI,)), (165, (TAXI,)), (148, (TAXI,)) ),
( (138, (TAXI,)), (151, (TAXI,)), (149, (TAXI,)) ), # 150
( (152, (TAXI,)), (166, (TAXI,)), (165, (TAXI,)), (150, (TAXI,)) ),
( (153, (TAXI,)), (151, (TAXI,)), (138, (TAXI,)) ),
( (139, (TAXI,)), (111, (UNDERGROUND,)), (154, (TAXI, BUS)),
(140, (UNDERGROUND,)), (167, (TAXI,)), (184, (BUS,)),
(185, (UNDERGROUND,)), (166, (TAXI,)), (180, (BUS,)),
(163, (UNDERGROUND,)), (152, (TAXI,)), (124, (BUS,)) ),
( (140, (TAXI, BUS)), (155, (TAXI,)), (156, (BUS,)), (153, (TAXI, BUS)),
(139, (TAXI,)) ),
( (156, (TAXI,)), (168, (TAXI,)), (167, (TAXI,)), (154, (TAXI,)) ), # 155
( (140, (TAXI, BUS)), (157, (TAXI, BUS)), (169, (TAXI,)),
(184, (BUS,)), (155, (TAXI,)), (154, (BUS,)) ),
( (133, (BUS,)), (158, (TAXI,)), (142, (BUS,)), (170, (TAXI,)),
(185, (BUS,)), (156, (TAXI, BUS)), (115, (BLACK,)), (194, (BLACK,)) ),
( (141, (TAXI,)), (142, (TAXI,)), (159, (TAXI,)), (157, (TAXI,)) ),
( (158, (TAXI,)), (172, (TAXI,)), (198, (TAXI,)), (186, (TAXI,)),
(170, (TAXI,)) ),
( (143, (TAXI,)), (161, (TAXI,)), (173, (TAXI,)), (128, (TAXI,)) ), # 160
( (107, (BUS,)), (174, (TAXI,)), (199, (BUS,)), (160, (TAXI,)),
(128, (BUS,)), (135, (TAXI, BUS)) ),
( (175, (TAXI,)), (136, (TAXI,)) ),
( (146, (TAXI,)), (111, (UNDERGROUND,)), (153, (UNDERGROUND,)),
(191, (BUS,)), (177, (TAXI,)), (176, (BUS,)), (144, (BUS,)) ),
( (147, (TAXI,)), (148, (TAXI,)), (179, (TAXI,)), (178, (TAXI,)) ),
( (149, (TAXI,)), (123, (BUS,)), (151, (TAXI,)), (180, (TAXI, BUS)), # 165
(179, (TAXI,)), (191, (BUS,)) ),
( (153, (TAXI,)), (183, (TAXI,)), (181, (TAXI,)), (151, (TAXI,)) ),
( (155, (TAXI,)), (168, (TAXI,)), (183, (TAXI,)), (153, (TAXI,)) ),
( (155, (TAXI,)), (184, (TAXI,)), (167, (TAXI,)) ),
( (156, (TAXI,)), (184, (TAXI,)) ),
( (157, (TAXI,)), (159, (TAXI,)), (185, (TAXI,)) ), # 170
( (173, (TAXI,)), (175, (TAXI,)), (199, (TAXI,)) ),
( (128, (TAXI,)), (187, (TAXI,)), (159, (TAXI,)) ),
( (160, (TAXI,)), (174, (TAXI,)), (171, (TAXI,)), (188, (TAXI,)) ),
( (175, (TAXI,)), (173, (TAXI,)), (161, (TAXI,)) ),
( (162, (TAXI,)), (171, (TAXI,)), (174, (TAXI,)) ), # 175
( (177, (TAXI,)), (163, (BUS,)), (189, (TAXI,)), (190, (BUS,)) ),
( (144, (TAXI,)), (163, (TAXI,)), (176, (TAXI,)) ),
( (164, (TAXI,)), (191, (TAXI,)), (189, (TAXI,)) ),
( (165, (TAXI,)), (191, (TAXI,)), (164, (TAXI,)) ),
( (165, (TAXI, BUS)), (181, (TAXI,)), (153, (BUS,)), (193, (TAXI,)), # 180
(184, (BUS,)), (190, (BUS,)) ),
( (166, (TAXI,)), (182, (TAXI,)), (193, (TAXI,)), (180, (TAXI,)) ),
( (183, (TAXI,)), (195, (TAXI,)), (181, (TAXI,)) ),
( (167, (TAXI,)), (196, (TAXI,)), (182, (TAXI,)), (166, (TAXI,)) ),
( (169, (TAXI,)), (156, (BUS,)), (185, (TAXI, BUS)), (197, (TAXI,)),
(196, (TAXI,)), (180, (BUS,)), (168, (TAXI,)), (153, (BUS,)) ),
( (170, (TAXI,)), (157, (BUS,)), (186, (TAXI,)), (187, (BUS,)), # 185
(128, (UNDERGROUND,)), (184, (TAXI, BUS)), (153, (UNDERGROUND,)) ),
( (159, (TAXI,)), (198, (TAXI,)), (185, (TAXI,)) ),
( (172, (TAXI,)), (128, (BUS,)), (188, (TAXI,)), (198, (TAXI,)),
(185, (BUS,)) ),
( (128, (TAXI,)), (173, (TAXI,)), (199, (TAXI,)), (187, (TAXI,)) ),
( (178, (TAXI,)), (190, (TAXI,)), (176, (TAXI,)) ),
( (191, (TAXI, BUS)), (192, (TAXI,)), (180, (BUS,)), # 190
(189, (TAXI,)), (176, (BUS,)) ),
( (179, (TAXI,)), (165, (BUS,)), (192, (TAXI,)), (190, (TAXI, BUS)),
(178, (TAXI,)), (163, (BUS,)) ),
( (191, (TAXI,)), (194, (TAXI,)), (190, (TAXI,)) ),
( (181, (TAXI,)), (194, (TAXI,)), (180, (TAXI,)) ),
( (195, (TAXI,)), (192, (TAXI,)), (193, (TAXI,)), (157, (BLACK,)) ),
( (182, (TAXI,)), (197, (TAXI,)), (194, (TAXI,)) ), # 195
( (183, (TAXI,)), (184, (TAXI,)), (197, (TAXI,)) ),
( (196, (TAXI,)), (184, (TAXI,)), (195, (TAXI,)) ),
( (159, (TAXI,)), (187, (TAXI,)), (199, (TAXI,)), (186, (TAXI,)) ),
( (188, (TAXI,)), (128, (BUS,)), (171, (TAXI,)), (161, (BUS,)),
(198, (TAXI,)) )
)
# precomputed maximum distance for this map
MAX_DISTANCE = 10

# Build the public map by adding the (invisible) BLACK ticket to every route
# of _locToRoutesPartial that does not already list it: Mr. X's black
# tickets are valid on any route, so this keeps the lookup logic uniform.
# Nested generator expressions avoid the quadratic cost of growing a tuple
# with `+=` inside a loop (each `+=` copied the whole tuple so far).
locToRoutes = (None,) + tuple(
    tuple(
        (dest, transports if BLACK in transports else transports + (BLACK,))
        for dest, transports in routes
    )
    for routes in _locToRoutesPartial[1:]
)
# Test a map to see if it is self-consistent--i.e., check that routes from A to B also
# exist from B to A. More effective than proofreading...
def checkMap(m):
    """Check that a map is self-consistent and print any problems found.

    m -- a map in the same format as locToRoutes (index 0 unused): for each
         location, a sequence of (destination, transports) routes.

    A route from A to B must be mirrored by a route from B to A using the
    same transports.  Routes pointing at locations beyond len(m) cannot be
    checked; their presence marks the map as "Incomplete" (partial).

    Returns True when no inconsistency was found (added for convenience;
    the original version returned None and reported only via stdout).
    """
    passed = True
    partial = False
    for i in range(1, len(m)):
        # All prints use the single-argument parenthesized form so this
        # works on both Python 2 and Python 3.
        print("testing location " + str(i))
        routes = m[i]
        for route in routes:
            if route[0] < len(m):
                destRoutes = m[route[0]]
                matched = False
                for destRoute in destRoutes:
                    if destRoute[0] == i:
                        matched = True
                        if destRoute[1] != route[1]:
                            # Reverse route exists but uses different tickets.
                            print("Error at location "+str(i)+": route to " + str(route[0]) +
                                  " is not self-consistent.")
                            passed = False
                        break
                if not matched:
                    print("Error at location "+str(i)+": missing route from "+str(route[0])+".")
                    passed = False
            else:
                # Route leads outside this (partial) map; cannot verify it.
                partial = True
    print("----------------------------------------------------------------------------------")
    cstr = "Incomplete" if partial else "Complete"
    if passed:
        print(cstr + " map is self-consistent.")
    else:
        print("Error: "+cstr+" map is not self-consistent.")
    return passed
# Generate a random map with valid connections, for testing purposes.
# Note that is uses lists, because they are mutable and easier to work with.
# The real map ought to be done with tuples.
#def getRandomMap(SIZE):
# transMethods = [TAXI, BUS, UNDERGROUND, BLACK]
#
# # Generate outgoing routes from all locations
# map = [None]
# for i in range(1,SIZE+1):
# routes = []
# destList = [] # throwaway counter variable
# for routeNum in range(random.randrange(1,4)):
# destination = random.randrange(1, SIZE+1)
# while destination in destList or destination == i:
# destination = random.randrange(1, SIZE+1)
# transports = []
# for transNum in range(random.randrange(1,4)):
# dummy = random.choice(transMethods)
# if dummy not in transports:
# transports.append(dummy)
# routes.append([destination, transports])
# destList.append(destination)
# map.append(routes)
#
# # Now make the map consistent by adding the reverse paths when necessary
# for i in range(1,SIZE+1):
# routes = map[i]
# for routeNum in range(len(routes)):
# destRoutes = map[routes[routeNum][0]]
# matchRoute = None
# for destRoute in destRoutes:
# if destRoute[0] == i:
# matchRoute = destRoute
# break
# if matchRoute:
# routes[routeNum][1] = matchRoute[1][:]
# else:
# destRoutes.append(routes[routeNum])
# # map is mutable, so it gets changed simply by changing 'routes'.
#
# return map
| gpl-2.0 |
LTHeaven/PokemonGo-Map | pogom/utils.py | 1 | 4641 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import getpass
import argparse
import re
import uuid
import os
import json
from datetime import datetime, timedelta
from . import config
from exceptions import APIKeyException
def parse_unicode(bytestring):
    """Decode a command-line byte string using the filesystem encoding."""
    encoding = sys.getfilesystemencoding()
    return bytestring.decode(encoding)
def get_args():
    """Build and parse the scanner's command-line arguments.

    Prompts interactively for the password (via getpass) when -p/--password
    was not supplied.  Returns the parsed argparse namespace.
    """
    # The add_argument lines below deliberately exceed the usual line length
    # so each flag reads as one row of a table.
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--auth-service', type=str.lower, help='Auth Service', default='ptc')
    parser.add_argument('-u', '--username', help='Username', required=True)
    parser.add_argument('-p', '--password', help='Password', required=False)
    parser.add_argument('-l', '--location', type=parse_unicode, help='Location, can be an address or coordinates', required=True)
    parser.add_argument('-st', '--step-limit', help='Steps', required=True, type=int)
    parser.add_argument('-H', '--host', help='Set web server listening host', default='127.0.0.1')
    parser.add_argument('-P', '--port', type=int, help='Set web server listening port', default=5000)
    parser.add_argument('-L', '--locale', help='Locale for Pokemon names: default en, check'
                        'locale folder for more options', default='en')
    parser.add_argument('-c', '--china', help='Coordinates transformer for China', action='store_true')
    parser.add_argument('-d', '--debug', help='Debug Mode', action='store_true')
    parser.add_argument('-m', '--mock', help='Mock mode. Starts the web server but not the background thread.', action='store_true', default=False)
    parser.add_argument('-ns', '--no-server', help='No-Server Mode. Starts the searcher but not the Webserver.', action='store_true', default=False, dest='no_server')
    parser.add_argument('-k', '--google-maps-key', help='Google Maps Javascript API Key', default=None, dest='gmaps_key')
    # NOTE(review): this sets an uppercase DEBUG default that is distinct
    # from the --debug flag above; looks vestigial -- confirm before removing.
    parser.set_defaults(DEBUG=False)

    args = parser.parse_args()
    if args.password is None:
        args.password = getpass.getpass()

    return args
def insert_mock_data():
    """Insert fake Pokemon, Pokestops and Gyms around the configured origin.

    Lets the web frontend be exercised without talking to the real API.
    Locations come from the normal search-step generator, so the mock data
    is spread out the same way real scan results would be.
    """
    num_pokemon = 6
    num_pokestop = 6
    num_gym = 6

    # Imported locally, as in the original -- presumably to avoid an import
    # cycle with the models/search modules (TODO confirm).
    from .models import Pokemon, Pokestop, Gym
    from .search import generate_location_steps

    latitude, longitude = float(config['ORIGINAL_LATITUDE']), float(config['ORIGINAL_LONGITUDE'])

    locations = list(generate_location_steps((latitude, longitude), num_pokemon))
    disappear_time = datetime.now() + timedelta(hours=1)
    detect_time = datetime.now()

    # `range` (not the Python-2-only `xrange`) keeps all three loops
    # consistent and working on both Python 2 and 3.
    for i in range(num_pokemon):
        Pokemon.create(encounter_id=uuid.uuid4(),
                       spawnpoint_id='sp{}'.format(i),
                       pokemon_id=(i + 1) % 150,
                       latitude=locations[i][0],
                       longitude=locations[i][1],
                       disappear_time=disappear_time,
                       detect_time=detect_time)

    for i in range(num_pokestop):
        Pokestop.create(pokestop_id=uuid.uuid4(),
                        enabled=True,
                        latitude=locations[i + num_pokemon][0],
                        longitude=locations[i + num_pokemon][1],
                        last_modified=datetime.now(),
                        # Lure every other pokestop.
                        lure_expiration=disappear_time if (i % 2 == 0) else None
                        )

    for i in range(num_gym):
        Gym.create(gym_id=uuid.uuid4(),
                   team_id=i % 3,
                   guard_pokemon_id=(i + 1) % 150,
                   latitude=locations[i + num_pokemon + num_pokestop][0],
                   longitude=locations[i + num_pokemon + num_pokestop][1],
                   last_modified=datetime.now(),
                   enabled=True,
                   gym_points=1000
                   )
def get_pokemon_name(pokemon_id):
    """Return the localized name for ``pokemon_id``.

    The locale file is read once and cached on the function object
    (``get_pokemon_name.names``); subsequent calls are dictionary lookups.
    Raises KeyError if the id is not present in the locale file.
    """
    if not hasattr(get_pokemon_name, 'names'):
        file_path = os.path.join(
            config['ROOT_PATH'],
            config['LOCALES_DIR'],
            'pokemon.{}.json'.format(config['LOCALE']))

        with open(file_path, 'r') as f:
            # json.load reads straight from the file object; no need to
            # slurp the whole file into memory first.
            get_pokemon_name.names = json.load(f)

    return get_pokemon_name.names[str(pokemon_id)]
def load_credentials(filepath):
    """Load ``credentials.json`` from directory ``filepath`` and return it.

    Raises APIKeyException when no Google Maps Javascript API key is set.
    """
    # os.path.join handles trailing separators correctly, unlike the old
    # manual `filepath + os.path.sep + ...` concatenation; also avoid
    # shadowing the (Python 2) builtin name `file`.
    with open(os.path.join(filepath, 'credentials.json')) as f:
        creds = json.load(f)
    if not creds['gmaps_key']:
        raise APIKeyException(
            "No Google Maps Javascript API key entered in credentials.json file!"
            " Please take a look at the wiki for instructions on how to generate this key,"
            " then add that key to the file!")
    return creds
| mit |
xq262144/hue | desktop/core/ext-py/tablib-0.10.0/tablib/compat.py | 10 | 1213 | # -*- coding: utf-8 -*-
"""
tablib.compat
~~~~~~~~~~~~~
Tablib compatiblity module.
"""
import sys
is_py3 = (sys.version_info[0] > 2)
try:
from collections import OrderedDict
except ImportError:
from tablib.packages.ordereddict import OrderedDict
if is_py3:
from io import BytesIO
import tablib.packages.xlwt3 as xlwt
import tablib.packages.xlrd3 as xlrd
from tablib.packages.xlrd3.biffh import XLRDError
from tablib.packages import markup3 as markup
from tablib.packages import openpyxl3 as openpyxl
from tablib.packages.odf3 import opendocument, style, text, table
import csv
from io import StringIO
# py3 mappings
unicode = str
bytes = bytes
basestring = str
else:
from cStringIO import StringIO as BytesIO
from cStringIO import StringIO
#import tablib.packages.xlwt as xlwt
import tablib.packages.xlrd as xlrd
from tablib.packages.xlrd.biffh import XLRDError
from tablib.packages import markup
from itertools import ifilter
from tablib.packages import openpyxl
from tablib.packages.odf import opendocument, style, text, table
from tablib.packages import unicodecsv as csv
unicode = unicode
| apache-2.0 |
ra3xdh/qucs_s | qucs/python/qucs-py/qucs.py | 1 | 2643 | #!/usr/bin/python
# QUCS->Python+pylab converter ver.1.0
# Public domain code, writter by Wojciech M. Zabolotny
# ( wzab<at>ise.pw.edu.pl ) 20.01.2009
import sys
import pylab
class qucs_dep_var:
    """One dependent variable: its value array plus the ordered names of
    the independent variables that index it."""

    def __init__(self, vals, ind_vars):
        self.val = vals
        self.ind_vars = ind_vars
class qucs_data:
    """Parsed contents of a QUCS dataset file.

    indeps -- dict mapping independent-variable names to 1-D complex arrays
    deps   -- dict mapping dependent-variable names to qucs_dep_var objects;
              each value array is reshaped over its independent variables in
              column-major (Fortran) order, matching the file layout.
    """

    def __init__(self, fname=""):
        self.indeps = {}
        self.deps = {}
        if fname != "":
            f = open(fname, "rb")
            try:
                # NOTE(review): the file is opened in binary mode but lines
                # are compared against str literals, matching the original
                # Python 2 behaviour -- confirm before running on Python 3.
                l = f.readline().strip()
                # In the first line check whether we have a qucs data file.
                if l != "<Qucs Dataset 0.0.18>":
                    # The original raised a bare string here, which is a
                    # TypeError on Python >= 2.6; raise a real exception.
                    raise ValueError("This is not a qucs data file!")
                # Read variable declarations until EOF (readline() yields
                # an empty string) or a blank line.
                while True:
                    l = f.readline().strip()
                    if l == "":
                        break
                    if l[0:6] == "<indep":
                        # Declaration of an independent variable.
                        self.create_indep(l[6:-1], f)
                    elif l[0:4] == "<dep":
                        # Declaration of a dependent variable.
                        self.create_dep(l[4:-1], f)
            finally:
                # Close the file even when the dataset is malformed.
                f.close()

    def conv_dta(self, line):
        """Convert one QUCS number literal to a Python complex.

        QUCS may place the 'j' before the imaginary magnitude; stripping
        any 'j' and re-appending it at the end makes the string acceptable
        to complex().
        """
        nline = line.replace("j", "")
        if len(line) != len(nline):
            nline = nline + "j"
        return complex(nline)

    def create_dep(self, ldef, infile):
        """Read one dependent variable; ldef is "<name> <indep1> <indep2>..."."""
        vnames = ldef.split()
        # The number of samples is the product of the lengths of all the
        # independent variables this value depends on.
        dims = []
        vsize = 1
        for iname in vnames[1:]:
            vs = len(self.indeps[iname])
            dims.append(vs)
            vsize *= vs
        # Reserve the data buffer, then fill it one sample per line.
        dta = pylab.zeros(vsize, complex)
        for i in range(vsize):
            l = infile.readline().strip()
            dta[i] = self.conv_dta(l)
        # The declaration must be terminated by a matching close tag.
        l = infile.readline().strip()
        if l != "</dep>":
            raise ValueError("Wrong syntax in line: " + l)
        # Reshape the flat buffer into a multi-dimensional array; the file
        # stores samples in column-major order ('F' is the spelling modern
        # numpy accepts, replacing the old 'FORTRAN' alias).
        dta = pylab.reshape(dta, dims, 'F')
        self.deps[vnames[0]] = qucs_dep_var(dta, vnames[1:])

    def create_indep(self, ldef, infile):
        """Read one independent variable; ldef is "<name> <length>"."""
        [vname, vsize] = ldef.split()
        vsize = int(vsize)
        # Reserve the data buffer, then fill it one sample per line.
        dta = pylab.zeros(vsize, complex)
        for i in range(vsize):
            l = infile.readline().strip()
            dta[i] = self.conv_dta(l)
        # The declaration must be terminated by a matching close tag.
        l = infile.readline().strip()
        if l != "</indep>":
            raise ValueError("Wrong syntax in line: " + l)
        self.indeps[vname] = dta
| gpl-2.0 |
maciekcc/tensorflow | tensorflow/python/framework/common_shapes_test.py | 41 | 9191 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class CommonShapesTest(test_util.TensorFlowTestCase):
# Asserts that we get the same result with numpy (for known shapes), and that
# the order of arguments does not matter (i.e., broadcasting is reflexive).
def _assert_incompatible_broadcast(self, shape1, shape2):
if shape1.dims is not None and shape2.dims is not None:
zeros1 = np.zeros(shape1.as_list())
zeros2 = np.zeros(shape2.as_list())
with self.assertRaises(ValueError):
np.broadcast(zeros1, zeros2)
with self.assertRaises(ValueError):
np.broadcast(zeros2, zeros1)
with self.assertRaises(ValueError):
common_shapes.broadcast_shape(shape1, shape2)
with self.assertRaises(ValueError):
common_shapes.broadcast_shape(shape2, shape1)
# Asserts that we get the same result with numpy (for known shapes), and that
# the order of arguments does not matter (i.e., broadcasting is reflexive).
def _assert_broadcast(self, expected, shape1, shape2):
if shape1.dims is not None and shape2.dims is not None:
expected_np = expected.as_list()
zeros1 = np.zeros(shape1.as_list())
zeros2 = np.zeros(shape2.as_list())
self.assertAllEqual(expected_np, np.broadcast(zeros1, zeros2).shape)
self.assertAllEqual(expected_np, np.broadcast(zeros2, zeros1).shape)
self.assertEqual(
expected, common_shapes.broadcast_shape(shape1, shape2))
self.assertEqual(
expected, common_shapes.broadcast_shape(shape2, shape1))
else:
self.assertEqual(expected, common_shapes.broadcast_shape(shape1, shape2))
self.assertEqual(expected, common_shapes.broadcast_shape(shape2, shape1))
def testBroadcast_one_dimension(self):
s1 = tensor_shape.vector(5)
s2 = tensor_shape.vector(7)
unknown = tensor_shape.unknown_shape()
scalar = tensor_shape.scalar()
expanded_scalar = tensor_shape.TensorShape([1])
# Tensors with same shape should have the same broadcast result.
for shape in (s1, s2, unknown, scalar, expanded_scalar):
self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
self._assert_broadcast(expected=s1, shape1=s1, shape2=scalar)
self._assert_broadcast(expected=s2, shape1=s2, shape2=scalar)
self._assert_broadcast(expected=s1, shape1=s1, shape2=expanded_scalar)
self._assert_broadcast(expected=s2, shape1=s2, shape2=expanded_scalar)
self._assert_broadcast(expected=unknown, shape1=s1, shape2=unknown)
self._assert_broadcast(expected=unknown, shape1=s2, shape2=unknown)
self._assert_broadcast(
expected=expanded_scalar, shape1=scalar, shape2=expanded_scalar)
self._assert_incompatible_broadcast(shape1=s1, shape2=s2)
def testBroadcast_many_dimensions(self):
unknown = tensor_shape.unknown_shape()
shape_0 = tensor_shape.scalar()
shape_1 = tensor_shape.vector(1)
shape_4 = tensor_shape.vector(4)
shape_1x4 = tensor_shape.matrix(1, 4)
shape_4x1 = tensor_shape.matrix(4, 1)
shape_3x4 = tensor_shape.matrix(3, 4)
shape_4x3 = tensor_shape.matrix(4, 3)
# Tensors with same shape should have the same broadcast result.
for shape in (
shape_0, shape_1, shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
for identity in (shape_0, shape_1):
for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=shape, shape1=identity, shape2=shape)
# Unknown in, unknown out.
for shape in (shape_4, shape_1x4, shape_4x1, shape_3x4, shape_4x3):
self._assert_broadcast(expected=unknown, shape1=shape, shape2=unknown)
self._assert_broadcast(expected=shape_1x4, shape1=shape_4, shape2=shape_1x4)
shape_4x4 = tensor_shape.matrix(4, 4)
self._assert_broadcast(expected=shape_4x4, shape1=shape_4, shape2=shape_4x1)
self._assert_broadcast(expected=shape_3x4, shape1=shape_4, shape2=shape_3x4)
self._assert_incompatible_broadcast(shape1=shape_4, shape2=shape_4x3)
self._assert_broadcast(
expected=shape_4x4, shape1=shape_1x4, shape2=shape_4x1)
self._assert_broadcast(
expected=shape_3x4, shape1=shape_1x4, shape2=shape_3x4)
self._assert_incompatible_broadcast(shape1=shape_1x4, shape2=shape_4x3)
self._assert_incompatible_broadcast(shape1=shape_4x1, shape2=shape_3x4)
self._assert_broadcast(
expected=shape_4x3, shape1=shape_4x1, shape2=shape_4x3)
self._assert_incompatible_broadcast(shape1=shape_3x4, shape2=shape_4x3)
# Asserts that the order of arguments does not matter (i.e., broadcasting is
# reflexive).
def _assert_broadcast_with_unknown_dims(self, expected, shape1, shape2):
actual_dims = common_shapes.broadcast_shape(shape1, shape2).dims
reflexive_actual_dims = common_shapes.broadcast_shape(shape2, shape1).dims
if actual_dims is None:
self.assertIsNone(reflexive_actual_dims)
elif reflexive_actual_dims is None:
self.assertIsNone(actual_dims)
else:
self.assertEqual(len(actual_dims), len(reflexive_actual_dims))
for actual_dim, reflexive_actual_dim in zip(
actual_dims, reflexive_actual_dims):
self.assertEqual(actual_dim.value, reflexive_actual_dim.value)
expected_dims = expected.dims
if expected_dims is None:
self.assertIsNone(actual_dims)
elif actual_dims is None:
self.assertIsNone(expected_dims)
else:
self.assertEqual(len(expected_dims), len(actual_dims))
for expected_dim, actual_dim in zip(expected_dims, actual_dims):
self.assertEqual(expected_dim.value, actual_dim.value)
def testBroadcast_unknown_dims(self):
unknown = tensor_shape.unknown_shape()
shape_0 = tensor_shape.scalar()
shape_1 = tensor_shape.vector(1)
# pylint: disable=invalid-name
shape_U = tensor_shape.vector(None)
shape_1xU = tensor_shape.matrix(1, None)
shape_Ux1 = tensor_shape.matrix(None, 1)
shape_4xU = tensor_shape.matrix(4, None)
shape_Ux4 = tensor_shape.matrix(None, 4)
# pylint: enable=invalid-name
# Tensors with same shape should have the same broadcast result.
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=shape, shape1=shape, shape2=shape)
# [] and [1] act like identity.
for identity in (shape_0, shape_1):
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=shape, shape1=identity, shape2=shape)
# Unknown in, unknown out.
for shape in (shape_U, shape_1xU, shape_Ux1, shape_4xU, shape_Ux4):
self._assert_broadcast_with_unknown_dims(
expected=unknown, shape1=shape, shape2=unknown)
self._assert_broadcast_with_unknown_dims(
expected=shape_1xU, shape1=shape_U, shape2=shape_1xU)
shape_UxU = tensor_shape.matrix(None, None) # pylint: disable=invalid-name
self._assert_broadcast_with_unknown_dims(
expected=shape_UxU, shape1=shape_U, shape2=shape_Ux1)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_U, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_U, shape2=shape_Ux4)
self._assert_broadcast_with_unknown_dims(
expected=shape_UxU, shape1=shape_1xU, shape2=shape_Ux1)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_1xU, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_1xU, shape2=shape_Ux4)
self._assert_broadcast_with_unknown_dims(
expected=shape_4xU, shape1=shape_Ux1, shape2=shape_4xU)
self._assert_broadcast_with_unknown_dims(
expected=shape_Ux4, shape1=shape_Ux1, shape2=shape_Ux4)
shape_4x4 = tensor_shape.matrix(4, 4)
self._assert_broadcast_with_unknown_dims(
expected=shape_4x4, shape1=shape_4xU, shape2=shape_Ux4)
# Run the TensorFlow test runner when this file is executed as a script.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
cevaris/pants | src/python/pants/pantsd/util.py | 4 | 1329 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.bin.options_initializer import OptionsInitializer
from pants.build_graph.intermediate_target_factory import IntermediateTargetFactoryBase
from pants.goal.goal import Goal
from pants.goal.run_tracker import RunTracker
from pants.subsystem.subsystem import Subsystem
def clean_global_runtime_state(reset_runtracker=True, reset_subsystem=False):
  """Resets the global runtime state of a pants runtime for cleaner forking.

  Called before pantsd forks a fresh run so module-level singletons from the
  previous run do not leak into the child.  The reset order below is
  intentional; do not reorder without checking each singleton's dependencies.

  :param bool reset_runtracker: Whether or not to clean RunTracker global state.
  :param bool reset_subsystem: Whether or not to clean Subsystem global state.
  """
  if reset_runtracker:
    # Reset RunTracker state.  Options are preserved (reset_options=False) so
    # the next run can reuse them.
    RunTracker.global_instance().reset(reset_options=False)
  if reset_subsystem:
    # Reset subsystem state.
    Subsystem.reset()
    #TODO: Think of an alternative for IntermediateTargetFactoryBase._targets to avoid this call
    IntermediateTargetFactoryBase.reset()
  # Reset Goals and Tasks.
  Goal.clear()
  # Reset backend/plugins state.
  OptionsInitializer.reset()
| apache-2.0 |
navrasio/mxnet | example/fcn-xs/fcn_xs.py | 45 | 3773 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys, os
import argparse
import mxnet as mx
import numpy as np
import logging
import symbol_fcnxs
import init_fcnxs
from data import FileIter
from solver import Solver
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ctx = mx.gpu(0)
def main():
    """Train an FCN-xs semantic-segmentation model on PASCAL VOC.

    Reads the module-level ``args`` (parsed in the ``__main__`` block) and
    ``ctx``: which FCN variant to build, which checkpoint to initialize
    from, the init type, and whether this is a retraining run.  Trains for
    50 epochs, checkpointing under ``model_pascal/``.
    """
    # Build only the requested symbol.  The original code always built the
    # FCN32s symbol first and then discarded it for the other variants; it
    # also computed an unused `arg_names` local (removed).
    if args.model == "fcn16s":
        fcnxs = symbol_fcnxs.get_fcn16s_symbol(numclass=21, workspace_default=1536)
        fcnxs_model_prefix = "model_pascal/FCN16s_VGG16"
    elif args.model == "fcn8s":
        fcnxs = symbol_fcnxs.get_fcn8s_symbol(numclass=21, workspace_default=1536)
        fcnxs_model_prefix = "model_pascal/FCN8s_VGG16"
    else:
        # Default (including args.model == "fcnxs"): FCN32s.
        fcnxs = symbol_fcnxs.get_fcn32s_symbol(numclass=21, workspace_default=1536)
        fcnxs_model_prefix = "model_pascal/FCN32s_VGG16"
    # Load pretrained weights from the given checkpoint prefix/epoch.
    _, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(args.prefix, args.epoch)
    if not args.retrain:
        # Fresh run: adapt the loaded weights to the FCN-xs architecture.
        if args.init_type == "vgg16":
            fcnxs_args, fcnxs_auxs = init_fcnxs.init_from_vgg16(ctx, fcnxs, fcnxs_args, fcnxs_auxs)
        elif args.init_type == "fcnxs":
            fcnxs_args, fcnxs_auxs = init_fcnxs.init_from_fcnxs(ctx, fcnxs, fcnxs_args, fcnxs_auxs)
    # Data iterators over the VOC2012 train/val lists, mean-subtracted.
    train_dataiter = FileIter(
        root_dir = "./VOC2012",
        flist_name = "train.lst",
        # cut_off_size = 400,
        rgb_mean = (123.68, 116.779, 103.939),
        )
    val_dataiter = FileIter(
        root_dir = "./VOC2012",
        flist_name = "val.lst",
        rgb_mean = (123.68, 116.779, 103.939),
        )
    model = Solver(
        ctx = ctx,
        symbol = fcnxs,
        begin_epoch = 0,
        num_epoch = 50,
        arg_params = fcnxs_args,
        aux_params = fcnxs_auxs,
        learning_rate = 1e-10,
        momentum = 0.99,
        wd = 0.0005)
    model.fit(
        train_data = train_dataiter,
        eval_data = val_dataiter,
        batch_end_callback = mx.callback.Speedometer(1, 10),
        epoch_end_callback = mx.callback.do_checkpoint(fcnxs_model_prefix))
if __name__ == "__main__":
    # Parse command-line options controlling which FCN variant to train and
    # which pretrained checkpoint to start from, then run training.
    parser = argparse.ArgumentParser(description='Convert vgg16 model to vgg16fc model.')
    parser.add_argument('--model', default='fcnxs',
        help='The type of fcn-xs model, e.g. fcnxs, fcn16s, fcn8s.')
    parser.add_argument('--prefix', default='VGG_FC_ILSVRC_16_layers',
        help='The prefix(include path) of vgg16 model with mxnet format.')
    parser.add_argument('--epoch', type=int, default=74,
        help='The epoch number of vgg16 model.')
    parser.add_argument('--init-type', default="vgg16",
        help='the init type of fcn-xs model, e.g. vgg16, fcnxs')
    parser.add_argument('--retrain', action='store_true', default=False,
        help='true means continue training.')
    # `args` is read as a module-level global by main().
    args = parser.parse_args()
    logging.info(args)
    main()
| apache-2.0 |
openiitbombayx/edx-platform | common/test/acceptance/accessibility/test_lms_dashboard_axs.py | 68 | 1293 | """
Accessibility tests for LMS dashboard page.
Run just this test with:
SELENIUM_BROWSER=phantomjs paver test_bokchoy -d accessibility -t test_lms_dashboard_axs.py
"""
from ..tests.lms.test_lms_dashboard import BaseLmsDashboardTest
class LmsDashboardAxsTest(BaseLmsDashboardTest):
    """
    Accessibility (axs) audit of the LMS student dashboard.
    """
    def test_dashboard_course_listings_axs(self):
        """
        Run the axs audit over the dashboard course listings and pin the
        current error/warning counts.
        """
        listings = self.dashboard_page.get_course_listings()
        self.assertEqual(len(listings), 1)

        report = self.dashboard_page.do_axs_audit()
        # The audit session covered exactly one page.
        self.assertEqual(1, len(report))
        page_result = report[0]

        # No accessibility errors are tolerated.
        self.assertEqual(0, len(page_result.errors))
        # Two known warnings are currently expected on this page.
        self.assertEqual(2, len(page_result.warnings))
        known_warning_prefixes = ('Warning: AX_FOCUS_01', 'Warning: AX_COLOR_01',)
        for warning in page_result.warnings:
            self.assertTrue(
                warning.startswith(known_warning_prefixes),
                msg="Unexpected warning: {}".format(warning))
| agpl-3.0 |
ibollen/repo | subcmds/prune.py | 35 | 1708 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from color import Coloring
from command import PagedCommand
class Prune(PagedCommand):
common = True
helpSummary = "Prune (delete) already merged topics"
helpUsage = """
%prog [<project>...]
"""
def Execute(self, opt, args):
all = []
for project in self.GetProjects(args):
all.extend(project.PruneHeads())
if not all:
return
class Report(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'status')
self.project = self.printer('header', attr='bold')
out = Report(all[0].project.config)
out.project('Pending Branches')
out.nl()
project = None
for branch in all:
if project != branch.project:
project = branch.project
out.nl()
out.project('project %s/' % project.relpath)
out.nl()
commits = branch.commits
date = branch.date
print '%s %-33s (%2d commit%s, %s)' % (
branch.name == project.CurrentBranch and '*' or ' ',
branch.name,
len(commits),
len(commits) != 1 and 's' or ' ',
date)
| apache-2.0 |
rickyboy69/tvportugal | mechanize/_msiecookiejar.py | 134 | 14694 | """Microsoft Internet Explorer cookie loading on Windows.
Copyright 2002-2003 Johnny Lee <typo_pl@hotmail.com> (MSIE Perl code)
Copyright 2002-2006 John J Lee <jjl@pobox.com> (The Python port)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX names and comments are not great here
import os, re, time, struct, logging
if os.name == "nt":
import _winreg
from _clientcookie import FileCookieJar, CookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("mechanize").debug
def regload(path, leaf):
    """Return a registry value under HKEY_CURRENT_USER, or None if absent.

    path: subkey path under HKEY_CURRENT_USER
    leaf: value name to read with QueryValueEx

    FIX: the original never closed the registry key handle, leaking it on
    every call; the handle is now released in a finally block.
    NOTE(review): KEY_ALL_ACCESS is broader than needed for a read --
    KEY_READ would suffice, but is left unchanged to preserve behavior.
    """
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
                          _winreg.KEY_ALL_ACCESS)
    try:
        try:
            value = _winreg.QueryValueEx(key, leaf)[0]
        except WindowsError:
            value = None
    finally:
        _winreg.CloseKey(key)
    return value
# 1970 Jan 01 00:00:00 expressed as a Win32 FILETIME (100 ns units since
# 1601-01-01).  The Python 2 `L` literal suffix was dropped; the value is
# identical and the plain literal is valid on both Python 2 and 3.
WIN32_EPOCH = 0x019db1ded53e8000

def epoch_time_offset_from_win32_filetime(filetime):
    """Convert from win32 filetime to seconds-since-epoch value.

    MSIE stores create and expire times as Win32 FILETIME, which is 64
    bits of 100 nanosecond intervals since Jan 01 1601.

    mechanize expects time in 32-bit value expressed in seconds since the
    epoch (Jan 01 1970).

    Raises ValueError for filetimes before the Unix epoch.
    """
    if filetime < WIN32_EPOCH:
        raise ValueError("filetime (%d) is before epoch (%d)" %
                         (filetime, WIN32_EPOCH))

    # Floor division replaces the original divmod(...)[0], which computed
    # and discarded the remainder.  10**7 hundred-ns ticks per second.
    return (filetime - WIN32_EPOCH) // 10000000
def binary_to_char(c):
    # Two-digit uppercase hex for a single byte/character.
    return format(ord(c), "02X")
def binary_to_str(d):
    # Hex dump of a byte string, e.g. "ab" -> "6162".
    return "".join("%02X" % ord(ch) for ch in d)
class MSIEBase:
    """Shared machinery for reading MSIE's cookie database.

    Locates and decodes the MSIE ``index.dat`` cookie index and the
    per-user cookie text files it references.  Mixed into MSIECookieJar
    below.  Python 2 code; uses ``long`` and byte-oriented ``str``.
    """
    # Signature expected at the very start of a valid index.dat file.
    magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
    # Filler bytes used between records in index.dat.
    padding = "\x0d\xf0\xad\x0b"

    # Splits "domain/path" as stored in the cookie text files.
    msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
    cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
                           "(.+\@[\x21-\xFF]+\.txt)")

    # path under HKEY_CURRENT_USER from which to get location of index.dat
    reg_path = r"software\microsoft\windows" \
               r"\currentversion\explorer\shell folders"
    reg_key = "Cookies"

    def __init__(self):
        # domain -> (cookie_file, ignore_discard, ignore_expires) for domains
        # whose cookie files have not been read yet (delayload mode).
        self._delayload_domains = {}

    def _delayload_domain(self, domain):
        # if necessary, lazily load cookies for this domain
        delayload_info = self._delayload_domains.get(domain)
        if delayload_info is not None:
            cookie_file, ignore_discard, ignore_expires = delayload_info
            try:
                self.load_cookie_data(cookie_file,
                                      ignore_discard, ignore_expires)
            except (LoadError, IOError):
                # Best-effort: a broken cookie file is skipped, not fatal.
                debug("error reading cookie file, skipping: %s", cookie_file)
            else:
                # Only forget the pending entry if the load succeeded.
                del self._delayload_domains[domain]

    def _load_cookies_from_file(self, filename):
        """Parse one MSIE cookie text file into a list of raw cookie dicts.

        The file format is line-oriented: key, value, domain/path, flags,
        two-part expire time, two-part create time, then a "*" separator.
        """
        debug("Loading MSIE cookies file: %s", filename)
        cookies = []

        cookies_fh = open(filename)

        try:
            while 1:
                key = cookies_fh.readline()
                if key == "": break

                # getlong/getstr bind the readline method at definition time
                # (default-argument trick) for speed in this tight loop.
                rl = cookies_fh.readline
                def getlong(rl=rl): return long(rl().rstrip())
                def getstr(rl=rl): return rl().rstrip()

                key = key.rstrip()
                value = getstr()
                domain_path = getstr()
                flags = getlong()  # 0x2000 bit is for secure I think
                lo_expire = getlong()
                hi_expire = getlong()
                lo_create = getlong()
                hi_create = getlong()
                sep = getstr()

                # A short read or missing "*" separator ends the file.
                if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
                          hi_create, lo_create, sep) or (sep != "*"):
                    break

                m = self.msie_domain_re.search(domain_path)
                if m:
                    domain = m.group(1)
                    path = m.group(2)

                    cookies.append({"KEY": key, "VALUE": value,
                                    "DOMAIN": domain, "PATH": path,
                                    "FLAGS": flags, "HIXP": hi_expire,
                                    "LOXP": lo_expire, "HICREATE": hi_create,
                                    "LOCREATE": lo_create})
        finally:
            cookies_fh.close()

        return cookies

    def load_cookie_data(self, filename,
                         ignore_discard=False, ignore_expires=False):
        """Load cookies from file containing actual cookie data.

        Old cookies are kept unless overwritten by newly loaded ones.

        You should not call this method if the delayload attribute is set.

        I think each of these files contain all cookies for one user, domain,
        and path.

        filename: file containing cookies -- usually found in a file like
         C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt

        """
        now = int(time.time())

        cookie_data = self._load_cookies_from_file(filename)

        for cookie in cookie_data:
            flags = cookie["FLAGS"]
            secure = ((flags & 0x2000) != 0)
            # Recombine the two 32-bit halves into a 64-bit Win32 FILETIME.
            filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
            expires = epoch_time_offset_from_win32_filetime(filetime)
            if expires < now:
                discard = True
            else:
                discard = False
            domain = cookie["DOMAIN"]
            initial_dot = domain.startswith(".")
            if initial_dot:
                domain_specified = True
            else:
                # MSIE 5 does not record whether the domain cookie-attribute
                # was specified.
                # Assuming it wasn't is conservative, because with strict
                # domain matching this will match less frequently; with regular
                # Netscape tail-matching, this will match at exactly the same
                # times that domain_specified = True would.  It also means we
                # don't have to prepend a dot to achieve consistency with our
                # own & Mozilla's domain-munging scheme.
                domain_specified = False

            # assume path_specified is false
            # XXX is there other stuff in here? -- e.g. comment, commentURL?
            c = Cookie(0,
                       cookie["KEY"], cookie["VALUE"],
                       None, False,
                       domain, domain_specified, initial_dot,
                       cookie["PATH"], False,
                       secure,
                       expires,
                       discard,
                       None,
                       None,
                       {"flags": flags})
            if not ignore_discard and c.discard:
                continue
            if not ignore_expires and c.is_expired(now):
                continue
            CookieJar.set_cookie(self, c)

    def load_from_registry(self, ignore_discard=False, ignore_expires=False,
                           username=None):
        """Locate index.dat via the registry and load cookies from it.

        username: only required on win9x

        """
        cookies_dir = regload(self.reg_path, self.reg_key)
        filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
        self.load(filename, ignore_discard, ignore_expires, username)

    def _really_load(self, index, filename, ignore_discard, ignore_expires,
                     username):
        """Walk the binary index.dat records and load/queue each cookie file.

        index: open binary file object positioned at the start of index.dat.
        In delayload mode, per-domain cookie files are recorded in
        self._delayload_domains instead of being read immediately.
        """
        now = int(time.time())

        if username is None:
            # NOTE(review): assumes the USERNAME env var is set (Windows).
            username = os.environ['USERNAME'].lower()

        cookie_dir = os.path.dirname(filename)

        data = index.read(256)
        if len(data) != 256:
            raise LoadError("%s file is too short" % filename)

        # Cookies' index.dat file starts with 32 bytes of signature
        # followed by an offset to the first record, stored as a little-
        # endian DWORD.
        sig, size, data = data[:32], data[32:36], data[36:]
        size = struct.unpack("<L", size)[0]

        # check that sig is valid
        if not self.magic_re.match(sig) or size != 0x4000:
            raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
                            (str(filename), sig, size))

        # skip to start of first record
        index.seek(size, 0)

        sector = 128  # size of sector in bytes

        while 1:
            data = ""

            # Cookies are usually in two contiguous sectors, so read in two
            # sectors and adjust if not a Cookie.
            to_read = 2 * sector
            d = index.read(to_read)
            if len(d) != to_read:
                break
            data = data + d

            # Each record starts with a 4-byte signature and a count
            # (little-endian DWORD) of sectors for the record.
            sig, size, data = data[:4], data[4:8], data[8:]
            size = struct.unpack("<L", size)[0]
            to_read = (size - 2) * sector

##             from urllib import quote
##             print "data", quote(data)
##             print "sig", quote(sig)
##             print "size in sectors", size
##             print "size in bytes", size*sector
##             print "size in units of 16 bytes", (size*sector) / 16
##             print "size to read in bytes", to_read
##             print

            if sig != "URL ":
                # Non-URL records: hash tables, leaked records, padding, or
                # the all-zero terminator.
                assert sig in ("HASH", "LEAK", \
                               self.padding, "\x00\x00\x00\x00"), \
                       "unrecognized MSIE index.dat record: %s" % \
                       binary_to_str(sig)
                if sig == "\x00\x00\x00\x00":
                    # assume we've got all the cookies, and stop
                    break
                if sig == self.padding:
                    continue
                # skip the rest of this record
                assert to_read >= 0
                if size != 2:
                    assert to_read != 0
                    index.seek(to_read, 1)
                continue

            # read in rest of record if necessary
            if size > 2:
                more_data = index.read(to_read)
                if len(more_data) != to_read: break
                data = data + more_data

            # Match "Cookie:<user>@<domain>...<user>@<file>.txt" inside the
            # record to find the per-domain cookie text file.
            cookie_re = ("Cookie\:%s\@([\x21-\xFF]+).*?" % username +
                         "(%s\@[\x21-\xFF]+\.txt)" % username)
            m = re.search(cookie_re, data, re.I)
            if m:
                cookie_file = os.path.join(cookie_dir, m.group(2))
                if not self.delayload:
                    try:
                        self.load_cookie_data(cookie_file,
                                              ignore_discard, ignore_expires)
                    except (LoadError, IOError):
                        debug("error reading cookie file, skipping: %s",
                              cookie_file)
                else:
                    domain = m.group(1)
                    i = domain.find("/")
                    if i != -1:
                        domain = domain[:i]

                    self._delayload_domains[domain] = (
                        cookie_file, ignore_discard, ignore_expires)
class MSIECookieJar(MSIEBase, FileCookieJar):
    """FileCookieJar that reads from the Windows MSIE cookies database.

    MSIECookieJar can read the cookie files of Microsoft Internet Explorer
    (MSIE) for Windows version 5 on Windows NT and version 6 on Windows XP and
    Windows 98.  Other configurations may also work, but are untested.  Saving
    cookies in MSIE format is NOT supported.  If you save cookies, they'll be
    in the usual Set-Cookie3 format, which you can read back in using an
    instance of the plain old CookieJar class.  Don't save using the same
    filename that you loaded cookies from, because you may succeed in
    clobbering your MSIE cookies index file!

    You should be able to have LWP share Internet Explorer's cookies like
    this (note you need to supply a username to load_from_registry if you're on
    Windows 9x or Windows ME):

    cj = MSIECookieJar(delayload=1)
    # find cookies index file in registry and load cookies from it
    cj.load_from_registry()
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    response = opener.open("http://example.com/")

    Iterating over a delayloaded MSIECookieJar instance will not cause any
    cookies to be read from disk.  To force reading of all cookies from disk,
    call read_all_cookies.  Note that the following methods iterate over self:
    clear_temporary_cookies, clear_expired_cookies, __len__, __repr__, __str__
    and as_string.

    Additional methods:

    load_from_registry(ignore_discard=False, ignore_expires=False,
                       username=None)
    load_cookie_data(filename, ignore_discard=False, ignore_expires=False)
    read_all_cookies()

    """
    def __init__(self, filename=None, delayload=False, policy=None):
        # Initialize both bases: MSIEBase sets up delayload bookkeeping,
        # FileCookieJar stores filename/delayload/policy.
        MSIEBase.__init__(self)
        FileCookieJar.__init__(self, filename, delayload, policy)

    def set_cookie(self, cookie):
        """Set a cookie, first forcing any pending delayload for its domain."""
        if self.delayload:
            self._delayload_domain(cookie.domain)
        CookieJar.set_cookie(self, cookie)

    def _cookies_for_request(self, request):
        """Return a list of cookies to be returned to server."""
        domains = self._cookies.copy()
        # Include not-yet-loaded domains so they get a chance to match.
        domains.update(self._delayload_domains)
        domains = domains.keys()

        cookies = []
        for domain in domains:
            cookies.extend(self._cookies_for_domain(domain, request))
        return cookies

    def _cookies_for_domain(self, domain, request):
        """Return this domain's cookies for the request, loading on demand."""
        if not self._policy.domain_return_ok(domain, request):
            return []
        debug("Checking %s for cookies to return", domain)
        if self.delayload:
            self._delayload_domain(domain)
        return CookieJar._cookies_for_domain(self, domain, request)

    def read_all_cookies(self):
        """Eagerly read in all cookies."""
        if self.delayload:
            for domain in self._delayload_domains.keys():
                self._delayload_domain(domain)

    def load(self, filename, ignore_discard=False, ignore_expires=False,
             username=None):
        """Load cookies from an MSIE 'index.dat' cookies index file.

        filename: full path to cookie index file
        username: only required on win9x

        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)

        index = open(filename, "rb")

        try:
            self._really_load(index, filename, ignore_discard, ignore_expires,
                              username)
        finally:
            index.close()
| apache-2.0 |
bmanojlovic/ansible | lib/ansible/modules/network/ldap_entry.py | 10 | 9628 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Peter Sagerson <psagers@ignorare.net>
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
import ldap
import ldap.modlist
import ldap.sasl
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
DOCUMENTATION = """
---
module: ldap_entry
short_description: Add or remove LDAP entries.
description:
- Add or remove LDAP entries. This module only asserts the existence or
non-existence of an LDAP entry, not its attributes. To assert the
attribute values of an entry, see M(ldap_attr).
notes:
- The default authentication settings will attempt to use a SASL EXTERNAL
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
a simple bind to access your server, pass the credentials in I(bind_dn)
and I(bind_pw).
version_added: '2.3'
author:
- Jiri Tyr (@jtyr)
requirements:
- python-ldap
options:
bind_dn:
required: false
default: null
description:
- A DN to bind with. If this is omitted, we'll try a SASL bind with
the EXTERNAL mechanism. If this is blank, we'll use an anonymous
bind.
bind_pw:
required: false
default: null
description:
- The password to use with I(bind_dn).
dn:
required: true
description:
- The DN of the entry to add or remove.
attributes:
required: false
default: null
description:
- If I(state=present), attributes necessary to create an entry. Existing
entries are never modified. To assert specific attribute values on an
existing entry, use M(ldap_attr) module instead.
objectClass:
required: false
default: null
description:
- If I(state=present), value or list of values to use when creating
the entry. It can either be a string or an actual list of
strings.
params:
required: false
default: null
description:
- List of options which allows to overwrite any of the task or the
I(attributes) options. To remove an option, set the value of the option
to C(null).
server_uri:
required: false
default: ldapi:///
description:
- A URI to the LDAP server. The default value lets the underlying
LDAP client library look for a UNIX domain socket in its default
location.
start_tls:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If true, we'll use the START_TLS LDAP extension.
state:
required: false
choices: [present, absent]
default: present
description:
- The target state of the entry.
"""
EXAMPLES = """
- name: Make sure we have a parent entry for users
ldap_entry:
dn: ou=users,dc=example,dc=com
objectClass: organizationalUnit
- name: Make sure we have an admin user
ldap_entry:
dn: cn=admin,dc=example,dc=com
objectClass:
- simpleSecurityObject
- organizationalRole
attributes:
description: An LDAP administrator
userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
- name: Get rid of an old entry
ldap_entry:
dn: ou=stuff,dc=example,dc=com
state: absent
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
bind_pw: password
#
# The same as in the previous example but with the authentication details
# stored in the ldap_auth variable:
#
# ldap_auth:
# server_uri: ldap://localhost/
# bind_dn: cn=admin,dc=example,dc=com
# bind_pw: password
- name: Get rid of an old entry
ldap_entry:
dn: ou=stuff,dc=example,dc=com
state: absent
params: "{{ ldap_auth }}"
"""
RETURN = """
# Default return values
"""
class LdapEntry(object):
    """Wraps one LDAP connection plus the target DN for an add/delete.

    Built from the AnsibleModule params; opens the LDAP connection in
    __init__ (so construction itself may fail_json on connection errors).
    """
    def __init__(self, module):
        # Shortcuts
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.dn = self.module.params['dn']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
        self.state = self.module.params['state']

        # Add the objectClass into the list of attributes
        self.module.params['attributes']['objectClass'] = (
            self.module.params['objectClass'])

        # Load attributes
        if self.state == 'present':
            self.attrs = self._load_attrs()

        # Establish connection
        self.connection = self._connect_to_ldap()

    def _load_attrs(self):
        """ Turn attribute's value to array. """
        attrs = {}

        for name, value in self.module.params['attributes'].items():
            if name not in attrs:
                attrs[name] = []

            if isinstance(value, list):
                attrs[name] = value
            else:
                # Scalar values are coerced to str and wrapped in a list,
                # since python-ldap modlists expect lists of strings.
                attrs[name].append(str(value))

        return attrs

    def add(self):
        """ If self.dn does not exist, returns a callable that will add it. """
        def _add():
            self.connection.add_s(self.dn, modlist)

        if not self._is_entry_present():
            # Note: _add closes over `modlist`, built here.
            modlist = ldap.modlist.addModlist(self.attrs)
            action = _add
        else:
            action = None

        return action

    def delete(self):
        """ If self.dn exists, returns a callable that will delete it. """
        def _delete():
            self.connection.delete_s(self.dn)

        if self._is_entry_present():
            action = _delete
        else:
            action = None

        return action

    def _is_entry_present(self):
        """Return True if self.dn exists on the server (base-scope search)."""
        try:
            self.connection.search_s(self.dn, ldap.SCOPE_BASE)
        except ldap.NO_SUCH_OBJECT:
            is_present = False
        else:
            is_present = True

        return is_present

    def _connect_to_ldap(self):
        """Open, optionally TLS-upgrade, and bind an LDAP connection.

        Uses a simple bind when bind_dn is given, otherwise a SASL EXTERNAL
        bind (suitable for ldapi:// as root).  Calls fail_json on error.
        """
        connection = ldap.initialize(self.server_uri)

        if self.start_tls:
            try:
                connection.start_tls_s()
            except ldap.LDAPError:
                e = get_exception()
                self.module.fail_json(msg="Cannot start TLS.", details=str(e))

        try:
            if self.bind_dn is not None:
                connection.simple_bind_s(self.bind_dn, self.bind_pw)
            else:
                connection.sasl_interactive_bind_s('', ldap.sasl.external())
        except ldap.LDAPError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot bind to the server.", details=str(e))

        return connection
def main():
    """Ansible module entry point: ensure an LDAP entry is present or absent."""
    module = AnsibleModule(
        argument_spec={
            'attributes': dict(default={}, type='dict'),
            'bind_dn': dict(),
            'bind_pw': dict(default='', no_log=True),
            'dn': dict(required=True),
            'objectClass': dict(type='raw'),
            'params': dict(type='dict'),
            'server_uri': dict(default='ldapi:///'),
            'start_tls': dict(default=False, type='bool'),
            'state': dict(default='present', choices=['present', 'absent']),
        },
        supports_check_mode=True,
    )

    if not HAS_LDAP:
        # FIX: corrected "requried" typo in the user-facing error message.
        module.fail_json(
            msg="Missing required 'ldap' module (pip install python-ldap).")

    state = module.params['state']

    # Check if objectClass is present when needed
    if state == 'present' and module.params['objectClass'] is None:
        module.fail_json(msg="At least one objectClass must be provided.")

    # Check if objectClass is of the correct type
    if (
            module.params['objectClass'] is not None and not (
                isinstance(module.params['objectClass'], basestring) or
                isinstance(module.params['objectClass'], list))):
        module.fail_json(msg="objectClass must be either a string or a list.")

    # Update module parameters with user's parameters if defined
    if 'params' in module.params and isinstance(module.params['params'], dict):
        for key, val in module.params['params'].items():
            if key in module.argument_spec:
                module.params[key] = val
            else:
                module.params['attributes'][key] = val

        # Remove the params
        module.params.pop('params', None)

    # Instantiate the LdapEntry object.
    # FIX: the original bound this to a local named `ldap`, shadowing the
    # imported `ldap` module for the rest of the function; renamed.
    ldap_entry = LdapEntry(module)

    # Get the action function
    if state == 'present':
        action = ldap_entry.add()
    elif state == 'absent':
        action = ldap_entry.delete()

    # Perform the action
    if action is not None and not module.check_mode:
        try:
            action()
        except Exception:
            e = get_exception()
            module.fail_json(msg="Entry action failed.", details=str(e))

    module.exit_json(changed=(action is not None))
if __name__ == '__main__':
main()
| gpl-3.0 |
EsharEditor/ambari-hue-service | package/scripts/common.py | 2 | 7249 | #!/usr/bin/env python
import sys, os, pwd, grp, signal, time
from resource_management import *
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, Directory, File
from resource_management.core.shell import call
from resource_management.core.system import System
from resource_management.libraries.functions.default import default
def setup_user():
  """
  Creates Hue user home directory and sets up the correct ownership.
  """
  # Ensure the hue user/group exist before adjusting home-dir ownership.
  __create_hue_user()
  __set_home_dir_ownership()
def __create_hue_user():
  """Create the hue OS group and user if they do not already exist (EAFP)."""
  import params
  try:
    # getgrnam raises KeyError when the group is missing.
    grp.getgrnam(params.hue_group)
  except KeyError:
    Logger.info(format("Creating group '{params.hue_group}' for Hue Service"))
    Group(
      group_name = params.hue_group,
      ignore_failures = True
    )
  try:
    # getpwnam raises KeyError when the user is missing.
    pwd.getpwnam(params.hue_user)
  except KeyError:
    Logger.info(format("Creating user '{params.hue_user}' for Hue Service"))
    User(
      username = params.hue_user,
      groups = [params.hue_group],
      ignore_failures = True
    )
def __set_home_dir_ownership():
  import params
  """
  Updates the Hue user home directory to be owned by hue:hue.
  """
  # NOTE(review): the guard checks /home/<hue_user> but the directory created
  # is params.hue_local_home_dir -- presumably the same path; confirm in
  # params.py, otherwise the Directory resource may never run.
  if not os.path.exists("/home/{0}".format(params.hue_user)):
    Directory(params.hue_local_home_dir,
              mode=0700,
              cd_access='a',
              owner=params.hue_user,
              group=params.hue_group,
              create_parents=True
              )
def download_hue():
  import params
  """
  Download Hue to the installation directory
  """
  # NOTE(review): download_url is interpolated straight into a shell pipeline;
  # an attacker-controlled or malformed URL becomes shell code.  Consider
  # passing the URL as a quoted argument to wget instead of via echo|xargs.
  Execute('{0} | xargs wget -O hue.tgz'.format(params.download_url))
  # Unpack into the install dir and remove the tarball.
  Execute('tar -zxvf hue.tgz -C {0} && rm -f hue.tgz'.format(params.hue_install_dir))
  # Ensure all Hue files owned by hue
  Execute('chown -R {0}:{1} {2}'.format(params.hue_user,params.hue_group,params.hue_dir))
  # Symlink the HDP "current" pointer to this Hue install.
  Execute('ln -s {0} /usr/hdp/current/hue-server'.format(params.hue_dir))
  Logger.info("Hue Service is installed")
def add_hdfs_configuration(if_ranger=False, security_enabled=False):
  """Assemble and apply the HDFS proxy-user settings Hue requires."""
  import params
  core_site = {
    'hadoop.proxyuser.hue.groups': '*',
    'hadoop.proxyuser.hue.hosts': '*',
  }
  services_configurations = {
    'core-site': core_site,
    'hdfs-site': {'dfs.namenode.acls.enabled': 'true'},
  }
  # Each enabled Hue module needs its service user whitelisted as a proxy
  # user in core-site.
  module_users = (
    (params.hue_hbase_module_enabled, 'hbase'),
    (params.hue_hive_module_enabled, 'hive'),
    (params.hue_spark_module_enabled, 'spark'),
    (params.hue_oozie_module_enabled, 'oozie'),
  )
  for enabled, user in module_users:
    if enabled == 'Yes':
      core_site['hadoop.proxyuser.%s.groups' % user] = '*'
      core_site['hadoop.proxyuser.%s.hosts' % user] = '*'
  if params.dfs_ha_enabled:
    # HA deployments go through HttpFS, which needs its own proxy settings.
    core_site['hadoop.proxyuser.httpfs.groups'] = '*'
    core_site['hadoop.proxyuser.httpfs.hosts'] = '*'
    services_configurations['httpfs-site'] = {
      'httpfs.proxyuser.hue.groups': '*',
      'httpfs.proxyuser.hue.hosts': '*',
    }
  if security_enabled:
    core_site['hadoop.proxyuser.HTTP.groups'] = '*'
    core_site['hadoop.proxyuser.HTTP.hosts'] = '*'
    core_site['hue.kerberos.principal.shortname'] = 'hue'
  add_configurations(services_configurations)
def add_hbase_configuration(if_ranger=False, security_enabled=False):
  """Push the HBase Thrift settings that Hue's HBase module relies on."""
  import params
  hbase_site = {}
  if if_ranger:
    hbase_site['hbase.regionserver.thrift.http'] = 'true'
    hbase_site['hbase.thrift.support.proxyuser'] = 'true'
  if security_enabled:
    # Kerberized clusters additionally need the Thrift principal/keytab
    # and the secure RPC engine.
    hbase_site.update({
      'hbase.thrift.security.qop': 'auth',
      'hbase.thrift.support.proxyuser': 'true',
      'hbase.regionserver.thrift.http': 'true',
      'hbase.thrift.kerberos.principal': params.HTTP_principal,
      'hbase.thrift.keytab.file': params.HTTP_keytab,
      'hbase.rpc.engine': 'org.apache.hadoop.hbase.ipc.SecureRpcEngine',
    })
  add_configurations({'hbase-site': hbase_site})
def add_hive_configuration(if_ranger=False, security_enabled=False):
  """Push the Hive/WebHCat settings needed for Hue's Hive module."""
  hive_site = {
    'hive.security.authorization.sqlstd.confwhitelist.append':
      'hive.server2.logging.operation.verbose',
  }
  if if_ranger:
    hive_site['hive.server2.enable.impersonation'] = 'true'
  webhcat_site = {
    'webhcat.proxyuser.hue.groups': '*',
    'webhcat.proxyuser.hue.hosts': '*',
  }
  add_configurations({'hive-site': hive_site, 'webhcat-site': webhcat_site})
def add_oozie_configuration(if_ranger=False, security_enabled=False):
  """Whitelist the hue user as an Oozie proxy user."""
  oozie_site = {
    'oozie.service.ProxyUserService.proxyuser.hue.groups': '*',
    'oozie.service.ProxyUserService.proxyuser.hue.hosts': '*',
  }
  add_configurations({'oozie-site': oozie_site})
def add_spark_configuration(if_ranger=False, security_enabled=False):
  """Disable Livy CSRF protection so Hue can talk to the Livy server."""
  livy_conf = {'livy.server.csrf_protection.enabled': 'false'}
  add_configurations({'livy-conf': livy_conf})
def add_configurations(services_configurations):
  """Apply a nested {config-file: {key: value}} mapping via configs.sh.

  Equivalent to running, for every key/value pair:
    /var/lib/ambari-server/resources/scripts/configs.sh set ambari-server-host \
        cluster_name core-site "hadoop.proxyuser.hbase.hosts" "*"

  services_configurations: {'configuration file1': {'key1': 'value1', ...},
                            'configuration file2': {'key1': 'value1', ...},
                            ...}
  """
  import params
  if isinstance(services_configurations, dict):
    # Iterate items directly instead of indexing .keys() by position: the
    # original `keys()[i]` pattern rebuilds the key list on every pass
    # (O(n^2)) and fails outright on Python 3, where dict.keys() is a
    # non-indexable view.
    for config_file, properties in services_configurations.items():
      if isinstance(properties, dict):
        for key, value in properties.items():
          cmd = format(params.service_packagedir + "/files/configs.sh set " +
                       params.ambari_server_hostname + " " + params.cluster_name +
                       " " + config_file + " '" + key + "' '" + value + "'")
          Execute(cmd)
| apache-2.0 |
ronkyo/mi-instrument | mi/idk/test/test_result_set.py | 6 | 7547 | #!/usr/bin/env python
"""
@package mi.idk.test.test_result_set
@file mi.idk/test/test_result_set.py
@author Bill French
@brief Read a result set file and test the verification methods
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
import os
import re
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.common import BaseEnum
from mi.core.unit_test import MiUnitTest
from mi.idk.result_set import ResultSet
from mi.core.instrument.data_particle import DataParticle, DataParticleKey
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.metadata import Metadata
from mi.core.exceptions import SampleException
# Matches timestamps of the form "M/D/YYYY H:M:S" (used in the rejected-format
# test in test_ntp_conversion).
TIME_REGEX = r'\d{1,2}/\d{1,2}/\d{4}\s*\d{1,2}:\d{1,2}:\d{1,2}'
TIME_MATCHER = re.compile(TIME_REGEX, re.DOTALL)
# Matches one CSV sample line of four floats; the four capture groups feed
# CtdpfParserDataParticle._build_parsed_values.
DATA_REGEX = r'^\s*(\d*\.\d*),\s*(\d*\.\d*),\s*(\d*\.\d*),\s*(\d*\.\d)'
DATA_MATCHER = re.compile(DATA_REGEX, re.DOTALL)
class CtdpfParserDataParticleKey(BaseEnum):
    """Names of the value fields emitted by CtdpfParserDataParticle."""
    TEMPERATURE = "temperature"
    CONDUCTIVITY = "conductivity"
    PRESSURE = "pressure"
    OXYGEN = "oxygen"
class CtdpfParserDataParticle(DataParticle):
    """
    Class for parsing data from the CTDPF instrument on a HYPM SP platform node
    """
    # Stream name attached to every particle produced by this class.
    _data_particle_type = 'ctdpf_parsed'
    def _build_parsed_values(self):
        """
        Take something in the data format CSV delimited values and turn it into
        a particle with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        """
        match = DATA_MATCHER.match(self.raw_data)
        if not match:
            # NOTE(review): "[%s]" here is never %-formatted -- raw_data is
            # passed as a second exception argument; presumably SampleException
            # renders its args, but confirm, otherwise the sample text is lost.
            raise SampleException("CtdParserDataParticle: No regex match of parsed sample data: [%s]", self.raw_data)
        try:
            # NOTE(review): group(2) -> temperature and group(1) -> conductivity
            # means the SECOND CSV column is temperature. The fixture data used
            # by the tests ("10.5914, 4.1870, ...") makes the column order
            # ambiguous from here -- verify against the instrument format spec.
            temp = float(match.group(2))
            cond = float(match.group(1))
            press = float(match.group(3))
            o2 = float(match.group(4))
        except (ValueError, TypeError, IndexError) as ex:
            raise SampleException("Error (%s) while decoding parameters in data: [%s]" % (ex, self.raw_data))
        # One {VALUE_ID, VALUE} dict per measured field, in fixed order.
        result = [{DataParticleKey.VALUE_ID: CtdpfParserDataParticleKey.TEMPERATURE,
                   DataParticleKey.VALUE: temp},
                  {DataParticleKey.VALUE_ID: CtdpfParserDataParticleKey.CONDUCTIVITY,
                   DataParticleKey.VALUE: cond},
                  {DataParticleKey.VALUE_ID: CtdpfParserDataParticleKey.PRESSURE,
                   DataParticleKey.VALUE: press},
                  {DataParticleKey.VALUE_ID: CtdpfParserDataParticleKey.OXYGEN,
                   DataParticleKey.VALUE: o2}]
        log.debug('CtdpfParserDataParticle: particle=%s', result)
        return result
@attr('UNIT', group='mi')
class TestResultSet(MiUnitTest):
    """
    Unit tests for ResultSet verification against .result.yml fixtures.
    """
    def _get_result_set_file(self, filename):
        """
        return the full path to the result_set_file in
        the same directory as the test file.
        """
        test_dir = os.path.dirname(__file__)
        return os.path.join(test_dir, filename)
    def setUp(self):
        """
        Setup the test case
        """
    def test_ntp_conversion(self):
        # _string_to_ntp_date_time should accept several ISO-8601 variants
        # (with/without fractional seconds, with/without trailing 'Z') and
        # map the Unix epoch to the NTP epoch offset (2208988800 seconds).
        rs = ResultSet(self._get_result_set_file("record_set_files/test_data_1.txt.result.yml"))
        ts = rs._string_to_ntp_date_time("1970-01-01T00:00:00.00Z")
        self.assertEqual(ts, 2208988800.0)
        ts = rs._string_to_ntp_date_time("1970-01-01T00:00:00.00")
        self.assertEqual(ts, 2208988800.0)
        ts = rs._string_to_ntp_date_time("1970-01-01T00:00:00")
        self.assertEqual(ts, 2208988800.0)
        ts = rs._string_to_ntp_date_time("1970-01-01T00:00:00Z")
        self.assertEqual(ts, 2208988800.0)
        ts = rs._string_to_ntp_date_time("1970-01-01T00:01:00.101Z")
        self.assertEqual(ts, 2208988860.101)
        # Non-ISO formats must be rejected.
        self.assertRaises(ValueError, rs._string_to_ntp_date_time, "09/05/2013 02:47:21.000")
    def test_simple_result_set(self):
        """
        Try the first result set with a single record.

        Note: verify() is stateful -- after a failing verify(), report()
        returns a non-None failure summary.
        """
        rs = ResultSet(self._get_result_set_file("record_set_files/test_data_1.txt.result.yml"))
        # Test the happy path
        base_timestamp = 3583861263.0
        particle_a = CtdpfParserDataParticle("10.5914, 4.1870, 161.06, 2693.0",
                                             internal_timestamp=base_timestamp, new_sequence=True)
        particle_b = CtdpfParserDataParticle("10.5915, 4.1871, 161.07, 2693.1",
                                             internal_timestamp=base_timestamp)
        self.assertTrue(rs.verify([particle_a, particle_b]))
        self.assertIsNone(rs.report())
        # test record count mismatch
        self.assertFalse(rs.verify([particle_a]))
        self.assertIsNotNone(rs.report())
        # test out of order record
        self.assertFalse(rs.verify([particle_b, particle_a]))
        self.assertIsNotNone(rs.report())
        # test bad data record
        self.assertFalse(rs.verify([particle_a, particle_a]))
        self.assertIsNotNone(rs.report())
        # multiple data types in result
        self.assertFalse(rs.verify([particle_a, 'foo']))
        self.assertIsNotNone(rs.report())
        # stream name mismatch
        particle_a._data_particle_type = 'foo'
        particle_b._data_particle_type = 'foo'
        self.assertFalse(rs.verify([particle_a, particle_b]))
        self.assertIsNotNone(rs.report())
        # internal timestamp mismatch
        particle_a = CtdpfParserDataParticle("10.5914, 4.1870, 161.06, 2693.0",
                                             internal_timestamp=base_timestamp+1, new_sequence=True)
        particle_b = CtdpfParserDataParticle("10.5915, 4.1871, 161.07, 2693.1",
                                             internal_timestamp=base_timestamp+2)
        self.assertFalse(rs.verify([particle_a, particle_a]))
        self.assertIsNotNone(rs.report())
    def test_simple_result_set_as_dict(self):
        """
        Try the first result set with a single record from dict.
        """
        rs = ResultSet(self._get_result_set_file("record_set_files/test_data_1.txt.result.yml"))
        # Test the happy path
        base_timestamp = 3583861263.0
        # Same scenario as above, but verify() is fed generate_dict() output
        # instead of particle objects.
        particle_a = CtdpfParserDataParticle("10.5914, 4.1870, 161.06, 2693.0",
                                             internal_timestamp=base_timestamp, new_sequence=True).generate_dict()
        particle_b = CtdpfParserDataParticle("10.5915, 4.1871, 161.07, 2693.1",
                                             internal_timestamp=base_timestamp).generate_dict()
        self.assertTrue(rs.verify([particle_a, particle_b]))
        self.assertIsNone(rs.report())
        # test record count mismatch
        self.assertFalse(rs.verify([particle_a]))
        self.assertIsNotNone(rs.report())
        # test out of order record
        self.assertFalse(rs.verify([particle_b, particle_a]))
        self.assertIsNotNone(rs.report())
        # test bad data record
        self.assertFalse(rs.verify([particle_a, particle_a]))
        self.assertIsNotNone(rs.report())
    def test_round(self):
        # Fixture test_data_2 exercises rounded-value comparison in verify().
        rs = ResultSet(self._get_result_set_file("record_set_files/test_data_2.txt.result.yml"))
        # Test the happy path
        base_timestamp = 3583861263.0
        particle_a = CtdpfParserDataParticle("10.5914, 4.1870, 161.06, 2693.0",
                                             internal_timestamp=base_timestamp, new_sequence=True).generate_dict()
        self.assertTrue(rs.verify([particle_a]))
        self.assertIsNone(rs.report())
| bsd-2-clause |
wistoch/meego-app-browser | third_party/pyftpdlib/test/test_ftpd.py | 6 | 63389 | #!/usr/bin/env python
# test_ftpd.py
# ======================================================================
# Copyright (C) 2007 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Giampaolo Rodola' not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# Giampaolo Rodola' DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT Giampaolo Rodola' BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
# This test suite has been run successfully on the following systems:
#
# -----------------------------------------------------------
# System | Python version
# -----------------------------------------------------------
# Linux Ubuntu 2.6.20-15 | 2.4, 2.5
# Linux Kubuntu 8.04 32 & 64 bits | 2.5.2
# Linux Debian 2.4.27-2-386 | 2.3.5
# Windows XP prof SP3 | 2.3, 2.4, 2.5, 2.6-RC2
# Windows Vista Ultimate 64 bit | 2.5.1
# Windows Vista Business 32 bit | 2.5.1
# Windows Server 2008 64bit | 2.5.1
# Windows Mobile 6.1 | PythonCE 2.5
# OS X 10.4.10 | 2.3, 2.4, 2.5
# FreeBSD 7.0 | 2.4, 2.5
# -----------------------------------------------------------
import threading
import unittest
import socket
import os
import time
import re
import tempfile
import ftplib
import random
import warnings
import sys
from pyftpdlib import ftpserver
__release__ = 'pyftpdlib 0.5.0'
# Attempt to use IP rather than hostname (test suite will run a lot faster)
try:
    HOST = socket.gethostbyname('localhost')
except socket.error:
    HOST = 'localhost'
# Credentials for the non-anonymous account served by the test FTP daemon.
USER = 'user'
PASSWD = '12345'
HOME = os.getcwd()
# Reuse the stdlib test-support temp file name when available; fall back to
# a fixed name outside a CPython test environment.
try:
    from test.test_support import TESTFN
except ImportError:
    TESTFN = 'temp-fname'
TESTFN2 = TESTFN + '2'
TESTFN3 = TESTFN + '3'
def try_address(host, port=0):
    """Try to bind a daemon on the given host:port and return True
    if that has been possible.

    Fix: the original leaked the probe server's listening socket; it is now
    closed before returning so the test process does not accumulate open
    sockets (FTPServer inherits close() from asyncore.dispatcher).
    """
    try:
        ftpd = ftpserver.FTPServer((host, port), None)
    except socket.error:
        return False
    else:
        ftpd.close()
        return True
# Probed once at import time; address-family dependent tests consult these.
SUPPORTS_IPV4 = try_address('127.0.0.1')
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1')
class TestAbstractedFS(unittest.TestCase):
    """Test for conversion utility methods of AbstractedFS class."""
    def test_ftpnorm(self):
        # Tests for ftpnorm method: normalization of an FTP-side path against
        # the current working directory (result is absolute, no trailing '/').
        ae = self.assertEquals
        fs = ftpserver.AbstractedFS()
        fs.cwd = '/'
        ae(fs.ftpnorm(''), '/')
        ae(fs.ftpnorm('/'), '/')
        ae(fs.ftpnorm('.'), '/')
        ae(fs.ftpnorm('..'), '/')
        ae(fs.ftpnorm('a'), '/a')
        ae(fs.ftpnorm('/a'), '/a')
        ae(fs.ftpnorm('/a/'), '/a')
        ae(fs.ftpnorm('a/..'), '/')
        ae(fs.ftpnorm('a/b'), '/a/b')
        ae(fs.ftpnorm('a/b/..'), '/a')
        ae(fs.ftpnorm('a/b/../..'), '/')
        fs.cwd = '/sub'
        ae(fs.ftpnorm(''), '/sub')
        ae(fs.ftpnorm('/'), '/')
        ae(fs.ftpnorm('.'), '/sub')
        ae(fs.ftpnorm('..'), '/')
        ae(fs.ftpnorm('a'), '/sub/a')
        ae(fs.ftpnorm('a/'), '/sub/a')
        ae(fs.ftpnorm('a/..'), '/sub')
        ae(fs.ftpnorm('a/b'), '/sub/a/b')
        ae(fs.ftpnorm('a/b/'), '/sub/a/b')
        ae(fs.ftpnorm('a/b/..'), '/sub/a')
        ae(fs.ftpnorm('a/b/../..'), '/sub')
        ae(fs.ftpnorm('a/b/../../..'), '/')
        ae(fs.ftpnorm('//'), '/') # UNC paths must be collapsed
    def test_ftp2fs(self):
        # Tests for ftp2fs method: mapping of an FTP-side path to the
        # corresponding OS-native filesystem path under fs.root.
        ae = self.assertEquals
        fs = ftpserver.AbstractedFS()
        # Build the expected OS-native path under a given root.
        join = lambda x, y: os.path.join(x, y.replace('/', os.sep))
        def goforit(root):
            # Run the full assertion battery against one filesystem root.
            fs.root = root
            fs.cwd = '/'
            ae(fs.ftp2fs(''), root)
            ae(fs.ftp2fs('/'), root)
            ae(fs.ftp2fs('.'), root)
            ae(fs.ftp2fs('..'), root)
            ae(fs.ftp2fs('a'), join(root, 'a'))
            ae(fs.ftp2fs('/a'), join(root, 'a'))
            ae(fs.ftp2fs('/a/'), join(root, 'a'))
            ae(fs.ftp2fs('a/..'), root)
            ae(fs.ftp2fs('a/b'), join(root, r'a/b'))
            ae(fs.ftp2fs('/a/b'), join(root, r'a/b'))
            ae(fs.ftp2fs('/a/b/..'), join(root, 'a'))
            ae(fs.ftp2fs('/a/b/../..'), root)
            fs.cwd = '/sub'
            ae(fs.ftp2fs(''), join(root, 'sub'))
            ae(fs.ftp2fs('/'), root)
            ae(fs.ftp2fs('.'), join(root, 'sub'))
            ae(fs.ftp2fs('..'), root)
            ae(fs.ftp2fs('a'), join(root, 'sub/a'))
            ae(fs.ftp2fs('a/'), join(root, 'sub/a'))
            ae(fs.ftp2fs('a/..'), join(root, 'sub'))
            ae(fs.ftp2fs('a/b'), join(root, 'sub/a/b'))
            ae(fs.ftp2fs('a/b/..'), join(root, 'sub/a'))
            ae(fs.ftp2fs('a/b/../..'), join(root, 'sub'))
            ae(fs.ftp2fs('a/b/../../..'), root)
            ae(fs.ftp2fs('//a'), join(root, 'a')) # UNC paths must be collapsed
        if os.sep == '\\':
            goforit(r'C:\dir')
            goforit('C:\\')
            # on DOS-derived filesystems (e.g. Windows) this is the same
            # as specifying the current drive directory (e.g. 'C:\\')
            goforit('\\')
        elif os.sep == '/':
            goforit('/home/user')
            goforit('/')
        else:
            # os.sep == ':'? Don't know... let's try it anyway
            goforit(os.getcwd())
    def test_fs2ftp(self):
        # Tests for fs2ftp method: the inverse mapping, OS-native path back
        # to the FTP-side virtual path (paths escaping root collapse to '/').
        ae = self.assertEquals
        fs = ftpserver.AbstractedFS()
        join = lambda x, y: os.path.join(x, y.replace('/', os.sep))
        def goforit(root):
            # Run the assertion battery against one filesystem root.
            fs.root = root
            ae(fs.fs2ftp(root), '/')
            ae(fs.fs2ftp(join(root, '/')), '/')
            ae(fs.fs2ftp(join(root, '.')), '/')
            ae(fs.fs2ftp(join(root, '..')), '/') # can't escape from root
            ae(fs.fs2ftp(join(root, 'a')), '/a')
            ae(fs.fs2ftp(join(root, 'a/')), '/a')
            ae(fs.fs2ftp(join(root, 'a/..')), '/')
            ae(fs.fs2ftp(join(root, 'a/b')), '/a/b')
            ae(fs.fs2ftp(join(root, 'a/b')), '/a/b')
            ae(fs.fs2ftp(join(root, 'a/b/..')), '/a')
            ae(fs.fs2ftp(join(root, '/a/b/../..')), '/')
            fs.cwd = '/sub'
            ae(fs.fs2ftp(join(root, 'a/')), '/a')
        if os.sep == '\\':
            goforit(r'C:\dir')
            goforit('C:\\')
            # on DOS-derived filesystems (e.g. Windows) this is the same
            # as specifying the current drive directory (e.g. 'C:\\')
            goforit('\\')
            fs.root = r'C:\dir'
            ae(fs.fs2ftp('C:\\'), '/')
            ae(fs.fs2ftp('D:\\'), '/')
            ae(fs.fs2ftp('D:\\dir'), '/')
        elif os.sep == '/':
            goforit('/')
            # Skip when /__home/user resolves through a symlink, since realpath
            # would then break the expected mappings below.
            assert os.path.realpath('/__home/user') == '/__home/user', \
                                'Test skipped (symlinks not allowed).'
            goforit('/__home/user')
            fs.root = '/__home/user'
            ae(fs.fs2ftp('/__home'), '/')
            ae(fs.fs2ftp('/'), '/')
            ae(fs.fs2ftp('/__home/userx'), '/')
        else:
            # os.sep == ':'? Don't know... let's try it anyway
            goforit(os.getcwd())
    def test_validpath(self):
        # Tests for validpath method: only paths at or below root are valid.
        fs = ftpserver.AbstractedFS()
        fs.root = HOME
        self.failUnless(fs.validpath(HOME))
        self.failUnless(fs.validpath(HOME + '/'))
        self.failIf(fs.validpath(HOME + 'xxx'))
    # The symlink-specific validpath tests are defined only on platforms
    # that support os.symlink.
    if hasattr(os, 'symlink'):
        # Tests for validpath on systems supporting symbolic links.
        def _safe_remove(self, path):
            # convenience function for removing temporary files
            try:
                os.remove(path)
            except os.error:
                pass
        def test_validpath_validlink(self):
            # Test validpath by issuing a symlink pointing to a path
            # inside the root directory.
            fs = ftpserver.AbstractedFS()
            fs.root = HOME
            try:
                open(TESTFN, 'w')
                os.symlink(TESTFN, TESTFN2)
                self.failUnless(fs.validpath(TESTFN))
            finally:
                self._safe_remove(TESTFN)
                self._safe_remove(TESTFN2)
        def test_validpath_external_symlink(self):
            # Test validpath by issuing a symlink pointing to a path
            # outside the root directory.
            fs = ftpserver.AbstractedFS()
            fs.root = HOME
            try:
                # tempfile should create our file in /tmp directory
                # which should be outside the user root. If it is not
                # we just skip the test.
                file = tempfile.NamedTemporaryFile()
                if HOME == os.path.dirname(file.name):
                    return
                os.symlink(file.name, TESTFN)
                self.failIf(fs.validpath(TESTFN))
            finally:
                self._safe_remove(TESTFN)
                file.close()
class TestDummyAuthorizer(unittest.TestCase):
    """Tests for DummyAuthorizer class."""
    # temporarily change warnings to exceptions for the purposes of testing
    def setUp(self):
        # Layout: HOME/tempdir/{TESTFN, subtempdir/TESTFN} -- used to test
        # recursive vs non-recursive permission overrides.
        self.tempdir = tempfile.mkdtemp(dir=HOME)
        self.subtempdir = tempfile.mkdtemp(dir=os.path.join(HOME, self.tempdir))
        self.tempfile = open(os.path.join(self.tempdir, TESTFN), 'w').name
        self.subtempfile = open(os.path.join(self.subtempdir, TESTFN), 'w').name
        warnings.filterwarnings("error")
    def tearDown(self):
        os.remove(self.tempfile)
        os.remove(self.subtempfile)
        os.rmdir(self.subtempdir)
        os.rmdir(self.tempdir)
        warnings.resetwarnings()
    def assertRaisesWithMsg(self, excClass, msg, callableObj, *args, **kwargs):
        # Like assertRaises, but additionally checks the exception message.
        # (Python 2 except/raise syntax.)
        try:
            callableObj(*args, **kwargs)
        except excClass, why:
            if str(why) == msg:
                return
            raise self.failureException("%s != %s" %(str(why), msg))
        else:
            if hasattr(excClass,'__name__'): excName = excClass.__name__
            else: excName = str(excClass)
            raise self.failureException, "%s not raised" % excName
    def test_common_methods(self):
        auth = ftpserver.DummyAuthorizer()
        # create user
        auth.add_user(USER, PASSWD, HOME)
        auth.add_anonymous(HOME)
        # check credentials
        self.failUnless(auth.validate_authentication(USER, PASSWD))
        self.failIf(auth.validate_authentication(USER, 'wrongpwd'))
        # remove them
        auth.remove_user(USER)
        auth.remove_user('anonymous')
        # raise exc if user does not exists
        self.assertRaises(KeyError, auth.remove_user, USER)
        # raise exc if path does not exist
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such directory: "%s"' %'?:\\',
                                auth.add_user, USER, PASSWD, '?:\\')
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such directory: "%s"' %'?:\\',
                                auth.add_anonymous, '?:\\')
        # raise exc if user already exists
        auth.add_user(USER, PASSWD, HOME)
        auth.add_anonymous(HOME)
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'User "%s" already exists' %USER,
                                auth.add_user, USER, PASSWD, HOME)
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'User "anonymous" already exists',
                                auth.add_anonymous, HOME)
        auth.remove_user(USER)
        auth.remove_user('anonymous')
        # raise on wrong permission
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such permission "?"',
                                auth.add_user, USER, PASSWD, HOME, perm='?')
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such permission "?"',
                                auth.add_anonymous, HOME, perm='?')
        # expect warning on write permissions assigned to anonymous user
        for x in "adfmw":
            self.assertRaisesWithMsg(RuntimeWarning,
                                "Write permissions assigned to anonymous user.",
                                auth.add_anonymous, HOME, perm=x)
    def test_override_perm_interface(self):
        auth = ftpserver.DummyAuthorizer()
        auth.add_user(USER, PASSWD, HOME, perm='elr')
        # raise exc if user does not exists
        self.assertRaises(KeyError, auth.override_perm, USER+'w', HOME, 'elr')
        # raise exc if path does not exist or it's not a directory
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such directory: "%s"' %'?:\\',
                                auth.override_perm, USER, '?:\\', 'elr')
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such directory: "%s"' %self.tempfile,
                                auth.override_perm, USER, self.tempfile, 'elr')
        # raise on wrong permission
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                'No such permission "?"', auth.override_perm,
                                USER, HOME, perm='?')
        # expect warning on write permissions assigned to anonymous user
        auth.add_anonymous(HOME)
        for p in "adfmw":
            self.assertRaisesWithMsg(RuntimeWarning,
                                "Write permissions assigned to anonymous user.",
                                auth.override_perm, 'anonymous', HOME, p)
        # raise on attempt to override home directory permissions
        self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                "Can't override home directory permissions",
                                auth.override_perm, USER, HOME, perm='w')
        # raise on attempt to override a path escaping home directory
        if os.path.dirname(HOME) != HOME:
            self.assertRaisesWithMsg(ftpserver.AuthorizerError,
                                    "Path escapes user home directory",
                                    auth.override_perm, USER,
                                    os.path.dirname(HOME), perm='w')
        # try to re-set an overridden permission
        auth.override_perm(USER, self.tempdir, perm='w')
        auth.override_perm(USER, self.tempdir, perm='wr')
    def test_override_perm_recursive_paths(self):
        # With recursive=True the override applies to the directory AND
        # everything beneath it (files and subdirectories).
        auth = ftpserver.DummyAuthorizer()
        auth.add_user(USER, PASSWD, HOME, perm='elr')
        self.assert_(auth.has_perm(USER, 'w', self.tempdir) is False)
        auth.override_perm(USER, self.tempdir, perm='w', recursive=True)
        self.assert_(auth.has_perm(USER, 'w', HOME) is False)
        self.assert_(auth.has_perm(USER, 'w', self.tempdir) is True)
        self.assert_(auth.has_perm(USER, 'w', self.tempfile) is True)
        self.assert_(auth.has_perm(USER, 'w', self.subtempdir) is True)
        self.assert_(auth.has_perm(USER, 'w', self.subtempfile) is True)
        # Name-prefix lookalikes (tempdir + '@') must NOT inherit the override.
        self.assert_(auth.has_perm(USER, 'w', HOME + '@') is False)
        self.assert_(auth.has_perm(USER, 'w', self.tempdir + '@') is False)
        path = os.path.join(self.tempdir + '@', os.path.basename(self.tempfile))
        self.assert_(auth.has_perm(USER, 'w', path) is False)
        # test case-sensitiveness
        if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'):
            self.assert_(auth.has_perm(USER, 'w', self.tempdir.upper()) is True)
    def test_override_perm_not_recursive_paths(self):
        # Without recursive=True the override covers the directory and its
        # direct files, but NOT nested subdirectories.
        auth = ftpserver.DummyAuthorizer()
        auth.add_user(USER, PASSWD, HOME, perm='elr')
        self.assert_(auth.has_perm(USER, 'w', self.tempdir) is False)
        auth.override_perm(USER, self.tempdir, perm='w')
        self.assert_(auth.has_perm(USER, 'w', HOME) is False)
        self.assert_(auth.has_perm(USER, 'w', self.tempdir) is True)
        self.assert_(auth.has_perm(USER, 'w', self.tempfile) is True)
        self.assert_(auth.has_perm(USER, 'w', self.subtempdir) is False)
        self.assert_(auth.has_perm(USER, 'w', self.subtempfile) is False)
        self.assert_(auth.has_perm(USER, 'w', HOME + '@') is False)
        self.assert_(auth.has_perm(USER, 'w', self.tempdir + '@') is False)
        path = os.path.join(self.tempdir + '@', os.path.basename(self.tempfile))
        self.assert_(auth.has_perm(USER, 'w', path) is False)
        # test case-sensitiveness
        if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'):
            self.assert_(auth.has_perm(USER, 'w', self.tempdir.upper()) is True)
class TestCallLater(unittest.TestCase):
    """Tests for CallLater class."""
    def setUp(self):
        # Cancel and drop any tasks left over from previous tests so each
        # test starts with an empty ftpserver scheduler queue.
        for task in ftpserver._tasks:
            if not task.cancelled:
                task.cancel()
        del ftpserver._tasks[:]
    def scheduler(self, timeout=0.01, count=100):
        # Pump the ftpserver scheduler until all tasks have fired or the
        # retry budget is exhausted (guards against a hung test).
        while ftpserver._tasks and count > 0:
            ftpserver._scheduler()
            count -= 1
            time.sleep(timeout)
    def test_interface(self):
        # A cancelled task rejects every further operation with AssertionError.
        fun = lambda: 0
        self.assertRaises(AssertionError, ftpserver.CallLater, -1, fun)
        x = ftpserver.CallLater(3, fun)
        self.assertRaises(AssertionError, x.delay, -1)
        self.assert_(x.cancelled is False)
        x.cancel()
        self.assert_(x.cancelled is True)
        self.assertRaises(AssertionError, x.call)
        self.assertRaises(AssertionError, x.reset)
        self.assertRaises(AssertionError, x.delay, 2)
        self.assertRaises(AssertionError, x.cancel)
    def test_order(self):
        # Tasks fire in due-time order, regardless of registration order.
        l = []
        fun = lambda x: l.append(x)
        for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
            ftpserver.CallLater(x, fun, x)
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
    def test_delay(self):
        # delay() pushes a task's due time later (or earlier) than scheduled.
        l = []
        fun = lambda x: l.append(x)
        ftpserver.CallLater(0.01, fun, 0.01).delay(0.07)
        ftpserver.CallLater(0.02, fun, 0.02).delay(0.08)
        ftpserver.CallLater(0.03, fun, 0.03)
        ftpserver.CallLater(0.04, fun, 0.04)
        ftpserver.CallLater(0.05, fun, 0.05)
        ftpserver.CallLater(0.06, fun, 0.06).delay(0.001)
        self.scheduler()
        self.assertEqual(l, [0.06, 0.03, 0.04, 0.05, 0.01, 0.02])
    def test_reset(self):
        # reset() restarts a task's countdown from "now", so a reset task
        # fires after tasks that were originally due later.
        # will fail on such systems where time.time() does not provide
        # time with a better precision than 1 second.
        l = []
        fun = lambda x: l.append(x)
        ftpserver.CallLater(0.01, fun, 0.01)
        ftpserver.CallLater(0.02, fun, 0.02)
        ftpserver.CallLater(0.03, fun, 0.03)
        x = ftpserver.CallLater(0.04, fun, 0.04)
        ftpserver.CallLater(0.05, fun, 0.05)
        time.sleep(0.1)
        x.reset()
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.05, 0.04])
    def test_cancel(self):
        # Cancelled tasks never fire.
        l = []
        fun = lambda x: l.append(x)
        ftpserver.CallLater(0.01, fun, 0.01).cancel()
        ftpserver.CallLater(0.02, fun, 0.02)
        ftpserver.CallLater(0.03, fun, 0.03)
        ftpserver.CallLater(0.04, fun, 0.04)
        ftpserver.CallLater(0.05, fun, 0.05).cancel()
        self.scheduler()
        self.assertEqual(l, [0.02, 0.03, 0.04])
class TestFtpAuthentication(unittest.TestCase):
    "test: USER, PASS, REIN."
    def setUp(self):
        # Fresh server + client per test; f1 is the upload fixture, f2
        # receives downloaded data for comparison.
        self.server = FTPd()
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.f1 = open(TESTFN, 'w+b')
        self.f2 = open(TESTFN2, 'w+b')
    def tearDown(self):
        self.client.close()
        self.server.stop()
        if not self.f1.closed:
            self.f1.close()
        if not self.f2.closed:
            self.f2.close()
        os.remove(TESTFN)
        os.remove(TESTFN2)
    def test_auth_ok(self):
        self.client.login(user=USER, passwd=PASSWD)
    def test_anon_auth(self):
        # Anonymous login must be case-insensitive and accept any password.
        self.client.login(user='anonymous', passwd='anon@')
        self.client.login(user='AnonYmoUs', passwd='anon@')
        self.client.login(user='anonymous', passwd='')
    # Commented after delayed response on wrong credentials has been
    # introduced because tests take too much to complete.
    ## def test_auth_failed(self):
    ##     self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
    ##     self.assertRaises(ftplib.error_perm, self.client.login, 'wrong', PASSWD)
    ##     self.assertRaises(ftplib.error_perm, self.client.login, 'wrong', 'wrong')
    ## def test_max_auth(self):
    ##     self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
    ##     self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
    ##     self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
    ##     # If authentication fails for 3 times ftpd disconnects the
    ##     # client. We can check if that happens by using self.client.sendcmd()
    ##     # on the 'dead' socket object. If socket object is really
    ##     # closed it should be raised a socket.error exception (Windows)
    ##     # or a EOFError exception (Linux).
    ##     self.assertRaises((socket.error, EOFError), self.client.sendcmd, '')
    def test_rein(self):
        # REIN flushes the authentication state without closing the session.
        self.client.login(user=USER, passwd=PASSWD)
        self.client.sendcmd('rein')
        # user not authenticated, error response expected
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pwd')
        # by logging-in again we should be able to execute a
        # file-system command
        self.client.login(user=USER, passwd=PASSWD)
        self.client.sendcmd('pwd')
    def test_rein_during_transfer(self):
        # A REIN issued mid-transfer must not abort the running transfer,
        # but must flush the account once the transfer completes.
        self.client.login(user=USER, passwd=PASSWD)
        data = 'abcde12345' * 100000
        self.f1.write(data)
        self.f1.close()
        self.client.voidcmd('TYPE I')
        conn = self.client.transfercmd('retr ' + TESTFN)
        rein_sent = 0
        while 1:
            chunk = conn.recv(8192)
            if not chunk:
                break
            self.f2.write(chunk)
            if not rein_sent:
                rein_sent = 1
                # flush account, error response expected
                self.client.sendcmd('rein')
                self.assertRaises(ftplib.error_perm, self.client.dir)
        # a 226 response is expected once tranfer finishes
        self.assertEqual(self.client.voidresp()[:3], '226')
        # account is still flushed, error response is still expected
        self.assertRaises(ftplib.error_perm, self.client.sendcmd,
                          'size ' + TESTFN)
        # by logging-in again we should be able to execute a
        # filesystem command
        self.client.login(user=USER, passwd=PASSWD)
        self.client.sendcmd('pwd')
        self.f2.seek(0)
        self.assertEqual(hash(data), hash (self.f2.read()))
    def test_user(self):
        # Test USER while already authenticated and no transfer
        # is in progress.
        self.client.login(user=USER, passwd=PASSWD)
        self.client.sendcmd('user ' + USER) # authentication flushed
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pwd')
        self.client.sendcmd('pass ' + PASSWD)
        self.client.sendcmd('pwd')
    def test_user_on_transfer(self):
        # Test USER while already authenticated and a transfer is
        # in progress.
        self.client.login(user=USER, passwd=PASSWD)
        data = 'abcde12345' * 100000
        self.f1.write(data)
        self.f1.close()
        self.client.voidcmd('TYPE I')
        conn = self.client.transfercmd('retr ' + TESTFN)
        rein_sent = 0
        while 1:
            chunk = conn.recv(8192)
            if not chunk:
                break
            self.f2.write(chunk)
            # stop transfer while it isn't finished yet
            if not rein_sent:
                rein_sent = 1
                # flush account, expect an error response
                self.client.sendcmd('user ' + USER)
                self.assertRaises(ftplib.error_perm, self.client.dir)
        # a 226 response is expected once tranfer finishes
        self.assertEqual(self.client.voidresp()[:3], '226')
        # account is still flushed, error response is still expected
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pwd')
        # by logging-in again we should be able to execute a
        # filesystem command
        self.client.sendcmd('pass ' + PASSWD)
        self.client.sendcmd('pwd')
        self.f2.seek(0)
        self.assertEqual(hash(data), hash (self.f2.read()))
class TestFtpDummyCmds(unittest.TestCase):
    """Exercise commands that need no data channel or filesystem state:
    TYPE, STRU, MODE, NOOP, SYST, ALLO, HELP, QUIT, REST and OPTS/FEAT.

    Each test drives a freshly started threaded FTPd instance through a
    real ftplib client connection.
    """
    def setUp(self):
        # spin up a dedicated server + authenticated client per test
        self.server = FTPd()
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_type(self):
        # both ASCII and IMAGE types are accepted; junk is rejected
        self.client.sendcmd('type a')
        self.client.sendcmd('type i')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'type ?!?')
    def test_stru(self):
        # argument is case-insensitive
        self.client.sendcmd('stru f')
        self.client.sendcmd('stru F')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stru ?!?')
    def test_mode(self):
        self.client.sendcmd('mode s')
        self.client.sendcmd('mode S')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'mode ?!?')
    def test_noop(self):
        self.client.sendcmd('noop')
    def test_syst(self):
        self.client.sendcmd('syst')
    def test_allo(self):
        self.client.sendcmd('allo x')
    def test_quit(self):
        self.client.sendcmd('quit')
    def test_help(self):
        self.client.sendcmd('help')
        # HELP with any supported command name must also succeed
        cmd = random.choice(ftpserver.proto_cmds.keys())
        self.client.sendcmd('help %s' %cmd)
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'help ?!?')
    def test_rest(self):
        # test error conditions only;
        # restored data-transfer is tested later
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest str')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest -1')
    def test_opts_feat(self):
        # invalid OPTS arguments are rejected
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'opts mlst bad_fact')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'opts mlst type ;')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'opts not_mlst')
        # utility function which used for extracting the MLST "facts"
        # string from the FEAT response
        def mlst():
            resp = self.client.sendcmd('feat')
            return re.search(r'^\s*MLST\s+(\S+)$', resp, re.MULTILINE).group(1)
        # we rely on "type", "perm", "size", and "modify" facts which
        # are those available on all platforms
        self.failUnless('type*;perm*;size*;modify*;' in mlst())
        # enabling a fact via OPTS marks it with '*' in FEAT output
        self.assertEqual(self.client.sendcmd('opts mlst type;'), '200 MLST OPTS type;')
        self.assertEqual(self.client.sendcmd('opts mLSt TypE;'), '200 MLST OPTS type;')
        self.failUnless('type*;perm;size;modify;' in mlst())
        # OPTS MLST with no arg disables every fact
        self.assertEqual(self.client.sendcmd('opts mlst'), '200 MLST OPTS ')
        self.failUnless(not '*' in mlst())
        # unknown facts are silently ignored
        self.assertEqual(self.client.sendcmd('opts mlst fish;cakes;'), '200 MLST OPTS ')
        self.failUnless(not '*' in mlst())
        self.assertEqual(self.client.sendcmd('opts mlst fish;cakes;type;'), \
                         '200 MLST OPTS type;')
        self.failUnless('type*;perm;size;modify;' in mlst())
class TestFtpCmdsSemantic(unittest.TestCase):
    """Check the generic command semantics enforced by the server:
    argument requirements and authentication requirements.
    """
    # commands which must be rejected when given without an argument
    arg_cmds = ('allo','appe','dele','eprt','mdtm','mode','mkd','opts','port',
                'rest','retr','rmd','rnfr','rnto','size', 'stor', 'stru','type',
                'user','xmkd','xrmd')
    def setUp(self):
        self.server = FTPd()
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_arg_cmds(self):
        # test commands requiring an argument
        expected = "501 Syntax error: command needs an argument."
        for cmd in self.arg_cmds:
            self.client.putcmd(cmd)
            resp = self.client.getmultiline()
            self.assertEqual(resp, expected)
    def test_no_arg_cmds(self):
        # test commands accepting no arguments
        expected = "501 Syntax error: command does not accept arguments."
        for cmd in ('abor','cdup','feat','noop','pasv','pwd','quit','rein',
                    'syst','xcup','xpwd'):
            self.client.putcmd(cmd + ' arg')
            resp = self.client.getmultiline()
            self.assertEqual(resp, expected)
    def test_auth_cmds(self):
        # test those commands requiring client to be authenticated
        expected = "530 Log in with USER and PASS first."
        # REIN drops authentication without closing the connection
        self.client.sendcmd('rein')
        for cmd in ftpserver.proto_cmds:
            cmd = cmd.lower()
            if cmd in ('feat','help','noop','user','pass','stat','syst','quit'):
                continue
            if cmd in self.arg_cmds:
                cmd = cmd + ' arg'
            self.client.putcmd(cmd)
            resp = self.client.getmultiline()
            self.assertEqual(resp, expected)
    def test_no_auth_cmds(self):
        # test those commands that do not require client to be authenticated
        self.client.sendcmd('rein')
        for cmd in ('feat','help','noop','stat','syst'):
            self.client.sendcmd(cmd)
        # STAT provided with an argument is equal to LIST hence not allowed
        # if not authenticated
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stat /')
        self.client.sendcmd('quit')
class TestFtpFsOperations(unittest.TestCase):
    """Filesystem commands against a live server:
    PWD, CWD, CDUP, SIZE, RNFR, RNTO, DELE, MKD, RMD, MDTM, STAT.
    """
    def setUp(self):
        self.server = FTPd()
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
        # one scratch file and one scratch directory inside HOME
        self.tempfile = os.path.basename(open(TESTFN, 'w+b').name)
        self.tempdir = os.path.basename(tempfile.mktemp(dir=HOME))
        os.mkdir(self.tempdir)
    def tearDown(self):
        self.client.close()
        self.server.stop()
        # tests may already have renamed/removed the fixtures
        if os.path.exists(self.tempfile):
            os.remove(self.tempfile)
        if os.path.exists(self.tempdir):
            os.rmdir(self.tempdir)
    def test_cwd(self):
        self.client.cwd(self.tempdir)
        self.assertEqual(self.client.pwd(), '/' + self.tempdir)
        self.assertRaises(ftplib.error_perm, self.client.cwd, 'subtempdir')
        # cwd provided with no arguments is supposed to move us to the
        # root directory
        self.client.sendcmd('cwd')
        self.assertEqual(self.client.pwd(), '/')
    def test_pwd(self):
        self.assertEqual(self.client.pwd(), '/')
        self.client.cwd(self.tempdir)
        self.assertEqual(self.client.pwd(), '/' + self.tempdir)
    def test_cdup(self):
        self.client.cwd(self.tempdir)
        self.assertEqual(self.client.pwd(), '/' + self.tempdir)
        self.client.sendcmd('cdup')
        self.assertEqual(self.client.pwd(), '/')
        # make sure we can't escape from root directory
        self.client.sendcmd('cdup')
        self.assertEqual(self.client.pwd(), '/')
    def test_mkd(self):
        tempdir = os.path.basename(tempfile.mktemp(dir=HOME))
        self.client.mkd(tempdir)
        # make sure we can't create directories which already exist
        # (probably not really necessary);
        # let's use a try/except statement to avoid leaving behind
        # orphaned temporary directory in the event of a test failure.
        try:
            self.client.mkd(tempdir)
        except ftplib.error_perm:
            os.rmdir(tempdir) # ok
        else:
            self.fail('ftplib.error_perm not raised.')
    def test_rmd(self):
        self.client.rmd(self.tempdir)
        # RMD on a regular file must fail
        self.assertRaises(ftplib.error_perm, self.client.rmd, self.tempfile)
        # make sure we can't remove the root directory
        self.assertRaises(ftplib.error_perm, self.client.rmd, '/')
    def test_dele(self):
        self.client.delete(self.tempfile)
        # DELE on a directory must fail
        self.assertRaises(ftplib.error_perm, self.client.delete, self.tempdir)
    def test_rnfr_rnto(self):
        # rename file
        tempname = os.path.basename(tempfile.mktemp(dir=HOME))
        self.client.rename(self.tempfile, tempname)
        self.client.rename(tempname, self.tempfile)
        # rename dir
        tempname = os.path.basename(tempfile.mktemp(dir=HOME))
        self.client.rename(self.tempdir, tempname)
        self.client.rename(tempname, self.tempdir)
        # rnfr/rnto over non-existing paths
        bogus = os.path.basename(tempfile.mktemp(dir=HOME))
        self.assertRaises(ftplib.error_perm, self.client.rename, bogus, '/x')
        self.assertRaises(ftplib.error_perm, self.client.rename, self.tempfile, '/')
        # make sure we can't rename root directory
        self.assertRaises(ftplib.error_perm, self.client.rename, '/', '/x')
    def test_mdtm(self):
        self.client.sendcmd('mdtm ' + self.tempfile)
        # make sure we can't use mdtm against directories
        try:
            self.client.sendcmd('mdtm ' + self.tempdir)
        except ftplib.error_perm, err:
            self.failUnless("not retrievable" in str(err))
        else:
            self.fail('Exception not raised')
    def test_size(self):
        self.client.size(self.tempfile)
        # make sure we can't use size against directories
        try:
            self.client.sendcmd('size ' + self.tempdir)
        except ftplib.error_perm, err:
            self.failUnless("not retrievable" in str(err))
        else:
            self.fail('Exception not raised')
class TestFtpRetrieveData(unittest.TestCase):
"test: RETR, REST, LIST, NLST, argumented STAT"
def setUp(self):
self.server = FTPd()
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
self.client.login(USER, PASSWD)
self.f1 = open(TESTFN, 'w+b')
self.f2 = open(TESTFN2, 'w+b')
def tearDown(self):
self.client.close()
self.server.stop()
if not self.f1.closed:
self.f1.close()
if not self.f2.closed:
self.f2.close()
os.remove(TESTFN)
os.remove(TESTFN2)
def test_retr(self):
data = 'abcde12345' * 100000
self.f1.write(data)
self.f1.close()
self.client.retrbinary("retr " + TESTFN, self.f2.write)
self.f2.seek(0)
self.assertEqual(hash(data), hash(self.f2.read()))
def test_restore_on_retr(self):
data = 'abcde12345' * 100000
fname_1 = os.path.basename(self.f1.name)
self.f1.write(data)
self.f1.close()
# look at ftplib.FTP.retrbinary method to understand this mess
self.client.voidcmd('TYPE I')
conn = self.client.transfercmd('retr ' + fname_1)
chunk = conn.recv(len(data) / 2)
self.f2.write(chunk)
conn.close()
# transfer wasn't finished yet so we expect a 426 response
self.assertRaises(ftplib.error_temp, self.client.voidresp)
# resuming transfer by using a marker value greater than the
# file size stored on the server should result in an error
# on retr (RFC-1123)
file_size = self.client.size(fname_1)
self.client.sendcmd('rest %s' %((file_size + 1)))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'retr ' + fname_1)
# test resume
self.client.sendcmd('rest %s' %len(chunk))
self.client.retrbinary("retr " + fname_1, self.f2.write)
self.f2.seek(0)
self.assertEqual(hash(data), hash (self.f2.read()))
def _test_listing_cmds(self, cmd):
"""Tests common to LIST NLST and MLSD commands."""
# assume that no argument has the same meaning of "/"
l1 = l2 = []
self.client.retrlines(cmd, l1.append)
self.client.retrlines(cmd + ' /', l2.append)
self.assertEqual(l1, l2)
if cmd.lower() != 'mlsd':
# if pathname is a file one line is expected
x = []
self.client.retrlines('%s ' %cmd + TESTFN, x.append)
self.assertEqual(len(x), 1)
self.failUnless(''.join(x).endswith(TESTFN))
# non-existent path, 550 response is expected
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.retrlines,
'%s ' %cmd + bogus, lambda x: x)
# for an empty directory we excpect that the data channel is
# opened anyway and that no data is received
x = []
tempdir = os.path.basename(tempfile.mkdtemp(dir=HOME))
try:
self.client.retrlines('%s %s' %(cmd, tempdir), x.append)
self.assertEqual(x, [])
finally:
os.rmdir(tempdir)
def test_nlst(self):
# common tests
self._test_listing_cmds('nlst')
def test_list(self):
# common tests
self._test_listing_cmds('list')
# known incorrect pathname arguments (e.g. old clients) are
# expected to be treated as if pathname would be == '/'
l1 = l2 = l3 = l4 = l5 = []
self.client.retrlines('list /', l1.append)
self.client.retrlines('list -a', l2.append)
self.client.retrlines('list -l', l3.append)
self.client.retrlines('list -al', l4.append)
self.client.retrlines('list -la', l5.append)
tot = (l1, l2, l3, l4, l5)
for x in range(len(tot) - 1):
self.assertEqual(tot[x], tot[x+1])
def test_mlst(self):
# utility function for extracting the line of interest
mlstline = lambda cmd: self.client.voidcmd(cmd).split('\n')[1]
# the fact set must be preceded by a space
self.failUnless(mlstline('mlst').startswith(' '))
# where TVFS is supported, a fully qualified pathname is expected
self.failUnless(mlstline('mlst ' + TESTFN).endswith('/' + TESTFN))
self.failUnless(mlstline('mlst').endswith('/'))
# assume that no argument has the same meaning of "/"
self.assertEqual(mlstline('mlst'), mlstline('mlst /'))
# non-existent path
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, mlstline, bogus)
# test file/dir notations
self.failUnless('type=dir' in mlstline('mlst'))
self.failUnless('type=file' in mlstline('mlst ' + TESTFN))
# let's add some tests for OPTS command
self.client.sendcmd('opts mlst type;')
self.assertEqual(mlstline('mlst'), ' type=dir; /')
# where no facts are present, two leading spaces before the
# pathname are required (RFC-3659)
self.client.sendcmd('opts mlst')
self.assertEqual(mlstline('mlst'), ' /')
def test_mlsd(self):
# common tests
self._test_listing_cmds('mlsd')
dir = os.path.basename(tempfile.mkdtemp(dir=HOME))
try:
try:
self.client.retrlines('mlsd ' + TESTFN, lambda x: x)
except ftplib.error_perm, resp:
# if path is a file a 501 response code is expected
self.assertEqual(str(resp)[0:3], "501")
else:
self.fail("Exception not raised")
finally:
os.rmdir(dir)
def test_stat(self):
# test STAT provided with argument which is equal to LIST
self.client.sendcmd('stat /')
self.client.sendcmd('stat ' + TESTFN)
self.client.putcmd('stat *')
resp = self.client.getmultiline()
self.assertEqual(resp, '550 Globbing not supported.')
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stat ' + bogus)
class TestFtpAbort(unittest.TestCase):
    """ABOR handling in every data-channel state (RFC-959):
    no transfer, listening PASV socket, connected-but-idle channel,
    in-progress transfer, and the Telnet IP/Synch OOB variant.
    """
    def setUp(self):
        self.server = FTPd()
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
        self.f1 = open(TESTFN, 'w+b')
        self.f2 = open(TESTFN2, 'w+b')
    def tearDown(self):
        self.client.close()
        self.server.stop()
        if not self.f1.closed:
            self.f1.close()
        if not self.f2.closed:
            self.f2.close()
        os.remove(self.f1.name)
        os.remove(self.f2.name)
    def test_abor_no_data(self):
        # Case 1: ABOR while no data channel is opened: respond with 225.
        resp = self.client.sendcmd('ABOR')
        self.failUnlessEqual('225 No transfer to abort.', resp)
    def test_abor_pasv(self):
        # Case 2: user sends a PASV, a data-channel socket is listening
        # but not connected, and ABOR is sent: close listening data
        # socket, respond with 225.
        self.client.makepasv()
        respcode = self.client.sendcmd('ABOR')[:3]
        self.failUnlessEqual('225', respcode)
    def test_abor_port(self):
        # Case 3: data channel opened with PASV or PORT, but ABOR sent
        # before a data transfer has been started: close data channel,
        # respond with 225
        self.client.makeport()
        respcode = self.client.sendcmd('ABOR')[:3]
        self.failUnlessEqual('225', respcode)
    def test_abor(self):
        # Case 4: ABOR while a data transfer on DTP channel is in
        # progress: close data channel, respond with 426, respond
        # with 226.
        data = 'abcde12345' * 100000
        self.f1.write(data)
        self.f1.close()
        # this ugly loop construct is to simulate an interrupted
        # transfer since ftplib doesn't like running storbinary()
        # in a separate thread
        self.client.voidcmd('TYPE I')
        conn = self.client.transfercmd('retr ' + TESTFN)
        chunk = conn.recv(len(data) / 2)
        # stop transfer while it isn't finished yet
        self.client.putcmd('ABOR')
        # transfer isn't finished yet so ftpd should respond with 426
        self.assertRaises(ftplib.error_temp, self.client.voidresp)
        # transfer successfully aborted, so should now respond with a 226
        self.failUnlessEqual('226', self.client.voidresp()[:3])
    # only defined where the platform exposes the MSG_OOB socket flag
    if hasattr(socket, 'MSG_OOB'):
        def test_oob_abor(self):
            # Send ABOR by following the RFC-959 directives of sending
            # Telnet IP/Synch sequence as OOB data.
            # On some systems like FreeBSD this happened to be a problem
            # due to a different SO_OOBINLINE behavior.
            # On some platforms (e.g. Python CE) the test may fail
            # although the MSG_OOB constant is defined.
            self.client.sock.sendall(chr(244), socket.MSG_OOB)
            self.client.sock.sendall(chr(242), socket.MSG_OOB)
            self.client.sock.sendall('abor\r\n')
            self.client.sock.settimeout(1)
            self.assertEqual(self.client.getresp()[:3], '225')
class TestFtpStoreData(unittest.TestCase):
    """Upload-side commands against a live server: STOR, STOU, APPE, REST."""
    def setUp(self):
        self.server = FTPd()
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
        self.f1 = open(TESTFN, 'w+b')
        self.f2 = open(TESTFN2, 'w+b')
    def tearDown(self):
        self.client.close()
        self.server.stop()
        if not self.f1.closed:
            self.f1.close()
        if not self.f2.closed:
            self.f2.close()
        os.remove(TESTFN)
        os.remove(TESTFN2)
    def test_stor(self):
        # TESTFN3 is the remote file name
        try:
            data = 'abcde12345' * 100000
            self.f1.write(data)
            self.f1.seek(0)
            # round-trip: upload then download and compare
            self.client.storbinary('stor ' + TESTFN3, self.f1)
            self.client.retrbinary('retr ' + TESTFN3, self.f2.write)
            self.f2.seek(0)
            self.assertEqual(hash(data), hash (self.f2.read()))
        finally:
            # we do not use os.remove because file could be still
            # locked by ftpd thread
            if os.path.exists(TESTFN3):
                self.client.delete(TESTFN3)
    def test_stou(self):
        data = 'abcde12345' * 100000
        self.f1.write(data)
        self.f1.seek(0)
        self.client.voidcmd('TYPE I')
        # filename comes in as "1xx FILE: <filename>"
        filename = self.client.sendcmd('stou').split('FILE: ')[1]
        try:
            # drive the data connection by hand since ftplib has no
            # STOU helper
            sock = self.client.makeport()
            conn, sockaddr = sock.accept()
            while 1:
                buf = self.f1.read(8192)
                if not buf:
                    break
                conn.sendall(buf)
            conn.close()
            # transfer finished, a 226 response is expected
            self.client.voidresp()
            self.client.retrbinary('retr ' + filename, self.f2.write)
            self.f2.seek(0)
            self.assertEqual(hash(data), hash (self.f2.read()))
        finally:
            # we do not use os.remove because file could be
            # still locked by ftpd thread
            if os.path.exists(filename):
                self.client.delete(filename)
    def test_stou_rest(self):
        # watch for STOU preceded by REST, which makes no sense.
        self.client.sendcmd('rest 10')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'stou')
    def test_appe(self):
        # TESTFN3 is the remote file name
        try:
            data1 = 'abcde12345' * 100000
            self.f1.write(data1)
            self.f1.seek(0)
            self.client.storbinary('stor ' + TESTFN3, self.f1)
            data2 = 'fghil67890' * 100000
            self.f1.write(data2)
            # seek past the part already stored so only data2 is appended
            self.f1.seek(self.client.size(TESTFN3))
            self.client.storbinary('appe ' + TESTFN3, self.f1)
            self.client.retrbinary("retr " + TESTFN3, self.f2.write)
            self.f2.seek(0)
            self.assertEqual(hash(data1 + data2), hash (self.f2.read()))
        finally:
            # we do not use os.remove because file could be still
            # locked by ftpd thread
            if os.path.exists(TESTFN3):
                self.client.delete(TESTFN3)
    def test_appe_rest(self):
        # watch for APPE preceded by REST, which makes no sense.
        self.client.sendcmd('rest 10')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'appe x')
    def test_rest_on_stor(self):
        # TESTFN3 is the remote file name
        data = 'abcde12345' * 100000
        self.f1.write(data)
        self.f1.seek(0)
        self.client.voidcmd('TYPE I')
        conn = self.client.transfercmd('stor ' + TESTFN3)
        bytes_sent = 0
        while 1:
            chunk = self.f1.read(8192)
            conn.sendall(chunk)
            bytes_sent += len(chunk)
            # stop transfer while it isn't finished yet
            if bytes_sent >= 524288: # 2^19
                break
            elif not chunk:
                break
        conn.close()
        # transfer wasn't finished yet so we expect a 426 response
        # NOTE(review): unlike test_restore_on_retr this calls voidresp()
        # directly instead of assertRaises(ftplib.error_temp, ...);
        # confirm the server really answers with a 2xx here.
        self.client.voidresp()
        # resuming transfer by using a marker value greater than the
        # file size stored on the server should result in an error
        # on stor
        file_size = self.client.size(TESTFN3)
        self.assertEqual(file_size, bytes_sent)
        self.client.sendcmd('rest %s' %((file_size + 1)))
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stor ' + TESTFN3)
        # resume from where the first upload stopped
        self.client.sendcmd('rest %s' %bytes_sent)
        self.client.storbinary('stor ' + TESTFN3, self.f1)
        self.client.retrbinary('retr ' + TESTFN3, self.f2.write)
        self.f1.seek(0)
        self.f2.seek(0)
        self.assertEqual(hash(self.f1.read()), hash(self.f2.read()))
        self.client.delete(TESTFN3)
class TestTimeouts(unittest.TestCase):
    """Test idle-timeout capabilities of control and data channels.
    Some tests may fail on slow machines.

    Note: there is deliberately no setUp(); each test calls _setUp()
    with the timeout values it needs, and tearDown() restores the
    class-level defaults it mutated on the (shared) handler classes.
    """
    def _setUp(self, idle_timeout=300, data_timeout=300, pasv_timeout=30):
        # timeouts are class attributes of the handler classes, so they
        # must be reset in tearDown to avoid leaking into other tests
        self.server = FTPd()
        self.server.handler.timeout = idle_timeout
        self.server.handler.dtp_handler.timeout = data_timeout
        self.server.handler.passive_dtp.timeout = pasv_timeout
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
    def tearDown(self):
        self.client.close()
        # restore the library defaults mutated in _setUp
        self.server.handler.timeout = 300
        self.server.handler.dtp_handler.timeout = 300
        self.server.handler.passive_dtp.timeout = 30
        self.server.stop()
    def test_idle_timeout(self):
        # Test control channel timeout. The client which does not send
        # any command within the time specified in FTPHandler.timeout is
        # supposed to be kicked off.
        self._setUp(idle_timeout=0.1)
        # fail if no msg is received within 1 second
        self.client.sock.settimeout(1)
        data = self.client.sock.recv(1024)
        self.assertEqual(data, "421 Control connection timed out.\r\n")
        # ensure client has been kicked off
        self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
    def test_data_timeout(self):
        # Test data channel timeout. The client which does not send
        # or receive any data within the time specified in
        # DTPHandler.timeout is supposed to be kicked off.
        self._setUp(data_timeout=0.1)
        addr = self.client.makepasv()
        # NOTE(review): this helper socket is never closed explicitly;
        # it is reclaimed only when the test object is collected.
        s = socket.socket()
        s.connect(addr)
        # fail if no msg is received within 1 second
        self.client.sock.settimeout(1)
        data = self.client.sock.recv(1024)
        self.assertEqual(data, "421 Data connection timed out.\r\n")
        # ensure client has been kicked off
        self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
    def test_idle_data_timeout1(self):
        # Tests that the control connection timeout is suspended while
        # the data channel is opened
        self._setUp(idle_timeout=0.1, data_timeout=0.2)
        addr = self.client.makepasv()
        s = socket.socket()
        s.connect(addr)
        # fail if no msg is received within 1 second
        self.client.sock.settimeout(1)
        data = self.client.sock.recv(1024)
        self.assertEqual(data, "421 Data connection timed out.\r\n")
        # ensure client has been kicked off
        self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
    def test_idle_data_timeout2(self):
        # Tests that the control connection timeout is restarted after
        # data channel has been closed
        self._setUp(idle_timeout=0.1, data_timeout=0.2)
        addr = self.client.makepasv()
        s = socket.socket()
        s.connect(addr)
        # close data channel
        self.client.sendcmd('abor')
        self.client.sock.settimeout(1)
        data = self.client.sock.recv(1024)
        self.assertEqual(data, "421 Control connection timed out.\r\n")
        # ensure client has been kicked off
        self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
    def test_pasv_timeout(self):
        # Test pasv data channel timeout. The client which does not connect
        # to the listening data socket within the time specified in
        # PassiveDTP.timeout is supposed to receive a 421 response.
        self._setUp(pasv_timeout=0.1)
        self.client.makepasv()
        # fail if no msg is received within 1 second
        self.client.sock.settimeout(1)
        data = self.client.sock.recv(1024)
        self.assertEqual(data, "421 Passive data channel timed out.\r\n")
        # client is not expected to be kicked off
        self.client.sendcmd('noop')
class TestMaxConnections(unittest.TestCase):
    """Test maximum connections (FTPServer.max_cons)."""
    def setUp(self):
        self.server = FTPd()
        # cap at 3 simultaneous connections (control + data count)
        self.server.server.max_cons = 3
        self.server.start()
    def tearDown(self):
        # 0 means "no limit"; restore the default for other tests
        self.server.server.max_cons = 0
        self.server.stop()
    def test_max_connections(self):
        c1 = ftplib.FTP()
        c2 = ftplib.FTP()
        c3 = ftplib.FTP()
        try:
            # two control connections fit; the third must be refused
            c1.connect(self.server.host, self.server.port)
            c2.connect(self.server.host, self.server.port)
            self.assertRaises(ftplib.error_temp, c3.connect, self.server.host,
                              self.server.port)
            # with passive data channel established
            c2.close()
            c1.login(USER, PASSWD)
            c1.makepasv()
            self.assertRaises(ftplib.error_temp, c2.connect, self.server.host,
                              self.server.port)
            # with passive data socket waiting for connection
            c1.login(USER, PASSWD)
            c1.sendcmd('pasv')
            self.assertRaises(ftplib.error_temp, c2.connect, self.server.host,
                              self.server.port)
            # with active data channel established
            c1.login(USER, PASSWD)
            c1.makeport()
            self.assertRaises(ftplib.error_temp, c2.connect, self.server.host,
                              self.server.port)
        finally:
            c1.close()
            c2.close()
            c3.close()
class _TestNetworkProtocols(unittest.TestCase):
    """Test PASV, EPSV, PORT and EPRT commands.
    Do not use this class directly. Let TestIPv4Environment and
    TestIPv6Environment classes use it instead.
    """
    # subclasses override with a concrete address ('127.0.0.1' / '::1')
    HOST = HOST
    def setUp(self):
        self.server = FTPd(self.HOST)
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
        self.client.login(USER, PASSWD)
        # RFC-2428 address-family codes: "1" == IPv4, "2" == IPv6
        if self.client.af == socket.AF_INET:
            self.proto = "1"
            self.other_proto = "2"
        else:
            self.proto = "2"
            self.other_proto = "1"
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def cmdresp(self, cmd):
        """Send a command and return response, also if the command failed."""
        try:
            return self.client.sendcmd(cmd)
        except ftplib.Error, err:
            return str(err)
    def test_eprt(self):
        # test wrong proto
        try:
            self.client.sendcmd('eprt |%s|%s|%s|' %(self.other_proto,
                                self.server.host, self.server.port))
        except ftplib.error_perm, err:
            # 522 == "network protocol not supported"
            self.assertEqual(str(err)[0:3], "522")
        else:
            self.fail("Exception not raised")
        # test bad args
        msg = "501 Invalid EPRT format."
        # len('|') > 3
        self.assertEqual(self.cmdresp('eprt ||||'), msg)
        # len('|') < 3
        self.assertEqual(self.cmdresp('eprt ||'), msg)
        # port > 65535
        self.assertEqual(self.cmdresp('eprt |%s|%s|65536|' %(self.proto,
                                                             self.HOST)), msg)
        # port < 0
        self.assertEqual(self.cmdresp('eprt |%s|%s|-1|' %(self.proto,
                                                          self.HOST)), msg)
        # port < 1024
        self.assertEqual(self.cmdresp('eprt |%s|%s|222|' %(self.proto,
                         self.HOST)), "501 Can't connect over a privileged port.")
        # test connection
        sock = socket.socket(self.client.af, socket.SOCK_STREAM)
        sock.bind((self.client.sock.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(2)
        ip, port = sock.getsockname()[:2]
        self.client.sendcmd('eprt |%s|%s|%s|' %(self.proto, ip, port))
        try:
            try:
                sock.accept()
            except socket.timeout:
                self.fail("Server didn't connect to passive socket")
        finally:
            sock.close()
    def test_epsv(self):
        # test wrong proto
        try:
            self.client.sendcmd('epsv ' + self.other_proto)
        except ftplib.error_perm, err:
            self.assertEqual(str(err)[0:3], "522")
        else:
            self.fail("Exception not raised")
        # test connection
        for cmd in ('EPSV', 'EPSV ' + self.proto):
            host, port = ftplib.parse229(self.client.sendcmd(cmd),
                         self.client.sock.getpeername())
            s = socket.socket(self.client.af, socket.SOCK_STREAM)
            s.settimeout(2)
            try:
                s.connect((host, port))
                self.client.sendcmd('abor')
            finally:
                s.close()
    def test_epsv_all(self):
        # after EPSV ALL only EPSV is allowed for data setup (RFC-2428)
        self.client.sendcmd('epsv all')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pasv')
        self.assertRaises(ftplib.error_perm, self.client.sendport, self.HOST, 2000)
        self.assertRaises(ftplib.error_perm, self.client.sendcmd,
                          'eprt |%s|%s|%s|' %(self.proto, self.HOST, 2000))
class TestIPv4Environment(_TestNetworkProtocols):
    """Test PASV, EPSV, PORT and EPRT commands.
    Runs tests contained in _TestNetworkProtocols class by using IPv4
    plus some additional specific tests.
    """
    HOST = '127.0.0.1'
    def test_port_v4(self):
        # test connection
        self.client.makeport()
        self.client.sendcmd('abor')
        # test bad arguments
        ae = self.assertEqual
        msg = "501 Invalid PORT format."
        ae(self.cmdresp('port 127,0,0,1,1.1'), msg)    # sep != ','
        ae(self.cmdresp('port X,0,0,1,1,1'), msg)      # value != int
        ae(self.cmdresp('port 127,0,0,1,1,1,1'), msg)  # len(args) > 6
        ae(self.cmdresp('port 127,0,0,1'), msg)        # len(args) < 6
        ae(self.cmdresp('port 256,0,0,1,1,1'), msg)    # oct > 255
        ae(self.cmdresp('port 127,0,0,1,256,1'), msg)  # port > 65535
        ae(self.cmdresp('port 127,0,0,1,-1,0'), msg)   # port < 0
        msg = "501 Can't connect over a privileged port."
        ae(self.cmdresp('port %s,1,1' %self.HOST.replace('.',',')),msg) # port < 1024
        # skip the foreign-address check if HOST happens to be 1.2.3.4
        if "1.2.3.4" != self.HOST:
            msg = "501 Can't connect to a foreign address."
            ae(self.cmdresp('port 1,2,3,4,4,4'), msg)
    def test_eprt_v4(self):
        # EPRT pointing at a host other than the client's must be refused
        self.assertEqual(self.cmdresp('eprt |1|0.10.10.10|2222|'),
                         "501 Can't connect to a foreign address.")
    def test_pasv_v4(self):
        host, port = ftplib.parse227(self.client.sendcmd('pasv'))
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(2)
        try:
            s.connect((host, port))
        finally:
            s.close()
class TestIPv6Environment(_TestNetworkProtocols):
    """Test PASV, EPSV, PORT and EPRT commands.
    Runs tests contained in _TestNetworkProtocols class by using IPv6
    plus some additional specific tests.
    """
    HOST = '::1'
    def test_port_v6(self):
        # legacy PORT cannot carry an IPv6 address: 425 expected
        self.assertRaises(ftplib.error_temp, self.client.sendport,
                          self.server.host, self.server.port)
    def test_pasv_v6(self):
        # legacy PASV cannot advertise an IPv6 address: 425 expected
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'pasv')
    def test_eprt_v6(self):
        self.assertEqual(self.cmdresp('eprt |2|::xxx|2222|'),
                         "501 Can't connect to a foreign address.")
class FTPd(threading.Thread):
    """A threaded FTP server used for running tests."""
    def __init__(self, host=HOST, port=0, verbose=False):
        threading.Thread.__init__(self)
        self.active = False
        if not verbose:
            # silence pyftpdlib's loggers unless explicitly requested
            ftpserver.log = ftpserver.logline = lambda x: x
        self.authorizer = ftpserver.DummyAuthorizer()
        self.authorizer.add_user(USER, PASSWD, HOME, perm='elradfmw')  # full perms
        self.authorizer.add_anonymous(HOME)
        self.handler = ftpserver.FTPHandler
        self.handler.authorizer = self.authorizer
        # port=0 lets the OS pick a free ephemeral port
        self.server = ftpserver.FTPServer((host, port), self.handler)
        self.host, self.port = self.server.socket.getsockname()[:2]
        self.active_lock = threading.Lock()
    def start(self):
        assert not self.active
        # the event lets start() block until run() has actually begun
        self.__flag = threading.Event()
        threading.Thread.start(self)
        self.__flag.wait()
    def run(self):
        self.active = True
        self.__flag.set()
        # poll in tiny slices so stop() takes effect promptly; the lock
        # keeps each serve_forever slice atomic w.r.t. other users
        while self.active:
            self.active_lock.acquire()
            self.server.serve_forever(timeout=0.001, count=1)
            self.active_lock.release()
        self.server.close_all(ignore_all=True)
    def stop(self):
        assert self.active
        self.active = False
        # wait for the serving loop to drain and close all sockets
        self.join()
def remove_test_files():
    """Best-effort removal of the temporary files used by the tests."""
    for path in (TESTFN, TESTFN2, TESTFN3):
        try:
            os.remove(path)
        except os.error:
            # the file may never have been created; ignore
            pass
def test_main(tests=None):
    """Build and run the test suite.

    tests -- optional list of TestCase classes; defaults to the full
    suite, with the IPv4/IPv6 network classes added only when the
    platform supports the corresponding address family.
    """
    test_suite = unittest.TestSuite()
    if tests is None:
        tests = [
                 TestAbstractedFS,
                 TestDummyAuthorizer,
                 TestCallLater,
                 TestFtpAuthentication,
                 TestFtpDummyCmds,
                 TestFtpCmdsSemantic,
                 TestFtpFsOperations,
                 TestFtpRetrieveData,
                 TestFtpAbort,
                 TestFtpStoreData,
                 TestTimeouts,
                 TestMaxConnections
                 ]
        if SUPPORTS_IPV4:
            tests.append(TestIPv4Environment)
        if SUPPORTS_IPV6:
            tests.append(TestIPv6Environment)
    for test in tests:
        test_suite.addTest(unittest.makeSuite(test))
    # clean up both before and after: a previous aborted run may have
    # left temporary files behind
    remove_test_files()
    unittest.TextTestRunner(verbosity=2).run(test_suite)
    remove_test_files()
if __name__ == '__main__':
    # run the full suite when executed as a script
    test_main()
| bsd-3-clause |
hoosteeno/fjord | vendor/src/html5lib-python/html5lib/tests/test_treewalkers.py | 4 | 10495 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import unittest
import warnings
from difflib import unified_diff
try:
    unittest.TestCase.assertEqual
except AttributeError:
    # very old unittest versions only provide assertEquals; alias it so
    # the rest of this module can use the modern name
    unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
def PullDOMAdapter(node):
    """Translate a DOM (sub)tree into a PullDOM-style event stream.

    Yields (event, node) pairs using the xml.dom.pulldom event
    constants. Documents and fragments are transparent (only their
    children are emitted); DOCTYPE nodes and any unrecognised node
    type raise NotImplementedError.
    """
    from xml.dom import Node
    from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS

    kind = node.nodeType
    if kind in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
        # container nodes: emit events for the children only
        for child in node.childNodes:
            for event in PullDOMAdapter(child):
                yield event
    elif kind == Node.DOCUMENT_TYPE_NODE:
        raise NotImplementedError('DOCTYPE nodes are not supported by PullDOM')
    elif kind == Node.COMMENT_NODE:
        yield COMMENT, node
    elif kind in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
        yield CHARACTERS, node
    elif kind == Node.ELEMENT_NODE:
        # element: open tag, recurse into children, close tag
        yield START_ELEMENT, node
        for child in node.childNodes:
            for event in PullDOMAdapter(child):
                yield event
        yield END_ELEMENT, node
    else:
        raise NotImplementedError('Node type not supported: ' + str(node.nodeType))
# Registry of tree backends to exercise: maps a backend name to its
# treebuilder, treewalker and (optionally) an adapter that converts the
# built tree into the form the walker expects. Further backends are
# appended below as their imports succeed.
treeTypes = {
    'DOM': {'builder': treebuilders.getTreeBuilder('dom'),
            'walker': treewalkers.getTreeWalker('dom')},
    'PullDOM': {'builder': treebuilders.getTreeBuilder('dom'),
                'adapter': PullDOMAdapter,
                'walker': treewalkers.getTreeWalker('pulldom')},
}
# Try whatever etree implementations are available from a list that are
#"supposed" to work; each one that imports cleanly is registered in
# treeTypes, and missing implementations are silently skipped.
try:
    import xml.etree.ElementTree as ElementTree
except ImportError:
    pass
else:
    # pure-python ElementTree backend
    treeTypes['ElementTree'] = \
        {'builder': treebuilders.getTreeBuilder('etree', ElementTree),
         'walker': treewalkers.getTreeWalker('etree', ElementTree)}

try:
    import xml.etree.cElementTree as ElementTree
except ImportError:
    pass
else:
    # C-accelerated ElementTree backend
    treeTypes['cElementTree'] = \
        {'builder': treebuilders.getTreeBuilder('etree', ElementTree),
         'walker': treewalkers.getTreeWalker('etree', ElementTree)}

try:
    import lxml.etree as ElementTree  # flake8: noqa
except ImportError:
    pass
else:
    # lxml has its own native builder/walker pair
    treeTypes['lxml_native'] = \
        {'builder': treebuilders.getTreeBuilder('lxml'),
         'walker': treewalkers.getTreeWalker('lxml')}
try:
    from genshi.core import QName, Attrs
    from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
except ImportError:
    pass
else:
    def GenshiAdapter(tree):
        """Convert a DOM tree (via the 'dom' treewalker) into a Genshi
        event stream of (kind, data, pos) tuples.

        Adjacent Characters/SpaceCharacters tokens are coalesced into a
        single TEXT event; positions are unknown, so (None, -1, -1) is
        used throughout.
        """
        text = None
        for token in treewalkers.getTreeWalker('dom')(tree):
            type = token['type']
            if type in ('Characters', 'SpaceCharacters'):
                # buffer text so consecutive tokens merge into one event
                if text is None:
                    text = token['data']
                else:
                    text += token['data']
            elif text is not None:
                # a non-text token ends the buffered run: flush it first
                yield TEXT, text, (None, -1, -1)
                text = None

            if type in ('StartTag', 'EmptyTag'):
                if token['namespace']:
                    name = '{%s}%s' % (token['namespace'], token['name'])
                else:
                    name = token['name']
                # namespaced attribute names are rendered in Clark notation
                attrs = Attrs([(QName('{%s}%s' % attr if attr[0] is not None else attr[1]), value)
                               for attr, value in token['data'].items()])
                yield (START, (QName(name), attrs), (None, -1, -1))
                if type == 'EmptyTag':
                    # an empty tag is a start immediately followed by an end
                    type = 'EndTag'

            if type == 'EndTag':
                if token['namespace']:
                    name = '{%s}%s' % (token['namespace'], token['name'])
                else:
                    name = token['name']

                yield END, QName(name), (None, -1, -1)

            elif type == 'Comment':
                yield COMMENT, token['data'], (None, -1, -1)

            elif type == 'Doctype':
                yield DOCTYPE, (token['name'], token['publicId'],
                                token['systemId']), (None, -1, -1)

            else:
                pass # FIXME: What to do?

        # flush any text still buffered at end of stream
        if text is not None:
            yield TEXT, text, (None, -1, -1)

    treeTypes['genshi'] = \
        {'builder': treebuilders.getTreeBuilder('dom'),
         'adapter': GenshiAdapter,
         'walker': treewalkers.getTreeWalker('genshi')}
import re

# Matches a run of two or more "name=value" attribute lines that share the
# same leading indentation, as emitted by treewalkers.pprint, so the run
# can be normalised by sorting.
attrlist = re.compile(r'^(\s+)\w+=.*(\n\1\w+=.*)+', re.M)


def sortattrs(x):
    """re.sub callback: return the matched attribute lines in sorted order."""
    return '\n'.join(sorted(x.group(0).split('\n')))
class TokenTestCase(unittest.TestCase):
    """Sanity check: every registered tree type round-trips a small
    document to the same canonical token stream."""

    def test_all_tokens(self):
        expected = [
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
            {'data': 'a', 'type': 'Characters'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
            {'data': 'b', 'type': 'Characters'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
            {'data': 'c', 'type': 'Characters'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
        ]
        for treeName, treeCls in treeTypes.items():
            p = html5parser.HTMLParser(tree=treeCls['builder'])
            document = p.parse('<html><head></head><body>a<div>b</div>c</body></html>')
            # Apply the optional adapter (identity when none registered).
            document = treeCls.get('adapter', lambda x: x)(document)
            output = treeCls['walker'](document)
            # NOTE(review): zip() stops at the shorter stream, so extra
            # trailing tokens from a walker would go unchecked.
            for expectedToken, outputToken in zip(expected, output):
                self.assertEqual(expectedToken, outputToken)
def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
    """Parse *input* (as a fragment inside *innerHTML* when given), walk
    the resulting tree and compare the pretty-printed token stream with
    *expected*.

    Attribute lines are sorted on both sides (attrlist/sortattrs) so
    attribute ordering differences between tree types don't matter.
    *errors* is accepted for signature compatibility but not checked here.
    """
    # Turn DataLossWarning into an exception so lossy parses are detected.
    warnings.resetwarnings()
    warnings.simplefilter('error')
    try:
        p = html5parser.HTMLParser(tree=treeClass['builder'])
        if innerHTML:
            document = p.parseFragment(input, innerHTML)
        else:
            document = p.parse(input)
    except constants.DataLossWarning:
        # Ignore testcases we know we don't pass
        return
    document = treeClass.get('adapter', lambda x: x)(document)
    try:
        output = treewalkers.pprint(treeClass['walker'](document))
        output = attrlist.sub(sortattrs, output)
        expected = attrlist.sub(sortattrs, convertExpected(expected))
        diff = ''.join(unified_diff([line + '\n' for line in expected.splitlines()],
                                    [line + '\n' for line in output.splitlines()],
                                    'Expected', 'Received'))
        assert expected == output, '\n'.join([
            '', 'Input:', input,
            '', 'Expected:', expected,
            '', 'Received:', output,
            '', 'Diff:', diff,
        ])
    except NotImplementedError:
        pass  # Amnesty for those that confess...
def test_treewalker():
    """Nose-style test generator: yield one runTreewalkerTest per tree
    type per tree-construction test case."""
    sys.stdout.write('Testing tree walkers ' + ' '.join(list(treeTypes.keys())) + '\n')
    for treeName, treeCls in treeTypes.items():
        files = get_data_files('tree-construction')
        for filename in files:
            testName = os.path.basename(filename).replace('.dat', '')
            if testName in ('template',):
                # <template> test data is skipped entirely.
                continue
            tests = TestData(filename, 'data')
            for index, test in enumerate(tests):
                (input, errors,
                 innerHTML, expected) = [test[key] for key in ('data', 'errors',
                                                               'document-fragment',
                                                               'document')]
                errors = errors.split('\n')
                yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
def set_attribute_on_first_child(docfrag, name, value, treeName):
    """Naively set attribute *name* = *value* on the first child of the
    document fragment *docfrag*, dispatching on *treeName*.

    Unknown tree names fall back to the DOM protocol; if that raises
    AttributeError the ElementTree protocol is tried instead.
    """
    def et_setter(fragment):
        # ElementTree-style: first child is fragment[0], setter is .set
        return fragment[0].set

    def dom_setter(fragment):
        # DOM-style: first child is .firstChild, setter is .setAttribute
        return fragment.firstChild.setAttribute

    dispatch = {
        'ElementTree': et_setter,
        'cElementTree': et_setter,
        'DOM': dom_setter,
    }
    try:
        dispatch.get(treeName, dom_setter)(docfrag)(name, value)
    except AttributeError:
        # e.g. an etree fragment arriving under an unrecognised tree name.
        et_setter(docfrag)(name, value)
def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
    """tests what happens when we add attributes to the intext

    Parses *intext* as a fragment, sets each (name, value) pair from
    *attrs_to_add* on the first child, then walks the tree and checks
    the (attribute-sorted) pretty-printed output appears in *expected*.
    """
    treeName, treeClass = tree
    parser = html5parser.HTMLParser(tree=treeClass['builder'])
    document = parser.parseFragment(intext)
    for nom, val in attrs_to_add:
        set_attribute_on_first_child(document, nom, val, treeName)
    document = treeClass.get('adapter', lambda x: x)(document)
    output = treewalkers.pprint(treeClass['walker'](document))
    # Normalise attribute ordering before comparing.
    output = attrlist.sub(sortattrs, output)
    if not output in expected:
        raise AssertionError('TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s' % (treeName, expected, output))
def test_treewalker_six_mix():
    """Str/Unicode mix. If str attrs added to tree"""
    # On Python 2.x string literals are of type str. Unless, like this
    # file, the programmer imports unicode_literals from __future__.
    # In that case, string literals become objects of type unicode.
    # This test simulates a Py2 user, modifying attributes on a document
    # fragment but not using the u'' syntax nor importing unicode_literals
    sm_tests = [
        ('<a href="http://example.com">Example</a>',
         [(str('class'), str('test123'))],
         '<a>\n class="test123"\n href="http://example.com"\n "Example"'),
        ('<link href="http://example.com/cow">',
         [(str('rel'), str('alternate'))],
         '<link>\n href="http://example.com/cow"\n rel="alternate"\n "Example"')
    ]
    # Nose-style generator: every tree type against every test vector.
    for tree in treeTypes.items():
        for intext, attrs, expected in sm_tests:
            yield runTreewalkerEditTest, intext, expected, attrs, tree
| bsd-3-clause |
akevinlee/buildmeister | public/include/fckeditor/editor/filemanager/connectors/py/upload.py | 93 | 3123 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload(FCKeditorConnectorBase,
                           UploadFileCommandMixin,
                           BaseHttpMixin, BaseHtmlMixin):
    """Connector implementing FCKeditor's "QuickUpload" command.

    Validates the request against the configuration, resolves the
    server-side upload folder (creating it when missing) and delegates
    the actual file handling to UploadFileCommandMixin.uploadFile().
    """

    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type', 'File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder", ""))
        # Check for invalid paths (error code 102 = invalid folder name)
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if (not command in Config.ConfigAllowedCommands):
            return self.sendUploadResults(1, '', '', 'The %s command isn\'t allowed' % command)
        if (not resourceType in Config.ConfigAllowedTypes):
            return self.sendUploadResults(1, '', '', 'Invalid type specified')
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder:  # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUG FIX: the method name was duplicated
                # ("createServerFoldercreateServerFolder"), which always
                # raised AttributeError here, so uploads into a missing
                # folder could never succeed.
                self.createServerFolder(self.userFilesFolder)
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # Emit the HTTP headers collected by the connector, a blank line,
        # then the response body (Python 2 CGI style).
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # On any failure, report a plain-text traceback to the client.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| apache-2.0 |
gmatteo/pymatgen | pymatgen/entries/tests/test_entry_tools.py | 5 | 1991 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
from pathlib import Path
from monty.serialization import dumpfn, loadfn
from pymatgen.core.periodic_table import Element
from pymatgen.entries.entry_tools import EntrySet, group_entries_by_structure
from pymatgen.util.testing import PymatgenTest
class FuncTest(unittest.TestCase):
    """Tests for module-level functions in pymatgen.entries.entry_tools."""

    def test_group_entries_by_structure(self):
        entries = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"))
        groups = group_entries_by_structure(entries)
        # Known grouping of the TiO2 fixture: mostly singletons plus a
        # couple of multi-entry groups.
        self.assertEqual(sorted([len(g) for g in groups]), [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 4])
        self.assertLess(len(groups), len(entries))
        # Make sure no entries are left behind
        self.assertEqual(sum([len(g) for g in groups]), len(entries))
class EntrySetTest(unittest.TestCase):
    """Tests for the EntrySet container."""

    def setUp(self):
        # Li-Fe-P-O fixture shipped with the pymatgen test files.
        entries = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li-Fe-P-O_entries.json"))
        self.entry_set = EntrySet(entries)

    def test_chemsys(self):
        self.assertEqual(self.entry_set.chemsys, {"Fe", "Li", "O", "P"})

    def test_get_subset(self):
        entries = self.entry_set.get_subset_in_chemsys(["Li", "O"])
        for e in entries:
            # Every entry in the subset must contain only Li and/or O.
            self.assertTrue(set([Element.Li, Element.O]).issuperset(e.composition.keys()))
        # F is not part of the parent chemical system, so this must raise.
        self.assertRaises(ValueError, self.entry_set.get_subset_in_chemsys, ["Fe", "F"])

    def test_remove_non_ground_states(self):
        l = len(self.entry_set)
        self.entry_set.remove_non_ground_states()
        # At least one non-ground-state entry should have been removed.
        self.assertLess(len(self.entry_set), l)

    def test_as_dict(self):
        # Round-trip through JSON serialization preserves the entry count.
        dumpfn(self.entry_set, "temp_entry_set.json")
        entry_set = loadfn("temp_entry_set.json")
        self.assertEqual(len(entry_set), len(self.entry_set))
        os.remove("temp_entry_set.json")
# Allow running this test module directly.
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| mit |
rebost/django | tests/modeltests/user_commands/tests.py | 8 | 1681 | import sys
from StringIO import StringIO
from django.core import management
from django.core.management.base import CommandError
from django.test import TestCase
from django.utils import translation
class CommandTests(TestCase):
    """Tests for invoking custom management commands via call_command."""

    def test_command(self):
        out = StringIO()
        management.call_command('dance', stdout=out)
        self.assertEqual(out.getvalue(),
                         "I don't feel like dancing Rock'n'Roll.\n")

    def test_command_style(self):
        # The 'style' option changes the command's output.
        out = StringIO()
        management.call_command('dance', style='Jive', stdout=out)
        self.assertEqual(out.getvalue(),
                         "I don't feel like dancing Jive.\n")

    def test_language_preserved(self):
        # call_command must not deactivate the currently active translation.
        out = StringIO()
        with translation.override('fr'):
            management.call_command('dance', stdout=out)
            self.assertEqual(translation.get_language(), 'fr')

    def test_explode(self):
        """ Test that an unknown command raises CommandError """
        self.assertRaises(CommandError, management.call_command, ('explode',))

    def test_system_exit(self):
        """ Exception raised in a command should raise CommandError with
        call_command, but SystemExit when run from command line
        """
        with self.assertRaises(CommandError):
            management.call_command('dance', example="raise")
        # Capture stderr while simulating a command-line invocation.
        old_stderr = sys.stderr
        sys.stderr = err = StringIO()
        try:
            with self.assertRaises(SystemExit):
                management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
        finally:
            sys.stderr = old_stderr
        self.assertIn("CommandError", err.getvalue())
| bsd-3-clause |
brian-l/django-1.4.10 | tests/regressiontests/introspection/tests.py | 27 | 6202 | from __future__ import absolute_import
from functools import update_wrapper
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from .models import Reporter, Article
#
# The introspection module is optional, so methods tested here might raise
# NotImplementedError. This is perfectly acceptable behavior for the backend
# in question, but the tests need to handle this without failing. Ideally we'd
# skip these tests, but until #4788 is done we'll just ignore them.
#
# The easiest way to accomplish this is to decorate every test case with a
# wrapper that ignores the exception.
#
# The metaclass is just for fun.
#
def ignore_not_implemented(func):
    """Wrap *func* so that NotImplementedError is swallowed.

    The wrapped callable returns whatever *func* returns, or None when
    *func* raises NotImplementedError (used for optional backend
    introspection methods; see the module comment above).
    """
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except NotImplementedError:
            return None
        return result
    update_wrapper(wrapper, func)
    return wrapper
class IgnoreNotimplementedError(type):
    """Metaclass that wraps every ``test*`` attribute with
    ignore_not_implemented, so optional backend methods that raise
    NotImplementedError don't fail the test suite."""

    def __new__(cls, name, bases, attrs):
        decorated = {
            key: ignore_not_implemented(value) if key.startswith('test') else value
            for key, value in attrs.items()
        }
        return type.__new__(cls, name, bases, decorated)
class IntrospectionTests(TestCase):
    """Exercise the database backend's introspection API."""

    # Wrap each test so NotImplementedError from optional backend methods
    # is ignored rather than failing (see module comment above).
    __metaclass__ = IgnoreNotimplementedError

    def test_table_names(self):
        tl = connection.introspection.table_names()
        self.assertTrue(Reporter._meta.db_table in tl,
                        "'%s' isn't in table_list()." % Reporter._meta.db_table)
        self.assertTrue(Article._meta.db_table in tl,
                        "'%s' isn't in table_list()." % Article._meta.db_table)

    def test_django_table_names(self):
        # A table created outside Django must not be reported as a
        # Django-managed table.
        cursor = connection.cursor()
        cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
        tl = connection.introspection.django_table_names()
        cursor.execute("DROP TABLE django_ixn_test_table;")
        self.assertTrue('django_ixn_testcase_table' not in tl,
                        "django_table_names() returned a non-Django table")

    def test_django_table_names_retval_type(self):
        # Ticket #15216: return type must be a plain list either way.
        cursor = connection.cursor()
        cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
        tl = connection.introspection.django_table_names(only_existing=True)
        self.assertIs(type(tl), list)
        tl = connection.introspection.django_table_names(only_existing=False)
        self.assertIs(type(tl), list)

    def test_installed_models(self):
        tables = [Article._meta.db_table, Reporter._meta.db_table]
        models = connection.introspection.installed_models(tables)
        self.assertEqual(models, set([Article, Reporter]))

    def test_sequence_list(self):
        sequences = connection.introspection.sequence_list()
        expected = {'table': Reporter._meta.db_table, 'column': 'id'}
        self.assertTrue(expected in sequences,
                        'Reporter sequence not found in sequence_list()')

    def test_get_table_description_names(self):
        cursor = connection.cursor()
        desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        # Column names (r[0]) must match the model's field columns, in order.
        self.assertEqual([r[0] for r in desc],
                         [f.column for f in Reporter._meta.fields])

    def test_get_table_description_types(self):
        cursor = connection.cursor()
        desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual(
            [datatype(r[1], r) for r in desc],
            ['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField']
        )

    # Oracle forces null=True under the hood in some cases (see
    # https://docs.djangoproject.com/en/dev/ref/databases/#null-and-empty-strings)
    # so its idea about null_ok in cursor.description is different from ours.
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_get_table_description_nullable(self):
        cursor = connection.cursor()
        desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        # r[6] is the null_ok flag from cursor.description.
        self.assertEqual(
            [r[6] for r in desc],
            [False, False, False, False, True]
        )

    # Regression test for #9991 - 'real' types in postgres
    @skipUnlessDBFeature('has_real_datatype')
    def test_postgresql_real_type(self):
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
        desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
        cursor.execute('DROP TABLE django_ixn_real_test_table;')
        self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')

    def test_get_relations(self):
        cursor = connection.cursor()
        relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
        # Older versions of MySQL don't have the chops to report on this stuff,
        # so just skip it if no relations come back. If they do, though, we
        # should test that the response is correct.
        if relations:
            # That's {field_index: (field_index_other_table, other_table)}
            self.assertEqual(relations, {3: (0, Reporter._meta.db_table)})

    def test_get_key_columns(self):
        cursor = connection.cursor()
        key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
        self.assertEqual(key_columns, [(u'reporter_id', Reporter._meta.db_table, u'id')])

    def test_get_primary_key_column(self):
        cursor = connection.cursor()
        primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
        self.assertEqual(primary_key_column, u'id')

    def test_get_indexes(self):
        cursor = connection.cursor()
        indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
        self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
def datatype(dbtype, description):
    """Helper to convert a data type into a string.

    get_field_type may return either a field-type name or a
    (name, params) tuple; only the name is returned here.
    """
    field_type = connection.introspection.get_field_type(dbtype, description)
    return field_type[0] if type(field_type) is tuple else field_type
| bsd-3-clause |
slightstone/SickRage | lib/sqlalchemy/orm/evaluator.py | 79 | 4345 | # orm/evaluator.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from ..sql import operators
class UnevaluatableError(Exception):
    """Raised when a SQL clause cannot be evaluated in plain Python."""
# Binary operators that map directly onto Python semantics when applied to
# attribute values (SQL NULL propagation is handled in visit_binary).
_straight_ops = set(getattr(operators, op)
                    for op in ('add', 'mul', 'sub',
                               'div',
                               'mod', 'truediv',
                               'lt', 'le', 'ne', 'gt', 'ge', 'eq'))

# SQL string/collection operators this evaluator deliberately does not
# implement; clauses using them raise UnevaluatableError elsewhere.
_notimplemented_ops = set(getattr(operators, op)
                          for op in ('like_op', 'notlike_op', 'ilike_op',
                                     'notilike_op', 'between_op', 'in_op',
                                     'notin_op', 'endswith_op', 'concat_op'))
class EvaluatorCompiler(object):
    """Compile a SQLAlchemy clause into a Python callable evaluating it
    against a mapped object's attributes.

    Each visit_* method returns a function of one argument (the object)
    returning the clause's value, with SQL three-valued logic: None
    stands in for SQL NULL throughout.
    """

    def process(self, clause):
        # Dispatch on the clause's __visit_name__, visitor-style.
        meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
        if not meth:
            raise UnevaluatableError(
                "Cannot evaluate %s" % type(clause).__name__)
        return meth(clause)

    def visit_grouping(self, clause):
        # Parentheses don't change the value; evaluate the inner element.
        return self.process(clause.element)

    def visit_null(self, clause):
        return lambda obj: None

    def visit_false(self, clause):
        return lambda obj: False

    def visit_true(self, clause):
        return lambda obj: True

    def visit_column(self, clause):
        if 'parentmapper' in clause._annotations:
            # Use the mapped attribute key, which can differ from the
            # raw column name.
            key = clause._annotations['parentmapper'].\
                _columntoproperty[clause].key
        else:
            key = clause.key
        get_corresponding_attr = operator.attrgetter(key)
        return lambda obj: get_corresponding_attr(obj)

    def visit_clauselist(self, clause):
        evaluators = list(map(self.process, clause.clauses))
        if clause.operator is operators.or_:
            def evaluate(obj):
                # SQL OR: True if any operand is true; NULL if none is
                # true but at least one is NULL; else False.
                has_null = False
                for sub_evaluate in evaluators:
                    value = sub_evaluate(obj)
                    if value:
                        return True
                    has_null = has_null or value is None
                if has_null:
                    return None
                return False
        elif clause.operator is operators.and_:
            def evaluate(obj):
                # SQL AND: False if any operand is false; NULL if an
                # operand is NULL; else True.
                for sub_evaluate in evaluators:
                    value = sub_evaluate(obj)
                    if not value:
                        if value is None:
                            return None
                        return False
                return True
        else:
            raise UnevaluatableError(
                "Cannot evaluate clauselist with operator %s" %
                clause.operator)
        return evaluate

    def visit_binary(self, clause):
        eval_left, eval_right = list(map(self.process,
                                         [clause.left, clause.right]))
        # NOTE: local name shadows the stdlib `operator` module used in
        # visit_column; it holds the clause's operator function here.
        operator = clause.operator
        if operator is operators.is_:
            # IS compares directly (NULL-safe equality).
            def evaluate(obj):
                return eval_left(obj) == eval_right(obj)
        elif operator is operators.isnot:
            def evaluate(obj):
                return eval_left(obj) != eval_right(obj)
        elif operator in _straight_ops:
            def evaluate(obj):
                # NULL propagates through arithmetic and comparisons.
                left_val = eval_left(obj)
                right_val = eval_right(obj)
                if left_val is None or right_val is None:
                    return None
                return operator(eval_left(obj), eval_right(obj))
        else:
            raise UnevaluatableError(
                "Cannot evaluate %s with operator %s" %
                (type(clause).__name__, clause.operator))
        return evaluate

    def visit_unary(self, clause):
        eval_inner = self.process(clause.element)
        if clause.operator is operators.inv:
            def evaluate(obj):
                # SQL NOT: NOT NULL is still NULL.
                value = eval_inner(obj)
                if value is None:
                    return None
                return not value
            return evaluate
        raise UnevaluatableError(
            "Cannot evaluate %s with operator %s" %
            (type(clause).__name__, clause.operator))

    def visit_bindparam(self, clause):
        # Bind the parameter's current value once, at compile time.
        val = clause.value
        return lambda obj: val
| gpl-3.0 |
almarklein/flexx | flexx/app/serve.py | 1 | 10199 | """ flexx.ui client serving a web page using Tornado.
"""
import sys
import os
import logging
import urllib
import traceback
import tornado.web
import tornado.websocket
from ..webruntime.common import default_icon
from .proxy import manager, call_later
from .clientcode import clientCode
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
HTML_DIR = os.path.join(os.path.dirname(THIS_DIR), 'html')
def _flexx_run_callback(self, callback, *args, **kwargs):
    """ Patched version of Tornado's _run_callback that sets traceback
    info when an exception occurs, so that we can do PM debugging.
    """
    def _callback(*args, **kwargs):
        try:
            callback(*args, **kwargs)
        except Exception:
            type, value, tb = sys.exc_info()
            tb = tb.tb_next  # Skip *this* frame
            # Store on sys.last_* so pdb.pm() can post-mortem the error.
            sys.last_type = type
            sys.last_value = value
            sys.last_traceback = tb
            del tb  # Get rid of it in this namespace
            raise
    # Delegate to the original runner with the wrapping callback.
    return self._orig_run_callback(_callback, *args, **kwargs)
def _patch_tornado():
    """Install _flexx_run_callback on Tornado's websocket protocol.

    Supports both newer Tornado (which has _run_callback) and older
    versions (async_callback). The original is saved as
    _orig_run_callback, which also guards against double-patching.
    """
    WebSocketProtocol = tornado.websocket.WebSocketProtocol
    if not hasattr(WebSocketProtocol, '_orig_run_callback'):
        if not hasattr(WebSocketProtocol, 'async_callback'):
            WebSocketProtocol._orig_run_callback = WebSocketProtocol._run_callback
            WebSocketProtocol._run_callback = _flexx_run_callback
        else:
            WebSocketProtocol._orig_run_callback = WebSocketProtocol.async_callback
            WebSocketProtocol.async_callback = _flexx_run_callback

# Apply the patch at import time.
_patch_tornado()
class FlexxTornadoApplication(tornado.web.Application):
    """ Simple subclass of tornado Application.

    Has functionality for serving our html/css/js files, and caching them.
    Routes "<app>/ws" to the websocket handler and everything else to the
    page/resource handler.
    """

    def __init__(self):
        tornado.web.Application.__init__(self,
            [(r"/(.*)/ws", WSHandler), (r"/(.*)", MainHandler), ])
        # Resource cache; currently unused (see commented-out load()).
        self._cache = {}

    # def load(self, fname):
    #     """ Load a file with the given name. Returns bytes.
    #     """
    #     if fname not in self._cache:
    #         filename = os.path.join(HTML_DIR, fname)
    #         blob = open(filename, 'rb').read()
    #         return blob  # todo: bypasse cache
    #         self._cache[fname] = blob
    #     return self._cache[fname]
class MainHandler(tornado.web.RequestHandler):
    """ Handler for http requests: serve pages

    Serves the index (list of hosted apps), an app's main page, or an
    app resource (js/css/ico).
    """

    def initialize(self, **kwargs):
        # kwargs == dict set as third arg in url spec
        # print('init request')
        pass

    def get(self, path=None):
        print('get', path)
        # Analyze path to derive components
        # app_name - class name of the app, must be a valid identifier
        # app_id - optional id to associate connection to a specific instance
        # file_name - path (can have slashes) to a file
        parts = [p for p in path.split('/') if p]
        if parts and parts[0].split('-')[0].isidentifier():
            app_name, _, app_id = parts[0].partition('-')
            file_name = '/'.join(parts[1:])
        else:
            app_name, app_id = None, None
            file_name = '/'.join(parts)
        # todo: maybe when app_id is given, redirect to normal name, but
        # modify flexx.app_id in index.html, so that the websocket can connect
        # with id ... (mmm also not very nice)
        if not path:
            # Not a path, index / home page
            all_apps = ['<a href="%s">%s</a>' % (name, name) for name in
                        manager.get_app_names()]
            all_apps = ', '.join(all_apps)
            self.write('Root selected, apps available: %s' % all_apps)
        elif app_name:
            # App name given. But is it the app, or a resource for it?
            if not file_name:
                # This looks like an app, redirect, serve app, or error
                if not '/' in path:
                    # Normalize "/app" to "/app/" so relative links work.
                    self.redirect('/%s/' % app_name)
                elif app_id:
                    proxy = manager.get_proxy_by_id(app_name, app_id)
                    if proxy:
                        self.write(clientCode.get_page().encode())
                    else:
                        self.write('App %r with id %r is not available' %
                                   (app_name, app_id))
                elif manager.has_app_name(app_name):
                    #self.write(self.application.load('index.html'))
                    #self.serve_index()
                    self.write(clientCode.get_page().encode())
                else:
                    self.write('No app %r is hosted right now' % app_name)
            elif file_name.endswith('.ico'):
                # Icon, look it up from the app instance
                id = file_name.split('.')[0]
                if manager.has_app_name(app_name):
                    client = manager.get_proxy_by_id(app_name, id)
                    # todo: serve icon stored on app widget
                    #if app:
                    #    self.write(app._config.icon.to_bytes())
            elif file_name:
                # A resource, e.g. js/css/icon
                if file_name.endswith('.css'):
                    self.set_header("Content-Type", 'text/css')
                elif file_name.endswith('.js'):
                    self.set_header("Content-Type", 'application/x-javascript')
                try:
                    #raise RuntimeError('This is for page_light, but we might never implement that')
                    #res = self.application.load(file_name)
                    res = clientCode.load(file_name).encode()
                except IOError:
                    #self.write('invalid resource')
                    super().write_error(404)
                else:
                    self.write(res)
        elif file_name:
            # filename in root. We don't support that yet
            self.write('Invalid file % r' % file_name)
        else:
            # In theory this cannot happen
            self.write('This should not happen')

    def write_error(self, status_code, **kwargs):
        if status_code == 404:  # does not work?
            self.write('flexx.ui wants you to connect to root (404)')
        else:
            msg = 'Flexx.ui encountered an error: <br /><br />'
            try:  # try providing a useful message; tough luck if this fails
                type, value, tb = kwargs['exc_info']
                tb_str = ''.join(traceback.format_tb(tb))
                msg += '<pre>%s\n%s</pre>' % (tb_str, str(value))
            except Exception:
                pass
            self.write(msg)
            super().write_error(status_code, **kwargs)

    def on_finish(self):
        pass  # print('finish request')
class WSHandler(tornado.websocket.WebSocketHandler):
    """ Handler for websocket.

    Connects an incoming websocket to an app proxy via the manager and
    relays commands in both directions.
    """

    # https://tools.ietf.org/html/rfc6455#section-7.4.1
    known_reasons = {1000: 'client done',
                     1001: 'client closed',
                     1002: 'protocol error',
                     1003: 'could not accept data',
                     }

    # --- callbacks
    # todo: use ping() and close()

    def open(self, path=None):
        """ Called when a new connection is made.
        """
        if not hasattr(self, 'close_code'):  # old version of Tornado?
            self.close_code, self.close_reason = None, None
        # Don't collect messages to send them more efficiently, just send asap
        self.set_nodelay(True)
        if isinstance(path, bytes):
            path = path.decode()
        print('new ws connection', path)
        # Path has the form "<app_name>-<app_id>" (id part optional).
        app_name, _, app_id = path.strip('/').partition('-')
        if manager.has_app_name(app_name):
            try:
                self._proxy = manager.connect_client(self, app_name, app_id)
            except Exception as err:
                self.close(1003, "Could not launch app: %r" % err)
                raise
            self.write_message("PRINT Hello World from server", binary=True)
        else:
            self.close(1003, "Could not associate socket with an app.")

    def on_message(self, message):
        """ Called when a new message is received from JS.

        We now have a very basic protocol for receiving messages,
        we should at some point define a real formalized protocol.
        """
        self._proxy._receive_command(message)

    def on_close(self):
        """ Called when the connection is closed.
        """
        self.close_code = code = self.close_code or 0
        reason = self.close_reason or self.known_reasons.get(code, '')
        print('detected close: %s (%i)' % (reason, code))
        if hasattr(self, '_proxy'):
            manager.disconnect_client(self._proxy)
            self._proxy = None  # Allow cleaning up

    def on_pong(self, data):
        """ Called when our ping is returned.
        """
        print('PONG', data)

    # --- methods

    def command(self, cmd):
        # Send a command frame (binary) to the client.
        self.write_message(cmd, binary=True)

    def close(self, *args):
        # Newer Tornado accepts (code, reason); older versions take none.
        try:
            tornado.websocket.WebSocketHandler.close(self, *args)
        except TypeError:
            tornado.websocket.WebSocketHandler.close(self)  # older Tornado

    def close_this(self):
        """ Call this to close the websocket
        """
        self.close(1000, 'closed by server')

    def check_origin(self, origin):
        """ Handle cross-domain access; override default same origin
        policy. By default the hostname and port must be equal. We only
        require the hostname to be equal. This allows us to embed in
        the IPython notebook.
        """
        host, port = self.application.serving_at  # set by us
        incoming_host = urllib.parse.urlparse(origin).hostname
        if host == 'localhost' and origin.startswith('localhost:'):
            return True  # With nodejs origin is "localhost:52452"
        elif host == incoming_host:
            return True
        else:
            print('Connection refused from %s' % origin)
            return False
| bsd-2-clause |
xaviercobain88/framework-python | openerp/addons/survey/__init__.py | 66 | 1099 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdavid/horizon | openstack_dashboard/test/integration_tests/tests/test_projects.py | 32 | 1209 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
# Random project name so repeated runs don't collide with leftovers.
PROJECT_NAME = helpers.gen_random_resource_name("project")
class TestCreateDeleteProject(helpers.AdminTestCase):
    """Integration round-trip: create a project, then delete it."""

    def setUp(self):
        super(TestCreateDeleteProject, self).setUp()
        # Navigate to Identity > Projects once for the whole test.
        self.projects_page = self.home_pg.go_to_identity_projectspage()

    def test_create_delete_project(self):
        self.projects_page.create_project(PROJECT_NAME)
        self.assertTrue(self.projects_page.is_project_present(PROJECT_NAME))
        self.projects_page.delete_project(PROJECT_NAME)
        self.assertFalse(self.projects_page.is_project_present(PROJECT_NAME))
gautam1858/tensorflow | tensorflow/contrib/predictor/testing_common.py | 93 | 4328 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common code used for testing `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as contrib_model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.saved_model import signature_constants
def get_arithmetic_estimator(core=True, model_dir=None):
  """Returns an `Estimator` that performs basic arithmetic.
  Args:
    core: if `True`, returns a `tensorflow.python.estimator.Estimator`.
      Otherwise, returns a `tensorflow.contrib.learn.Estimator`.
    model_dir: directory in which to export checkpoints and saved models.
  Returns:
    An `Estimator` that performs arithmetic operations on its inputs.
  """
  # NOTE(review): model_dir is only forwarded on the contrib path below;
  # the core Estimator is built without it -- confirm that is intended.
  def _model_fn(features, labels, mode):
    # Labels are irrelevant: the "model" is deterministic arithmetic.
    _ = labels
    x = features['x']
    y = features['y']
    with ops.name_scope('outputs'):
      predictions = {'sum': math_ops.add(x, y, name='sum'),
                     'product': math_ops.multiply(x, y, name='product'),
                     'difference': math_ops.subtract(x, y, name='difference')}
    if core:
      # Core estimators take one ExportOutput per head; 'sum' doubles as the
      # default serving signature.
      export_outputs = {k: export_output.PredictOutput({k: v})
                        for k, v in predictions.items()}
      export_outputs[signature_constants.
                     DEFAULT_SERVING_SIGNATURE_DEF_KEY] = export_outputs['sum']
      # Constant loss and no-op train_op keep the spec valid in every mode.
      return model_fn.EstimatorSpec(mode=mode,
                                    predictions=predictions,
                                    export_outputs=export_outputs,
                                    loss=constant_op.constant(0),
                                    train_op=control_flow_ops.no_op())
    else:
      # contrib estimators use output_alternatives instead of export_outputs.
      output_alternatives = {k: (constants.ProblemType.UNSPECIFIED, {k: v})
                             for k, v in predictions.items()}
      return contrib_model_fn.ModelFnOps(
          mode=mode,
          predictions=predictions,
          output_alternatives=output_alternatives,
          loss=constant_op.constant(0),
          train_op=control_flow_ops.no_op())
  if core:
    return core_estimator.Estimator(_model_fn)
  else:
    return contrib_estimator.Estimator(_model_fn, model_dir=model_dir)
def get_arithmetic_input_fn(core=True, train=False):
  """Returns an input function or serving input receiver function.
  Args:
    core: if `True`, build objects for a core
      `tensorflow.python.estimator.Estimator`; otherwise for a contrib one.
    train: if `True`, the inner function returns `(features, label)`;
      otherwise it returns a serving-input object.
  Returns:
    A zero-argument callable usable as `input_fn` or
    `serving_input_receiver_fn`.
  """
  def _input_fn():
    with ops.name_scope('inputs'):
      # Placeholders default to 0.0 so serving works without explicit feeds.
      x = array_ops.placeholder_with_default(0.0, shape=[], name='x')
      y = array_ops.placeholder_with_default(0.0, shape=[], name='y')
      label = constant_op.constant(0.0)
      features = {'x': x, 'y': y}
      if core:
        if train:
          return features, label
        return export_lib.ServingInputReceiver(
            features=features,
            receiver_tensors=features)
      else:
        if train:
          return features, label
        return input_fn_utils.InputFnOps(
            features=features,
            labels={},
            default_inputs=features)
  return _input_fn
| apache-2.0 |
tms/node-gyp | gyp/test/standalone-static-library/gyptest-standalone-static-library.py | 46 | 1838 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of a static_library with the standalone_static_library flag set.
"""
import os
import subprocess
import sys
import TestGyp
test = TestGyp.TestGyp()
# Verify that types other than static_library cause a failure.
test.run_gyp('invalid.gyp', status=1, stderr=None)
target_str = 'invalid.gyp:bad#target'
if test.format == 'scons':
  # scons reports targets with an absolute path prefix.
  target_str = os.path.join(os.path.realpath(os.curdir), target_str)
err = ['gyp: Target %s has type executable but standalone_static_library flag '
       'is only valid for static_library type.' % target_str]
test.must_contain_all_lines(test.stderr(), err)
# Build a valid standalone_static_library.
test.run_gyp('mylib.gyp')
test.build('mylib.gyp', target='prog')
# Verify that the static library is copied to the correct location.
if test.format == 'scons':
  # For scons, we expect the library to be copied to the shared lib dir.
  standalone_static_library_dir = test.SHARED_LIB
else:
  # Otherwise, we expect the library to be copied to $PRODUCT_DIR.
  standalone_static_library_dir = test.EXECUTABLE
path_to_lib = os.path.split(
    test.built_file_path('mylib', type=standalone_static_library_dir))[0]
lib_name = test.built_file_basename('mylib', type=test.STATIC_LIB)
path = os.path.join(path_to_lib, lib_name)
test.must_exist(path)
# Verify that the program runs properly.
expect = 'hello from mylib.c\n'
test.run_built_executable('prog', stdout=expect)
# Verify that libmylib.a contains symbols. "ar -x" fails on a 'thin' archive.
if test.format in ('make', 'ninja') and sys.platform.startswith('linux'):
  retcode = subprocess.call(['ar', '-x', path])
  assert retcode == 0
test.pass_test()
rbalda/neural_ocr | env/lib/python2.7/site-packages/django/templatetags/static.py | 197 | 4052 | from django import template
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
register = template.Library()  # tag registry; Django requires this exact name
class PrefixNode(template.Node):
    """Node that resolves a URL-prefix setting (e.g. STATIC_URL) and either
    renders it inline or stores it in a context variable."""

    def __init__(self, varname=None, name=None):
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    @classmethod
    def handle_token(cls, parser, token, name):
        """Parse ``{% tag [as varname] %}`` and return a node instance."""
        # token.split_contents() isn't useful here because tags using this
        # method don't accept variable as arguments.
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        varname = tokens[2] if len(tokens) > 1 else None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        """Return the prefix read from settings, or '' when unavailable."""
        try:
            from django.conf import settings
        except ImportError:
            return ''
        return iri_to_uri(getattr(settings, name, ''))

    def render(self, context):
        prefix = self.handle_simple(self.name)
        if self.varname is not None:
            # Store in the context instead of emitting output.
            context[self.varname] = prefix
            return ''
        return prefix
@register.tag
def get_static_prefix(parser, token):
    """
    Populates a template variable with the static prefix,
    ``settings.STATIC_URL``.
    Usage::
        {% get_static_prefix [as varname] %}
    Examples::
        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}
    """
    # Parsing and rendering are implemented once on PrefixNode.
    return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
    """
    Populates a template variable with the media prefix,
    ``settings.MEDIA_URL``.
    Usage::
        {% get_media_prefix [as varname] %}
    Examples::
        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}
    """
    # Same machinery as get_static_prefix, just a different setting name.
    return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(template.Node):
    """Node for ``{% static %}``: joins a path with STATIC_URL and either
    renders the result or stores it in a context variable."""

    def __init__(self, varname=None, path=None):
        if path is None:
            raise template.TemplateSyntaxError(
                "Static template nodes must be given a path to return.")
        self.path = path
        self.varname = varname

    def url(self, context):
        # Resolve the (possibly variable) path, then prefix it.
        return self.handle_simple(self.path.resolve(context))

    def render(self, context):
        resolved = self.url(context)
        if self.varname is not None:
            context[self.varname] = resolved
            return ''
        return resolved

    @classmethod
    def handle_simple(cls, path):
        return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)

    @classmethod
    def handle_token(cls, parser, token):
        """Parse ``{% static path [as varname] %}`` and return a node."""
        bits = token.split_contents()
        if len(bits) < 2:
            raise template.TemplateSyntaxError(
                "'%s' takes at least one argument (path to file)" % bits[0])
        path = parser.compile_filter(bits[1])
        # NOTE(review): varname is taken from bits[3]; malformed input like
        # ``{% static as x %}`` raises IndexError -- behaviour kept as-is.
        varname = bits[3] if len(bits) >= 2 and bits[-2] == 'as' else None
        return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
    """
    Joins the given path with the STATIC_URL setting.
    Usage::
        {% static path [as varname] %}
    Examples::
        {% static "myapp/css/base.css" %}
        {% static variable_with_path %}
        {% static "myapp/css/base.css" as admin_base_css %}
        {% static variable_with_path as varname %}
    """
    # All parsing/rendering logic lives on StaticNode.
    return StaticNode.handle_token(parser, token)
def static(path):
    """Return `path` joined with STATIC_URL (Python-level counterpart of
    the ``{% static %}`` template tag)."""
    return StaticNode.handle_simple(path)
| mit |
BlueBrain/RenderingResourceManager | rendering_resource_manager_service/service/settings.py | 2 | 5563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2015, Human Brain Project
# Cyrille Favreau <cyrille.favreau@epfl.ch>
#
# This file is part of RenderingResourceManager
# <https://github.com/BlueBrain/RenderingResourceManager>
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License version 3.0 as published
# by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# All rights reserved. Do not distribute without further notice.
"""
Django settings for rendering_resource_manager_service project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Application name
APPLICATION_NAME = 'rendering-resource-manager'
# API version
API_VERSION = 'v1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = (
    'localhost',
    '127.0.0.1',
    '0.0.0.0',
)
# Quick-start development config - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aS3cr3tK3y'
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'corsheaders',
    'rest_framework',
    'rest_framework_swagger',
    'django_filters',
    'admin',
    'session',
    'config',
    'service',
)
MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Cors headers
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = False
CORS_ORIGIN_WHITELIST = (
    'localhost:8095',
    '127.0.0.1:8095',
    '127.0.0.1:4200',
    '0.0.0.0:8095',
)
ROOT_URLCONF = 'rendering_resource_manager_service.service.urls'
WSGI_APPLICATION = 'rendering_resource_manager_service.service.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/config/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'OPTIONS': {'timeout': 20},
        'NAME': os.path.join(BASE_DIR + '/tests', 'db.sqlite3'),
    }
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/var/tmp/django_cache',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
SWAGGER_SETTINGS = {
    'exclude_namespaces': [],
    'api_version': '0.1',
    'api_path': '/',
    'enabled_methods': [
        'get',
        'post',
        'put',
        'patch',
        'delete'
    ],
    'api_key': '',
    'is_authenticated': False,
    'is_superuser': False,
    'permission_denied_handler': None,
    'info': {
        'contact': 'cyrille.favreau@epfl.ch',
        'description': 'This is the Rendering Resource Manager Service. ',
        'title': 'Rendering Resource Manager',
    },
    'doc_expansion': 'full',
}
# Base URL prefix
BASE_URL_PREFIX = r'^' + APPLICATION_NAME + '/' + API_VERSION
# Needed by unit testing
sys.path.append(BASE_DIR)
sys.path.append(BASE_DIR + '/rendering_resource_manager_service')
# Job allocator
RESOURCE_ALLOCATOR_SLURM = 'SLURM'
RESOURCE_ALLOCATOR_UNICORE = 'UNICORE'
RESOURCE_ALLOCATOR = RESOURCE_ALLOCATOR_UNICORE
# Slurm (To be modified by deployment process)
SLURM_USERNAME = 'TO_BE_MODIFIED'
SLURM_SSH_KEY = 'TO_BE_MODIFIED'
SLURM_PROJECT = 'TO_BE_MODIFIED'
SLURM_HOSTS = ['TO_BE_MODIFIED']
SLURM_DEFAULT_QUEUE = 'TO_BE_MODIFIED'
SLURM_DEFAULT_TIME = 'TO_BE_MODIFIED'
# Unicore
UNICORE_DEFAULT_REGISTRY_URL = 'TO_BE_MODIFIED'
UNICORE_DEFAULT_SITE = 'TO_BE_MODIFIED'
UNICORE_DEFAULT_HTTP_PROXIES = {
    'http': 'TO_BE_MODIFIED',
    'ftp': 'TO_BE_MODIFIED',
    'https': 'TO_BE_MODIFIED',
}
# ClientID needed by the HBP collab project browser
SOCIAL_AUTH_HBP_KEY = 'TO_BE_MODIFIED'
# Image Streaming Service URL
IMAGE_STREAMING_SERVICE_URL = 'TO_BE_MODIFIED'
REQUEST_TIMEOUT = 5
# Optional machine-local overrides; skipped when no local_settings module
# is deployed.  (Fix: the exception was previously bound to an unused `e`.)
try:
    from local_settings import * # pylint: disable=F0401,W0403,W0401,W0614
except ImportError:
    pass
# Static files
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    BASE_DIR + '/../apps/AngularVwsViewer',
]
| lgpl-3.0 |
polarbear611/wxPolarbear | reply.py | 1 | 1381 | # -*- coding: utf-8 -*-
# filename: reply.py
import time
class Msg(object):
    """Base class for WeChat reply messages.

    The default reply body is the literal string "success", which tells the
    WeChat server that no response message should be delivered.
    """

    def __init__(self):
        pass

    def send(self):
        return "success"
class TextMsg(Msg):
    """Text reply serialized to WeChat's XML wire format."""

    def __init__(self, toUserName, fromUserName, content):
        # CreateTime must be a Unix timestamp in whole seconds.
        self.__fields = {
            'ToUserName': toUserName,
            'FromUserName': fromUserName,
            'CreateTime': int(time.time()),
            'Content': content,
        }

    def send(self):
        xml_form = """
            <xml>
                <ToUserName><![CDATA[{ToUserName}]]></ToUserName>
                <FromUserName><![CDATA[{FromUserName}]]></FromUserName>
                <CreateTime>{CreateTime}</CreateTime>
                <MsgType><![CDATA[text]]></MsgType>
                <Content><![CDATA[{Content}]]></Content>
            </xml>
            """
        return xml_form.format(**self.__fields)
class ImageMsg(Msg):
    """Image reply serialized to WeChat's XML wire format; the image is
    referenced by a previously uploaded media id."""

    def __init__(self, toUserName, fromUserName, mediaId):
        # CreateTime must be a Unix timestamp in whole seconds.
        self.__fields = {
            'ToUserName': toUserName,
            'FromUserName': fromUserName,
            'CreateTime': int(time.time()),
            'MediaId': mediaId,
        }

    def send(self):
        xml_form = """
            <xml>
                <ToUserName><![CDATA[{ToUserName}]]></ToUserName>
                <FromUserName><![CDATA[{FromUserName}]]></FromUserName>
                <CreateTime>{CreateTime}</CreateTime>
                <MsgType><![CDATA[image]]></MsgType>
                <Image>
                    <MediaId><![CDATA[{MediaId}]]></MediaId>
                </Image>
            </xml>
            """
        return xml_form.format(**self.__fields)
| gpl-3.0 |
OwnROM-Devices/OwnKernel-sprout | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
	import wx
except ImportError:
	# Parenthesized raise form: identical type and message, but valid in
	# both Python 2 and Python 3 (the old `raise E, msg` statement is a
	# syntax error under Python 3).
	raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
	"""Main trace window: one horizontal band per rectangle index reported
	by sched_tracer, with time (microseconds) mapped to x pixels via a
	zoom factor."""
	Y_OFFSET = 100			# px above the first band
	RECT_HEIGHT = 100		# band height in px
	RECT_SPACE = 50			# vertical gap between bands
	EVENT_MARKING_WIDTH = 5		# height of the event strip atop a band
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)
		# Fit the frame slightly inside the physical display.
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5			# px per millisecond (see us_to_px)
		self.scroll_scale = 20		# one scroll unit == 20 px
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		# Bind the same handlers on the container so events reach us either way.
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		self.txt = None		# summary StaticText, created lazily in update_summary()
		self.Show(True)
	# Unit conversions between trace time (us) and screen pixels.
	def us_to_px(self, val):
		# NOTE(review): with an integer `val`, `val / (10 ** 3)` is floor
		# division under Python 2 before the float multiply -- presumably
		# acceptable sub-millisecond truncation; confirm when porting.
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		# Top-left corner of the visible area, in pixels.
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		# Time (us, relative to ts_start) at the left edge of the view.
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		# Draw one rectangle in band `nr` covering [start, end] microseconds.
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			# Optional event-marker strip along the top edge of the band.
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		# Ask the tracer to repaint the absolute time window [start, end].
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		# Only redraw the currently visible time window.
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		# Map a y pixel to a band index; -1 if it falls in a gap or outside.
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		# Replace the summary text shown under the drawing area.
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		# Forward the click as (band, absolute timestamp) to the tracer.
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		# Re-apply scrollbars after a zoom change, keeping time `x` at the
		# left edge of the view.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		# '+'/'-' zoom; arrow keys scroll by one scroll unit.
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/site-packages/setuptools/command/build_py.py | 207 | 8440 | import os
import sys
import fnmatch
import textwrap
from distutils.command.build_py import build_py as _build_py
from distutils.util import convert_path
from glob import glob
try:
    from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
    # 2to3 support is optional: fall back to a stub mixin whose run_2to3 is
    # a no-op, so build_py still works without the lib2to3 helpers.
    class Mixin2to3:
        def run_2to3(self, files, doctests=True):
            "do nothing"
class build_py(_build_py, Mixin2to3):
    """Enhanced 'build_py' command that includes data files with packages
    The data files are specified via a 'package_data' argument to 'setup()'.
    See 'setuptools.dist.Distribution' for more details.
    Also, this version of the 'build_py' command allows you to specify both
    'py_modules' and 'packages' in the same setup operation.
    """
    def finalize_options(self):
        _build_py.finalize_options(self)
        self.package_data = self.distribution.package_data
        self.exclude_package_data = self.distribution.exclude_package_data or {}
        # Drop any cached value so __getattr__ recomputes it lazily.
        if 'data_files' in self.__dict__: del self.__dict__['data_files']
        # Files touched by 2to3 (source files and doctest data respectively).
        self.__updated_files = []
        self.__doctests_2to3 = []
    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            return
        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
            self.build_package_data()
        self.run_2to3(self.__updated_files, False)
        self.run_2to3(self.__updated_files, True)
        self.run_2to3(self.__doctests_2to3, True)
        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(_build_py.get_outputs(self, include_bytecode=0))
    def __getattr__(self, attr):
        if attr=='data_files': # lazily compute data files
            self.data_files = files = self._get_data_files()
            return files
        return _build_py.__getattr__(self,attr)
    def build_module(self, module, module_file, package):
        # Track copied modules so run() can feed them to 2to3.
        outfile, copied = _build_py.build_module(self, module, module_file, package)
        if copied:
            self.__updated_files.append(outfile)
        return outfile, copied
    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        self.analyze_manifest()
        data = []
        for package in self.packages or ():
            # Locate package source directory
            src_dir = self.get_package_dir(package)
            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))
            # Length of path to strip from found files
            plen = len(src_dir)+1
            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
                ]
            data.append((package, src_dir, build_dir, filenames))
        return data
    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        # Combine global ('') and per-package package_data glob patterns.
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = self.manifest_files.get(package, [])[:]
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
        return self.exclude_data_files(package, src_dir, files)
    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
                # Remember copied doctest files so run() passes them to 2to3.
                if copied and srcfile in self.distribution.convert_2to3_doctests:
                    self.__doctests_2to3.append(outf)
    def analyze_manifest(self):
        # Map each package to the manifest (egg_info) files living under it.
        self.manifest_files = mf = {}
        if not self.distribution.include_package_data:
            return
        src_dirs = {}
        for package in self.packages or ():
            # Locate package source directory
            src_dirs[assert_relative(self.get_package_dir(package))] = package
        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        for path in ei_cmd.filelist.files:
            d,f = os.path.split(assert_relative(path))
            prev = None
            oldf = f
            # Walk upwards until we reach a known package source directory.
            while d and d!=prev and d not in src_dirs:
                prev = d
                d, df = os.path.split(d)
                f = os.path.join(df, f)
            if d in src_dirs:
                if path.endswith('.py') and f==oldf:
                    continue # it's a module, not data
                mf.setdefault(src_dirs[d],[]).append(path)
    def get_data_files(self): pass # kludge 2.4 for lazy computation
    if sys.version<"2.4": # Python 2.4 already has this code
        def get_outputs(self, include_bytecode=1):
            """Return complete list of files copied to the build directory
            This includes both '.py' files and data files, as well as '.pyc'
            and '.pyo' files if 'include_bytecode' is true. (This method is
            needed for the 'install_lib' command to do its job properly, and to
            generate a correct installation manifest.)
            """
            return _build_py.get_outputs(self, include_bytecode) + [
                os.path.join(build_dir, filename)
                for package, src_dir, build_dir,filenames in self.data_files
                for filename in filenames
                ]
    def check_package(self, package, package_dir):
        """Check namespace packages' __init__ for declare_namespace"""
        try:
            # Cached result from a previous check, if any.
            return self.packages_checked[package]
        except KeyError:
            pass
        init_py = _build_py.check_package(self, package, package_dir)
        self.packages_checked[package] = init_py
        if not init_py or not self.distribution.namespace_packages:
            return init_py
        # Only warn for declared namespace packages (or their parents).
        for pkg in self.distribution.namespace_packages:
            if pkg==package or pkg.startswith(package+'.'):
                break
        else:
            return init_py
        f = open(init_py,'rbU')
        if 'declare_namespace'.encode() not in f.read():
            from distutils import log
            log.warn(
                "WARNING: %s is a namespace package, but its __init__.py does\n"
                "not declare_namespace(); setuptools 0.7 will REQUIRE this!\n"
                '(See the setuptools manual under "Namespace Packages" for '
                "details.)\n", package
            )
        f.close()
        return init_py
    def initialize_options(self):
        self.packages_checked={}
        _build_py.initialize_options(self)
    def get_package_dir(self, package):
        res = _build_py.get_package_dir(self, package)
        # Honor the optional src_root prefix used by setup.py relocation.
        if self.distribution.src_root is not None:
            return os.path.join(self.distribution.src_root, res)
        return res
    def exclude_data_files(self, package, src_dir, files):
        """Filter filenames for package's data files in 'src_dir'"""
        globs = (self.exclude_package_data.get('', [])
                 + self.exclude_package_data.get(package, []))
        bad = []
        for pattern in globs:
            bad.extend(
                fnmatch.filter(
                    files, os.path.join(src_dir, convert_path(pattern))
                )
            )
        bad = dict.fromkeys(bad)
        seen = {}
        return [
            f for f in files if f not in bad
            and f not in seen and seen.setdefault(f,1) # ditch dupes
        ]
def assert_relative(path):
    """Return *path* unchanged if it is relative; abort the setup otherwise.

    setup() arguments must always be /-separated paths relative to the
    setup.py directory; an absolute path is a hard error.
    """
    if os.path.isabs(path):
        from distutils.errors import DistutilsSetupError
        msg = textwrap.dedent("""
            Error: setup script specifies an absolute path:
                %s
            setup() arguments must *always* be /-separated paths relative to the
            setup.py directory, *never* absolute paths.
            """).lstrip() % path
        raise DistutilsSetupError(msg)
    return path
| apache-2.0 |
Achuth17/scikit-learn | sklearn/linear_model/theil_sen.py | 42 | 14404 | # -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import check_array, check_random_state, ConvergenceWarning
from ..utils import check_consistent_length, _get_n_jobs
from ..utils.random import choice
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange as range
# Machine epsilon for double floats; used as a "numerically zero" threshold.
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
    """Perform one modified Weiszfeld iteration.

    Starting from the current estimate ``x_old``, compute the next
    approximation of the spatial (L1) median of the rows of ``X`` via an
    iteratively re-weighted least-squares step.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    x_old : array, shape = [n_features]
        Current start vector.

    Returns
    -------
    x_new : array, shape = [n_features]
        New iteration step.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    displacement = X - x_old
    distances = np.sqrt(np.sum(displacement ** 2, axis=1))
    # Samples numerically equal to x_old must be left out of the weighting.
    far_enough = distances >= _EPSILON
    # 1 when x_old coincides with one of the samples, else 0.
    is_x_old_in_X = int(far_enough.sum() < X.shape[0])
    displacement = displacement[far_enough]
    distances = distances[far_enough][:, np.newaxis]
    quotient_norm = linalg.norm(np.sum(displacement / distances, axis=0))
    if quotient_norm > _EPSILON:  # avoid division by zero below
        new_direction = (np.sum(X[far_enough, :] / distances, axis=0)
                         / np.sum(1 / distances, axis=0))
    else:
        new_direction = 1.
        quotient_norm = 1.
    return (max(0., 1. - is_x_old_in_X / quotient_norm) * new_direction
            + min(1., is_x_old_in_X / quotient_norm) * x_old)
def _spatial_median(X, max_iter=300, tol=1.e-3):
    """Spatial median (L1 median).
    The spatial median is member of a class of so-called M-estimators which
    are defined by an optimization problem. Given a number of p points in an
    n-dimensional space, the point x minimizing the sum of all distances to the
    p other points is called spatial median.
    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    max_iter : int, optional
        Maximum number of iterations.  Default is 300.
    tol : float, optional
        Stop the algorithm if spatial_median has converged. Default is 1.e-3.
    Returns
    -------
    n_iter : int
        Number of iterations needed.
    spatial_median : array, shape = [n_features]
        Spatial median.
    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    if X.shape[1] == 1:
        # 1-D case: the spatial median reduces to the ordinary median and is
        # computed directly (reported as a single "iteration").
        return 1, np.median(X.ravel())
    tol **= 2  # We are computing the tol on the squared norm
    spatial_median_old = np.mean(X, axis=0)
    for n_iter in range(max_iter):
        spatial_median = _modified_weiszfeld_step(X, spatial_median_old)
        if np.sum((spatial_median_old - spatial_median) ** 2) < tol:
            break
        else:
            spatial_median_old = spatial_median
    else:
        # The for-loop exhausted max_iter without hitting `break`.
        warnings.warn("Maximum number of iterations {max_iter} reached in "
                      "spatial median for TheilSen regressor."
                      "".format(max_iter=max_iter), ConvergenceWarning)
    return n_iter, spatial_median
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) +
n_subsamples - 1) / n_samples
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function calculates the least squares method on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if intercept is set to true.
Parameters
----------
X : array, shape = [n_samples, n_features]
Design matrix, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector, where n_samples is the number of samples.
indices : array, shape = [n_subpopulation, n_subsamples]
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : array, shape = [n_subpopulation, n_features + intercept]
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
lstsq, = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation,
y_subpopulation)[1][:n_features]
return weights
class TheilSenRegressor(LinearModel, RegressorMixin):
    """Theil-Sen Estimator: robust multivariate regression model.

    The algorithm calculates least square solutions on subsets with size
    n_subsamples of the samples in X. Any value of n_subsamples between the
    number of features and samples leads to an estimator with a compromise
    between robustness and efficiency. Since the number of least square
    solutions is "n_samples choose n_subsamples", it can be extremely large
    and can therefore be limited with max_subpopulation. If this limit is
    reached, the subsets are chosen randomly. In a final step, the spatial
    median (or L1 median) is calculated of all least square solutions.

    Read more in the :ref:`User Guide <theil_sen_regression>`.

    Parameters
    ----------
    fit_intercept : boolean, optional, default True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    max_subpopulation : int, optional, default 1e4
        Instead of computing with a set of cardinality 'n choose k', where n is
        the number of samples and k is the number of subsamples (at least
        number of features), consider only a stochastic subpopulation of a
        given maximal size if 'n choose k' is larger than max_subpopulation.
        For other than small problem sizes this parameter will determine
        memory usage and runtime if n_subsamples is not changed.

    n_subsamples : int, optional, default None
        Number of samples to calculate the parameters. This is at least the
        number of features (plus 1 if fit_intercept=True) and the number of
        samples as a maximum. A lower number leads to a higher breakdown
        point and a low efficiency while a high number leads to a low
        breakdown point and a high efficiency. If None, take the
        minimum number of subsamples leading to maximal robustness.
        If n_subsamples is set to n_samples, Theil-Sen is identical to least
        squares.

    max_iter : int, optional, default 300
        Maximum number of iterations for the calculation of spatial median.

    tol : float, optional, default 1.e-3
        Tolerance when calculating spatial median.

    random_state : RandomState or an int seed, optional, default None
        A random number generator instance to define the state of the
        random permutations generator.

    n_jobs : integer, optional, default 1
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (median of distribution).

    intercept_ : float
        Estimated intercept of regression model.

    breakdown_ : float
        Approximated breakdown point.

    n_iter_ : int
        Number of iterations needed for the spatial median.

    n_subpopulation_ : int
        Number of combinations taken into account from 'n choose k', where n is
        the number of samples and k is the number of subsamples.

    References
    ----------
    - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
      Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
      http://www.math.iupui.edu/~hpeng/MTSE_0908.pdf
    """

    def __init__(self, fit_intercept=True, copy_X=True,
                 max_subpopulation=1e4, n_subsamples=None, max_iter=300,
                 tol=1.e-3, random_state=None, n_jobs=1, verbose=False):
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.max_subpopulation = int(max_subpopulation)
        self.n_subsamples = n_subsamples
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.verbose = verbose

    def _check_subparams(self, n_samples, n_features):
        """Validate ``n_subsamples`` and derive the subpopulation size.

        Returns
        -------
        n_subsamples : int
            Effective number of samples per least-squares subset.
        n_subpopulation : int
            Number of subsets that will actually be solved.
        """
        n_subsamples = self.n_subsamples

        # Fitting an intercept adds one column of ones to the design matrix.
        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features

        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError("Invalid parameter since n_subsamples > "
                                 "n_samples ({0} > {1}).".format(n_subsamples,
                                                                 n_samples))
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    # Bug fix: the message previously formatted n_samples in
                    # place of n_subsamples, reporting a wrong comparison.
                    raise ValueError("Invalid parameter since n_features{0} "
                                     "> n_subsamples ({1} > {2})."
                                     "".format(plus_1, n_dim, n_subsamples))
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError("Invalid parameter since n_subsamples != "
                                     "n_samples ({0} != {1}) while n_samples "
                                     "< n_features.".format(n_subsamples,
                                                            n_samples))
        else:
            # Smallest valid subset size gives maximal robustness.
            n_subsamples = min(n_dim, n_samples)

        if self.max_subpopulation <= 0:
            raise ValueError("Subpopulation must be strictly positive "
                             "({0} <= 0).".format(self.max_subpopulation))

        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))

        return n_subsamples, n_subpopulation

    def fit(self, X, y):
        """Fit linear model.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        y = check_array(y, ensure_2d=False)
        check_consistent_length(X, y)
        n_samples, n_features = X.shape
        n_subsamples, self.n_subpopulation_ = self._check_subparams(n_samples,
                                                                    n_features)
        self.breakdown_ = _breakdown_point(n_samples, n_subsamples)

        if self.verbose:
            print("Breakdown point: {0}".format(self.breakdown_))
            print("Number of samples: {0}".format(n_samples))
            tol_outliers = int(self.breakdown_ * n_samples)
            print("Tolerable outliers: {0}".format(tol_outliers))
            print("Number of subpopulations: {0}".format(
                self.n_subpopulation_))

        # Determine indices of subpopulation: enumerate all 'n choose k'
        # subsets when that is feasible, otherwise sample subsets at random.
        if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
            indices = list(combinations(range(n_samples), n_subsamples))
        else:
            indices = [choice(n_samples,
                              size=n_subsamples,
                              replace=False,
                              random_state=random_state)
                       for _ in range(self.n_subpopulation_)]

        # Solve one least-squares problem per subset, distributed over n_jobs.
        n_jobs = _get_n_jobs(self.n_jobs)
        index_list = np.array_split(indices, n_jobs)
        weights = Parallel(n_jobs=n_jobs,
                           verbose=self.verbose)(
            delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
            for job in range(n_jobs))
        weights = np.vstack(weights)
        # The robust estimate is the spatial (L1) median of all solutions.
        self.n_iter_, coefs = _spatial_median(weights,
                                              max_iter=self.max_iter,
                                              tol=self.tol)

        if self.fit_intercept:
            self.intercept_ = coefs[0]
            self.coef_ = coefs[1:]
        else:
            self.intercept_ = 0.
            self.coef_ = coefs

        return self
| bsd-3-clause |
fullfanta/mxnet | example/rnn/word_lm/train.py | 13 | 5649 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx, math
import argparse, math
import logging
from data import Corpus, CorpusIter
from model import *
from module import *
from mxnet.model import BatchEndParam
# Command-line interface: hyper-parameters for the PTB word-level language model.
parser = argparse.ArgumentParser(description='PennTreeBank LSTM Language Model')
parser.add_argument('--data', type=str, default='./data/ptb.',
                    help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=650,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=650,
                    help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
                    help='number of layers')
parser.add_argument('--lr', type=float, default=1.0,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.2,
                    help='gradient clipping by global norm')
parser.add_argument('--epochs', type=int, default=40,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=32,
                    help='batch size')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
                    help='tie the word embedding and softmax weights')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
parser.add_argument('--log-interval', type=int, default=200,
                    help='report interval')
parser.add_argument('--seed', type=int, default=3,
                    help='random seed')
# NOTE(review): arguments are parsed again inside the __main__ block below;
# this module-level parse looks redundant — confirm before removing.
args = parser.parse_args()
# Best (lowest) validation loss observed so far; large sentinel to start.
best_loss = 9999
def evaluate(valid_module, data_iter, epoch, mode, bptt, batch_size):
    """Compute the average per-token loss of ``valid_module`` over ``data_iter``.

    Runs forward passes only (no gradients), resets the iterator afterwards,
    logs loss and perplexity, and returns the average loss.
    """
    loss_sum = 0.0
    batch_count = 0
    for batch in data_iter:
        valid_module.forward(batch, is_train=False)
        outputs = valid_module.get_loss()
        loss_sum += mx.nd.sum(outputs[0]).asscalar()
        batch_count += 1
    data_iter.reset()
    # Normalize the summed loss to a per-token average.
    mean_loss = loss_sum / bptt / batch_size / batch_count
    logging.info('Iter[%d] %s loss:\t%.7f, Perplexity: %.7f' % \
                 (epoch, mode, mean_loss, math.exp(mean_loss)))
    return mean_loss
if __name__ == '__main__':
    # args
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)
    args = parser.parse_args()
    logging.info(args)
    # assumes a GPU is available — TODO confirm; use mx.cpu() otherwise
    ctx = mx.gpu()
    batch_size = args.batch_size
    bptt = args.bptt
    mx.random.seed(args.seed)
    # data: build vocabulary from the corpus and wrap each split in an iterator
    corpus = Corpus(args.data)
    ntokens = len(corpus.dictionary)
    train_data = CorpusIter(corpus.train, batch_size, bptt)
    valid_data = CorpusIter(corpus.valid, batch_size, bptt)
    test_data = CorpusIter(corpus.test, batch_size, bptt)
    # model: symbolic RNN graph plus softmax cross-entropy loss
    pred, states, state_names = rnn(bptt, ntokens, args.emsize, args.nhid,
                                    args.nlayers, args.dropout, batch_size, args.tied)
    loss = softmax_ce_loss(pred)
    # module: binds the graph, initializes parameters and the SGD optimizer
    module = CustomStatefulModule(loss, states, state_names=state_names, context=ctx)
    module.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label)
    module.init_params(initializer=mx.init.Xavier())
    optimizer = mx.optimizer.create('sgd', learning_rate=args.lr, rescale_grad=1.0/batch_size)
    module.init_optimizer(optimizer=optimizer)
    # metric: throughput logger, reports every --log-interval batches
    speedometer = mx.callback.Speedometer(batch_size, args.log_interval)
    # train
    logging.info("Training started ... ")
    for epoch in range(args.epochs):
        # train: one full pass over the training iterator
        total_loss = 0.0
        nbatch = 0
        for batch in train_data:
            module.forward(batch)
            module.backward()
            # gradient clipping by global norm is folded into update()
            module.update(max_norm=args.clip * bptt * batch_size)
            # update metric
            outputs = module.get_loss()
            total_loss += mx.nd.sum(outputs[0]).asscalar()
            speedometer_param = BatchEndParam(epoch=epoch, nbatch=nbatch,
                                              eval_metric=None, locals=locals())
            speedometer(speedometer_param)
            if nbatch % args.log_interval == 0 and nbatch > 0:
                # per-token loss averaged over the last log-interval window
                cur_loss = total_loss / bptt / batch_size / args.log_interval
                logging.info('Iter[%d] Batch [%d]\tLoss: %.7f,\tPerplexity:\t%.7f' % \
                             (epoch, nbatch, cur_loss, math.exp(cur_loss)))
                total_loss = 0.0
            nbatch += 1
        # validation: evaluate on the held-out split; only run the test split
        # (and keep the learning rate) when validation improved, otherwise
        # anneal the learning rate by 4x.
        valid_loss = evaluate(module, valid_data, epoch, 'Valid', bptt, batch_size)
        if valid_loss < best_loss:
            best_loss = valid_loss
            # test
            test_loss = evaluate(module, test_data, epoch, 'Test', bptt, batch_size)
        else:
            optimizer.lr *= 0.25
        train_data.reset()
    logging.info("Training completed. ")
| apache-2.0 |
ShownX/incubator-mxnet | python/mxnet/gluon/data/sampler.py | 26 | 4273 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset sampler."""
__all__ = ['Sampler', 'SequentialSampler', 'RandomSampler', 'BatchSampler']
import random
class Sampler(object):
    """Abstract interface for index samplers.

    Concrete samplers must implement both ``__len__`` and ``__iter__``.
    """
    def __len__(self):
        # Number of indices this sampler yields; subclass responsibility.
        raise NotImplementedError

    def __iter__(self):
        # Iterator over sample indices; subclass responsibility.
        raise NotImplementedError
class SequentialSampler(Sampler):
    """Yields the indices 0, 1, ..., length - 1 in order.

    Parameters
    ----------
    length : int
        Number of indices to produce.
    """
    def __init__(self, length):
        self._length = length

    def __len__(self):
        return self._length

    def __iter__(self):
        return iter(range(self._length))
class RandomSampler(Sampler):
    """Yields the indices 0, 1, ..., length - 1 in a random order,
    each index exactly once per pass (sampling without replacement).

    Parameters
    ----------
    length : int
        Number of indices to produce.
    """
    def __init__(self, length):
        self._length = length

    def __len__(self):
        return self._length

    def __iter__(self):
        order = list(range(self._length))
        random.shuffle(order)
        return iter(order)
class BatchSampler(Sampler):
    """Wraps another `Sampler` and yields its indices grouped into
    mini-batches.

    Parameters
    ----------
    sampler : Sampler
        The source Sampler.
    batch_size : int
        Size of mini-batch.
    last_batch : {'keep', 'discard', 'rollover'}
        Policy for a final batch that `batch_size` does not evenly fill:
        'keep' yields it (with fewer than `batch_size` elements),
        'discard' drops it, and 'rollover' carries its elements over to
        the next iteration.

    Examples
    --------
    >>> sampler = gluon.data.SequentialSampler(10)
    >>> batch_sampler = gluon.data.BatchSampler(sampler, 3, 'keep')
    >>> list(batch_sampler)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    """
    def __init__(self, sampler, batch_size, last_batch='keep'):
        self._sampler = sampler
        self._batch_size = batch_size
        self._last_batch = last_batch
        # Leftover indices carried over when last_batch == 'rollover'.
        self._prev = []

    def __iter__(self):
        # Start from any remainder rolled over from the previous iteration.
        pending, self._prev = self._prev, []
        for idx in self._sampler:
            pending.append(idx)
            if len(pending) == self._batch_size:
                yield pending
                pending = []
        if not pending:
            return
        policy = self._last_batch
        if policy == 'keep':
            yield pending
        elif policy == 'discard':
            return
        elif policy == 'rollover':
            self._prev = pending
        else:
            raise ValueError(
                "last_batch must be one of 'keep', 'discard', or 'rollover', " \
                "but got %s" % policy)

    def __len__(self):
        policy = self._last_batch
        if policy == 'keep':
            # Ceiling division: partial final batch counts.
            return (len(self._sampler) + self._batch_size - 1) // self._batch_size
        if policy == 'discard':
            return len(self._sampler) // self._batch_size
        if policy == 'rollover':
            return (len(self._prev) + len(self._sampler)) // self._batch_size
        raise ValueError(
            "last_batch must be one of 'keep', 'discard', or 'rollover', " \
            "but got %s" % policy)
| apache-2.0 |
skonto/spark | python/pyspark/serializers.py | 9 | 21830 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses :class:`PickleSerializer` to serialize objects using Python's
`cPickle` serializer, which can serialize nearly any Python object.
Other serializers, like :class:`MarshalSerializer`, support fewer datatypes but can be
faster.
The serializer is chosen when creating :class:`SparkContext`:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's `batchSize`
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
from itertools import izip as zip, imap as map
else:
import pickle
basestring = unicode = str
xrange = range
pickle_protocol = pickle.HIGHEST_PROTOCOL
from pyspark import cloudpickle
from pyspark.util import _exception_message, print_exec
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
    # Negative sentinel values written where a frame length would normally
    # appear (see FramedSerializer._read_with_length / UTF8Deserializer.loads).
    END_OF_DATA_SECTION = -1     # readers raise EOFError on this marker
    PYTHON_EXCEPTION_THROWN = -2
    TIMING_DATA = -3
    END_OF_STREAM = -4
    NULL = -5                    # readers return None for this marker
    START_ARROW_STREAM = -6
class Serializer(object):
    """Base class for all PySpark serializers."""

    def dump_stream(self, iterator, stream):
        """
        Serialize an iterator of objects to the output stream.
        """
        raise NotImplementedError

    def load_stream(self, stream):
        """
        Return an iterator of deserialized objects from the input stream.
        """
        raise NotImplementedError

    def _load_stream_without_unbatching(self, stream):
        """
        Return an iterator of deserialized batches (iterable) of objects from
        the input stream. Serializers that do not batch simply wrap each
        object in a one-element list.
        """
        return map(lambda obj: [obj], self.load_stream(stream))

    # Note: our notion of "equality" is that output generated by
    # equal serializers can be deserialized using the same serializer.
    # This default implementation handles the simple cases;
    # subclasses should override __eq__ as appropriate.
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    def __hash__(self):
        return hash(str(self))
class FramedSerializer(Serializer):
    """
    Serializer that writes objects as a stream of (length, data) pairs,
    where `length` is a 32-bit integer and data is `length` bytes.
    """

    def __init__(self):
        # On Python 2.6 bytearrays cannot be written to streams directly,
        # so they must be converted to strings first.
        self._only_write_strings = sys.version_info[0:2] <= (2, 6)

    def dump_stream(self, iterator, stream):
        for item in iterator:
            self._write_with_length(item, stream)

    def load_stream(self, stream):
        while True:
            try:
                yield self._read_with_length(stream)
            except EOFError:
                return

    def _write_with_length(self, obj, stream):
        payload = self.dumps(obj)
        if payload is None:
            raise ValueError("serialized value should not be None")
        # Frame lengths are 32-bit signed, so a single frame tops out at 2G.
        if len(payload) > (1 << 31):
            raise ValueError("can not serialize object larger than 2G")
        write_int(len(payload), stream)
        if self._only_write_strings:
            stream.write(str(payload))
        else:
            stream.write(payload)

    def _read_with_length(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        if length == SpecialLengths.NULL:
            return None
        payload = stream.read(length)
        if len(payload) < length:
            # Truncated frame: treat as end of stream.
            raise EOFError
        return self.loads(payload)

    def dumps(self, obj):
        """
        Serialize an object into a byte array.
        When batching is used, this will be called with an array of objects.
        """
        raise NotImplementedError

    def loads(self, obj):
        """
        Deserialize an object from a byte array.
        """
        raise NotImplementedError
class BatchedSerializer(Serializer):
    """
    Wraps another serializer and feeds it fixed-size lists ("batches")
    of objects instead of individual objects.
    """

    UNLIMITED_BATCH_SIZE = -1
    UNKNOWN_BATCH_SIZE = 0

    def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
        self.serializer = serializer
        self.batchSize = batchSize

    def _batched(self, iterator):
        if self.batchSize == self.UNLIMITED_BATCH_SIZE:
            # One single batch containing everything.
            yield list(iterator)
        elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
            # Sliceable sequence: cut it directly into batch-sized pieces.
            total = len(iterator)
            for start in xrange(0, total, self.batchSize):
                yield iterator[start: start + self.batchSize]
        else:
            # Plain iterator: accumulate until a batch fills up.
            pending = []
            for item in iterator:
                pending.append(item)
                if len(pending) == self.batchSize:
                    yield pending
                    pending = []
            if pending:
                yield pending

    def dump_stream(self, iterator, stream):
        self.serializer.dump_stream(self._batched(iterator), stream)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def _load_stream_without_unbatching(self, stream):
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
    """
    Serializes a stream of (key, list-of-values) pairs, splitting long
    value lists into slices of at most ``batchSize`` elements so the
    serialized batches stay similar in size.
    """

    def __init__(self, serializer, batchSize=10):
        BatchedSerializer.__init__(self, serializer, batchSize)

    def _batched(self, iterator):
        step = self.batchSize
        for key, values in iterator:
            # Re-emit the key once per slice of its value list.
            for start in range(0, len(values), step):
                yield key, values[start:start + step]

    def load_stream(self, stream):
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
    """
    Chooses the batch size automatically so each serialized batch stays
    near ``bestSize`` bytes.
    """

    def __init__(self, serializer, bestSize=1 << 16):
        BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
        self.bestSize = bestSize

    def dump_stream(self, iterator, stream):
        batch, target = 1, self.bestSize
        iterator = iter(iterator)
        while True:
            chunk = list(itertools.islice(iterator, batch))
            if not chunk:
                break
            data = self.serializer.dumps(chunk)
            write_int(len(data), stream)
            stream.write(data)
            # Adapt: double the batch while frames stay under the target,
            # halve it once frames exceed 10x the target.
            if len(data) < target:
                batch *= 2
            elif len(data) > target * 10 and batch > 1:
                batch //= 2

    def __repr__(self):
        return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
    """
    Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Because of pyspark batching, the cartesian product must additionally
    be computed within each pair of batches.
    """

    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        key_batches = self.key_ser._load_stream_without_unbatching(stream)
        val_batches = self.val_ser._load_stream_without_unbatching(stream)
        for keys, vals in zip(key_batches, val_batches):
            # for correctness with repeated cartesian/zip this must be
            # returned as one batch
            yield product(keys, vals)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "CartesianDeserializer(%s, %s)" % \
            (str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
    """
    Deserializes the JavaRDD zip() of two PythonRDDs.
    Because of pyspark batching, the zip must additionally be performed
    within each pair of batches.
    """

    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        key_batches = self.key_ser._load_stream_without_unbatching(stream)
        val_batches = self.val_ser._load_stream_without_unbatching(stream)
        for key_batch, val_batch in zip(key_batches, val_batches):
            # For double-zipped RDDs the batches may be iterators produced by
            # another PairDeserializer; materialize them so their lengths can
            # be compared.
            key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
            val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
            if len(key_batch) != len(val_batch):
                raise ValueError("Can not deserialize PairRDD with different number of items"
                                 " in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
            # for correctness with repeated cartesian/zip this must be
            # returned as one batch
            yield zip(key_batch, val_batch)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
    """Pass-through framed serializer: frames raw bytes without transforming them."""

    def dumps(self, obj):
        return obj

    def loads(self, obj):
        return obj
# Hack namedtuple, make it picklable

# Cache of reconstructed namedtuple classes, keyed by (name, fields).
__cls = {}


def _restore(name, fields, value):
    """Recreate a namedtuple instance from its (name, fields, values) spec."""
    key = (name, fields)
    cls = __cls.get(key)
    if cls is None:
        cls = collections.namedtuple(name, fields)
        __cls[key] = cls
    return cls(*value)
def _hack_namedtuple(cls):
    """ Make class generated by namedtuple picklable """
    name, fields = cls.__name__, cls._fields

    def __reduce__(self):
        # Pickle as a call to _restore with the class spec and field values.
        return (_restore, (name, fields, tuple(self)))

    cls.__reduce__ = __reduce__
    cls._is_namedtuple_ = True
    return cls
def _hijack_namedtuple():
    """ Hack namedtuple() to make it picklable.

    Replaces the code object of ``collections.namedtuple`` so that every
    class it creates afterwards gets the picklable ``__reduce__`` from
    ``_hack_namedtuple``. Idempotent: a ``__hijack`` attribute marks the
    function as already patched.
    """
    # hijack only one time
    if hasattr(collections.namedtuple, "__hijack"):
        return
    global _old_namedtuple # or it will put in closure
    global _old_namedtuple_kwdefaults # or it will put in closure too
    def _copy_func(f):
        # Duplicate the original namedtuple function so it survives the
        # __code__ swap below.
        return types.FunctionType(f.__code__, f.__globals__, f.__name__,
                                  f.__defaults__, f.__closure__)
    def _kwdefaults(f):
        # __kwdefaults__ contains the default values of keyword-only arguments which are
        # introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
        # are as below:
        #
        # - Does not exist in Python 2.
        # - Returns None in <= Python 3.5.x.
        # - Returns a dictionary containing the default values to the keys from Python 3.6.x
        #   (See https://bugs.python.org/issue25628).
        kargs = getattr(f, "__kwdefaults__", None)
        if kargs is None:
            return {}
        else:
            return kargs
    _old_namedtuple = _copy_func(collections.namedtuple)
    _old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
    def namedtuple(*args, **kwargs):
        # Replacement: call the original factory, then patch the class.
        for k, v in _old_namedtuple_kwdefaults.items():
            kwargs[k] = kwargs.get(k, v)
        cls = _old_namedtuple(*args, **kwargs)
        return _hack_namedtuple(cls)
    # replace namedtuple with the new one. The globals the new code object
    # will reference must live in collections.namedtuple's own __globals__.
    collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
    collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
    collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
    # Swapping __code__ (instead of rebinding the name) also affects aliases
    # created earlier via `from collections import namedtuple`.
    collections.namedtuple.__code__ = namedtuple.__code__
    collections.namedtuple.__hijack = 1
    # hack the cls already generated by namedtuple.
    # Those created in other modules can be pickled as normal,
    # so only hack those in __main__ module
    for n, o in sys.modules["__main__"].__dict__.items():
        if (type(o) is type and o.__base__ is tuple
                and hasattr(o, "_fields")
                and "__reduce__" not in o.__dict__):
            _hack_namedtuple(o) # hack inplace

# Apply the patch at import time.
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
    """
    Serializes objects using Python's pickle serializer:
    http://docs.python.org/2/library/pickle.html
    This serializer supports nearly any Python object, but may
    not be as fast as more specialized serializers.
    """
    def dumps(self, obj):
        # `pickle_protocol` is the module-level pickle.HIGHEST_PROTOCOL.
        return pickle.dumps(obj, pickle_protocol)
    # Python 3's pickle.loads accepts an `encoding` argument (needed when
    # reading pickles produced by Python 2); Python 2's loads does not,
    # so the matching signature is chosen at class-definition time.
    if sys.version >= '3':
        def loads(self, obj, encoding="bytes"):
            return pickle.loads(obj, encoding=encoding)
    else:
        def loads(self, obj, encoding=None):
            # `encoding` is accepted for signature parity but ignored on Py2.
            return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
    """Pickle-based serializer backed by cloudpickle, which can also
    serialize functions, lambdas and closures."""

    def dumps(self, obj):
        try:
            return cloudpickle.dumps(obj, pickle_protocol)
        except pickle.PickleError:
            # Already a pickling error; propagate unchanged.
            raise
        except Exception as exc:
            detail = _exception_message(exc)
            if "'i' format requires" in detail:
                # struct overflow while writing a length: the payload
                # exceeded what the pickle framing can express.
                msg = "Object too large to serialize: %s" % detail
            else:
                msg = "Could not serialize object: %s: %s" % (exc.__class__.__name__, detail)
            print_exec(sys.stderr)
            raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
    """
    Serializes objects using Python's Marshal serializer:
    http://docs.python.org/2/library/marshal.html
    This serializer is faster than PickleSerializer but supports fewer datatypes.
    """

    def loads(self, obj):
        return marshal.loads(obj)

    def dumps(self, obj):
        return marshal.dumps(obj)
class AutoSerializer(FramedSerializer):
    """
    Choose marshal or pickle as serialization protocol automatically.

    Each frame is prefixed with a one-byte tag: b'M' for marshal, b'P' for
    pickle. Once marshal fails for some object, all subsequent frames fall
    back to pickle.
    """

    def __init__(self):
        FramedSerializer.__init__(self)
        # None until marshal fails once, then b'P' (pickle-only mode).
        self._type = None

    def dumps(self, obj):
        if self._type is not None:
            return b'P' + pickle.dumps(obj, -1)
        try:
            return b'M' + marshal.dumps(obj)
        except Exception:
            self._type = b'P'
            return b'P' + pickle.dumps(obj, -1)

    def loads(self, obj):
        # Bug fix: slice (rather than index) the tag byte. On Python 3,
        # obj[0] is an int, which never compares equal to the bytes
        # literals below, so every load used to raise ValueError.
        _type = obj[0:1]
        if _type == b'M':
            return marshal.loads(obj[1:])
        elif _type == b'P':
            return pickle.loads(obj[1:])
        else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
    """Wraps another FramedSerializer and zlib-compresses its output."""

    def __init__(self, serializer):
        FramedSerializer.__init__(self)
        assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
        self.serializer = serializer

    def dumps(self, obj):
        # Compression level 1: fastest, lowest ratio.
        return zlib.compress(self.serializer.dumps(obj), 1)

    def loads(self, obj):
        return self.serializer.loads(zlib.decompress(obj))

    def __repr__(self):
        return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
    """
    Deserializes length-prefixed strings written by String.getBytes.
    """

    def __init__(self, use_unicode=True):
        self.use_unicode = use_unicode

    def loads(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        if length == SpecialLengths.NULL:
            return None
        raw = stream.read(length)
        return raw.decode("utf-8") if self.use_unicode else raw

    def load_stream(self, stream):
        # struct.error covers a truncated length prefix; EOFError the
        # end-of-data-section marker.
        try:
            while True:
                yield self.loads(stream)
        except (struct.error, EOFError):
            return

    def __repr__(self):
        return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
    """Read a big-endian signed 64-bit integer from ``stream``.

    Raises EOFError if the stream is already exhausted.
    """
    raw = stream.read(8)
    if not raw:
        raise EOFError
    return struct.unpack("!q", raw)[0]
def write_long(value, stream):
    """Write ``value`` to ``stream`` as a big-endian signed 64-bit integer."""
    stream.write(struct.pack("!q", value))
def pack_long(value):
    """Return ``value`` packed as 8 bytes, big-endian signed."""
    return struct.pack("!q", value)
def read_int(stream):
    """Read a big-endian signed 32-bit integer from ``stream``.

    Raises EOFError if the stream is already exhausted.
    """
    raw = stream.read(4)
    if not raw:
        raise EOFError
    return struct.unpack("!i", raw)[0]
def write_int(value, stream):
    """Write ``value`` to ``stream`` as a big-endian signed 32-bit integer."""
    stream.write(struct.pack("!i", value))
def read_bool(stream):
    """Read one byte from ``stream`` and decode it as a boolean.

    Raises EOFError if the stream is already exhausted.
    """
    raw = stream.read(1)
    if not raw:
        raise EOFError
    return struct.unpack("!?", raw)[0]
def write_with_length(obj, stream):
    """Write ``obj`` (bytes) to ``stream`` prefixed with its 32-bit length."""
    write_int(len(obj), stream)
    stream.write(obj)
class ChunkedStream(object):

    """
    This is a file-like object takes a stream of data, of unknown length, and breaks it into fixed
    length frames.  The intended use case is serializing large data and sending it immediately over
    a socket -- we do not want to buffer the entire data before sending it, but the receiving end
    needs to know whether or not there is more data coming.

    It works by buffering the incoming data in some fixed-size chunks.  If the buffer is full, it
    first sends the buffer size, then the data.  This repeats as long as there is more data to send.
    When this is closed, it sends the length of whatever data is in the buffer, then that data, and
    finally a "length" of -1 to indicate the stream has completed.
    """

    def __init__(self, wrapped, buffer_size):
        # wrapped: destination file-like object frames are written to.
        # buffer_size: fixed size of each full frame in bytes.
        self.buffer_size = buffer_size
        self.buffer = bytearray(buffer_size)
        # Write offset into self.buffer (number of buffered bytes).
        self.current_pos = 0
        self.wrapped = wrapped

    def write(self, bytes):
        """Buffer ``bytes``, flushing full length-prefixed frames as needed.

        NOTE: the parameter name shadows the ``bytes`` builtin (kept for
        compatibility with the file-like write() signature).
        """
        byte_pos = 0
        byte_remaining = len(bytes)
        while byte_remaining > 0:
            new_pos = byte_remaining + self.current_pos
            if new_pos < self.buffer_size:
                # just put it in our buffer
                self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
                self.current_pos = new_pos
                byte_remaining = 0
            else:
                # fill the buffer, send the length then the contents, and start filling again
                space_left = self.buffer_size - self.current_pos
                new_byte_pos = byte_pos + space_left
                self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
                write_int(self.buffer_size, self.wrapped)
                self.wrapped.write(self.buffer)
                byte_remaining -= space_left
                byte_pos = new_byte_pos
                self.current_pos = 0

    def close(self):
        """Flush any partial frame, write the -1 terminator, close wrapped."""
        # if there is anything left in the buffer, write it out first
        if self.current_pos > 0:
            write_int(self.current_pos, self.wrapped)
            self.wrapped.write(self.buffer[:self.current_pos])
        # -1 length indicates to the receiving end that we're done.
        write_int(-1, self.wrapped)
        self.wrapped.close()

    @property
    def closed(self):
        """
        Return True if the `wrapped` object has been closed.
        NOTE: this property is required by pyarrow to be used as a file-like object in
        pyarrow.RecordBatchStreamWriter from ArrowStreamSerializer
        """
        return self.wrapped.closed
if __name__ == '__main__':
    import doctest
    # Run this module's doctests; exit non-zero so CI notices failures.
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        sys.exit(-1)
| apache-2.0 |
civisanalytics/ansible | lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py | 22 | 17512 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: apache2_mod_proxy
version_added: "2.2"
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
- Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
status page has to be enabled and accessible, as this module relies on parsing
this page. This module supports ansible check_mode, and requires BeautifulSoup
python module.
options:
balancer_url_suffix:
default: /balancer-manager/
description:
- Suffix of the balancer pool url required to access the balancer pool
status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
required: false
balancer_vhost:
default: None
description:
- (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
required: true
member_host:
default: None
description:
- (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
Port number is autodetected and should not be specified here.
If undefined, apache2_mod_proxy module will return a members list of
dictionaries of all the current balancer pool members' attributes.
required: false
state:
default: None
description:
- Desired state of the member host.
(absent|disabled),drained,hot_standby,ignore_errors can be
simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
required: false
choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
tls:
default: false
description:
- Use https to access balancer management page.
choices: ["true", "false"]
validate_certs:
default: true
description:
- Validate ssl/tls certificates.
choices: ["true", "false"]
'''
# Usage examples rendered by ansible-doc.  The option is named
# balancer_url_suffix (matching the argument_spec below); the old example
# incorrectly used "balancer_suffix", which the module would reject.
EXAMPLES = '''
# Get all current balancer pool members' attributes:
- apache2_mod_proxy:
    balancer_vhost: 10.0.0.2
# Get a specific member's attributes:
- apache2_mod_proxy:
    balancer_vhost: myws.mydomain.org
    balancer_url_suffix: /lb/
    member_host: node1.myws.mydomain.org
# Enable all balancer pool members:
- apache2_mod_proxy:
    balancer_vhost: '{{ myloadbalancer_host }}'
  register: result
- apache2_mod_proxy:
    balancer_vhost: '{{ myloadbalancer_host }}'
    member_host: '{{ item.host }}'
    state: present
  with_items: '{{ result.members }}'
# Gracefully disable a member from a loadbalancer node:
- apache2_mod_proxy:
    balancer_vhost: '{{ vhost_host }}'
    member_host: '{{ member.host }}'
    state: drained
  delegate_to: myloadbalancernode
- wait_for:
    host: '{{ member.host }}'
    port: '{{ member.port }}'
    state: drained
  delegate_to: myloadbalancernode
- apache2_mod_proxy:
    balancer_vhost: '{{ vhost_host }}'
    member_host: '{{ member.host }}'
    state: absent
  delegate_to: myloadbalancernode
'''
RETURN = '''
member:
description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
type: dict
returned: success
sample:
{"attributes":
{"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
}
members:
description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
returned: success
type: list
sample:
[{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
},
{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.21",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false}
}
]
'''
import re
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
HAS_BEAUTIFULSOUP = False
else:
HAS_BEAUTIFULSOUP = True
# balancer member attributes extraction regexp:
# group 1 = whole "b=...&w=..." query string, 2 = balancer name, 3 = scheme,
# 4 = member host, 5 = port (may be empty), 6 = path.
EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
# Apache2 server version extraction regexp:
# group 1 = dotted version number, 2 = platform token, e.g. "Debian".
APACHE_VERSION_EXPRESSION = r"Server Version: Apache/([\d.]+) \(([\w]+)\)"
def regexp_extraction(string, _regexp, groups=1):
    """Return capture group *groups* (default 1) of *_regexp* applied to
    *string*, or None when the pattern does not match or the requested
    group captured an empty string."""
    match = re.search(pattern=str(_regexp), string=str(string))
    if match is None:
        return None
    captured = match.group(groups)
    if captured == '':
        return None
    return str(captured)
class BalancerMember(object):
    """ Apache 2.4 mod_proxy LB balancer member.
    attributes:
        read-only:
            host -> member host (string),
            management_url -> member management url (string),
            protocol -> member protocol (string)
            port -> member port (string),
            path -> member location (string),
            balancer_url -> url of this member's parent balancer (string),
            attributes -> whole member attributes (dictionary)
            module -> ansible module instance (AnsibleModule object).
        writable:
            status -> status of the member (dictionary)
    """
    def __init__(self, management_url, balancer_url, module):
        # Individual fields are parsed out of the management url with the
        # shared EXPRESSION regexp (group 4=host, 3=scheme, 5=port, 6=path).
        self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
        self.management_url = str(management_url)
        self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
        self.port = regexp_extraction(management_url, EXPRESSION, 5)
        self.path = regexp_extraction(management_url, EXPRESSION, 6)
        self.balancer_url = str(balancer_url)
        self.module = module

    def get_member_attributes(self):
        """ Returns a dictionary of a balancer member's attributes."""
        balancer_member_page = fetch_url(self.module, self.management_url)
        # Explicit status check instead of assert: asserts are stripped when
        # python runs with -O, and the previous "msg + dict" concatenation
        # raised TypeError instead of reporting the real failure.
        if balancer_member_page[1]['status'] != 200:
            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! %s" % balancer_member_page[1])
        try:
            soup = BeautifulSoup(balancer_member_page[0])
        except TypeError:
            # `soup` is unbound in this branch, so report the raw page.
            self.module.fail_json(msg="Cannot parse balancer_member_page HTML! %s" % balancer_member_page[0])
        else:
            # Second table on the page holds the member rows; first row is
            # the header; the row matching our host carries the values.
            subsoup = soup.findAll('table')[1].findAll('tr')
            keys = subsoup[0].findAll('th')
            for valuesset in subsoup[1::1]:
                if re.search(pattern=self.host, string=str(valuesset)):
                    values = valuesset.findAll('td')
                    return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))

    def get_member_status(self):
        """ Returns a dictionary of a balancer member's status attributes."""
        status_mapping = {'disabled': 'Dis',
                          'drained': 'Drn',
                          'hot_standby': 'Stby',
                          'ignore_errors': 'Ign'}
        status = {}
        actual_status = str(self.attributes['Status'])
        for mode, flag in status_mapping.items():
            # A mode is active when its marker appears in the Status column.
            status[mode] = bool(re.search(pattern=flag, string=actual_status))
        return status

    def set_member_status(self, values):
        """ Sets a balancer member's status attributes amongst pre-mapped values."""
        values_mapping = {'disabled': '&w_status_D',
                          'drained': '&w_status_N',
                          'hot_standby': '&w_status_H',
                          'ignore_errors': '&w_status_I'}
        request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
        for k in values_mapping.keys():
            if values[str(k)]:
                request_body = request_body + str(values_mapping[k]) + '=1'
            else:
                request_body = request_body + str(values_mapping[k]) + '=0'
        response = fetch_url(self.module, self.management_url, data=str(request_body))
        # The status code is an int: it must be interpolated, not
        # concatenated (the old "str + int" raised TypeError).
        if response[1]['status'] != 200:
            self.module.fail_json(msg="Could not set the member status! %s %s" % (self.host, response[1]['status']))

    attributes = property(get_member_attributes)
    status = property(get_member_status, set_member_status)
class Balancer(object):
    """ Apache httpd 2.4 mod_proxy balancer object"""
    def __init__(self, host, suffix, module, members=None, tls=False):
        # Build the management urls from the vhost and url suffix.
        scheme = 'https' if tls else 'http'
        self.base_url = '%s://%s' % (scheme, str(host))
        self.url = '%s://%s%s' % (scheme, str(host), str(suffix))
        self.module = module
        self.page = self.fetch_balancer_page()
        if members is None:
            self._members = []

    def fetch_balancer_page(self):
        """ Returns the balancer management html page as a string for later parsing."""
        page = fetch_url(self.module, str(self.url))
        # Explicit check instead of assert (asserts vanish under python -O).
        if page[1]['status'] != 200:
            self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
        else:
            content = page[0].read()
            apache_version = regexp_extraction(content, APACHE_VERSION_EXPRESSION, 1)
            # regexp_extraction returns None when the banner is missing, and
            # re.search would raise TypeError on a None string -- guard it.
            if apache_version is None or not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
                self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
            return content

    def get_balancer_members(self):
        """ Returns members of the balancer as a generator object for later iteration."""
        try:
            soup = BeautifulSoup(self.page)
        except TypeError:
            self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
        else:
            for element in soup.findAll('a')[1::1]:
                balancer_member_suffix = str(element.get('href'))
                # The old code used "is not ''", an identity comparison with
                # a literal which never triggered; test for emptiness.
                if not balancer_member_suffix:
                    self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
                else:
                    yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)

    members = property(get_balancer_members)
def main():
    """ Initiates module."""
    module = AnsibleModule(
        argument_spec=dict(
            # A required option must not also declare a default; the old
            # "required=True, default=None" combination is contradictory and
            # is rejected/warned about by AnsibleModule.
            balancer_vhost=dict(required=True, type='str'),
            balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
            member_host=dict(type='str'),
            state=dict(type='str'),
            tls=dict(default=False, type='bool'),
            validate_certs=dict(default=True, type='bool')
        ),
        supports_check_mode=True
    )

    if HAS_BEAUTIFULSOUP is False:
        module.fail_json(msg="python module 'BeautifulSoup' is required!")

    # Validate the comma-separated state list; present/enabled cannot be
    # combined with any other state.
    if module.params['state'] is not None:
        states = module.params['state'].split(',')
        if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
            module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
        else:
            for _state in states:
                if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
                    module.fail_json(msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'.")
    else:
        states = ['None']

    mybalancer = Balancer(module.params['balancer_vhost'],
                          module.params['balancer_url_suffix'],
                          module=module,
                          tls=module.params['tls'])

    if module.params['member_host'] is None:
        # No member selected: return attributes of every pool member.
        json_output_list = []
        for member in mybalancer.members:
            json_output_list.append({
                "host": member.host,
                "status": member.status,
                "protocol": member.protocol,
                "port": member.port,
                "path": member.path,
                "attributes": member.attributes,
                "management_url": member.management_url,
                "balancer_url": member.balancer_url
            })
        module.exit_json(
            changed=False,
            members=json_output_list
        )
    else:
        changed = False
        member_exists = False
        member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
        for mode in member_status.keys():
            for state in states:
                if mode == state:
                    member_status[mode] = True
                elif mode == 'disabled' and state == 'absent':
                    # 'absent' is implemented by disabling the member.
                    member_status[mode] = True

        for member in mybalancer.members:
            if str(member.host) == str(module.params['member_host']):
                member_exists = True
                if module.params['state'] is not None:
                    member_status_before = member.status
                    if not module.check_mode:
                        # Assigning member.status performs the HTTP POST.
                        member_status_after = member.status = member_status
                    else:
                        member_status_after = member_status
                    if member_status_before != member_status_after:
                        changed = True
                json_output = {
                    "host": member.host,
                    "status": member.status,
                    "protocol": member.protocol,
                    "port": member.port,
                    "path": member.path,
                    "attributes": member.attributes,
                    "management_url": member.management_url,
                    "balancer_url": member.balancer_url
                }
        if member_exists:
            module.exit_json(
                changed=changed,
                member=json_output
            )
        else:
            module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
if __name__ == '__main__':
main()
| gpl-3.0 |
olemis/sqlalchemy | lib/sqlalchemy/testing/plugin/noseplugin.py | 58 | 2847 | # plugin/noseplugin.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Enhance nose with extra options and behaviors for running SQLAlchemy tests.
Must be run via ./sqla_nose.py so that it is imported in the expected
way (e.g. as a package-less import).
"""
try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import os
import sys
from nose.plugins import Plugin
import nose
# Populated lazily in NoseSQLAlchemy.begin(); importing sqlalchemy.testing
# at module-import time would fire the plugin bootstrap too early.
fixtures = None
py3k = sys.version_info >= (3, 0)
class NoseSQLAlchemy(Plugin):
    """Nose plugin that delegates all test-selection and lifecycle policy
    to SQLAlchemy's ``plugin_base`` module."""
    enabled = True
    name = 'sqla_testing'
    score = 100
    def options(self, parser, env=os.environ):
        # Register SQLAlchemy's extra command-line options with nose's parser.
        Plugin.options(self, parser, env)
        opt = parser.add_option
        def make_option(name, **kw):
            # Adapt plugin_base's (opt_str, value, parser) callbacks to
            # optparse's (option, opt_str, value, parser) signature.
            callback_ = kw.pop("callback", None)
            if callback_:
                def wrap_(option, opt_str, value, parser):
                    callback_(opt_str, value, parser)
                kw["callback"] = wrap_
            opt(name, **kw)
        plugin_base.setup_options(make_option)
        plugin_base.read_config()
    def configure(self, options, conf):
        # Called by nose once options are parsed; forward to plugin_base.
        super(NoseSQLAlchemy, self).configure(options, conf)
        plugin_base.pre_begin(options)
        plugin_base.set_coverage_flag(options.enable_plugin_coverage)
        plugin_base.set_skip_test(nose.SkipTest)
    def begin(self):
        # Import sqlalchemy.testing.fixtures only now, after configuration.
        global fixtures
        from sqlalchemy.testing import fixtures  # noqa
        plugin_base.post_begin()
    def describeTest(self, test):
        # Suppress nose's default test description.
        return ""
    def wantFunction(self, fn):
        # Plain functions are never collected; only methods on test classes.
        return False
    def wantMethod(self, fn):
        # Bound-method introspection differs between py2 (im_class) and
        # py3 (__self__); methods without a 'cls' attribute are skipped.
        if py3k:
            if not hasattr(fn.__self__, 'cls'):
                return False
            cls = fn.__self__.cls
        else:
            cls = fn.im_class
        return plugin_base.want_method(cls, fn)
    def wantClass(self, cls):
        return plugin_base.want_class(cls)
    def beforeTest(self, test):
        if not hasattr(test.test, 'cls'):
            return
        plugin_base.before_test(
            test,
            test.test.cls.__module__,
            test.test.cls, test.test.method.__name__)
    def afterTest(self, test):
        plugin_base.after_test(test)
    def startContext(self, ctx):
        # Only TestBase subclasses get class-level setup/teardown hooks.
        if not isinstance(ctx, type) \
                or not issubclass(ctx, fixtures.TestBase):
            return
        plugin_base.start_test_class(ctx)
    def stopContext(self, ctx):
        if not isinstance(ctx, type) \
                or not issubclass(ctx, fixtures.TestBase):
            return
        plugin_base.stop_test_class(ctx)
| mit |
hazrpg/calibre | src/calibre/gui2/preferences/tweaks.py | 14 | 16616 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from functools import partial
import textwrap
from collections import OrderedDict
from calibre.gui2.preferences import ConfigWidgetBase, test_widget, AbortCommit
from calibre.gui2.preferences.tweaks_ui import Ui_Form
from calibre.gui2 import error_dialog, info_dialog
from calibre.utils.config import read_raw_tweaks, write_tweaks
from calibre.gui2.widgets import PythonHighlighter
from calibre import isbytestring
from calibre.utils.icu import lower
from calibre.utils.search_query_parser import (ParseException,
SearchQueryParser)
from PyQt5.Qt import (QAbstractListModel, Qt, QStyledItemDelegate, QStyle,
QStyleOptionViewItem, QFont, QDialogButtonBox, QDialog, QApplication,
QVBoxLayout, QPlainTextEdit, QLabel, QModelIndex, QMenu, QIcon)
ROOT = QModelIndex()
class AdaptSQP(SearchQueryParser):
    def __init__(self, *args, **kwargs):
        # Deliberate no-op: Tweaks inherits from both QAbstractListModel and
        # this class, and calls SearchQueryParser.__init__ explicitly with
        # its own locations, so cooperative __init__ must do nothing here.
        pass
class Delegate(QStyledItemDelegate):  # {{{
    """Item delegate that draws the current tweak with a focus rectangle."""
    def __init__(self, view):
        QStyledItemDelegate.__init__(self, view)
        self.view = view
    def paint(self, p, opt, idx):
        # Copy the style option so we can mark the current row as focused
        # without mutating the option Qt handed us.
        copy = QStyleOptionViewItem(opt)
        copy.showDecorationSelected = True
        if self.view.currentIndex() == idx:
            copy.state |= QStyle.State_HasFocus
        QStyledItemDelegate.paint(self, p, copy, idx)
# }}}
class Tweak(object):  # {{{
    """One named tweak: a group of variables with default values plus any
    user-customized overrides."""
    def __init__(self, name, doc, var_names, defaults, custom):
        translate = _
        self.name = translate(name)
        self.doc = doc.strip()
        if self.doc:
            self.doc = translate(self.doc)
        self.var_names = var_names
        if self.var_names:
            # Prefix the doc with the first variable name as an ID.
            self.doc = u"%s: %s\n\n%s"%(_('ID'), self.var_names[0], self.doc)
        self.default_values = OrderedDict()
        for x in var_names:
            self.default_values[x] = defaults[x]
        self.custom_values = OrderedDict()
        for x in var_names:
            if x in custom:
                self.custom_values[x] = custom[x]
    def __str__(self):
        # Serialize back to the tweaks-file format: "#: name", "# doc",
        # then "var = value" lines, with customizations taking precedence.
        ans = ['#: ' + self.name]
        for line in self.doc.splitlines():
            if line:
                ans.append('# ' + line)
        for key, val in self.default_values.iteritems():
            val = self.custom_values.get(key, val)
            ans.append('%s = %r'%(key, val))
        ans = '\n'.join(ans)
        if isinstance(ans, unicode):
            ans = ans.encode('utf-8')
        return ans
    def __cmp__(self, other):
        # NOTE: python 2 only (cmp/__cmp__); sorts customized tweaks first.
        return -1 * cmp(self.is_customized,
                getattr(other, 'is_customized', False))
    @property
    def is_customized(self):
        # True when any variable's custom value differs from its default.
        for x, val in self.default_values.iteritems():
            if self.custom_values.get(x, val) != val:
                return True
        return False
    @property
    def edit_text(self):
        # Text shown in the edit box: name comment plus current values.
        ans = ['# %s'%self.name]
        for x, val in self.default_values.iteritems():
            val = self.custom_values.get(x, val)
            ans.append('%s = %r'%(x, val))
        return '\n\n'.join(ans)
    def restore_to_default(self):
        self.custom_values.clear()
    def update(self, varmap):
        self.custom_values.update(varmap)
# }}}
class Tweaks(QAbstractListModel, AdaptSQP): # {{{
def __init__(self, parent=None):
QAbstractListModel.__init__(self, parent)
SearchQueryParser.__init__(self, ['all'])
raw_defaults, raw_custom = read_raw_tweaks()
self.parse_tweaks(raw_defaults, raw_custom)
def rowCount(self, *args):
return len(self.tweaks)
def data(self, index, role):
row = index.row()
try:
tweak = self.tweaks[row]
except:
return None
if role == Qt.DisplayRole:
return textwrap.fill(tweak.name, 40)
if role == Qt.FontRole and tweak.is_customized:
ans = QFont()
ans.setBold(True)
return ans
if role == Qt.ToolTipRole:
tt = _('This tweak has it default value')
if tweak.is_customized:
tt = '<p>'+_('This tweak has been customized')
tt += '<pre>'
for varn, val in tweak.custom_values.iteritems():
tt += '%s = %r\n\n'%(varn, val)
return textwrap.fill(tt)
if role == Qt.UserRole:
return tweak
return None
def parse_tweaks(self, defaults, custom):
l, g = {}, {}
try:
exec(custom, g, l)
except:
print 'Failed to load custom tweaks file'
import traceback
traceback.print_exc()
dl, dg = {}, {}
exec(defaults, dg, dl)
lines = defaults.splitlines()
pos = 0
self.tweaks = []
while pos < len(lines):
line = lines[pos]
if line.startswith('#:'):
pos = self.read_tweak(lines, pos, dl, l)
pos += 1
self.tweaks.sort()
default_keys = set(dl.iterkeys())
custom_keys = set(l.iterkeys())
self.plugin_tweaks = {}
for key in custom_keys - default_keys:
self.plugin_tweaks[key] = l[key]
def read_tweak(self, lines, pos, defaults, custom):
name = lines[pos][2:].strip()
doc, var_names = [], []
while True:
pos += 1
line = lines[pos]
if not line.startswith('#'):
break
doc.append(line[1:].strip())
doc = '\n'.join(doc)
while True:
try:
line = lines[pos]
except IndexError:
break
if not line.strip():
break
spidx1 = line.find(' ')
spidx2 = line.find('=')
spidx = spidx1 if spidx1 > 0 and (spidx2 == 0 or spidx2 > spidx1) else spidx2
if spidx > 0:
var = line[:spidx]
if var not in defaults:
raise ValueError('%r not in default tweaks dict'%var)
var_names.append(var)
pos += 1
if not var_names:
raise ValueError('Failed to find any variables for %r'%name)
self.tweaks.append(Tweak(name, doc, var_names, defaults, custom))
# print '\n\n', self.tweaks[-1]
return pos
def restore_to_default(self, idx):
tweak = self.data(idx, Qt.UserRole)
if tweak is not None:
tweak.restore_to_default()
self.dataChanged.emit(idx, idx)
def restore_to_defaults(self):
for r in range(self.rowCount()):
self.restore_to_default(self.index(r))
self.plugin_tweaks = {}
def update_tweak(self, idx, varmap):
tweak = self.data(idx, Qt.UserRole)
if tweak is not None:
tweak.update(varmap)
self.dataChanged.emit(idx, idx)
def to_string(self):
ans = ['#!/usr/bin/env python',
'# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai', '',
'# This file was automatically generated by calibre, do not'
' edit it unless you know what you are doing.', '',
]
for tweak in self.tweaks:
ans.extend(['', str(tweak), ''])
if self.plugin_tweaks:
ans.extend(['', '',
'# The following are tweaks for installed plugins', ''])
for key, val in self.plugin_tweaks.iteritems():
ans.extend(['%s = %r'%(key, val), '', ''])
return '\n'.join(ans)
@property
def plugin_tweaks_string(self):
ans = []
for key, val in self.plugin_tweaks.iteritems():
ans.extend(['%s = %r'%(key, val), '', ''])
ans = '\n'.join(ans)
if isbytestring(ans):
ans = ans.decode('utf-8')
return ans
def set_plugin_tweaks(self, d):
self.plugin_tweaks = d
def universal_set(self):
return set(xrange(self.rowCount()))
def get_matches(self, location, query, candidates=None):
if candidates is None:
candidates = self.universal_set()
ans = set()
if not query:
return ans
query = lower(query)
for r in candidates:
dat = self.data(self.index(r), Qt.UserRole)
var_names = u' '.join(dat.default_values)
if query in lower(dat.name) or query in lower(var_names):
ans.add(r)
return ans
def find(self, query):
query = query.strip()
if not query:
return ROOT
matches = self.parse(query)
if not matches:
return ROOT
matches = list(sorted(matches))
return self.index(matches[0])
def find_next(self, idx, query, backwards=False):
query = query.strip()
if not query:
return idx
matches = self.parse(query)
if not matches:
return idx
loc = idx.row()
if loc not in matches:
return self.find(query)
if len(matches) == 1:
return ROOT
matches = list(sorted(matches))
i = matches.index(loc)
if backwards:
ans = i - 1 if i - 1 >= 0 else len(matches)-1
else:
ans = i + 1 if i + 1 < len(matches) else 0
ans = matches[ans]
return self.index(ans)
# }}}
class PluginTweaks(QDialog):  # {{{
    """Modal dialog with a python-highlighted editor for plugin tweaks."""
    def __init__(self, raw, parent=None):
        # raw: current plugin-tweaks source text to pre-fill the editor.
        QDialog.__init__(self, parent)
        self.setWindowTitle(_('Plugin tweaks'))
        self.edit = QPlainTextEdit(self)
        self.highlighter = PythonHighlighter(self.edit.document())
        self.l = QVBoxLayout()
        self.setLayout(self.l)
        self.msg = QLabel(
            _('Add/edit tweaks for any custom plugins you have installed. '
                'Documentation for these tweaks should be available '
                'on the website from where you downloaded the plugins.'))
        self.msg.setWordWrap(True)
        self.l.addWidget(self.msg)
        self.l.addWidget(self.edit)
        self.edit.setPlainText(raw)
        self.bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel,
                Qt.Horizontal, self)
        self.bb.accepted.connect(self.accept)
        self.bb.rejected.connect(self.reject)
        self.l.addWidget(self.bb)
        self.resize(550, 300)
# }}}
class ConfigWidget(ConfigWidgetBase, Ui_Form):
    """Preferences -> Advanced -> Tweaks panel: lists all tweaks and lets
    the user search, edit, apply and reset them."""
    def genesis(self, gui):
        # One-time widget wiring; widgets themselves come from Ui_Form.
        self.gui = gui
        self.delegate = Delegate(self.tweaks_view)
        self.tweaks_view.setItemDelegate(self.delegate)
        self.tweaks_view.currentChanged = self.current_changed
        self.view = self.tweaks_view
        self.highlighter = PythonHighlighter(self.edit_tweak.document())
        self.restore_default_button.clicked.connect(self.restore_to_default)
        self.apply_button.clicked.connect(self.apply_tweak)
        self.plugin_tweaks_button.clicked.connect(self.plugin_tweaks)
        self.splitter.setStretchFactor(0, 1)
        self.splitter.setStretchFactor(1, 100)
        self.next_button.clicked.connect(self.find_next)
        self.previous_button.clicked.connect(self.find_previous)
        self.search.initialize('tweaks_search_history', help_text=_('Search for tweak'))
        self.search.search.connect(self.find)
        self.view.setContextMenuPolicy(Qt.CustomContextMenu)
        self.view.customContextMenuRequested.connect(self.show_context_menu)
        self.copy_icon = QIcon(I('edit-copy.png'))
    def show_context_menu(self, point):
        # Right-click menu: copy "name (ID: var)" of the current tweak.
        idx = self.tweaks_view.currentIndex()
        if not idx.isValid():
            return True
        tweak = self.tweaks.data(idx, Qt.UserRole)
        self.context_menu = QMenu(self)
        self.context_menu.addAction(self.copy_icon,
                            _('Copy to clipboard'),
                            partial(self.copy_item_to_clipboard,
                                    val=u"%s (%s: %s)"%(tweak.name,
                                                        _('ID'),
                                                        tweak.var_names[0])))
        self.context_menu.popup(self.mapToGlobal(point))
        return True
    def copy_item_to_clipboard(self, val):
        cb = QApplication.clipboard()
        cb.clear()
        cb.setText(val)
    def plugin_tweaks(self):
        # Open the plugin-tweaks editor dialog and validate its contents by
        # exec()ing them before accepting.
        raw = self.tweaks.plugin_tweaks_string
        d = PluginTweaks(raw, self)
        if d.exec_() == d.Accepted:
            g, l = {}, {}
            try:
                exec(unicode(d.edit.toPlainText()), g, l)
            except:
                import traceback
                return error_dialog(self, _('Failed'),
                    _('There was a syntax error in your tweak. Click '
                        'the show details button for details.'), show=True,
                    det_msg=traceback.format_exc())
            self.tweaks.set_plugin_tweaks(l)
            self.changed()
    def current_changed(self, current, previous):
        # Sync the help pane and editor with the newly-selected tweak.
        self.tweaks_view.scrollTo(current)
        tweak = self.tweaks.data(current, Qt.UserRole)
        self.help.setPlainText(tweak.doc)
        self.edit_tweak.setPlainText(tweak.edit_text)
    def changed(self, *args):
        self.changed_signal.emit()
    def initialize(self):
        self.tweaks = self._model = Tweaks()
        self.tweaks_view.setModel(self.tweaks)
    def restore_to_default(self, *args):
        # Reset only the currently-selected tweak.
        idx = self.tweaks_view.currentIndex()
        if idx.isValid():
            self.tweaks.restore_to_default(idx)
            tweak = self.tweaks.data(idx, Qt.UserRole)
            self.edit_tweak.setPlainText(tweak.edit_text)
            self.changed()
    def restore_defaults(self):
        # Reset every tweak (ConfigWidgetBase "Restore defaults" hook).
        ConfigWidgetBase.restore_defaults(self)
        self.tweaks.restore_to_defaults()
        self.changed()
    def apply_tweak(self):
        # Validate the edited tweak text by exec()ing it, then store it.
        idx = self.tweaks_view.currentIndex()
        if idx.isValid():
            l, g = {}, {}
            try:
                exec(unicode(self.edit_tweak.toPlainText()), g, l)
            except:
                import traceback
                error_dialog(self.gui, _('Failed'),
                        _('There was a syntax error in your tweak. Click '
                            'the show details button for details.'),
                        det_msg=traceback.format_exc(), show=True)
                return
            self.tweaks.update_tweak(idx, l)
            self.changed()
    def commit(self):
        # Final sanity check: the serialized tweaks file must be valid
        # python before it is written to disk.
        raw = self.tweaks.to_string()
        try:
            exec(raw)
        except:
            import traceback
            error_dialog(self, _('Invalid tweaks'),
                    _('The tweaks you entered are invalid, try resetting the'
                        ' tweaks to default and changing them one by one until'
                        ' you find the invalid setting.'),
                    det_msg=traceback.format_exc(), show=True)
            raise AbortCommit('abort')
        write_tweaks(raw)
        ConfigWidgetBase.commit(self)
        return True
    def find(self, query):
        # NOTE(review): the "Could not find any shortcuts" message below
        # looks copy-pasted; this panel searches tweaks, not shortcuts.
        if not query:
            return
        try:
            idx = self._model.find(query)
        except ParseException:
            self.search.search_done(False)
            return
        self.search.search_done(True)
        if not idx.isValid():
            info_dialog(self, _('No matches'),
                    _('Could not find any shortcuts matching %s')%query,
                    show=True, show_copy_button=False)
            return
        self.highlight_index(idx)
    def highlight_index(self, idx):
        # Scroll to, select and focus the given model index.
        if not idx.isValid():
            return
        self.view.scrollTo(idx)
        self.view.selectionModel().select(idx,
                self.view.selectionModel().ClearAndSelect)
        self.view.setCurrentIndex(idx)
    def find_next(self, *args):
        idx = self.view.currentIndex()
        if not idx.isValid():
            idx = self._model.index(0)
        idx = self._model.find_next(idx,
                unicode(self.search.currentText()))
        self.highlight_index(idx)
    def find_previous(self, *args):
        idx = self.view.currentIndex()
        if not idx.isValid():
            idx = self._model.index(0)
        idx = self._model.find_next(idx,
            unicode(self.search.currentText()), backwards=True)
        self.highlight_index(idx)
if __name__ == '__main__':
    # Manual smoke test: show this preferences panel standalone.
    app = QApplication([])
    # Tweaks()
    # test_widget
    test_widget('Advanced', 'Tweaks')
| gpl-3.0 |
40223101/2015cda_0505 | static/Brython3.1.1-20150328-091302/Lib/ui/slider.py | 603 | 2394 | from . import widget
from browser import doc,html
class Slider(widget.Widget):
    """Horizontal jQuery-UI-styled slider widget for Brython, driven by
    mouse events on its drag handle."""
    def __init__(self, id=None, label=False):
        self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
        widget.Widget.__init__(self, self._div_shell, 'slider', id)
        self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
                            Href='#', style={'left': '0px'})
        self._value=0
        self._isMouseDown=False
        # NOTE(review): self.m0 appears unused anywhere in this class.
        self.m0 = [None, None]
        def startSlide(ev):
            # Begin a drag: remember where the pointer and handle started.
            self._isMouseDown=True
            self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
            pos = widget.getMousePosition(ev)
            self._startMouseX=pos['x']
            # NOTE(review): leftover debug prints; also mixes
            # self._handle.style.left (css string) with self._handle.left.
            print('left', self._handle.style.left,'ev.x',ev.x)
            self._lastElementLeft = int(self._handle.left)
            print('left', self._lastElementLeft)
            updatePosition(ev)
        def updatePosition(ev):
            # Move the handle, clamped to [0, upperBound].
            #pos = widget.getMousePosition(ev)
            #print('mose pos',pos)
            _newPos = self._lastElementLeft + ev.x - self._startMouseX
            _newPos = max(0, _newPos)
            _newPos = min(_newPos, self._upperBound)
            self._handle.left = _newPos
            print('new position',self._handle.style.left)
            self._lastElementLeft = _newPos
        def moving(e):
            if self._isMouseDown:
                updatePosition(e)
        def dropCallback(e):
            # End the drag and stop tracking movement.
            self._isMouseDown=False
            self._handle.unbind('mousemove', moving)
        self._handle.bind('mousemove', moving)
        self._handle.bind('mouseup', dropCallback)
        #self._handle.bind('mouseout', dropCallback)
        self._handle.bind('mousedown', startSlide)
        def mouseover(e):
            # Visual hover feedback via the jQuery-UI hover class.
            _class=self._handle.getAttribute('class')
            self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
        def mouseout(e):
            # Leaving the handle cancels any in-progress drag.
            self._isMouseDown=False
            _class=self._handle.getAttribute('class')
            self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
        self._handle.bind('mouseover', mouseover)
        self._handle.bind('mouseout', mouseout)
        self._div_shell <= self._handle
    def get_value(self):
        return self._value
    #def set_value(self, value):
    #   self._value=value
    #   self._handle.style.left='%spx' % value
| gpl-3.0 |
Arundhatii/erpnext | erpnext/patches/v7_2/arrear_leave_encashment_as_salary_component.py | 33 | 1140 | import frappe
def execute():
    """Migration patch: materialise the legacy arrear / leave-encashment
    amounts on Salary Slips as proper 'Arrear' and 'Leave Encashment'
    earning components, creating those components if missing."""
    frappe.reload_doctype('Salary Slip', 'Salary Component')

    # Ensure both earning components exist.
    for comp_name, comp_abbr in (['Arrear', "ARR"], ['Leave Encashment', 'LENC']):
        if not frappe.db.exists('Salary Component', comp_name):
            frappe.get_doc({
                "doctype": "Salary Component",
                "salary_component": comp_name,
                "type": "Earning",
                "salary_component_abbr": comp_abbr
            }).insert()

    # Every non-cancelled slip carrying one of the legacy amounts.
    slips = frappe.db.sql("""select name, arrear_amount, leave_encashment_amount from `tabSalary Slip`
		where docstatus !=2 and (arrear_amount > 0 or leave_encashment_amount > 0)""", as_dict=True)

    for slip in slips:
        slip_doc = frappe.get_doc('Salary Slip', slip.name)
        for legacy_field, component in (("arrear_amount", 'Arrear'),
                ("leave_encashment_amount", 'Leave Encashment')):
            if slip.get(legacy_field) > 0:
                row = slip_doc.append('earnings', {
                    'salary_component': component,
                    'amount': slip.get(legacy_field)
                })
                # Persist the child row directly; the parent is untouched.
                row.db_update()
s0enke/boto | boto/directconnect/__init__.py | 145 | 1679 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
    """
    Get all available regions for the AWS DirectConnect service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported lazily to avoid a circular import at module load time.
    from boto.directconnect.layer1 import DirectConnectConnection
    return get_regions(
        'directconnect',
        connection_cls=DirectConnectConnection,
    )
def connect_to_region(region_name, **kw_params):
    """Connect to the DirectConnect endpoint named *region_name*.

    :return: a connection object, or None when the name does not match
        any known region.
    """
    match = next((r for r in regions() if r.name == region_name), None)
    if match is None:
        return None
    return match.connect(**kw_params)
| mit |
ryancoleman/traveldistance | src/getMembranePdb.py | 1 | 1509 | #!/usr/bin/env python
#output a pdb file of just the residues between the membrane barrier
import string
import sys
import comparePaths
import os
import pdb
import geometry
def getJustMembranePdb(inputFileName, outputFileName):
    """Write a copy of *inputFileName* keeping only residues located
    between the two membrane planes, to *outputFileName*.

    The input is expected to contain dummy 'DUM' residues marking the
    membrane: their O atoms lie in one plane and their N atoms in the
    other (planes perpendicular to the z axis, presumably symmetric
    about z=0 -- a residue is kept when |z| <= plane height).
    """
    pdbBarriers = pdb.pdbData(inputFileName)
    # Collect coordinates of the dummy atoms defining the two planes:
    # slot 0 holds O atoms (one plane), slot 1 holds N atoms (the other).
    barrierAtomList = [[], []]
    for index, resName in enumerate(pdbBarriers.resNames):
        if resName == "DUM":
            if pdbBarriers.atoms[index][0] == "O":
                barrierAtomList[0].append(pdbBarriers.coords[index])
            elif pdbBarriers.atoms[index][0] == "N":
                barrierAtomList[1].append(pdbBarriers.coords[index])
    # One representative atom per plane gives the two z levels; the larger
    # one bounds |z| for residues inside the membrane.
    # (Dead code removed: barrierSep = geometry.distL2(...) was computed
    # but never used.)
    barrierZ = [barrierAtomList[0][0][2], barrierAtomList[1][0][2]]
    barrierZ.sort()
    zCoord = barrierZ[1]
    # Record each residue+chain whose first seen atom lies inside the slab.
    goodResChain = []
    for index, thisResNum in enumerate(pdbBarriers.resNums):
        chain = pdbBarriers.chains[index]
        resChain = str(thisResNum) + str(chain)
        if resChain not in goodResChain:
            # otherwise don't need to check, already in
            zTest = pdbBarriers.coords[index][2]
            if abs(zTest) <= zCoord:
                goodResChain.append(resChain)
    newPdb = pdbBarriers.getListResiduesChains(goodResChain)
    newPdb.write(outputFileName)
# Script entry point (Python 2): each argument is a PDB file containing
# membrane planes; output is written to just_membrane_<name>.
if -1 != string.find(sys.argv[0], "getMembranePdb"):
    if len(sys.argv) >= 2:
        for prefix in sys.argv[1:]:
            getJustMembranePdb(prefix, "just_membrane_" + prefix)
    else:
        print "getMembranePdb.py file_planes.pdb [more pdbs]"
| gpl-2.0 |
Santhosh114/Hadoop-Fundamentals-for-Data-Scientists-Oreilly | recommender/framework.py | 17 | 1300 | import os
import sys
from itertools import groupby
from operator import itemgetter
# Default key/value separator used by Hadoop Streaming.
SEPARATOR = "\t"


class Streaming(object):
    """Base class for Hadoop Streaming jobs.

    Wraps the line-oriented stdin/stdout protocol plus the stderr
    'reporter:' side channel used for status messages and counters.
    """

    @staticmethod
    def get_job_conf(name):
        """Return a Hadoop job configuration value, or None.

        Streaming exposes job conf values as environment variables with
        dots replaced by underscores (e.g. 'mapred.job.id' -> MAPRED_JOB_ID).
        """
        name = name.replace(".", "_").upper()
        return os.environ.get(name)

    def __init__(self, infile=sys.stdin, separator=SEPARATOR):
        self.infile = infile   # line-oriented input stream
        self.sep = separator   # key/value separator (tab by default)

    def status(self, message):
        """Report a status message to the Hadoop task tracker."""
        # Reporter lines must be newline-terminated so Hadoop parses them
        # one per line.
        sys.stderr.write("reporter:status:%s\n" % message)

    def counter(self, counter, amount=1, group="Python Streaming"):
        """Increment the named Hadoop counter by `amount`.

        BUG FIX: the format arguments must be a single tuple; the old code
        (`"..." % group, counter, amount`) formatted only `group` and passed
        three positional arguments to write(), raising a TypeError.
        """
        sys.stderr.write(
            "reporter:counter:%s,%s,%i\n" % (group, counter, amount))

    def emit(self, key, value):
        """Write a key/value pair to stdout, separated by self.sep."""
        sys.stdout.write("%s%s%s\n" % (key, self.sep, value))

    def read(self):
        """Yield input lines with trailing whitespace stripped."""
        for line in self.infile:
            yield line.rstrip()

    def __iter__(self):
        for line in self.read():
            yield line
class Mapper(Streaming):
    """Abstract mapper; subclasses implement map() and emit key/value pairs."""
    def map(self):
        raise NotImplementedError("Mappers must implement a map method")
class Reducer(Streaming):
    """Abstract reducer.

    Iterating yields (key, group) pairs: consecutive input lines sharing
    the same key are grouped together (input must arrive sorted by key,
    which the Hadoop shuffle guarantees).
    """
    def reduce(self):
        raise NotImplementedError("Reducers must implement a reduce method")

    def __iter__(self):
        # Split each line once on the separator -> (key, value) pairs,
        # then group consecutive pairs by key.
        generator = (line.split(self.sep, 1) for line in self.read())
        for item in groupby(generator, itemgetter(0)):
            yield item
| mit |
Tejal011089/med2-app | projects/doctype/time_log_batch/test_time_log_batch.py | 30 | 1221 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes, unittest
class TimeLogBatchTest(unittest.TestCase):
    """Checks that a Time Log's status tracks its Time Log Batch:
    batching sets 'Batched for Billing', cancelling the batch reverts
    it to 'Submitted'."""
    def test_time_log_status(self):
        from projects.doctype.time_log.test_time_log import test_records as time_log_records
        # Create and submit a standalone time log.
        time_log = webnotes.bean(copy=time_log_records[0])
        time_log.doc.fields.update({
            "from_time": "2013-01-02 10:00:00",
            "to_time": "2013-01-02 11:00:00",
            "docstatus": 0
        })
        time_log.insert()
        time_log.submit()
        self.assertEquals(webnotes.conn.get_value("Time Log", time_log.doc.name, "status"), "Submitted")
        # Put the time log into a batch; submitting the batch flips the status.
        tlb = webnotes.bean(copy=test_records[0])
        tlb.doclist[1].time_log = time_log.doc.name
        tlb.insert()
        tlb.submit()
        self.assertEquals(webnotes.conn.get_value("Time Log", time_log.doc.name, "status"), "Batched for Billing")
        # Cancelling the batch restores the previous status.
        tlb.cancel()
        self.assertEquals(webnotes.conn.get_value("Time Log", time_log.doc.name, "status"), "Submitted")
test_records = [[
{
"doctype": "Time Log Batch",
"rate": "500"
},
{
"doctype": "Time Log Batch Detail",
"parenttype": "Time Log Batch",
"parentfield": "time_log_batch_details",
"time_log": "_T-Time Log-00001",
}
]] | agpl-3.0 |
drakuna/odoo | openerp/addons/base/ir/ir_actions.py | 10 | 63271 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from functools import partial
import logging
import operator
import os
import time
import datetime
import dateutil
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp import workflow
import openerp.api
from openerp.osv import fields, osv
from openerp.osv.orm import browse_record
import openerp.report.interface
from openerp.report.report_sxw import report_sxw, report_rml
from openerp.tools import ormcache
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import openerp.workflow
from openerp.exceptions import MissingError, UserError
_logger = logging.getLogger(__name__)
class actions(osv.osv):
    """Root model for all action types.

    Concrete action models (window, report, url, server, ...) inherit
    from this one and share the ir_actions table / id sequence, so
    generic code can reference any action as 'ir.actions.actions'.
    """
    _name = 'ir.actions.actions'
    _table = 'ir_actions'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True),
        'type': fields.char('Action Type', required=True),
        'usage': fields.char('Action Usage'),
        'xml_id': fields.function(osv.osv.get_external_id, type='char', string="External ID"),
        'help': fields.html('Action description',
                            help='Optional help text for the users with a description of the target view, such as its usage and purpose.',
                            translate=True),
    }
    _defaults = {
        'usage': lambda *a: False,
    }

    def create(self, cr, uid, vals, context=None):
        res = super(actions, self).create(cr, uid, vals, context=context)
        # ir_values.get_actions() depends on action records
        self.pool['ir.values'].clear_caches()
        return res

    def write(self, cr, uid, ids, vals, context=None):
        res = super(actions, self).write(cr, uid, ids, vals, context=context)
        # ir_values.get_actions() depends on action records
        self.pool['ir.values'].clear_caches()
        return res

    def unlink(self, cr, uid, ids, context=None):
        """unlink ir.action.todo which are related to actions which will be deleted.
        NOTE: ondelete cascade will not work on ir.actions.actions so we will need to do it manually."""
        todo_obj = self.pool.get('ir.actions.todo')
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]
        todo_ids = todo_obj.search(cr, uid, [('action_id', 'in', ids)], context=context)
        todo_obj.unlink(cr, uid, todo_ids, context=context)
        res = super(actions, self).unlink(cr, uid, ids, context=context)
        # ir_values.get_actions() depends on action records
        self.pool['ir.values'].clear_caches()
        return res

    def _get_eval_context(self, cr, uid, action=None, context=None):
        """ evaluation context to pass to safe_eval """
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return {
            'uid': uid,
            'user': user,
            'time': time,
            'datetime': datetime,
            'dateutil': dateutil,
        }
class ir_actions_report_xml(osv.osv):
    """Report actions: QWeb, legacy RML/SXW and controller-based reports.

    NOTE: this file is Python 2 only (it uses `raise Exception, msg`
    syntax and `unicode`).
    """

    def _report_content(self, cursor, user, ids, name, arg, context=None):
        # Function-field getter: return the stored *_data blob, or, when
        # empty, lazily load the content from the file path held in the
        # matching path field (name minus the '_content' suffix).
        res = {}
        for report in self.browse(cursor, user, ids, context=context):
            data = report[name + '_data']
            if not data and report[name[:-8]]:
                fp = None
                try:
                    fp = tools.file_open(report[name[:-8]], mode='rb')
                    data = fp.read()
                except:
                    # Best effort: unreadable file just yields no content.
                    data = False
                finally:
                    if fp:
                        fp.close()
            res[report.id] = data
        return res

    def _report_content_inv(self, cursor, user, id, name, value, arg, context=None):
        # Function-field setter: store the uploaded content in *_data.
        self.write(cursor, user, id, {name+'_data': value}, context=context)

    def _report_sxw(self, cursor, user, ids, name, arg, context=None):
        # Derive the .sxw path from the .rml path (legacy OpenOffice reports).
        res = {}
        for report in self.browse(cursor, user, ids, context=context):
            if report.report_rml:
                res[report.id] = report.report_rml.replace('.rml', '.sxw')
            else:
                res[report.id] = False
        return res

    def _lookup_report(self, cr, name):
        """
        Look up a report definition.
        """
        opj = os.path.join

        # First lookup in the deprecated place, because if the report definition
        # has not been updated, it is more likely the correct definition is there.
        # Only reports with custom parser sepcified in Python are still there.
        if 'report.' + name in openerp.report.interface.report_int._reports:
            new_report = openerp.report.interface.report_int._reports['report.' + name]
        else:
            cr.execute("SELECT * FROM ir_act_report_xml WHERE report_name=%s", (name,))
            r = cr.dictfetchone()
            if r:
                if r['report_type'] in ['qweb-pdf', 'qweb-html']:
                    # QWeb reports are identified by name only.
                    return r['report_name']
                elif r['report_rml'] or r['report_rml_content_data']:
                    if r['parser']:
                        kwargs = { 'parser': operator.attrgetter(r['parser'])(openerp.addons) }
                    else:
                        kwargs = {}
                    new_report = report_sxw('report.'+r['report_name'], r['model'],
                        opj('addons',r['report_rml'] or '/'), header=r['header'], register=False, **kwargs)
                elif r['report_xsl'] and r['report_xml']:
                    new_report = report_rml('report.'+r['report_name'], r['model'],
                        opj('addons',r['report_xml']),
                        r['report_xsl'] and opj('addons',r['report_xsl']), register=False)
                else:
                    raise Exception, "Unhandled report type: %s" % r
            else:
                raise Exception, "Required report does not exist: %s" % name

        return new_report

    def create_action(self, cr, uid, ids, context=None):
        """ Create a contextual action for each of the report."""
        for ir_actions_report_xml in self.browse(cr, uid, ids, context=context):
            ir_values_id = self.pool['ir.values'].create(cr, SUPERUSER_ID, {
                'name': ir_actions_report_xml.name,
                'model': ir_actions_report_xml.model,
                'key2': 'client_print_multi',
                'value': "ir.actions.report.xml,%s" % ir_actions_report_xml.id,
            }, context)
            ir_actions_report_xml.write({
                'ir_values_id': ir_values_id,
            })
        return True

    def unlink_action(self, cr, uid, ids, context=None):
        """ Remove the contextual actions created for the reports."""
        self.check_access_rights(cr , uid, 'write', raise_exception=True)
        for ir_actions_report_xml in self.browse(cr, uid, ids, context=context):
            if ir_actions_report_xml.ir_values_id:
                try:
                    self.pool['ir.values'].unlink(
                        cr, SUPERUSER_ID, ir_actions_report_xml.ir_values_id.id, context
                    )
                except Exception:
                    raise UserError(_('Deletion of the action record failed.'))
        return True

    def render_report(self, cr, uid, res_ids, name, data, context=None):
        """
        Look up a report definition and render the report for the provided IDs.
        Returns a (content, format) pair.
        """
        new_report = self._lookup_report(cr, name)

        if isinstance(new_report, (str, unicode)):  # Qweb report
            # The only case where a QWeb report is rendered with this method occurs when running
            # yml tests originally written for RML reports.
            if openerp.tools.config['test_enable'] and not tools.config['test_report_directory']:
                # Only generate the pdf when a destination folder has been provided.
                return self.pool['report'].get_html(cr, uid, res_ids, new_report, data=data, context=context), 'html'
            else:
                return self.pool['report'].get_pdf(cr, uid, res_ids, new_report, data=data, context=context), 'pdf'
        else:
            return new_report.create(cr, uid, res_ids, data, context)

    _name = 'ir.actions.report.xml'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_report_xml'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'
    _columns = {
        'type': fields.char('Action Type', required=True),
        'name': fields.char('Name', required=True, translate=True),
        'model': fields.char('Model', required=True),
        'report_type': fields.selection([('qweb-pdf', 'PDF'),
                    ('qweb-html', 'HTML'),
                    ('controller', 'Controller'),
                    ('pdf', 'RML pdf (deprecated)'),
                    ('sxw', 'RML sxw (deprecated)'),
                    ('webkit', 'Webkit (deprecated)'),
                    ], 'Report Type', required=True, help="HTML will open the report directly in your browser, PDF will use wkhtmltopdf to render the HTML into a PDF file and let you download it, Controller allows you to define the url of a custom controller outputting any kind of report."),
        'report_name': fields.char('Template Name', required=True, help="For QWeb reports, name of the template used in the rendering. The method 'render_html' of the model 'report.template_name' will be called (if any) to give the html. For RML reports, this is the LocalService name."),
        'groups_id': fields.many2many('res.groups', 'res_groups_report_rel', 'uid', 'gid', 'Groups'),
        'ir_values_id': fields.many2one('ir.values', 'More Menu entry', readonly=True,
                                        help='More menu entry.', copy=False),
        # options
        'multi': fields.boolean('On Multiple Doc.', help="If set to true, the action will not be displayed on the right toolbar of a form view."),
        'attachment_use': fields.boolean('Reload from Attachment', help='If you check this, then the second time the user prints with same attachment name, it returns the previous report.'),
        'attachment': fields.char('Save as Attachment Prefix', help='This is the filename of the attachment used to store the printing result. Keep empty to not save the printed reports. You can use a python expression with the object and time variables.'),
        # Deprecated rml stuff
        'usage': fields.char('Action Usage'),
        'header': fields.boolean('Add RML Header', help="Add or not the corporate RML header"),
        'parser': fields.char('Parser Class'),
        'auto': fields.boolean('Custom Python Parser'),
        'report_xsl': fields.char('XSL Path'),
        'report_xml': fields.char('XML Path'),
        'report_rml': fields.char('Main Report File Path/controller', help="The path to the main report file/controller (depending on Report Type) or empty if the content is in another data field"),
        'report_file': fields.related('report_rml', type="char", required=False, readonly=False, string='Report File', help="The path to the main report file (depending on Report Type) or empty if the content is in another field", store=True),
        'report_sxw': fields.function(_report_sxw, type='char', string='SXW Path'),
        'report_sxw_content_data': fields.binary('SXW Content'),
        'report_rml_content_data': fields.binary('RML Content'),
        'report_sxw_content': fields.function(_report_content, fnct_inv=_report_content_inv, type='binary', string='SXW Content',),
        'report_rml_content': fields.function(_report_content, fnct_inv=_report_content_inv, type='binary', string='RML Content'),
    }
    _defaults = {
        'type': 'ir.actions.report.xml',
        'multi': False,
        'auto': True,
        'header': True,
        'report_sxw_content': False,
        'report_type': 'pdf',
        'attachment': False,
    }
class ir_actions_act_window(osv.osv):
    """Window actions: open a model in one or more views (tree, form, ...)."""
    _name = 'ir.actions.act_window'
    _table = 'ir_act_window'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    def _check_model(self, cr, uid, ids, context=None):
        # Constraint helper: both res_model and src_model (when set) must
        # name models registered in the pool.
        for action in self.browse(cr, uid, ids, context):
            if action.res_model not in self.pool:
                return False
            if action.src_model and action.src_model not in self.pool:
                return False
        return True

    def _invalid_model_msg(self, cr, uid, ids, context=None):
        return _('Invalid model name in the action definition.')

    _constraints = [
        (_check_model, _invalid_model_msg, ['res_model','src_model'])
    ]

    def _views_get_fnc(self, cr, uid, ids, name, arg, context=None):
        """Returns an ordered list of the specific view modes that should be
        enabled when displaying the result of this action, along with the
        ID of the specific view to use for each mode, if any were required.

        This function hides the logic of determining the precedence between
        the view_modes string, the view_ids o2m, and the view_id m2o that can
        be set on the action.

        :rtype: dict in the form { action_id: list of pairs (tuples) }
        :return: { action_id: [(view_id, view_mode), ...], ... }, where view_mode
                 is one of the possible values for ir.ui.view.type and view_id
                 is the ID of a specific view to use for this mode, or False for
                 the default one.
        """
        res = {}
        for act in self.browse(cr, uid, ids):
            res[act.id] = [(view.view_id.id, view.view_mode) for view in act.view_ids]
            view_ids_modes = [view.view_mode for view in act.view_ids]
            modes = act.view_mode.split(',')
            missing_modes = [mode for mode in modes if mode not in view_ids_modes]
            if missing_modes:
                if act.view_id and act.view_id.type in missing_modes:
                    # reorder missing modes to put view_id first if present
                    missing_modes.remove(act.view_id.type)
                    res[act.id].append((act.view_id.id, act.view_id.type))
                res[act.id].extend([(False, mode) for mode in missing_modes])
        return res

    def _search_view(self, cr, uid, ids, name, arg, context=None):
        # Serialized fields_view_get of the action's search view, for clients.
        res = {}
        for act in self.browse(cr, uid, ids, context=context):
            field_get = self.pool[act.res_model].fields_view_get(cr, uid,
                act.search_view_id and act.search_view_id.id or False,
                'search', context=context)
            res[act.id] = str(field_get)
        return res

    _columns = {
        'name': fields.char('Action Name', required=True, translate=True),
        'type': fields.char('Action Type', required=True),
        'view_id': fields.many2one('ir.ui.view', 'View Ref.', ondelete='set null'),
        'domain': fields.char('Domain Value',
            help="Optional domain filtering of the destination data, as a Python expression"),
        'context': fields.char('Context Value', required=True,
            help="Context dictionary as Python expression, empty by default (Default: {})"),
        'res_id': fields.integer('Record ID', help="Database ID of record to open in form view, when ``view_mode`` is set to 'form' only"),
        'res_model': fields.char('Destination Model', required=True,
            help="Model name of the object to open in the view window"),
        'src_model': fields.char('Source Model',
            help="Optional model name of the objects on which this action should be visible"),
        'target': fields.selection([('current','Current Window'),('new','New Window'),('inline','Inline Edit'),('inlineview','Inline View')], 'Target Window'),
        'view_mode': fields.char('View Mode', required=True,
            help="Comma-separated list of allowed view modes, such as 'form', 'tree', 'calendar', etc. (Default: tree,form)"),
        'view_type': fields.selection((('tree','Tree'),('form','Form')), string='View Type', required=True,
            help="View type: Tree type to use for the tree view, set to 'tree' for a hierarchical tree view, or 'form' for a regular list view"),
        'usage': fields.char('Action Usage',
            help="Used to filter menu and home actions from the user form."),
        'view_ids': fields.one2many('ir.actions.act_window.view', 'act_window_id', 'Views'),
        'views': fields.function(_views_get_fnc, type='binary', string='Views',
            help="This function field computes the ordered list of views that should be enabled " \
                 "when displaying the result of an action, federating view mode, views and " \
                 "reference view. The result is returned as an ordered list of pairs (view_id,view_mode)."),
        'limit': fields.integer('Limit', help='Default limit for the list view'),
        'auto_refresh': fields.integer('Auto-Refresh',
            help='Add an auto-refresh on the view'),
        'groups_id': fields.many2many('res.groups', 'ir_act_window_group_rel',
            'act_id', 'gid', 'Groups'),
        'search_view_id': fields.many2one('ir.ui.view', 'Search View Ref.'),
        'filter': fields.boolean('Filter'),
        'auto_search':fields.boolean('Auto Search'),
        'search_view' : fields.function(_search_view, type='text', string='Search View'),
        'multi': fields.boolean('Restrict to lists', help="If checked and the action is bound to a model, it will only appear in the More menu on list views"),
    }

    _defaults = {
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'tree,form',
        'context': '{}',
        'limit': 80,
        'target': 'current',
        'auto_refresh': 0,
        'auto_search':True,
        'multi': False,
    }

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """ call the method get_empty_list_help of the model and set the window action help message
        """
        ids_int = isinstance(ids, (int, long))
        if ids_int:
            ids = [ids]
        results = super(ir_actions_act_window, self).read(cr, uid, ids, fields=fields, context=context, load=load)

        if not fields or 'help' in fields:
            for res in results:
                model = res.get('res_model')
                if model and self.pool.get(model):
                    ctx = dict(context or {})
                    res['help'] = self.pool[model].get_empty_list_help(cr, uid, res.get('help', ""), context=ctx)
        if ids_int:
            return results[0]
        return results

    def for_xml_id(self, cr, uid, module, xml_id, context=None):
        """ Returns the act_window object created for the provided xml_id

        :param module: the module the act_window originates in
        :param xml_id: the namespace-less id of the action (the @id
                       attribute from the XML file)
        :return: A read() view of the ir.actions.act_window
        """
        dataobj = self.pool.get('ir.model.data')
        data_id = dataobj._get_id (cr, SUPERUSER_ID, module, xml_id)
        res_id = dataobj.browse(cr, uid, data_id, context).res_id
        return self.read(cr, uid, [res_id], [], context)[0]

    @openerp.api.model
    def create(self, vals):
        # _existing() is cached; any write to the table invalidates it.
        self.clear_caches()
        return super(ir_actions_act_window, self).create(vals)

    @openerp.api.multi
    def unlink(self):
        self.clear_caches()
        return super(ir_actions_act_window, self).unlink()

    @openerp.api.multi
    def exists(self):
        ids = self._existing()
        existing = self.filtered(lambda rec: rec.id in ids)
        if len(existing) < len(self):
            # mark missing records in cache with a failed value
            exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(openerp.fields.FailedValue(exc))
        return existing

    @openerp.api.model
    @ormcache()
    def _existing(self):
        # Cached set of all action ids currently in the table.
        self._cr.execute("SELECT id FROM %s" % self._table)
        return set(row[0] for row in self._cr.fetchall())
# Selection values for the view_mode field of action view lines below.
VIEW_TYPES = [
    ('tree', 'Tree'),
    ('form', 'Form'),
    ('graph', 'Graph'),
    ('pivot', 'Pivot'),
    ('calendar', 'Calendar'),
    ('gantt', 'Gantt'),
    ('kanban', 'Kanban')]
class ir_actions_act_window_view(osv.osv):
    """One (view, mode) line of a window action's ordered view list."""
    _name = 'ir.actions.act_window.view'
    _table = 'ir_act_window_view'
    _rec_name = 'view_id'
    _order = 'sequence'
    _columns = {
        'sequence': fields.integer('Sequence'),
        'view_id': fields.many2one('ir.ui.view', 'View'),
        'view_mode': fields.selection(VIEW_TYPES, string='View Type', required=True),
        'act_window_id': fields.many2one('ir.actions.act_window', 'Action', ondelete='cascade'),
        'multi': fields.boolean('On Multiple Doc.',
            help="If set to true, the action will not be displayed on the right toolbar of a form view."),
    }
    _defaults = {
        'multi': False,
    }

    def _auto_init(self, cr, context=None):
        super(ir_actions_act_window_view, self)._auto_init(cr, context)
        # Enforce at most one line per (action, view_mode) pair.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'act_window_view_unique_mode_per_action\'')
        if not cr.fetchone():
            cr.execute('CREATE UNIQUE INDEX act_window_view_unique_mode_per_action ON ir_act_window_view (act_window_id, view_mode)')
class ir_actions_act_window_close(osv.osv):
    """Action that simply closes the current window (no extra columns;
    it lives directly in the shared ir_actions table)."""
    _name = 'ir.actions.act_window_close'
    _inherit = 'ir.actions.actions'
    _table = 'ir_actions'
    _defaults = {
        'type': 'ir.actions.act_window_close',
    }
class ir_actions_act_url(osv.osv):
    """Action that opens a URL, either in a new browser window or the
    current one."""
    _name = 'ir.actions.act_url'
    _table = 'ir_act_url'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'
    _columns = {
        'name': fields.char('Action Name', required=True, translate=True),
        'type': fields.char('Action Type', required=True),
        'url': fields.text('Action URL',required=True),
        'target': fields.selection((
            ('new', 'New Window'),
            ('self', 'This Window')),
            'Action Target', required=True
        )
    }
    _defaults = {
        'type': 'ir.actions.act_url',
        'target': 'new'
    }
class ir_actions_server(osv.osv):
""" Server actions model. Server action work on a base model and offer various
type of actions that can be executed automatically, for example using base
action rules, of manually, by adding the action in the 'More' contextual
menu.
Since OpenERP 8.0 a button 'Create Menu Action' button is available on the
action form view. It creates an entry in the More menu of the base model.
This allows to create server actions and run them in mass mode easily through
the interface.
The available actions are :
- 'Execute Python Code': a block of python code that will be executed
- 'Trigger a Workflow Signal': send a signal to a workflow
- 'Run a Client Action': choose a client action to launch
- 'Create or Copy a new Record': create a new record with new values, or
copy an existing record in your database
- 'Write on a Record': update the values of a record
- 'Execute several actions': define an action that triggers several other
server actions
"""
_name = 'ir.actions.server'
_table = 'ir_act_server'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'sequence,name'
def _select_objects(self, cr, uid, context=None):
model_pool = self.pool.get('ir.model')
ids = model_pool.search(cr, uid, [], limit=None)
res = model_pool.read(cr, uid, ids, ['model', 'name'])
return [(r['model'], r['name']) for r in res] + [('', '')]
def _get_states(self, cr, uid, context=None):
""" Override me in order to add new states in the server action. Please
note that the added key length should not be higher than already-existing
ones. """
return [('code', 'Execute Python Code'),
('trigger', 'Trigger a Workflow Signal'),
('client_action', 'Run a Client Action'),
('object_create', 'Create or Copy a new Record'),
('object_write', 'Write on a Record'),
('multi', 'Execute several actions')]
def _get_states_wrapper(self, cr, uid, context=None):
return self._get_states(cr, uid, context)
_columns = {
'name': fields.char('Action Name', required=True, translate=True),
'condition': fields.char('Condition',
help="Condition verified before executing the server action. If it "
"is not verified, the action will not be executed. The condition is "
"a Python expression, like 'object.list_price > 5000'. A void "
"condition is considered as always True. Help about python expression "
"is given in the help tab."),
'state': fields.selection(_get_states_wrapper, 'Action To Do', required=True,
help="Type of server action. The following values are available:\n"
"- 'Execute Python Code': a block of python code that will be executed\n"
"- 'Trigger a Workflow Signal': send a signal to a workflow\n"
"- 'Run a Client Action': choose a client action to launch\n"
"- 'Create or Copy a new Record': create a new record with new values, or copy an existing record in your database\n"
"- 'Write on a Record': update the values of a record\n"
"- 'Execute several actions': define an action that triggers several other server actions\n"
"- 'Send Email': automatically send an email (available in email_template)"),
'usage': fields.char('Action Usage'),
'type': fields.char('Action Type', required=True),
# Generic
'sequence': fields.integer('Sequence',
help="When dealing with multiple actions, the execution order is "
"based on the sequence. Low number means high priority."),
'model_id': fields.many2one('ir.model', 'Base Model', required=True, ondelete='cascade',
help="Base model on which the server action runs."),
'model_name': fields.related('model_id', 'model', type='char',
string='Model Name', readonly=True),
'menu_ir_values_id': fields.many2one('ir.values', 'More Menu entry', readonly=True,
help='More menu entry.', copy=False),
# Client Action
'action_id': fields.many2one('ir.actions.actions', 'Client Action',
help="Select the client action that has to be executed."),
# Python code
'code': fields.text('Python Code',
help="Write Python code that the action will execute. Some variables are "
"available for use; help about pyhon expression is given in the help tab."),
# Workflow signal
'use_relational_model': fields.selection([('base', 'Use the base model of the action'),
('relational', 'Use a relation field on the base model')],
string='Target Model', required=True),
'wkf_transition_id': fields.many2one('workflow.transition', string='Signal to Trigger',
help="Select the workflow signal to trigger."),
'wkf_model_id': fields.many2one('ir.model', 'Target Model',
help="The model that will receive the workflow signal. Note that it should have a workflow associated with it."),
'wkf_model_name': fields.related('wkf_model_id', 'model', type='char', string='Target Model Name', store=True, readonly=True),
'wkf_field_id': fields.many2one('ir.model.fields', string='Relation Field',
oldname='trigger_obj_id',
help="The field on the current object that links to the target object record (must be a many2one, or an integer field with the record ID)"),
# Multi
'child_ids': fields.many2many('ir.actions.server', 'rel_server_actions',
'server_id', 'action_id',
string='Child Actions',
help='Child server actions that will be executed. Note that the last return returned action value will be used as global return value.'),
# Create/Copy/Write
'use_create': fields.selection([('new', 'Create a new record in the Base Model'),
('new_other', 'Create a new record in another model'),
('copy_current', 'Copy the current record'),
('copy_other', 'Choose and copy a record in the database')],
string="Creation Policy", required=True,
help=""),
'crud_model_id': fields.many2one('ir.model', 'Target Model',
oldname='srcmodel_id',
help="Model for record creation / update. Set this field only to specify a different model than the base model."),
'crud_model_name': fields.related('crud_model_id', 'model', type='char',
string='Create/Write Target Model Name',
store=True, readonly=True),
'ref_object': fields.reference('Reference record', selection=_select_objects, size=128,
oldname='copy_object'),
'link_new_record': fields.boolean('Attach the new record',
help="Check this if you want to link the newly-created record "
"to the current record on which the server action runs."),
'link_field_id': fields.many2one('ir.model.fields', 'Link using field',
oldname='record_id',
help="Provide the field where the record id is stored after the operations."),
'use_write': fields.selection([('current', 'Update the current record'),
('expression', 'Update a record linked to the current record using python'),
('other', 'Choose and Update a record in the database')],
string='Update Policy', required=True,
help=""),
'write_expression': fields.char('Expression',
oldname='write_id',
help="Provide an expression that, applied on the current record, gives the field to update."),
'fields_lines': fields.one2many('ir.server.object.lines', 'server_id',
string='Value Mapping',
copy=True),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
# Fake fields used to implement the ID finding assistant
'id_object': fields.reference('Record', selection=_select_objects, size=128),
'id_value': fields.char('Record ID'),
}
_defaults = {
'state': 'code',
'condition': 'True',
'type': 'ir.actions.server',
'sequence': 5,
'code': """# Available locals:
# - time, datetime, dateutil: Python libraries
# - env: Odoo Environement
# - model: Model of the record on which the action is triggered
# - object: Record on which the action is triggered if there is one, otherwise None
# - workflow: Workflow engine
# - log : log(message), function to log debug information in logging table
# - Warning: Warning Exception to use with raise
# To return an action, assign: action = {...}""",
'use_relational_model': 'base',
'use_create': 'new',
'use_write': 'current',
}
def _check_expression(self, cr, uid, expression, model_id, context):
    """ Check python expression (condition, write_expression). Each step of
    the path must be a valid many2one field, or an integer field for the last
    step.

    :param str expression: a python expression, beginning by 'obj' or 'object'
    :param int model_id: the base model of the server action
    :returns tuple: (is_valid, target_model_name, error_msg)
    """
    if not model_id:
        return (False, None, 'Your expression cannot be validated because the Base Model is not set.')
    # fetch current model
    current_model_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
    # transform expression into a path that should look like 'object.many2onefield.many2onefield'
    path = expression.split('.')
    initial = path.pop(0)
    if initial not in ['obj', 'object']:
        return (False, None, 'Your expression should begin with obj or object.\nAn expression builder is available in the help tab.')
    # analyze path: walk many2one relations step by step
    while path:
        step = path.pop(0)
        field = self.pool[current_model_name]._fields.get(step)
        if not field:
            return (False, None, 'Part of the expression (%s) is not recognized as a column in the model %s.' % (step, current_model_name))
        ftype = field.type
        # NOTE(review): integer fields report type 'integer' in the new API;
        # the 'int' check here looks suspicious -- confirm whether integer
        # terminal steps are actually accepted by this validator.
        if ftype not in ['many2one', 'int']:
            return (False, None, 'Part of the expression (%s) is not a valid column type (is %s, should be a many2one or an int)' % (step, ftype))
        if ftype == 'int' and path:
            return (False, None, 'Part of the expression (%s) is an integer field that is only allowed at the end of an expression' % (step))
        if ftype == 'many2one':
            # hop to the comodel; the next step is resolved against it
            current_model_name = field.comodel_name
    return (True, current_model_name, None)
def _check_write_expression(self, cr, uid, ids, context=None):
    """Constraint helper: validate each record's write_expression against
    its base model.

    Returns False (and logs the validation message) as soon as one record
    carries an invalid expression; True otherwise.
    """
    for record in self.browse(cr, uid, ids, context=context):
        if not (record.write_expression and record.model_id):
            continue
        valid, _model_name, message = self._check_expression(
            cr, uid, record.write_expression, record.model_id.id, context=context)
        if not valid:
            _logger.warning('Invalid expression: %s' % message)
            return False
    return True
# Model-level constraints: validate the write expression, and forbid
# recursion in the child_ids chain of multi-actions.
_constraints = [
    (_check_write_expression,
        'Incorrect Write Record Expression',
        ['write_expression']),
    (partial(osv.Model._check_m2m_recursion, field_name='child_ids'),
        'Recursion found in child server actions',
        ['child_ids']),
]
def on_change_model_id(self, cr, uid, ids, model_id, wkf_model_id, crud_model_id, context=None):
    """ When changing the action base model, reset workflow and crud config
    to ease value coherence. """
    values = dict(
        use_create='new',
        use_write='current',
        use_relational_model='base',
        wkf_model_id=model_id,
        wkf_field_id=False,
        crud_model_id=model_id,
    )
    if model_id:
        model_record = self.pool.get('ir.model').browse(cr, uid, model_id, context)
        values['model_name'] = model_record.model
    return {'value': values}
def on_change_wkf_wonfig(self, cr, uid, ids, use_relational_model, wkf_field_id, wkf_model_id, model_id, context=None):
    """ Update workflow type configuration

    - update the workflow model (for base (model_id) /relational (field.relation))
    - update wkf_transition_id to False if workflow model changes, to force
      the user to choose a new one

    NOTE(review): the method name looks like a typo for on_change_wkf_config;
    it is kept as-is because view definitions reference on_change methods by
    name.
    """
    values = {}
    if use_relational_model == 'relational' and wkf_field_id:
        # the workflow runs on the comodel of the selected relational field
        field = self.pool['ir.model.fields'].browse(cr, uid, wkf_field_id, context=context)
        new_wkf_model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', field.relation)], context=context)[0]
        values['wkf_model_id'] = new_wkf_model_id
    else:
        values['wkf_model_id'] = model_id
    return {'value': values}
def on_change_wkf_model_id(self, cr, uid, ids, wkf_model_id, context=None):
    """ When changing the workflow model, update its stored name also """
    model_name = False
    if wkf_model_id:
        model_name = self.pool.get('ir.model').browse(cr, uid, wkf_model_id, context).model
    # changing the model invalidates any previously chosen transition
    return {'value': {'wkf_transition_id': False, 'wkf_model_name': model_name}}
def on_change_crud_config(self, cr, uid, ids, state, use_create, use_write, ref_object, crud_model_id, model_id, context=None):
    """ Wrapper on CRUD-type (create or write) on_change """
    if state == 'object_create':
        return self.on_change_create_config(
            cr, uid, ids, use_create, ref_object, crud_model_id, model_id, context=context)
    if state == 'object_write':
        return self.on_change_write_config(
            cr, uid, ids, use_write, ref_object, crud_model_id, model_id, context=context)
    return {}
def on_change_create_config(self, cr, uid, ids, use_create, ref_object, crud_model_id, model_id, context=None):
    """ When changing the object_create type configuration:

    - `new` and `copy_current`: crud_model_id is the same as base model
    - `new_other`: user choose crud_model_id
    - `copy_other`: disassemble the reference object to have its model
    - if the target model has changed, then reset the link field that is
      probably not correct anymore
    """
    values = {}
    if use_create in ('new', 'copy_current'):
        values['crud_model_id'] = model_id
    elif use_create == 'copy_other' and ref_object:
        target_model = ref_object.split(',')[0]
        values['crud_model_id'] = self.pool['ir.model'].search(
            cr, uid, [('model', '=', target_model)], context=context)[0]
    # 'new_other': the user picks crud_model_id manually -> nothing to set
    if values.get('crud_model_id') != crud_model_id:
        values['link_field_id'] = False
    return {'value': values}
def on_change_write_config(self, cr, uid, ids, use_write, ref_object, crud_model_id, model_id, context=None):
    """ When changing the object_write type configuration:

    - `current`: crud_model_id is the same as base model
    - `other`: disassemble the reference object to have its model
    - `expression`: has its own on_change, nothing special here
    """
    values = {}
    if use_write == 'current':
        values['crud_model_id'] = model_id
    elif use_write == 'other' and ref_object:
        target_model = ref_object.split(',')[0]
        values['crud_model_id'] = self.pool['ir.model'].search(
            cr, uid, [('model', '=', target_model)], context=context)[0]
    # reset the link field when the target model actually changed
    if values.get('crud_model_id') != crud_model_id:
        values['link_field_id'] = False
    return {'value': values}
def on_change_write_expression(self, cr, uid, ids, write_expression, model_id, context=None):
    """ Check the write_expression and update crud_model_id accordingly """
    values = {}
    if write_expression:
        valid, model_name, message = self._check_expression(cr, uid, write_expression, model_id, context=context)
    else:
        # void expression: fall back on the base model as the write target
        valid, model_name, message = True, None, False
        if model_id:
            model_name = self.pool['ir.model'].browse(cr, uid, model_id, context).model
    if not valid:
        # surface the validation error as a client-side warning popup
        return {
            'warning': {
                'title': 'Incorrect expression',
                'message': message or 'Invalid expression',
            }
        }
    if model_name:
        ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', model_name)], context=context)[0]
        values['crud_model_id'] = ref_model_id
        return {'value': values}
    return {'value': {}}
def on_change_crud_model_id(self, cr, uid, ids, crud_model_id, context=None):
    """ When changing the CRUD model, update its stored name also """
    model_name = False
    if crud_model_id:
        model_name = self.pool.get('ir.model').browse(cr, uid, crud_model_id, context).model
    # a new target model invalidates any previously selected link field
    return {'value': {'link_field_id': False, 'crud_model_name': model_name}}
def _build_expression(self, field_name, sub_field_name):
""" Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:return: final placeholder expression
"""
expression = ''
if field_name:
expression = "object." + field_name
if sub_field_name:
expression += "." + sub_field_name
return expression
def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, context=None):
    """ Placeholder assistant on_change: resolve the selected field (and the
    optional sub-field) into the sub-model and the final copy/paste
    placeholder expression. """
    result = {
        'sub_object': False,
        'copyvalue': False,
        'sub_model_object_field': False,
    }
    if model_object_field:
        fields_obj = self.pool.get('ir.model.fields')
        field_value = fields_obj.browse(cr, uid, model_object_field, context)
        if field_value.ttype in ['many2one', 'one2many', 'many2many']:
            # relational first field: expose the target model and include the
            # chosen sub-field (if any) in the placeholder expression
            res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
            sub_field_value = False
            if sub_model_object_field:
                sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
            if res_ids:
                result.update({
                    'sub_object': res_ids[0],
                    'copyvalue': self._build_expression(field_value.name, sub_field_value and sub_field_value.name or False),
                    'sub_model_object_field': sub_model_object_field or False,
                })
        else:
            # simple field: expression is just object.<field>
            result.update({
                'copyvalue': self._build_expression(field_value.name, False),
            })
    return {'value': result}
def onchange_id_object(self, cr, uid, ids, id_object, context=None):
    """ Mirror the record id part of the chosen reference ('model,id')
    into the id_value helper field. """
    if not id_object:
        return {'value': {'id_value': False}}
    _ref_model, ref_id = id_object.split(',')
    return {'value': {'id_value': ref_id}}
def create_action(self, cr, uid, ids, context=None):
    """ Create a contextual action for each of the server actions. """
    for action in self.browse(cr, uid, ids, context=context):
        # the ir.values entry makes the action appear in the "More" menu of
        # the target model; created as superuser on purpose so regular users
        # can enable their own actions
        ir_values_id = self.pool.get('ir.values').create(cr, SUPERUSER_ID, {
            'name': _('Run %s') % action.name,
            'model': action.model_id.model,
            'key2': 'client_action_multi',
            'value': "ir.actions.server,%s" % action.id,
        }, context)
        # remember the entry so unlink_action can remove it later
        action.write({
            'menu_ir_values_id': ir_values_id,
        })
    return True
def unlink_action(self, cr, uid, ids, context=None):
    """ Remove the contextual actions created for the server actions. """
    # write access on the server action is required to detach its menu entry
    self.check_access_rights(cr, uid, 'write', raise_exception=True)
    ir_values = self.pool.get('ir.values')
    for action in self.browse(cr, uid, ids, context=context):
        if not action.menu_ir_values_id:
            continue
        try:
            ir_values.unlink(cr, SUPERUSER_ID, action.menu_ir_values_id.id, context)
        except Exception:
            raise UserError(_('Deletion of the action record failed.'))
    return True
def run_action_client_action(self, cr, uid, action, eval_context=None, context=None):
    """ Return the linked client action's read() result so the web client
    launches it. """
    target = action.action_id
    if not target:
        raise UserError(_("Please specify an action to launch!"))
    return self.pool[target.type].read(cr, uid, [target.id], context=context)[0]
def run_action_code_multi(self, cr, uid, action, eval_context=None, context=None):
    """ Execute the action's python code inside the prepared eval context.

    NOTE: 'eval' here accepts mode/nocopy keywords, so it is expected to be
    Odoo's sandboxed safe_eval, not the builtin -- confirm the module-level
    import.
    """
    eval(action.code.strip(), eval_context, mode="exec", nocopy=True)  # nocopy allows to return 'action'
    if 'action' in eval_context:
        # the code may assign 'action = {...}' to chain a follow-up action
        return eval_context['action']
def run_action_trigger(self, cr, uid, action, eval_context=None, context=None):
    """ Trigger a workflow signal, depending on the use_relational_model:

    - `base`: base_model_pool.signal_workflow(cr, uid, context.get('active_id'), <TRIGGER_NAME>)
    - `relational`: find the related model and object, using the relational
      field, then target_model_pool.signal_workflow(cr, uid, target_id, <TRIGGER_NAME>)
    """
    # weird signature and calling -> no self.env, use action param's
    record = action.env[action.model_id.model].browse(context['active_id'])
    if action.use_relational_model == 'relational':
        # hop through the relational field; it may yield a record or a raw id
        record = getattr(record, action.wkf_field_id.name)
        if not isinstance(record, openerp.models.BaseModel):
            record = action.env[action.wkf_model_id.model].browse(record)
    record.signal_workflow(action.wkf_transition_id.signal)
def run_action_multi(self, cr, uid, action, eval_context=None, context=None):
    """ Run every child action in sequence and return the last truthy
    result (i.e. the last child that asked for a follow-up action). """
    res = False
    for child in action.child_ids:
        child_res = self.run(cr, uid, [child.id], context=context)
        res = child_res or res
    return res
def run_action_object_write(self, cr, uid, action, eval_context=None, context=None):
    """ Write server action.

    - 1. evaluate the value mapping
    - 2. depending on the write configuration:

      - `current`: id = active_id
      - `other`: id = from reference object
      - `expression`: id = from expression evaluation
    """
    res = {}
    for exp in action.fields_lines:
        # eval_value returns {line_id: evaluated_value}
        res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
    if action.use_write == 'current':
        model = action.model_id.model
        ref_id = context.get('active_id')
    elif action.use_write == 'other':
        model = action.crud_model_id.model
        ref_id = action.ref_object.id
    elif action.use_write == 'expression':
        model = action.crud_model_id.model
        # the expression may yield either a browse record or a raw id
        ref = eval(action.write_expression, eval_context)
        if isinstance(ref, browse_record):
            ref_id = getattr(ref, 'id')
        else:
            ref_id = int(ref)
    # NOTE(review): any other use_write value would leave model/ref_id
    # unbound (NameError); presumably the selection field restricts the
    # values -- confirm.
    obj_pool = self.pool[model]
    obj_pool.write(cr, uid, [ref_id], res, context=context)
def run_action_object_create(self, cr, uid, action, eval_context=None, context=None):
    """ Create and Copy server action.

    - 1. evaluate the value mapping
    - 2. depending on the write configuration:

      - `new`: new record in the base model
      - `copy_current`: copy the current record (id = active_id) + gives custom values
      - `new_other`: new record in target model
      - `copy_other`: copy the current record (id from reference object)
        + gives custom values
    """
    res = {}
    for exp in action.fields_lines:
        # eval_value returns {line_id: evaluated_value}
        res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
    if action.use_create in ['new', 'copy_current']:
        model = action.model_id.model
    elif action.use_create in ['new_other', 'copy_other']:
        model = action.crud_model_id.model
    obj_pool = self.pool[model]
    if action.use_create == 'copy_current':
        ref_id = context.get('active_id')
        res_id = obj_pool.copy(cr, uid, ref_id, res, context=context)
    elif action.use_create == 'copy_other':
        ref_id = action.ref_object.id
        res_id = obj_pool.copy(cr, uid, ref_id, res, context=context)
    else:
        res_id = obj_pool.create(cr, uid, res, context=context)
    if action.link_new_record and action.link_field_id:
        # link the freshly created record back onto the originating record
        self.pool[action.model_id.model].write(cr, uid, [context.get('active_id')], {action.link_field_id.name: res_id})
def _get_eval_context(self, cr, uid, action=None, context=None):
    """ Prepare the context used when evaluating python code, like the
    condition or code server actions.

    :param action: the current server action
    :type action: browse record
    :returns: dict -- evaluation context given to (safe_)eval """
    def log(message, level="info"):
        # persist a log line into ir_logging, attributed to this action
        val = (uid, 'server', cr.dbname, __name__, level, message, "action", action.id, action.name)
        cr.execute("""
            INSERT INTO ir_logging(create_date, create_uid, type, dbname, name, level, message, path, line, func)
            VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """, val)
    eval_context = super(ir_actions_server, self)._get_eval_context(cr, uid, action=action, context=context)
    obj_pool = self.pool[action.model_id.model]
    env = openerp.api.Environment(cr, uid, context)
    model = env[action.model_id.model]
    # 'object'/'obj' point to the active record when model and id match,
    # or to the onchange pseudo-record when triggered from an onchange
    obj = None
    if context.get('active_model') == action.model_id.model and context.get('active_id'):
        obj = model.browse(context['active_id'])
    if context.get('onchange_self'):
        obj = context['onchange_self']
    eval_context.update({
        # orm
        'env': env,
        'model': model,
        'workflow': workflow,
        # Exceptions
        'Warning': openerp.exceptions.Warning,
        # record
        # TODO: When porting to master move badly named obj and object to
        # deprecated and define record (active_id) and records (active_ids)
        'object': obj,
        'obj': obj,
        # Deprecated use env or model instead
        'self': obj_pool,
        'pool': self.pool,
        'cr': cr,
        'context': context,
        'user': env.user,
        # helpers
        'log': log,
    })
    return eval_context
def run(self, cr, uid, ids, context=None):
    """ Runs the server action. For each server action, the condition is
    checked. Note that a void (``False``) condition is considered as always
    valid. If it is verified, the run_action_<STATE> method is called. This
    allows easy overriding of the server actions.

    :param dict context: context should contain following keys

      - active_id: id of the current object (single mode)
      - active_model: current model that should equal the action's model

    The following keys are optional:

      - active_ids: ids of the current records (mass mode). If active_ids
        and active_id are present, active_ids is given precedence.

    :return: an action_id to be executed, or False is finished correctly without
             return action
    """
    if context is None:
        context = {}
    res = False
    for action in self.browse(cr, uid, ids, context):
        eval_context = self._get_eval_context(cr, uid, action, context=context)
        condition = action.condition
        if condition is False:
            # Void (aka False) conditions are considered as True
            condition = True
        # a *_multi runner handles all active_ids in one call and takes
        # precedence over the per-record runner
        if hasattr(self, 'run_action_%s_multi' % action.state):
            run_context = eval_context['context']
            expr = eval(str(condition), eval_context)
            if not expr:
                continue
            # call the multi method
            func = getattr(self, 'run_action_%s_multi' % action.state)
            res = func(cr, uid, action, eval_context=eval_context, context=run_context)
        elif hasattr(self, 'run_action_%s' % action.state):
            func = getattr(self, 'run_action_%s' % action.state)
            active_id = context.get('active_id')
            active_ids = context.get('active_ids', [active_id] if active_id else [])
            for active_id in active_ids:
                # run context dedicated to a particular active_id
                run_context = dict(context, active_ids=[active_id], active_id=active_id)
                eval_context["context"] = run_context
                # condition is re-evaluated per record
                expr = eval(str(condition), eval_context)
                if not expr:
                    continue
                # call the single method related to the action: run_action_<STATE>
                res = func(cr, uid, action, eval_context=eval_context, context=run_context)
    return res
class ir_server_object_lines(osv.osv):
    # One line of a server action's "value mapping": which field to set on
    # the target record and the (literal or python) value to set it to.
    _name = 'ir.server.object.lines'
    _description = 'Server Action value mapping'
    _sequence = 'ir_actions_id_seq'
    _columns = {
        'server_id': fields.many2one('ir.actions.server', 'Related Server Action', ondelete='cascade'),
        'col1': fields.many2one('ir.model.fields', 'Field', required=True),
        'value': fields.text('Value', required=True, help="Expression containing a value specification. \n"
                             "When Formula type is selected, this field may be a Python expression "
                             " that can use the same values as for the condition field on the server action.\n"
                             "If Value type is selected, the value will be used directly without evaluation."),
        'type': fields.selection([
            ('value', 'Value'),
            ('equation', 'Python expression')
        ], 'Evaluation Type', required=True, change_default=True),
    }
    _defaults = {
        'type': 'value',
    }

    def eval_value(self, cr, uid, ids, eval_context=None, context=None):
        """ Return {line_id: value}: the evaluated python expression for
        'equation' lines; otherwise the raw text, coerced to int when the
        target field is an integer/many2one and the text parses as one. """
        res = dict.fromkeys(ids, False)
        for line in self.browse(cr, uid, ids, context=context):
            expr = line.value
            if line.type == 'equation':
                # 'eval' is expected to be Odoo's sandboxed safe_eval here
                expr = eval(line.value, eval_context)
            elif line.col1.ttype in ['many2one', 'integer']:
                try:
                    expr = int(line.value)
                except Exception:
                    # not a plain integer: keep the raw string as-is
                    pass
            res[line.id] = expr
        return res
# Selection values for ir.actions.todo 'state' and 'type' fields.
TODO_STATES = [('open', 'To Do'),
               ('done', 'Done')]
TODO_TYPES = [('manual', 'Launch Manually'),('once', 'Launch Manually Once'),
              ('automatic', 'Launch Automatically')]
class ir_actions_todo(osv.osv):
    """
    Configuration Wizards
    """
    _name = 'ir.actions.todo'
    _description = "Configuration Wizards"
    _columns = {
        'action_id': fields.many2one(
            'ir.actions.actions', 'Action', select=True, required=True),
        'sequence': fields.integer('Sequence'),
        'state': fields.selection(TODO_STATES, string='Status', required=True),
        'name': fields.char('Name'),
        'type': fields.selection(TODO_TYPES, 'Type', required=True,
            help="""Manual: Launched manually.
Automatic: Runs whenever the system is reconfigured.
Launch Manually Once: after having been launched manually, it sets automatically to Done."""),
        'groups_id': fields.many2many('res.groups', 'res_groups_action_rel', 'uid', 'gid', 'Groups'),
        'note': fields.text('Text', translate=True),
    }
    _defaults = {
        'state': 'open',
        'sequence': 10,
        'type': 'manual',
    }
    _order = "sequence,id"

    def name_get(self, cr, uid, ids, context=None):
        # display the linked action's name instead of the todo's own name
        return [(rec.id, rec.action_id.name) for rec in self.browse(cr, uid, ids, context=context)]

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        # search on the related action's name, consistent with name_get
        if args is None:
            args = []
        if name:
            ids = self.search(cr, user, [('action_id', operator, name)] + args, limit=limit)
            return self.name_get(cr, user, ids, context=context)
        return super(ir_actions_todo, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)

    def action_launch(self, cr, uid, ids, context=None):
        """ Launch Action of Wizard"""
        wizard_id = ids and ids[0] or False
        wizard = self.browse(cr, uid, wizard_id, context=context)
        if wizard.type in ('automatic', 'once'):
            # these wizard kinds are marked done as soon as they are launched
            wizard.write({'state': 'done'})
        # Load action
        act_type = wizard.action_id.type
        res = self.pool[act_type].read(cr, uid, [wizard.action_id.id], [], context=context)[0]
        if act_type != 'ir.actions.act_window':
            return res
        res.setdefault('context','{}')
        # Open a specific record when res_id is provided in the context
        # NOTE(review): this 'eval' runs the stored context string; expected
        # to be Odoo's safe_eval -- confirm the module-level import.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        ctx = eval(res['context'], {'user': user})
        if ctx.get('res_id'):
            res.update({'res_id': ctx.pop('res_id')})
        # disable log for automatic wizards
        if wizard.type == 'automatic':
            ctx.update({'disable_log': True})
        res.update({'context': ctx})
        return res

    def action_open(self, cr, uid, ids, context=None):
        """ Sets configuration wizard in TODO state"""
        return self.write(cr, uid, ids, {'state': 'open'}, context=context)

    def progress(self, cr, uid, context=None):
        """ Returns a dict with 3 keys {todo, done, total}.

        These keys all map to integers and provide the number of todos
        marked as open, the total number of todos and the number of
        todos not open (which is basically a shortcut to total-todo)

        :rtype: dict
        """
        user_groups = set(map(
            lambda x: x.id,
            self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
        def groups_match(todo):
            """ Checks if the todo's groups match those of the current user
            """
            return not todo.groups_id \
                or bool(user_groups.intersection((
                    group.id for group in todo.groups_id)))
        # python 2: filter() returns a list here, so len() below is valid
        done = filter(
            groups_match,
            self.browse(cr, uid,
                        self.search(cr, uid, [('state', '!=', 'open')], context=context),
                        context=context))
        total = filter(
            groups_match,
            self.browse(cr, uid,
                        self.search(cr, uid, [], context=context),
                        context=context))
        return {
            'done': len(done),
            'total': len(total),
            'todo': len(total) - len(done)
        }
class ir_actions_act_client(osv.osv):
    # Client-side action: an arbitrary tag interpreted by the web client,
    # with optional parameters stored as a repr()'d dict.
    _name = 'ir.actions.client'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_client'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    def _get_params(self, cr, uid, ids, field_name, arg, context):
        """ Function-field getter: eval the stored repr() back to a dict. """
        result = {}
        # Need to remove bin_size from context, to obtains the binary and not the length.
        context = dict(context, bin_size_params_store=False)
        for record in self.browse(cr, uid, ids, context=context):
            result[record.id] = record.params_store and eval(record.params_store, {'uid': uid}) or False
        return result

    def _set_params(self, cr, uid, id, field_name, field_value, arg, context):
        """ Function-field setter: dicts are persisted as their repr(). """
        if isinstance(field_value, dict):
            self.write(cr, uid, id, {'params_store': repr(field_value)}, context=context)
        else:
            self.write(cr, uid, id, {'params_store': field_value}, context=context)

    _columns = {
        'name': fields.char('Action Name', required=True, translate=True),
        'tag': fields.char('Client action tag', required=True,
                           help="An arbitrary string, interpreted by the client"
                                " according to its own needs and wishes. There "
                                "is no central tag repository across clients."),
        'res_model': fields.char('Destination Model',
                                 help="Optional model, mostly used for needactions."),
        'context': fields.char('Context Value', required=True,
                               help="Context dictionary as Python expression, empty by default (Default: {})"),
        # NOTE(review): the adjacent literals below concatenate to
        # "...along withthe view tag" -- a space is missing at the join.
        'params': fields.function(_get_params, fnct_inv=_set_params,
                                  type='binary',
                                  string="Supplementary arguments",
                                  help="Arguments sent to the client along with"
                                       "the view tag"),
        'params_store': fields.binary("Params storage", readonly=True)
    }
    _defaults = {
        'type': 'ir.actions.client',
        'context': '{}',
    }
| gpl-3.0 |
elopio/snapcraft | tests/unit/commands/test_enable_ci.py | 1 | 2204 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
from testtools.matchers import Contains, Equals
from snapcraft.integrations import travis
from . import CommandBaseTestCase
class EnableCITestCase(CommandBaseTestCase):
    """Tests for the 'snapcraft enable-ci' CLI command."""

    def test_enable_ci_empty(self):
        # missing required ci-system argument -> click usage error (code 2)
        result = self.run_command(['enable-ci'])
        self.assertThat(result.exit_code, Equals(2))
        self.assertThat(result.output, Contains(
            'Missing argument "ci-system". Choose from travis.'))

    def test_enable_ci_unknown(self):
        # unsupported ci-system value -> click invalid-choice error (code 2)
        result = self.run_command(['enable-ci', 'bazinga'])
        self.assertThat(result.exit_code, Equals(2))
        self.assertThat(result.output, Contains(
            'invalid choice: bazinga. (choose from travis)'))

    @mock.patch.object(travis, '__doc__')
    @mock.patch.object(travis, 'enable')
    def test_enable_ci_travis(self, mock_enable, mock_doc):
        # the command prints the travis module docstring and, after the
        # 'y' confirmation, calls travis.enable() exactly once
        mock_doc.__str__.return_value = '<module docstring>'
        result = self.run_command(['enable-ci', 'travis'], input='y\n')
        self.assertThat(result.exit_code, Equals(0))
        self.assertThat(result.output, Contains(
            '<module docstring>'))
        self.assertThat(mock_enable.call_count, Equals(1))

    @mock.patch.object(travis, 'refresh')
    def test_enable_ci_travis_refresh(self, mock_refresh):
        # --refresh skips the prompt/doc output and calls travis.refresh()
        result = self.run_command(['enable-ci', 'travis', '--refresh'])
        self.assertThat(result.exit_code, Equals(0))
        self.assertThat(result.output, Equals(''))
        self.assertThat(mock_refresh.call_count, Equals(1))
| gpl-3.0 |
gouzongmei/t1 | src/kimchi/model/libvirtconnection.py | 1 | 5615 | #
# Project Kimchi
#
# Copyright IBM, Corp. 2014
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import threading
import time
import cherrypy
import libvirt
from kimchi.utils import kimchi_log
class LibvirtConnection(object):
    """Shared, self-healing cache of libvirt connections.

    Connections are cached per (uri, conn_id).  Every callable exposed by a
    connection -- and by the libvirt object classes returned from
    get_wrappable_objects() -- is wrapped so that fatal RPC/connection
    errors drop the cached connection, forcing a reconnect on next get().
    """

    # class-level cache: {uri: {conn_id: connection}}
    _connections = {}
    _connectionLock = threading.Lock()

    def __init__(self, uri):
        self.uri = uri
        if self.uri not in LibvirtConnection._connections:
            LibvirtConnection._connections[self.uri] = {}
        self._connections = LibvirtConnection._connections[self.uri]
        self.wrappables = self.get_wrappable_objects()

    def get_wrappable_objects(self):
        """
        When a wrapped function returns an instance of another libvirt object,
        we also want to wrap that object so we can catch errors that happen
        when calling its methods.
        """
        objs = []
        for name in ('virDomain', 'virDomainSnapshot', 'virInterface',
                     'virNWFilter', 'virNetwork', 'virNodeDevice', 'virSecret',
                     'virStoragePool', 'virStorageVol', 'virStream'):
            try:
                attr = getattr(libvirt, name)
            except AttributeError:
                # Bug fix: this libvirt version does not provide the class,
                # so skip it.  The previous 'pass' fell through to the append
                # below, re-appending the previous iteration's attr (or
                # raising NameError when the very first name was missing).
                continue
            objs.append(attr)
        return tuple(objs)

    def get(self, conn_id=0):
        """
        Return current connection to libvirt or open a new one. Wrap all
        callable libvirt methods so we can catch connection errors and handle
        them by restarting the server.
        """
        def wrapMethod(f):
            def wrapper(*args, **kwargs):
                try:
                    ret = f(*args, **kwargs)
                    return ret
                except libvirt.libvirtError as e:
                    edom = e.get_error_domain()
                    ecode = e.get_error_code()
                    EDOMAINS = (libvirt.VIR_FROM_REMOTE,
                                libvirt.VIR_FROM_RPC)
                    ECODES = (libvirt.VIR_ERR_SYSTEM_ERROR,
                              libvirt.VIR_ERR_INTERNAL_ERROR,
                              libvirt.VIR_ERR_NO_CONNECT,
                              libvirt.VIR_ERR_INVALID_CONN)
                    if edom in EDOMAINS and ecode in ECODES:
                        kimchi_log.error('Connection to libvirt broken. '
                                         'Recycling. ecode: %d edom: %d' %
                                         (ecode, edom))
                        # invalidate the cached connection so the next
                        # get() call reconnects
                        with LibvirtConnection._connectionLock:
                            self._connections[conn_id] = None
                    raise
            wrapper.__name__ = f.__name__
            wrapper.__doc__ = f.__doc__
            return wrapper

        with LibvirtConnection._connectionLock:
            conn = self._connections.get(conn_id)
            if not conn:
                # retry a few times before giving up and stopping the server
                retries = 5
                while True:
                    retries = retries - 1
                    try:
                        conn = libvirt.open(self.uri)
                        break
                    except libvirt.libvirtError:
                        kimchi_log.error('Unable to connect to libvirt.')
                        if not retries:
                            kimchi_log.error("Unable to establish connection "
                                             "with libvirt. Please check "
                                             "your libvirt URI which is often "
                                             "defined in "
                                             "/etc/libvirt/libvirt.conf")
                            cherrypy.engine.stop()
                            exit(1)
                        time.sleep(2)

                # wrap public instance methods of this connection
                for name in dir(libvirt.virConnect):
                    method = getattr(conn, name)
                    if callable(method) and not name.startswith('_'):
                        setattr(conn, name, wrapMethod(method))

                # wrap the classes of objects a connection can hand out
                for cls in self.wrappables:
                    for name in dir(cls):
                        method = getattr(cls, name)
                        if callable(method) and not name.startswith('_'):
                            setattr(cls, name, wrapMethod(method))

                self._connections[conn_id] = conn
                # In case we're running into troubles with keeping the
                # connections alive we should place here:
                # conn.setKeepAlive(interval=5, count=3)
                # However the values need to be considered wisely to not affect
                # hosts which are hosting a lot of virtual machines
        return conn

    def isQemuURI(self):
        """
        This method will return True or Value when the system libvirt
        URI is a qemu based URI. For example:
        qemu:///system or qemu+tcp://someipaddress/system
        """
        return self.get().getURI().startswith('qemu')
| lgpl-3.0 |
Alidron/demo-nao | alidron-env/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
# Keep a handle on the builtin before this module defines its own
# 'compile' function below.
_builtin_compile = compile
try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise

# restricted set of variables
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None  # wheel extension
         }

# Also expose underscore aliases (os_name, platform_version, ...) so markers
# can use plain-identifier syntax in addition to attribute access.
for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
    """Return a fresh copy of the default marker-variable mapping, so
    callers can mutate it without affecting the module-level defaults."""
    return _VARS.copy()
class ASTWhitelist(ast.NodeTransformer):
    # Rejects any AST node outside a small comparison/boolean whitelist so
    # that arbitrary code cannot hide inside a marker string.
    # NOTE(review): ast.Str is deprecated since Python 3.8 and removed in
    # 3.12; this module targets older interpreters.
    def __init__(self, statement):
        self.statement = statement  # for error messages
    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if not isinstance(node, self.ALLOWED):
            # point a caret at the offending column in the original marker
            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                              (self.statement,
                               (' ' * node.col_offset) + '^'))
        return ast.NodeTransformer.visit(self, node)

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        # 'os.name' becomes Name('os.name') so evaluation hits the dotted
        # keys of the marker environment directly
        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(new_node, node)
def parse_marker(marker):
    """Parse *marker* into an eval-mode AST, rejecting any node that is
    not on the ASTWhitelist (raises SyntaxError otherwise)."""
    expr_tree = ast.parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(expr_tree)
def compile_marker(parsed_marker):
    """Compile a whitelisted marker AST into an eval()-able code object."""
    return _builtin_compile(
        parsed_marker, '<environment marker>', 'eval', dont_inherit=True)
# Memoized marker functions; entries disappear automatically once no
# caller holds a reference to the compiled function any more.
_cache = weakref.WeakValueDictionary()

def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    cached = _cache.get(marker)
    if cached is not None:
        return cached
    if marker.strip():
        code = compile_marker(parse_marker(marker))

        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if environment is None:
                environment = default_environment()
            environment.update(override or {})
            return eval(code, environment)
    else:
        def marker_fn(environment=None, override=None):
            """An empty marker is trivially true."""
            return True
    # The marker text itself becomes the function's docstring.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return marker_fn
def interpret(marker, environment=None):
    """Compile *marker* (cached) and evaluate it against *environment*."""
    marker_fn = compile(marker)
    return marker_fn(environment)
| mpl-2.0 |
lythien/pokemongo | pokemongo_bot/cell_workers/follow_path.py | 1 | 7604 | # -*- coding: utf-8 -*-
import gpxpy
import gpxpy.gpx
import json
import time
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.cell_workers.utils import distance, i2f, format_dist
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot.walkers.walker_factory import walker_factory
from pokemongo_bot.worker_result import WorkerResult
from pgoapi.utilities import f2i
from random import uniform
from utils import getSeconds
from datetime import datetime as dt, timedelta
# Worker movement states.
STATUS_MOVING = 0      # walking towards the current waypoint
STATUS_LOITERING = 1   # paused at a waypoint that carries a "loiter" time
STATUS_FINISHED = 2    # a 'single' mode path has been fully walked
class FollowPath(BaseTask):
    """Walk the bot along a predefined route loaded from a JSON or GPX file."""
    SUPPORTED_TASK_API_VERSION = 1
    def initialize(self):
        # Called once by BaseTask: read config, load waypoints, pick start.
        self._process_config()
        self.points = self.load_path()
        self.status = STATUS_MOVING
        self.loiter_end_time = 0
        if self.path_start_mode == 'closest':
            self.ptr = self.find_closest_point_idx(self.points)
        else:
            self.ptr = 0
    def _process_config(self):
        """Read task options from self.config and validate them."""
        self.path_file = self.config.get("path_file", None)
        # 'linear' ping-pongs the route, 'loop' restarts it, 'single' walks once.
        self.path_mode = self.config.get("path_mode", "linear")
        self.path_start_mode = self.config.get("path_start_mode", "first")
        self.number_lap_max = self.config.get("number_lap", -1) # if < 0, then the number is inf.
        self.timer_restart_min = getSeconds(self.config.get("timer_restart_min", "00:20:00"))
        self.timer_restart_max = getSeconds(self.config.get("timer_restart_max", "02:00:00"))
        self.walker = self.config.get('walker', 'StepWalker')
        if self.timer_restart_min > self.timer_restart_max:
            raise ValueError('path timer_restart_min is bigger than path timer_restart_max') #TODO there must be a more elegant way to do it...
        #var not related to configs
        self.number_lap = 0
    def load_path(self):
        """Dispatch to the JSON or GPX loader based on the file extension.

        NOTE(review): returns None (not an error) for any other extension —
        confirm whether that is intended.
        """
        if self.path_file is None:
            raise RuntimeError('You need to specify a path file (json or gpx)')
        if self.path_file.endswith('.json'):
            return self.load_json()
        elif self.path_file.endswith('.gpx'):
            return self.load_gpx()
    def load_json(self):
        """Load waypoints from JSON, resolving verbal locations to lat/lng/alt."""
        with open(self.path_file) as data_file:
            points=json.load(data_file)
        # Replace Verbal Location with lat&lng.
        for _, point in enumerate(points):
            point_tuple = self.bot.get_pos_by_name(point['location'])
            self.emit_event(
                'location_found',
                level='debug',
                formatted="Location found: {location} {position}",
                data={
                    'location': point,
                    'position': point_tuple
                }
            )
            # Keep point['location']
            point["lat"] = float(point_tuple[0])
            point["lng"] = float(point_tuple[1])
            point["alt"] = float(point_tuple[2])
        return points
    def load_gpx(self):
        """Load waypoints from the first track of a GPX file."""
        gpx_file = open(self.path_file, 'r')
        gpx = gpxpy.parse(gpx_file)
        if len(gpx.tracks) == 0:
            raise RuntimeError('GPX file does not contain a track')
        points = []
        track = gpx.tracks[0]
        for segment in track.segments:
            for point in segment.points:
                points.append({"lat": point.latitude, "lng": point.longitude,
                    "alt": point.elevation, "location": point.name})
        return points
    def find_closest_point_idx(self, points):
        """Return the index of the waypoint nearest the bot's position."""
        return_idx = 0
        min_distance = float("inf");
        for index in range(len(points)):
            point = points[index]
            # NOTE(review): the bot position is re-read every iteration even
            # though it is loop-invariant; it could be hoisted out.
            botlat, botlng, _ = self.bot.api.get_position()
            lat = point['lat']
            lng = point['lng']
            dist = distance(
                botlat,
                botlng,
                lat,
                lng
            )
            if dist < min_distance:
                min_distance = dist
                return_idx = index
        return return_idx
    def endLaps(self):
        """Take a randomized break after the configured number of laps."""
        duration = int(uniform(self.timer_restart_min, self.timer_restart_max))
        resume = dt.now() + timedelta(seconds=duration)
        self.emit_event(
            'path_lap_end',
            formatted="Great job, lot of calories burned! Taking a break now for {duration}, will resume at {resume}.",
            data={
                'duration': str(timedelta(seconds=duration)),
                'resume': resume.strftime("%H:%M:%S")
            }
        )
        self.number_lap = 0 # at the end of the break, start again
        sleep(duration)
        # Re-login after the long sleep in case the session expired.
        self.bot.login()
    def work(self):
        """Advance one step along the path; called repeatedly by the bot loop."""
        # If done or loitering allow the next task to run
        if self.status == STATUS_FINISHED:
            return WorkerResult.SUCCESS
        if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:
            return WorkerResult.SUCCESS
        last_lat, last_lng, last_alt = self.bot.api.get_position()
        point = self.points[self.ptr]
        lat = point['lat']
        lng = point['lng']
        if 'alt' in point:
            alt = float(point['alt'])
        else:
            # No altitude in the path: pick a plausible random one.
            alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)
        if self.bot.config.walk_max > 0:
            step_walker = walker_factory(self.walker,
                self.bot,
                lat,
                lng,
                alt
            )
            is_at_destination = False
            if step_walker.step():
                is_at_destination = True
        else:
            # Teleport mode: jump straight to the waypoint.
            self.bot.api.set_position(lat, lng, alt)
        dist = distance(
            last_lat,
            last_lng,
            lat,
            lng
        )
        self.emit_event(
            'position_update',
            formatted="Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..",
            data={
                'last_position': (last_lat, last_lng, last_alt),
                'current_position': (lat, lng, alt),
                'distance': dist,
                'distance_unit': 'm'
            }
        )
        # NOTE(review): is_at_destination is only bound when walk_max > 0;
        # with walk_max <= 0 and walk_min > 0 the condition below would
        # raise NameError — confirm those configs are mutually exclusive.
        if dist <= 1 or (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):
            if "loiter" in point and self.status != STATUS_LOITERING:
                print("Loitering {} seconds".format(point["loiter"]))
                self.status = STATUS_LOITERING
                self.loiter_end_time = time.time() + point["loiter"]
                return WorkerResult.SUCCESS
            if (self.ptr + 1) == len(self.points):
                # Reached the end of the waypoint list.
                if self.path_mode == 'single':
                    self.status = STATUS_FINISHED
                    return WorkerResult.SUCCESS
                self.ptr = 0
                if self.path_mode == 'linear':
                    # Walk the route back in the opposite direction.
                    self.points = list(reversed(self.points))
                if self.number_lap_max >= 0:
                    self.number_lap+=1
                    self.emit_event(
                        'path_lap_update',
                        formatted="number lap : {number_lap} / {number_lap_max}",
                        data={
                            'number_lap': str(self.number_lap),
                            'number_lap_max': str(self.number_lap_max)
                        }
                    )
                    if self.number_lap >= self.number_lap_max:
                        self.endLaps()
            else:
                self.ptr += 1
            self.status = STATUS_MOVING
        return WorkerResult.RUNNING
| mit |
brianmckenna/sci-wms | wms/wms_handler.py | 1 | 7363 | # -*- coding: utf-8 -*-
from datetime import datetime, date
from dateutil.parser import parse
from dateutil.tz import tzutc
import pyproj
from wms.utils import DotDict, split
from wms import logger
def get_bbox(request):
    """
    Return the [lonmin, latmin, lonmax, lonmax] - [lower (x,y), upper(x,y)]
    Units will be specified by projection.
    """
    pieces = [float(piece) for piece in request.GET["bbox"].split(",")]
    return DotDict(minx=pieces[0], miny=pieces[1], maxx=pieces[2], maxy=pieces[3])
def get_wgs84_bbox(request):
    """
    Return the requested bbox reprojected into WGS84 lon/lat, both as the
    individual corner values and as a (minx, miny, maxx, maxy) tuple.
    """
    wgs84 = pyproj.Proj(init='EPSG:4326')
    source_crs = get_projection(request)
    native = get_bbox(request)
    lower_x, lower_y = pyproj.transform(source_crs, wgs84, native.minx, native.miny)
    upper_x, upper_y = pyproj.transform(source_crs, wgs84, native.maxx, native.maxy)
    return DotDict(minx=lower_x, miny=lower_y, maxx=upper_x, maxy=upper_y,
                   bbox=(lower_x, lower_y, upper_x, upper_y))
def get_format(request):
    """
    Return the FORMAT for GetLegendGraphic requests.

    The requested format is deliberately ignored and PNG is always
    returned (the original lookup of ``request.GET['format']`` was
    commented out); the try/except that remained around the constant
    was dead code and has been removed.
    """
    return 'image/png'
def get_show_label(request):
    """
    Return the SHOWLABEL flag for GetLegendGraphic requests (default True).
    A COLORBARONLY=true parameter forces it off.
    """
    params = request.GET
    if params.get('colorbaronly', '').lower() == 'true':
        return False
    try:
        raw = params['showlabel']
    except KeyError:
        return True
    return raw.lower() == 'true'
def get_units(request, units):
    """
    Return the UNITS label for GetLegendGraphic requests, falling back to
    the dataset's native *units* when no UNITLABEL parameter is present.
    """
    try:
        raw = request.GET['unitlabel']
    except KeyError:
        return units
    return raw.lower()
def get_logscale(request, default_logscale):
    """
    Return the LOGSCALE flag for GetLegendGraphic requests, defaulting to
    *default_logscale* when the parameter is absent.
    """
    try:
        raw = request.GET['logscale']
    except KeyError:
        return default_logscale
    return raw.lower() == 'true'
def get_horizontal(request):
    """Return True when a horizontal legend is requested (default False)."""
    try:
        raw = request.GET['horizontal']
    except KeyError:
        return False
    return raw.lower() == 'true'
def get_show_values(request):
    """
    Return the SHOWVALUES flag for GetLegendGraphic requests (default True).
    A COLORBARONLY=true parameter forces it off.
    """
    params = request.GET
    if params.get('colorbaronly', '').lower() == 'true':
        return False
    try:
        raw = params['showvalues']
    except KeyError:
        return True
    return raw.lower() == 'true'
def get_num_contours(request, default=None):
    """
    Return NUMCONTOURS for GetLegendGraphic requests as an int,
    defaulting to 8 (or the supplied *default*) when missing/invalid.
    """
    fallback = default or 8
    try:
        return int(float(request.GET['numcontours'].lower()))
    except (KeyError, ValueError):
        return fallback
def get_info_format(request):
    """Return the INFO_FORMAT for GetFeatureInfo requests, or None."""
    try:
        raw = request.GET['info_format']
    except KeyError:
        return None
    return raw.lower()
def get_projection(request):
    """
    Return the projection passed in the request as a pyproj.Proj.
    The string may arrive under either the "srs" or "crs" key; when
    neither is present, EPSG:3857 (spherical mercator) is assumed.
    """
    projstr = request.GET.get("srs") or request.GET.get("crs")
    if not projstr:
        projstr = "EPSG:3857"
        logger.debug("SRS or CRS no available in requst, defaulting to EPSG:3857 (mercator)")
    return pyproj.Proj(init=projstr)
def get_xy(request):
    """
    Return the x/y pixel coordinates of a GetFeatureInfo request as floats.

    Either coordinate is None when the parameter is missing or not numeric.
    float(None) raises TypeError (not ValueError), so the previous code —
    which only caught ValueError — crashed on a missing parameter; both
    exceptions are caught now.
    """
    try:
        x = float(request.GET.get('x'))
    except (TypeError, ValueError):
        x = None
    try:
        y = float(request.GET.get('y'))
    except (TypeError, ValueError):
        y = None
    return DotDict(x=x, y=y)
def get_elevation(request):
    """
    Return the requested ELEVATION as a float, defaulting to 0 when the
    parameter is missing or not numeric.

    ValueError (e.g. elevation=abc) was previously uncaught and crashed
    the request; it is handled now alongside the original TypeError and
    KeyError cases.
    """
    try:
        return float(request.GET["elevation"])
    except (TypeError, KeyError, ValueError):
        return 0
def get_time(request):
    """
    Return the requested TIME as a tz-naive datetime in UTC; defaults to
    the current UTC time when no TIME parameter is present.
    """
    raw = request.GET.get('time')
    if raw is None:
        return datetime.utcnow()
    parsed = parse(raw)
    if parsed.tzinfo is None:
        return parsed
    # Normalize tz-aware datetimes to UTC, then drop the tzinfo.
    return parsed.astimezone(tzutc()).replace(tzinfo=None)
def get_times(request):
    """
    Return DotDict(min=..., max=...) parsed from the TIME parameter, which
    may be a single timestamp or a "start/end" range; defaults to the
    start of today.
    """
    raw = request.GET.get('time')
    if not raw:
        raw = date.today().isoformat() + "T00:00:00"
    parsed = sorted(parse(piece) for piece in raw.split("/"))
    return DotDict(min=parsed[0], max=parsed[-1])
def get_colormap(request, parameter=None, default=None):
    """
    Return the matplotlib colormap named in the style parameter
    (e.g. 'filledcontours_jet' -> 'jet'), matched case-insensitively
    against the installed colormaps; falls back to *default* ('cubehelix').
    """
    parameter = parameter or 'styles'
    default = default or 'cubehelix'
    try:
        from matplotlib.pyplot import colormaps
        style = request.GET.get(parameter).split(',')[0]
        requested = split(style, '_', maxsplit=1)[1]
        assert requested
        return next(x for x in colormaps() if x.lower() == requested.lower())
    except (AssertionError, IndexError, AttributeError, TypeError, StopIteration):
        return default
def get_imagetype(request, parameter=None, default=None):
    """
    Return the image-type prefix of the style parameter
    (e.g. 'filledcontours_jet' -> 'filledcontours'), or *default*.
    """
    parameter = parameter or 'styles'
    default = default or 'filledcontours'
    try:
        style = request.GET.get(parameter).split(',')[0]
        imagetype = split(style, '_', maxsplit=1)[0].lower()
        assert imagetype
        return imagetype
    except (AssertionError, IndexError, AttributeError, TypeError):
        return default
def get_vectorscale(request):
    """
    Return VECTORSCALE as a float, defaulting to 1 when the parameter is
    missing or not numeric.

    ValueError from a non-numeric value was previously uncaught; it is
    handled now alongside the original AttributeError/TypeError cases.
    """
    try:
        return float(request.GET.get('vectorscale'))
    except (AttributeError, TypeError, ValueError):
        return 1
def get_vectorstep(request):
    """
    Return VECTORSTEP as an int; 1 (equivalent to getting all the data)
    when the parameter is missing or not an integer.

    ValueError from a non-integer value was previously uncaught; it is
    handled now alongside the original TypeError case.
    """
    try:
        return int(request.GET.get('vectorstep'))
    except (TypeError, ValueError):
        return 1
def get_colorscalerange(request, default_min, default_max):
    """
    Return DotDict(min, max) parsed from COLORSCALERANGE ("min,max"),
    falling back to the supplied defaults when missing or malformed.

    Non-numeric pieces raised an uncaught ValueError before; it is now
    handled alongside the original AttributeError/TypeError cases.
    """
    try:
        climits = sorted(float(x) for x in request.GET.get('colorscalerange').split(','))
        return DotDict(min=climits[0], max=climits[-1])
    except (AttributeError, TypeError, ValueError):
        return DotDict(min=default_min, max=default_max)
def get_dimensions(request, default_width=None, default_height=None):
    """
    Return width and height of requested view.
    RETURNS width, height request should be in pixel units.

    Falls back to the supplied defaults when either parameter is missing
    (TypeError) or not numeric (ValueError). The previous bare ``except:``
    also swallowed unrelated errors such as KeyboardInterrupt.
    """
    try:
        width = float(request.GET.get("width"))
        height = float(request.GET.get("height"))
        return DotDict(width=width, height=height)
    except (TypeError, ValueError):
        return DotDict(width=default_width, height=default_height)
def get_gfi_positions(xy, bbox, crs, dims):
    """ Returns the latitude and longitude the GFI should be performed at"""
    wgs84 = pyproj.Proj(init='EPSG:4326')
    # Map the pixel offset to a fraction of the view, then interpolate
    # inside the native bbox (y runs top-down in pixel space).
    native_x = bbox.minx + (bbox.maxx - bbox.minx) * (xy.x / dims.width)
    native_y = bbox.maxy - (bbox.maxy - bbox.miny) * (xy.y / dims.height)
    lon, lat = pyproj.transform(crs, wgs84, native_x, native_y)
    return DotDict(latitude=lat, longitude=lon)
def get_item(request):
    """
    Returns the GetMetadata 'item' parameter, lower-cased, or None.
    """
    try:
        raw = request.GET["item"]
    except KeyError:
        return None
    return raw.lower()
| gpl-3.0 |
hospace/ToughRADIUS | toughradius/console/control/config.py | 4 | 5355 | #!/usr/bin/env python
# coding:utf-8
import sys, os
from twisted.internet import reactor
from bottle import Bottle
from bottle import request
from bottle import response
from bottle import redirect
from bottle import static_file
from bottle import abort
from hashlib import md5
from urlparse import urljoin
from toughradius.console.base import *
from toughradius.console.libs import utils
from toughradius.console.libs.validate import vcache
from toughradius.console.control import config_forms
import time
import bottle
import decimal
import datetime
import functools
import ConfigParser
# URL prefix under which this Bottle sub-application is mounted.
__prefix__ = "/config"
app = Bottle()
app.config['__prefix__'] = __prefix__
@app.get('/', apply=auth_ctl)
def control_config(render):
    """Render the config page with one pre-filled form per config section."""
    # 'active' selects which section tab is open in the template.
    active = request.params.get("active","default")
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    # Populate each section's form from the current on-disk config values.
    default_form = config_forms.default_form()
    default_form.fill(dict(cfg.items("DEFAULT")))
    database_form = config_forms.database_form()
    database_form.fill(dict(cfg.items("database")))
    radiusd_form = config_forms.radiusd_form()
    radiusd_form.fill(dict(cfg.items("radiusd")))
    admin_form = config_forms.admin_form()
    admin_form.fill(dict(cfg.items("admin")))
    customer_form = config_forms.customer_form()
    customer_form.fill(dict(cfg.items("customer")))
    control_form = config_forms.control_form()
    control_form.fill(dict(cfg.items("control")))
    return render("config",
        active=active,
        default_form=default_form,
        database_form=database_form,
        radiusd_form=radiusd_form,
        admin_form=admin_form,
        customer_form=customer_form,
        control_form=control_form
    )
@app.post('/default/update', apply=auth_ctl)
def update_default(render):
    """Persist the DEFAULT-section options posted from the config form.

    NOTE: every update handler in this module is named ``update_default``;
    Bottle binds each to its route at definition time, so routing works,
    but the repeated name shadows earlier module attributes.
    """
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    for option in ('debug', 'tz', 'ssl', 'privatekey', 'certificate'):
        cfg.set('DEFAULT', option, request.forms.get(option))
    with open(app.config['DEFAULT.cfgfile'], 'wb') as configfile:
        cfg.write(configfile)
    redirect("/config?active=default")
@app.post('/database/update', apply=auth_ctl)
def update_default(render):
    """Persist the database-section options posted from the config form."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    for option in ('echo', 'dbtype', 'dburl', 'pool_size', 'pool_recycle',
                   'backup_path'):
        cfg.set('database', option, request.forms.get(option))
    with open(app.config['DEFAULT.cfgfile'], 'wb') as configfile:
        cfg.write(configfile)
    redirect("/config?active=database")
@app.post('/radiusd/update', apply=auth_ctl)
def update_default(render):
    """Persist the radiusd-section options posted from the config form."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    for option in ('host', 'acctport', 'adminport', 'authport',
                   'cache_timeout', 'logfile'):
        cfg.set('radiusd', option, request.forms.get(option))
    with open(app.config['DEFAULT.cfgfile'], 'wb') as configfile:
        cfg.write(configfile)
    redirect("/config?active=radiusd")
@app.post('/admin/update', apply=auth_ctl)
def update_default(render):
    """Persist the admin-section options posted from the config form."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    for option in ('host', 'port', 'logfile'):
        cfg.set('admin', option, request.forms.get(option))
    with open(app.config['DEFAULT.cfgfile'], 'wb') as configfile:
        cfg.write(configfile)
    redirect("/config?active=admin")
@app.post('/customer/update', apply=auth_ctl)
def update_default(render):
    """Persist the customer-section options posted from the config form."""
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    for option in ('host', 'port', 'logfile'):
        cfg.set('customer', option, request.forms.get(option))
    with open(app.config['DEFAULT.cfgfile'], 'wb') as configfile:
        cfg.write(configfile)
    redirect("/config?active=customer")
@app.post('/control/update', apply=auth_ctl)
def update_default(render):
    """Persist the control-section options posted from the config form.

    The password is only overwritten when a non-empty value is submitted,
    so leaving the field blank keeps the existing password.
    """
    cfg = ConfigParser.ConfigParser()
    cfg.read(app.config['DEFAULT.cfgfile'])
    for option in ('host', 'port', 'user'):
        cfg.set('control', option, request.forms.get(option))
    if request.forms.get("passwd"):
        cfg.set('control', 'passwd', request.forms.get("passwd"))
    cfg.set('control', 'logfile', request.forms.get("logfile"))
    with open(app.config['DEFAULT.cfgfile'], 'wb') as configfile:
        cfg.write(configfile)
    redirect("/config?active=control")
nvoron23/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/sitemaps/georss.py | 314 | 2134 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
    """
    A minimal hook to produce sitemaps for GeoRSS feeds.
    """
    def __init__(self, feed_dict, slug_dict=None):
        """
        This sitemap object initializes on a feed dictionary (as would be passed
        to `django.contrib.gis.views.feed`) and a slug dictionary.
        If the slug dictionary is not defined, then it's assumed the keys provide
        the URL parameter to the feed.  However, if you have a complex feed (e.g.,
        you override `get_object`, then you'll need to provide a slug dictionary.
        The slug dictionary should have the same keys as the feed dictionary, but
        each value in the slug dictionary should be a sequence of slugs that may
        be used for valid feeds.  For example, let's say we have a feed that
        returns objects for a specific ZIP code in our feed dictionary:
            feed_dict = {'zipcode' : ZipFeed}
        Then we would use a slug dictionary with a list of the zip code slugs
        corresponding to feeds you want listed in the sitemap:
            slug_dict = {'zipcode' : ['77002', '77054']}
        """
        # Setting up.
        self.feed_dict = feed_dict
        self.locations = []
        if slug_dict is None: slug_dict = {}
        # Getting the feed locations.  With slugs, each section expands to
        # one 'section/slug' entry; otherwise the bare section is used.
        for section in feed_dict.keys():
            if slug_dict.get(section, False):
                for slug in slug_dict[section]:
                    self.locations.append('%s/%s' % (section, slug))
            else:
                self.locations.append(section)
    def get_urls(self, page=1, site=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls: url['geo_format'] = 'georss'
        return urls
    def items(self):
        # The sitemap 'items' are the feed location strings built above.
        return self.locations
    def location(self, obj):
        # Reverse the gis feed view URL for *obj* (a location string).
        return urlresolvers.reverse('django.contrib.gis.views.feed', args=(obj,))
| apache-2.0 |
julien78910/CouchPotatoServer | libs/suds/transport/options.py | 200 | 2211 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for transport options.
"""
from suds.transport import *
from suds.properties import *
class Options(Skin):
    """
    Options:
        - B{proxy} - An http proxy to be specified on requests.
             The proxy is defined as {protocol:proxy,}
                - type: I{dict}
                - default: {}
        - B{timeout} - Set the url open timeout (seconds).
                - type: I{float}
                - default: 90
        - B{headers} - Extra HTTP headers.
                - type: I{dict}
                    - I{str} B{http} - The I{http} protocol proxy URL.
                    - I{str} B{https} - The I{https} protocol proxy URL.
                - default: {}
        - B{username} - The username used for http authentication.
                - type: I{str}
                - default: None
        - B{password} - The password used for http authentication.
                - type: I{str}
                - default: None
    """
    def __init__(self, **kwargs):
        # Property domain used by Skin to namespace these definitions.
        domain = __name__
        # Each Definition is (name, accepted type(s), default value).
        definitions = [
            Definition('proxy', dict, {}),
            Definition('timeout', (int,float), 90),
            Definition('headers', dict, {}),
            Definition('username', basestring, None),
            Definition('password', basestring, None),
        ]
        Skin.__init__(self, domain, definitions, kwargs)
linglung/ytdl | youtube_dl/extractor/theplatform.py | 1 | 15603 | # coding: utf-8
from __future__ import unicode_literals
import re
import time
import hmac
import binascii
import hashlib
from .once import OnceIE
from .adobepass import AdobePassIE
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
unsmuggle_url,
update_url_query,
xpath_with_ns,
mimetype2ext,
find_xpath_attr,
)
# Default namespace of thePlatform's SMIL 2.1 documents.
default_ns = 'http://www.w3.org/2005/SMIL21/Language'
# Shorthand: qualify an XPath expression with the SMIL namespace.
_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
class ThePlatformBaseIE(OnceIE):
    """Shared SMIL/metadata helpers for thePlatform-hosted media."""
    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        """Download the SMIL document at *smil_url*; return (formats, subtitles)."""
        meta = self._download_xml(
            smil_url, video_id, note=note, query={'format': 'SMIL'},
            headers=self.geo_verification_headers())
        # thePlatform signals unavailable media by pointing the <ref> at a
        # well-known "Unavailable" error clip; surface its abstract text.
        error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
        if error_element is not None and error_element.attrib['src'].startswith(
                'http://link.theplatform.com/s/errorFiles/Unavailable.'):
            raise ExtractorError(error_element.attrib['abstract'], expected=True)
        smil_formats = self._parse_smil_formats(
            meta, smil_url, video_id, namespace=default_ns,
            # the parameters are from syfy.com, other sites may use others,
            # they also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))
        formats = []
        for _format in smil_formats:
            if OnceIE.suitable(_format['url']):
                formats.extend(self._extract_once_formats(_format['url']))
            else:
                media_url = _format['url']
                if determine_ext(media_url) == 'm3u8':
                    # Propagate the Akamai auth cookie into the HLS URL.
                    hdnea2 = self._get_cookies(media_url).get('hdnea2')
                    if hdnea2:
                        _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value})
                formats.append(_format)
        subtitles = self._parse_smil_subtitles(meta, default_ns)
        return formats, subtitles
    def _download_theplatform_metadata(self, path, video_id):
        """Fetch the JSON "preview" metadata for a media *path*."""
        info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
        return self._download_json(info_url, video_id)
    def _parse_theplatform_metadata(self, info):
        """Map thePlatform preview metadata JSON onto youtube-dl info fields."""
        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles.setdefault(lang, []).append({
                    'ext': mimetype2ext(mime),
                    'url': src,
                })
        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            # duration/pubDate arrive in milliseconds; scale down to seconds.
            'duration': int_or_none(info.get('duration'), 1000),
            'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
            'uploader': info.get('billingCode'),
        }
    def _extract_theplatform_metadata(self, path, video_id):
        """Convenience wrapper: download then parse the preview metadata."""
        info = self._download_theplatform_metadata(path, video_id)
        return self._parse_theplatform_metadata(info)
class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
    """Extractor for link/player.theplatform.com URLs and theplatform: IDs."""
    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''
    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'ext': 'flv',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'duration': 247,
            'timestamp': 1383239700,
            'upload_date': '20131031',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'ext': 'flv',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
            'timestamp': 1426176191,
            'upload_date': '20150312',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'ext': 'mp4',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
            'uploader': 'EGSM',
        }
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': 'fb96bb3d85118930a5b055783a3bd992',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'ext': 'mp4',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'duration': 83.0,
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'uploader': 'NBCU-NEWS',
        },
    }, {
        # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
        # geo-restricted (US), HLS encrypted with AES-128
        'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
        'only_matching': True,
    }]
    @classmethod
    def _extract_urls(cls, webpage):
        """Return embedded theplatform player URLs found in *webpage*."""
        # Prefer og:video / twitter:player <meta> tags when present.
        m = re.search(
            r'''(?x)
                    <meta\s+
                        property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+
                        content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2
            ''', webpage)
        if m:
            return [m.group('url')]
        # Otherwise collect every iframe/script embed of the player.
        matches = re.findall(
            r'<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage)
        if matches:
            return list(zip(*matches))[1]
    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        """Append thePlatform URL signature: flags + hex expiry + HMAC-SHA1."""
        # '10' signs the query string as well; '00' signs the path only.
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)
        def str_to_hex(str):
            return binascii.b2a_hex(str.encode('ascii')).decode('ascii')
        def hex_to_bytes(hex):
            return binascii.a2b_hex(hex.encode('ascii'))
        relative_path = re.match(r'https?://link.theplatform.com/s/([^?]+)', url).group(1)
        clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return '%s&sig=%s' % (url, sig)
    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        mobj = re.match(self._VALID_URL, url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')
        if not provider_id:
            # 'theplatform:' style IDs carry no provider; use the default.
            provider_id = 'dJ5BDC'
        path = provider_id + '/'
        if mobj.group('media'):
            path += mobj.group('media')
        path += video_id
        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        if 'guid' in qs_dict:
            # GUID-style embeds resolve through the provider's feed; the
            # feed id has to be scraped out of the player's scripts.
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # feed id usually locates in the last script.
            # Seems there's no pattern for the interested script filename, so
            # I try one by one
            for script in reversed(scripts):
                feed_script = self._download_webpage(
                    self._proto_relative_url(script, 'http:'),
                    video_id, 'Downloading feed script')
                feed_id = self._search_regex(
                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
                    'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
                provider_id, feed_id, qs_dict['guid'][0]))
        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
        elif '/guid/' in url:
            headers = {}
            source_url = smuggled_data.get('source_url')
            if source_url:
                headers['Referer'] = source_url
            request = sanitized_Request(url, headers=headers)
            webpage = self._download_webpage(request, video_id)
            smil_url = self._search_regex(
                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
                webpage, 'smil url', group='url')
            path = self._search_regex(
                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
            # NOTE(review): due to conditional-expression precedence this
            # appends only '?' when the URL has no query string; the formats
            # parameter is only added in the '&' branch.  Likely intended:
            # ('?' if ... else '&') + 'formats=m3u,mpeg4' — confirm upstream.
            smil_url += '?' if '?' not in smil_url else '&' + 'formats=m3u,mpeg4'
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            if 'releaseUrl' in config:
                release_url = config['releaseUrl']
            else:
                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
            smil_url = release_url + '&formats=MPEG4&manifest=f4m'
        else:
            smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])
        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
        self._sort_formats(formats)
        ret = self._extract_theplatform_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })
        return ret
class ThePlatformFeedIE(ThePlatformBaseIE):
    """Extractor for feed.theplatform.com JSON feed URLs.

    A feed entry lists one or more media files; a SMIL document is fetched
    once per advertised asset type and the resulting formats are merged.
    """
    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[\w-]+))'
    _TESTS = [{
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '6e32495b5073ab414471b615c5ded394',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'ext': 'mp4',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'duration': 467.0,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
            'uploader': 'NBCU-NEWS',
        },
    }]

    def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query=None):
        """Download the feed entry selected by *filter_query* and build an
        info dict for it.

        custom_fields: optional callable mapping the raw feed entry to
            extra info-dict fields.
        asset_types_query: optional dict of per-asset-type extra query
            parameters.  ``None`` is treated as an empty mapping (the
            previous ``{}`` default was a shared mutable default argument).
        """
        if asset_types_query is None:
            asset_types_query = {}
        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
        entry = self._download_json(real_url, video_id)['entries'][0]

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        asset_types = []
        for item in entry['media$content']:
            smil_url = item['plfile$url']
            cur_video_id = ThePlatformIE._match_id(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            for asset_type in item['plfile$assetTypes']:
                # Fetch each asset type only once, even if several media
                # files advertise it.
                if asset_type in asset_types:
                    continue
                asset_types.append(asset_type)
                query = {
                    'mbr': 'true',
                    'formats': item['plfile$format'],
                    'assetTypes': asset_type,
                }
                if asset_type in asset_types_query:
                    query.update(asset_types_query[asset_type])
                cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
                    smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type)
                formats.extend(cur_formats)
                subtitles = self._merge_subtitles(subtitles, cur_subtitles)

        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id)
        # Use .get() like ThePlatformIE._real_extract does: the metadata
        # may not carry any subtitles at all.
        subtitles = self._merge_subtitles(subtitles, ret.get('subtitles', {}))
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })
        if custom_fields:
            ret.update(custom_fields(entry))

        return ret

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')
        filter_query = mobj.group('filter')

        return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
| unlicense |
Slezhuk/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_host.py | 33 | 5923 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_host
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower host.
description:
- Create, update, or destroy Ansible Tower hosts. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the host.
required: True
description:
description:
- The description to use for the host.
required: False
default: null
inventory:
description:
- Inventory the host should be made a member of.
required: True
enabled:
description:
- If the host should be enabled.
required: False
default: True
variables:
description:
- Variables to use for the host. Use '@' for a file.
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add tower host
tower_host:
name: localhost
description: "Local Host Group"
inventory: "Local Inventory"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import os
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
    """Ansible module entry point: create, update, or delete a Tower host."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            description = dict(),
            inventory = dict(required=True),
            enabled = dict(type='bool', default=True),
            variables = dict(),
            tower_host = dict(),
            tower_username = dict(),
            tower_password = dict(no_log=True),
            tower_verify_ssl = dict(type='bool', default=True),
            tower_config_file = dict(type='path'),
            state = dict(choices=['present', 'absent'], default='present'),
        ),
        supports_check_mode=True
    )
    # tower-cli is an optional third-party dependency; bail out early if its
    # import at module load time failed.
    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')
    name = module.params.get('name')
    description = module.params.get('description')
    inventory = module.params.get('inventory')
    enabled = module.params.get('enabled')
    state = module.params.get('state')
    variables = module.params.get('variables')
    if variables:
        # A leading '@' means "load the host variables from this file".
        if variables.startswith('@'):
            filename = os.path.expanduser(variables[1:])
            variables = module.contents_from_file(filename)
    json_output = {'host': name, 'state': state}
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        host = tower_cli.get_resource('host')
        try:
            # The host must belong to an existing inventory; resolve the
            # inventory name to its id first (NotFound is handled below).
            inv_res = tower_cli.get_resource('inventory')
            inv = inv_res.get(name=inventory)
            if state == 'present':
                # create_on_missing=True turns modify() into an upsert.
                result = host.modify(name=name, inventory=inv['id'], enabled=enabled,
                                     variables=variables, description=description, create_on_missing=True)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = host.delete(name=name, inventory=inv['id'])
        except (exc.NotFound) as excinfo:
            module.fail_json(msg='Failed to update host, inventory not found: {0}'.format(excinfo), changed=False)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to update host: {0}'.format(excinfo), changed=False)
    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
stannynuytkens/youtube-dl | youtube_dl/extractor/safari.py | 4 | 8444 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
sanitized_Request,
std_headers,
urlencode_postdata,
update_url_query,
)
class SafariBaseIE(InfoExtractor):
    """Shared login/session handling for the Safari Books Online extractors."""
    _LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
    _NETRC_MACHINE = 'safari'
    _API_BASE = 'https://www.safaribooksonline.com/api/v1'
    _API_FORMAT = 'json'
    # Class-level default; _login() sets an instance attribute of the same
    # name once a live session is confirmed.
    LOGGED_IN = False
    def _real_initialize(self):
        self._login()
    def _login(self):
        """Log in with the configured credentials, if any, and set LOGGED_IN."""
        username, password = self._get_login_info()
        if username is None:
            # No credentials configured - proceed anonymously.
            return
        headers = std_headers.copy()
        if 'Referer' not in headers:
            headers['Referer'] = self._LOGIN_URL
        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login form', headers=headers)
        def is_logged(webpage):
            # A logout link or "Sign Out" label means the session is live.
            return any(re.search(p, webpage) for p in (
                r'href=["\']/accounts/logout/', r'>Sign Out<'))
        if is_logged(login_page):
            self.LOGGED_IN = True
            return
        # Fresh login: the form requires the CSRF token from the login page.
        csrf = self._html_search_regex(
            r"name='csrfmiddlewaretoken'\s+value='([^']+)'",
            login_page, 'csrf token')
        login_form = {
            'csrfmiddlewaretoken': csrf,
            'email': username,
            'password1': password,
            'login': 'Sign In',
            'next': '',
        }
        request = sanitized_Request(
            self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
        login_page = self._download_webpage(
            request, None, 'Logging in')
        if not is_logged(login_page):
            raise ExtractorError(
                'Login failed; make sure your credentials are correct and try again.',
                expected=True)
        self.LOGGED_IN = True
class SafariIE(SafariBaseIE):
    """Extracts a single Safari Books Online video part via Kaltura."""
    IE_NAME = 'safari'
    IE_DESC = 'safaribooksonline.com online video'
    _VALID_URL = r'''(?x)
                        https?://
                            (?:www\.)?safaribooksonline\.com/
                            (?:
                                library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
                                videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
                            )
                    '''
    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html',
        'md5': 'dcc5a425e79f2564148652616af1f2a3',
        'info_dict': {
            'id': '0_qbqx90ic',
            'ext': 'mp4',
            'title': 'Introduction to Hadoop Fundamentals LiveLessons',
            'timestamp': 1437758058,
            'upload_date': '20150724',
            'uploader_id': 'stork',
        },
    }, {
        # non-digits in course id
        'url': 'https://www.safaribooksonline.com/library/view/create-a-nodejs/100000006A0210/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314/9780134217314-PYMC_13_00',
        'only_matching': True,
    }]
    # Fallback Kaltura partner/uiconf ids used when the page does not
    # declare its own.
    _PARTNER_ID = '1926081'
    _UICONF_ID = '29375172'
    def _real_extract(self, url):
        """Resolve the Kaltura reference id for the part and hand off to
        the Kaltura extractor via an mwEmbedFrame URL."""
        mobj = re.match(self._VALID_URL, url)
        reference_id = mobj.group('reference_id')
        if reference_id:
            # /videos/ URLs embed the Kaltura reference id directly.
            video_id = reference_id
            partner_id = self._PARTNER_ID
            ui_id = self._UICONF_ID
        else:
            # /library/view/ URLs: re-match after redirects, then fall back
            # to scraping the ids from data-* attributes in the page.
            video_id = '%s-%s' % (mobj.group('course_id'), mobj.group('part'))
            webpage, urlh = self._download_webpage_handle(url, video_id)
            mobj = re.match(self._VALID_URL, urlh.geturl())
            reference_id = mobj.group('reference_id')
            if not reference_id:
                reference_id = self._search_regex(
                    r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                    webpage, 'kaltura reference id', group='id')
            partner_id = self._search_regex(
                r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'kaltura widget id', default=self._PARTNER_ID,
                group='id')
            ui_id = self._search_regex(
                r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'kaltura uiconf id', default=self._UICONF_ID,
                group='id')
        query = {
            'wid': '_%s' % partner_id,
            'uiconf_id': ui_id,
            'flashvars[referenceId]': reference_id,
        }
        if self.LOGGED_IN:
            # A Kaltura session token ("ks") unlocks member-only streams;
            # best-effort only (fatal=False).
            kaltura_session = self._download_json(
                '%s/player/kaltura_session/?reference_id=%s' % (self._API_BASE, reference_id),
                video_id, 'Downloading kaltura session JSON',
                'Unable to download kaltura session JSON', fatal=False)
            if kaltura_session:
                session = kaltura_session.get('session')
                if session:
                    query['flashvars[ks]'] = session
        return self.url_result(update_url_query(
            'https://cdnapisec.kaltura.com/html5/html5lib/v2.37.1/mwEmbedFrame.php', query),
            'Kaltura')
class SafariApiIE(SafariBaseIE):
    """Resolves an API chapter URL to its web URL and delegates to SafariIE."""
    IE_NAME = 'safari:api'
    _VALID_URL = r'https?://(?:www\.)?safaribooksonline\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'
    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780134664057/chapter/RHCE_Introduction.html',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # The API response carries the human-facing web URL of the chapter;
        # actual extraction is done by SafariIE on that URL.
        mobj = re.match(self._VALID_URL, url)
        part = self._download_json(
            url, '%s/%s' % (mobj.group('course_id'), mobj.group('part')),
            'Downloading part JSON')
        return self.url_result(part['web_url'], SafariIE.ie_key())
class SafariCourseIE(SafariBaseIE):
    """Extracts a whole course as a playlist of chapter entries."""
    IE_NAME = 'safari:course'
    IE_DESC = 'safaribooksonline.com online courses'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?safaribooksonline\.com/
                            (?:
                                library/view/[^/]+|
                                api/v1/book|
                                videos/[^/]+
                            )|
                            techbus\.safaribooksonline\.com
                        )
                        /(?P<id>[^/]+)
                    '''
    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
        'info_dict': {
            'id': '9780133392838',
            'title': 'Hadoop Fundamentals LiveLessons',
        },
        'playlist_count': 22,
        'skip': 'Requires safaribooksonline account credentials',
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9781449396459/?override_format=json',
        'only_matching': True,
    }, {
        'url': 'http://techbus.safaribooksonline.com/9780134426365',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Single-part URLs are handled by SafariIE/SafariApiIE; only claim
        # the URL when neither of those matches.
        return (False if SafariIE.suitable(url) or SafariApiIE.suitable(url)
                else super(SafariCourseIE, cls).suitable(url))
    def _real_extract(self, url):
        """Return a playlist of chapter entries for the whole course."""
        course_id = self._match_id(url)
        course_json = self._download_json(
            '%s/book/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT),
            course_id, 'Downloading course JSON')
        if 'chapters' not in course_json:
            raise ExtractorError(
                'No chapters found for course %s' % course_id, expected=True)
        entries = [
            self.url_result(chapter, SafariApiIE.ie_key())
            for chapter in course_json['chapters']]
        course_title = course_json['title']
        return self.playlist_result(entries, course_id, course_title)
| unlicense |
arturtamborski/wypok | wypok/sections/models.py | 1 | 1554 | from django.db import models
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.conf import settings
from django.utils.text import slugify
from wypok.validators import FileValidator
from wypok.utils.markup import markup
from sections.validators import section_name_validator
def sections_background_path(instance, filename):
    # Upload path for a section's background image, built from the
    # project-level template keyed by section id and original file name.
    return settings.SECTIONS_BACKGROUND_PATH.format(id=instance.id, name=filename)
class SectionQuerySet(models.QuerySet):
    """Placeholder queryset for section-specific query methods."""
    pass
class Section(models.Model):
    """A wypok section (community board) administered by a single user."""
    objects = SectionQuerySet.as_manager()
    # Owner of the section; PROTECT prevents deleting a user who still
    # administers a section.
    admin = models.ForeignKey(get_user_model(), default='1', on_delete=models.PROTECT)
    name = models.SlugField(max_length=20, validators=[section_name_validator])
    description = models.TextField()
    # Rendered HTML of `description`; regenerated on every save().
    description_html = models.TextField(editable=False, blank=True)
    background = models.FileField(max_length=256, blank=True, null=True,
        upload_to=sections_background_path, default=settings.SECTIONS_DEFAULT_BACKGROUND,
        validators=[FileValidator(content_types=settings.SECTIONS_ALLOWED_CONTENT_TYPES)])
    def save(self, *args, **kwargs):
        # Normalize the name and render the markup *before* validating, so
        # full_clean() checks the values that are actually stored.
        self.name = slugify(self.name)
        self.description_html = markup(self.description)
        self.full_clean()
        super().save(*args, **kwargs)
    def __str__(self):
        return self.name
    def prettify(self):
        # Human-facing form of the section name, e.g. "/news/".
        return '/%s/' % self.name
    def get_owner(self):
        return self.admin
    def get_absolute_url(self):
        return reverse('sections:detail', args=[self.name])
| mit |
ilpianista/ansible | test/units/module_utils/facts/test_timeout.py | 31 | 5361 | # -*- coding: utf-8 -*-
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import sys
import time
import pytest
from ansible.module_utils.facts import timeout
@pytest.fixture
def set_gather_timeout_higher():
    # Temporarily raise the module-level gather timeout to 5 seconds; the
    # original value is restored after the test so state does not leak.
    default_timeout = timeout.GATHER_TIMEOUT
    timeout.GATHER_TIMEOUT = 5
    yield
    timeout.GATHER_TIMEOUT = default_timeout
@pytest.fixture
def set_gather_timeout_lower():
    # Temporarily lower the module-level gather timeout to 2 seconds; the
    # original value is restored after the test so state does not leak.
    default_timeout = timeout.GATHER_TIMEOUT
    timeout.GATHER_TIMEOUT = 2
    yield
    timeout.GATHER_TIMEOUT = default_timeout
@timeout.timeout
def sleep_amount_implicit(amount):
    """Sleep for *amount* seconds under the module's default timeout."""
    # implicit refers to the lack of argument to the decorator
    time.sleep(amount)
    return 'Succeeded after {0} sec'.format(amount)
@timeout.timeout(timeout.DEFAULT_GATHER_TIMEOUT + 5)
def sleep_amount_explicit_higher(amount):
    """Sleep for *amount* seconds under a timeout above the default."""
    # explicit refers to the argument to the decorator
    time.sleep(amount)
    return 'Succeeded after {0} sec'.format(amount)
@timeout.timeout(2)
def sleep_amount_explicit_lower(amount):
    """Sleep for *amount* seconds under an explicit 2-second timeout."""
    # explicit refers to the argument to the decorator
    time.sleep(amount)
    return 'Succeeded after {0} sec'.format(amount)
#
# Tests for how the timeout decorator is specified
#
def test_defaults_still_within_bounds():
# If the default changes outside of these bounds, some of the tests will
# no longer test the right thing. Need to review and update the timeouts
# in the other tests if this fails
assert timeout.DEFAULT_GATHER_TIMEOUT >= 4
def test_implicit_file_default_succeeds():
# amount checked must be less than DEFAULT_GATHER_TIMEOUT
assert sleep_amount_implicit(1) == 'Succeeded after 1 sec'
def test_implicit_file_default_timesout(monkeypatch):
monkeypatch.setattr(timeout, 'DEFAULT_GATHER_TIMEOUT', 1)
# sleep_time is greater than the default
sleep_time = timeout.DEFAULT_GATHER_TIMEOUT + 1
with pytest.raises(timeout.TimeoutError):
assert sleep_amount_implicit(sleep_time) == '(Not expected to succeed)'
def test_implicit_file_overridden_succeeds(set_gather_timeout_higher):
# Set sleep_time greater than the default timeout and less than our new timeout
sleep_time = 3
assert sleep_amount_implicit(sleep_time) == 'Succeeded after {0} sec'.format(sleep_time)
def test_implicit_file_overridden_timesout(set_gather_timeout_lower):
# Set sleep_time greater than our new timeout but less than the default
sleep_time = 3
with pytest.raises(timeout.TimeoutError):
assert sleep_amount_implicit(sleep_time) == '(Not expected to Succeed)'
def test_explicit_succeeds(monkeypatch):
monkeypatch.setattr(timeout, 'DEFAULT_GATHER_TIMEOUT', 1)
# Set sleep_time greater than the default timeout and less than our new timeout
sleep_time = 2
assert sleep_amount_explicit_higher(sleep_time) == 'Succeeded after {0} sec'.format(sleep_time)
def test_explicit_timeout():
# Set sleep_time greater than our new timeout but less than the default
sleep_time = 3
with pytest.raises(timeout.TimeoutError):
assert sleep_amount_explicit_lower(sleep_time) == '(Not expected to succeed)'
#
# Test that exception handling works
#
@timeout.timeout(1)
def function_times_out():
time.sleep(2)
# This is just about the same test as function_times_out but uses a separate process which is where
# we normally have our timeouts. It's more of an integration test than a unit test.
@timeout.timeout(1)
def function_times_out_in_run_command(am):
am.run_command([sys.executable, '-c', 'import time ; time.sleep(2)'])
@timeout.timeout(1)
def function_other_timeout():
raise TimeoutError('Vanilla Timeout')
@timeout.timeout(1)
def function_raises():
1 / 0
@timeout.timeout(1)
def function_catches_all_exceptions():
try:
time.sleep(10)
except BaseException:
raise RuntimeError('We should not have gotten here')
def test_timeout_raises_timeout():
with pytest.raises(timeout.TimeoutError):
assert function_times_out() == '(Not expected to succeed)'
@pytest.mark.parametrize('stdin', ({},), indirect=['stdin'])
def test_timeout_raises_timeout_integration_test(am):
with pytest.raises(timeout.TimeoutError):
assert function_times_out_in_run_command(am) == '(Not expected to succeed)'
def test_timeout_raises_other_exception():
with pytest.raises(ZeroDivisionError):
assert function_raises() == '(Not expected to succeed)'
def test_exception_not_caught_by_called_code():
with pytest.raises(timeout.TimeoutError):
assert function_catches_all_exceptions() == '(Not expected to succeed)'
| gpl-3.0 |
nathanial/lettuce | lettuce/plugins/colored_shell_output.py | 6 | 9741 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
from lettuce import core
from lettuce import strings
from lettuce import terminal
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
def wrt(what):
    # Write *what* to stdout, UTF-8-encoding unicode text first.
    # NOTE(review): `unicode` exists only on Python 2; this module is
    # Python 2 era code and this branch fails on Python 3.
    if isinstance(what, unicode):
        what = what.encode('utf-8')
    sys.stdout.write(what)
def wrap_file_and_line(string, start, end):
    """Wrap every '# <file>:<line>' reference in *string* with the *start*
    and *end* markers (typically ANSI color escape sequences)."""
    # Raw strings throughout: the original replacement template was a
    # non-raw string containing '\g', an invalid escape sequence that
    # raises a DeprecationWarning/SyntaxWarning on modern Python.
    return re.sub(r'([#] [^:]+[:]\d+)', r'%s\g<1>%s' % (start, end), string)
def wp(l):
    """Recolor scenario-table separators in an already-colored line.

    The " |" delimiter is painted bright white and then the line's own
    color is resumed; which escape is resumed depends on the escape the
    line starts with.
    """
    separator = " |"
    white = "\033[1;37m"
    # (line prefix, escape restored after each separator)
    for prefix, restore in (
        ("\033[1;32m", "\033[1;32m"),
        ("\033[1;36m", "\033[1;36m"),
        ("\033[0;36m", "\033[0;36m"),
        ("\033[0;31m", "\033[0;31m"),
        ("\033[1;30m", "\033[1;30m"),
        # NOTE: bright-red lines deliberately resume as dark red, matching
        # the original behavior.
        ("\033[1;31m", "\033[0;31m"),
    ):
        if l.startswith(prefix):
            l = l.replace(separator, white + separator + restore)
    return l
def write_out(what):
    # Colorize table separators (wp) before writing to stdout (wrt).
    wrt(wp(what))
@before.each_step
def print_step_running(step):
if not step.defined_at or not step.display:
return
color = '\033[1;30m'
if step.scenario and step.scenario.outlines:
color = '\033[0;36m'
string = step.represent_string(step.original_sentence)
string = wrap_file_and_line(string, '\033[1;30m', '\033[0m')
write_out("%s%s" % (color, string))
if step.hashes and step.defined_at:
for line in step.represent_hashes().splitlines():
write_out("\033[1;30m%s\033[0m\n" % line)
@after.each_step
def print_step_ran(step):
if not step.display:
return
if step.scenario and step.scenario.outlines and (step.failed or step.passed or step.defined_at):
return
if step.hashes and step.defined_at:
write_out("\033[A" * (len(step.hashes) + 1))
string = step.represent_string(step.original_sentence)
if not step.failed:
string = wrap_file_and_line(string, '\033[1;30m', '\033[0m')
prefix = '\033[A'
width, height = terminal.get_size()
lines_up = len(string) / float(width)
if lines_up < 1:
lines_up = 1
else:
lines_up = int(lines_up) + 1
#prefix = prefix * lines_up
if step.failed:
color = "\033[0;31m"
string = wrap_file_and_line(string, '\033[1;41;33m', '\033[0m')
elif step.passed:
color = "\033[1;32m"
elif step.defined_at:
color = "\033[0;36m"
else:
color = "\033[0;33m"
prefix = ""
write_out("%s%s%s" % (prefix, color, string))
if step.hashes:
for line in step.represent_hashes().splitlines():
write_out("%s%s\033[0m\n" % (color, line))
if step.failed:
wrt("\033[1;31m")
pspaced = lambda x: wrt("%s%s" % (" " * step.indentation, x))
lines = step.why.traceback.splitlines()
for pindex, line in enumerate(lines):
pspaced(line)
if pindex + 1 < len(lines):
wrt("\n")
wrt("\033[0m\n")
@before.each_scenario
def print_scenario_running(scenario):
if scenario.background:
# Only print the background on the first scenario run
# So, we determine if this was called previously with the attached background.
# If so, skip the print_scenario() since we'll call it again in the after_background.
if not hasattr(world, 'background_scenario_holder'):
world.background_scenario_holder = {}
if scenario.background not in world.background_scenario_holder:
# We haven't seen this background before, add our 1st scenario
world.background_scenario_holder[scenario.background] = scenario
return
string = scenario.represented()
string = wrap_file_and_line(string, '\033[1;30m', '\033[0m')
write_out("\n\033[1;37m%s" % string)
@after.outline
def print_outline(scenario, order, outline, reasons_to_fail):
table = strings.dicts_to_string(scenario.outlines, scenario.keys)
lines = table.splitlines()
head = lines.pop(0)
wline = lambda x: write_out("\033[0;36m%s%s\033[0m\n" % (" " * scenario.table_indentation, x))
wline_success = lambda x: write_out("\033[1;32m%s%s\033[0m\n" % (" " * scenario.table_indentation, x))
wline_red_outline = lambda x: write_out("\033[1;31m%s%s\033[0m\n" % (" " * scenario.table_indentation, x))
wline_red = lambda x: write_out("%s%s" % (" " * scenario.table_indentation, x))
if order is 0:
wrt("\n")
wrt("\033[1;37m%s%s:\033[0m\n" % (" " * scenario.indentation, scenario.language.first_of_examples))
wline(head)
line = lines[order]
if reasons_to_fail:
wline_red_outline(line)
else:
wline_success(line)
if reasons_to_fail:
elines = reasons_to_fail[0].traceback.splitlines()
wrt("\033[1;31m")
for pindex, line in enumerate(elines):
wline_red(line)
if pindex + 1 < len(elines):
wrt("\n")
wrt("\033[0m\n")
@before.each_feature
def print_feature_running(feature):
string = feature.represented()
lines = string.splitlines()
write_out("\n")
for line in lines:
line = wrap_file_and_line(line, '\033[1;30m', '\033[0m')
write_out("\033[1;37m%s\n" % line)
@after.harvest
@after.all
def print_end(total=None):
if total is None:
return
write_out("\n")
if isinstance(total, core.SummaryTotalResults):
word = total.features_ran_overall > 1 and "features" or "feature"
color = "\033[1;32m"
if total.features_passed_overall is 0:
color = "\033[0;31m"
write_out("\033[1;37mTest Suite Summary:\n")
write_out("\033[1;37m%d %s (%s%d passed\033[1;37m)\033[0m\n" % (
total.features_ran_overall,
word,
color,
total.features_passed_overall))
else:
word = total.features_ran > 1 and "features" or "feature"
color = "\033[1;32m"
if total.features_passed is 0:
color = "\033[0;31m"
write_out("\033[1;37m%d %s (%s%d passed\033[1;37m)\033[0m\n" % (
total.features_ran,
word,
color,
total.features_passed))
color = "\033[1;32m"
if total.scenarios_passed is 0:
color = "\033[0;31m"
word = total.scenarios_ran > 1 and "scenarios" or "scenario"
write_out("\033[1;37m%d %s (%s%d passed\033[1;37m)\033[0m\n" % (
total.scenarios_ran,
word,
color,
total.scenarios_passed))
steps_details = []
kinds_and_colors = {
'failed': '\033[0;31m',
'skipped': '\033[0;36m',
'undefined': '\033[0;33m'
}
for kind, color in kinds_and_colors.items():
attr = 'steps_%s' % kind
stotal = getattr(total, attr)
if stotal:
steps_details.append("%s%d %s" % (color, stotal, kind))
steps_details.append("\033[1;32m%d passed\033[1;37m" % total.steps_passed)
word = total.steps > 1 and "steps" or "step"
content = "\033[1;37m, ".join(steps_details)
word = total.steps > 1 and "steps" or "step"
write_out("\033[1;37m%d %s (%s)\033[0m\n" % (
total.steps,
word,
content))
if total.proposed_definitions:
wrt("\n\033[0;33mYou can implement step definitions for undefined steps with these snippets:\n\n")
wrt("# -*- coding: utf-8 -*-\n")
wrt("from lettuce import step\n\n")
last = len(total.proposed_definitions) - 1
for current, step in enumerate(total.proposed_definitions):
method_name = step.proposed_method_name
wrt("@step(u'%s')\n" % step.proposed_sentence)
wrt("def %s:\n" % method_name)
wrt(" assert False, 'This step must be implemented'")
if current is last:
wrt("\033[0m")
wrt("\n")
if total.failed_scenario_locations:
# print list of failed scenarios, with their file and line number
wrt("\n")
wrt("\033[1;31m")
wrt("List of failed scenarios:\n")
wrt("\033[0;31m")
for scenario in total.failed_scenario_locations:
wrt(scenario)
wrt("\033[0m")
wrt("\n")
def print_no_features_found(where):
    # Report, in color, that no feature files were found under `where`,
    # displayed as a path relative to the current directory.
    where = core.fs.relpath(where)
    if not where.startswith(os.sep):
        where = '.%s%s' % (os.sep, where)
    write_out('\033[1;31mOops!\033[0m\n')
    write_out(
        '\033[1;37mcould not find features at '
        '\033[1;33m%s\033[0m\n' % where)
@before.each_background
def print_background_running(background):
wrt('\n')
wrt('\033[1;37m')
wrt(background.represented())
wrt('\033[0m\n')
@after.each_background
def print_first_scenario_running(background, results):
scenario = world.background_scenario_holder[background]
print_scenario_running(scenario)
| gpl-3.0 |
yelongyu/chihu | venv/lib/python2.7/site-packages/sqlalchemy/testing/exclusions.py | 34 | 12570 | # testing/exclusions.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from ..util import decorator
from . import config
from .. import util
import inspect
import contextlib
from sqlalchemy.util.compat import inspect_getargspec
def skip_if(predicate, reason=None):
    """Return a :class:`compound` rule that skips a test when *predicate*
    holds for the current configuration."""
    rule = compound()
    pred = _as_predicate(predicate, reason)
    rule.skips.add(pred)
    return rule
def fails_if(predicate, reason=None):
    """Return a :class:`compound` rule that marks a test as an expected
    failure when *predicate* holds for the current configuration."""
    rule = compound()
    pred = _as_predicate(predicate, reason)
    rule.fails.add(pred)
    return rule
class compound(object):
def __init__(self):
self.fails = set()
self.skips = set()
self.tags = set()
def __add__(self, other):
return self.add(other)
def add(self, *others):
copy = compound()
copy.fails.update(self.fails)
copy.skips.update(self.skips)
copy.tags.update(self.tags)
for other in others:
copy.fails.update(other.fails)
copy.skips.update(other.skips)
copy.tags.update(other.tags)
return copy
def not_(self):
copy = compound()
copy.fails.update(NotPredicate(fail) for fail in self.fails)
copy.skips.update(NotPredicate(skip) for skip in self.skips)
copy.tags.update(self.tags)
return copy
@property
def enabled(self):
return self.enabled_for_config(config._current)
def enabled_for_config(self, config):
for predicate in self.skips.union(self.fails):
if predicate(config):
return False
else:
return True
def matching_config_reasons(self, config):
return [
predicate._as_string(config) for predicate
in self.skips.union(self.fails)
if predicate(config)
]
def include_test(self, include_tags, exclude_tags):
return bool(
not self.tags.intersection(exclude_tags) and
(not include_tags or self.tags.intersection(include_tags))
)
def _extend(self, other):
self.skips.update(other.skips)
self.fails.update(other.fails)
self.tags.update(other.tags)
def __call__(self, fn):
if hasattr(fn, '_sa_exclusion_extend'):
fn._sa_exclusion_extend._extend(self)
return fn
@decorator
def decorate(fn, *args, **kw):
return self._do(config._current, fn, *args, **kw)
decorated = decorate(fn)
decorated._sa_exclusion_extend = self
return decorated
@contextlib.contextmanager
def fail_if(self):
all_fails = compound()
all_fails.fails.update(self.skips.union(self.fails))
try:
yield
except Exception as ex:
all_fails._expect_failure(config._current, ex)
else:
all_fails._expect_success(config._current)
def _do(self, config, fn, *args, **kw):
for skip in self.skips:
if skip(config):
msg = "'%s' : %s" % (
fn.__name__,
skip._as_string(config)
)
config.skip_test(msg)
try:
return_value = fn(*args, **kw)
except Exception as ex:
self._expect_failure(config, ex, name=fn.__name__)
else:
self._expect_success(config, name=fn.__name__)
return return_value
def _expect_failure(self, config, ex, name='block'):
for fail in self.fails:
if fail(config):
print(("%s failed as expected (%s): %s " % (
name, fail._as_string(config), str(ex))))
break
else:
util.raise_from_cause(ex)
def _expect_success(self, config, name='block'):
if not self.fails:
return
for fail in self.fails:
if not fail(config):
break
else:
raise AssertionError(
"Unexpected success for '%s' (%s)" %
(
name,
" and ".join(
fail._as_string(config)
for fail in self.fails
)
)
)
def requires_tag(tagname):
    """Return a rule set requiring the single tag ``tagname``."""
    return tags([tagname])
def tags(tagnames):
    """Return a new ``compound`` rule set carrying the given tag names."""
    comp = compound()
    comp.tags.update(tagnames)
    return comp
def only_if(predicate, reason=None):
    """Skip a test unless ``predicate`` matches (skip on its negation)."""
    predicate = _as_predicate(predicate)
    return skip_if(NotPredicate(predicate), reason)
def succeeds_if(predicate, reason=None):
    """Expect success only when ``predicate`` matches; fail otherwise."""
    predicate = _as_predicate(predicate)
    return fails_if(NotPredicate(predicate), reason)
class Predicate(object):
    """Base class for config predicates used by skip/fail exclusion rules.

    Subclasses implement ``__call__(config)`` returning a bool, and
    ``_as_string(config, negate=False)`` returning a human-readable
    description of the rule.
    """

    @classmethod
    def as_predicate(cls, predicate, description=None):
        """Coerce ``predicate`` into a :class:`Predicate` instance.

        Accepted forms: an existing Predicate, a ``compound`` rule set
        (its ``enabled_for_config`` becomes the callable), a list/set
        (members OR-ed together), a ``(db, op, spec)`` tuple, a spec
        string such as ``"mysql >= 5.5"``, or any plain callable.
        """
        if isinstance(predicate, compound):
            return cls.as_predicate(predicate.enabled_for_config, description)
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate],
                description)
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, util.string_types):
            # "db [op [dotted-version]]", e.g. "mysql >= 5.5"
            tokens = predicate.split(" ", 2)
            op = spec = None
            db = tokens.pop(0)
            if tokens:
                op = tokens.pop(0)
            if tokens:
                spec = tuple(int(d) for d in tokens.pop(0).split("."))
            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate

    def _format_description(self, config, negate=False):
        """Render ``self.description`` against ``config``.

        When ``negate`` is set, the evaluated boolean is inverted so the
        "does support"/"doesn't support" placeholders read correctly.
        Bug fix: this previously assigned ``bool_ = not negate`` (always
        False on the negated path) instead of inverting the evaluated
        result.
        """
        bool_ = self(config)
        if negate:
            bool_ = not bool_
        return self.description % {
            "driver": config.db.url.get_driver_name(),
            "database": config.db.url.get_backend_name(),
            "doesnt_support": "doesn't support" if bool_ else "does support",
            "does_support": "does support" if bool_ else "doesn't support"
        }

    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()


class BooleanPredicate(Predicate):
    """Predicate with a fixed truth value, e.g. an unconditional skip."""

    def __init__(self, value, description=None):
        self.value = value
        self.description = description or "boolean %s" % value

    def __call__(self, config):
        return self.value

    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)


class SpecPredicate(Predicate):
    """Match a database backend, optionally with a server-version test.

    ``db`` may be ``"dialect"`` or ``"dialect+driver"``; ``op``/``spec``
    express a version comparison such as ``(">=", (5, 5))``.
    """

    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description

    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        # NOTE: operator.contains(version, spec) tests ``spec in version``.
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }

    def __call__(self, config):
        engine = config.db

        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None

        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False

        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"

            version = _server_version(engine)
            # ``op`` may be a callable or a key into the _ops table.
            oper = self.op if callable(self.op) else self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True

    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (self.db, self.op, self.spec)
            else:
                return "%s %s %s" % (self.db, self.op, self.spec)


class LambdaPredicate(Predicate):
    """Wrap an arbitrary callable; zero-argument callables are adapted."""

    def __init__(self, lambda_, description=None, args=None, kw=None):
        spec = inspect_getargspec(lambda_)
        if not spec[0]:
            # No positional parameters: adapt so the config arg is dropped.
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"

    def __call__(self, config):
        return self.lambda_(config)

    def _as_string(self, config, negate=False):
        return self._format_description(config)


class NotPredicate(Predicate):
    """Invert another predicate."""

    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        if self.description:
            return self._format_description(config, not negate)
        else:
            return self.predicate._as_string(config, not negate)


class OrPredicate(Predicate):
    """True when any member predicate matches."""

    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        for pred in self.predicates:
            if pred(config):
                return True
        return False

    def _eval_str(self, config, negate=False):
        if negate:
            conjunction = " and "
        else:
            conjunction = " or "
        return conjunction.join(p._as_string(config, negate=negate)
                                for p in self.predicates)

    def _negation_str(self, config):
        if self.description is not None:
            return "Not " + self._format_description(config)
        else:
            return self._eval_str(config, negate=True)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        else:
            if self.description is not None:
                return self._format_description(config)
            else:
                return self._eval_str(config)


# Module-level alias used throughout this file.
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
    """True if the active config matches the (db, op, spec) exclusion."""
    return SpecPredicate(db, op, spec)(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
    """Return a predicate matching any of the given database specs."""
    return OrPredicate(
        [Predicate.as_predicate(db) for db in dbs]
    )
def open():
    """Rule that never skips (a skip_if on a False predicate).

    NOTE(review): shadows the ``open`` builtin at module scope; kept for
    backward compatibility with existing callers.
    """
    return skip_if(BooleanPredicate(False, "mark as execute"))
def closed():
    """Rule that always skips."""
    return skip_if(BooleanPredicate(True, "marked as skip"))
def fails(reason=None):
    """Rule marking a test as always expected to fail."""
    return fails_if(BooleanPredicate(True, reason or "expected to fail"))
@decorator
def future(fn, *arg):
    """Mark a test as a future feature (expected failure for now).

    NOTE(review): returns a fails_if rule rather than invoking ``fn``;
    relies on the semantics of the ``decorator`` helper defined
    elsewhere in this module — confirm against callers.
    """
    return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
    """Expect failure on the given database spec."""
    return fails_if(SpecPredicate(db), reason)
def fails_on_everything_except(*dbs):
    """Expect failure on every backend except the given specs."""
    return succeeds_if(
        OrPredicate([
            SpecPredicate(db) for db in dbs
        ])
    )
def skip(db, reason=None):
    """Skip the test on the given database spec."""
    return skip_if(SpecPredicate(db), reason)
def only_on(dbs, reason=None):
    """Run only on the given database spec(s); skip everywhere else.

    ``reason`` is forwarded to the underlying skip rule; previously it
    was accepted but silently dropped, inconsistent with ``skip()`` and
    ``fails_on()``.
    """
    return only_if(
        OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)]),
        reason
    )
def exclude(db, op, spec, reason=None):
    """Skip on backends matching the (db, op, version-spec) triple."""
    return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
    """True if any of ``queries`` (coerced to predicates) matches config."""
    assert queries, "no queries sent!"
    return OrPredicate([
        Predicate.as_predicate(query)
        for query in queries
    ])(config)
| gpl-3.0 |
tufts-LPC/blockytalky | blockytalky/priv/hw_apis/btbrickpi.py | 2 | 1256 | from BrickPi import *
#Generic (functional) API for brickpi operations
#Author: Matthew Ahrens
#Originally created: June.13.2015
#
# Sentinel meaning "leave this sensor port unconfigured".
NO_SENSOR = -1


def setup(sensor1 = NO_SENSOR, sensor2 = NO_SENSOR, sensor3 = NO_SENSOR, sensor4 = NO_SENSOR):
    """Initialize the BrickPi board.

    Configures each sensor port whose type was supplied, then enables
    all four motor ports.  Uses the globals injected by
    ``from BrickPi import *``.
    """
    BrickPiSetup()
    if(sensor1 != NO_SENSOR):
        BrickPi.SensorType[PORT_1] = sensor1
    if(sensor2 != NO_SENSOR):
        BrickPi.SensorType[PORT_2] = sensor2
    if(sensor3 != NO_SENSOR):
        BrickPi.SensorType[PORT_3] = sensor3
    if(sensor4 != NO_SENSOR):
        BrickPi.SensorType[PORT_4] = sensor4
    # Push the sensor configuration to the hardware before enabling motors.
    BrickPiSetupSensors()
    BrickPi.MotorEnable[PORT_A] = 1  # Enable the Motor A
    BrickPi.MotorEnable[PORT_B] = 1  # Enable the Motor B
    BrickPi.MotorEnable[PORT_C] = 1  # Enable the Motor C
    BrickPi.MotorEnable[PORT_D] = 1  # Enable the Motor D
    return
def get_sensor_value(port_num):
    """Refresh hardware state and return the sensor reading for ``port_num``."""
    BrickPiUpdateValues()
    return BrickPi.Sensor[port_num]
def set_sensor_type(port_num, sensor_type):
    """Set the sensor type for ``port_num`` and re-sync the sensor config."""
    BrickPi.SensorType[port_num] = sensor_type
    BrickPiSetupSensors()
    return
def get_encoder_value(port_num):
    """Refresh hardware state and return the motor encoder value for ``port_num``."""
    BrickPiUpdateValues()
    return BrickPi.Encoder[port_num]
def set_motor_value(port_num, value):
    """Set the motor speed for ``port_num`` and push it to the hardware."""
    BrickPi.MotorSpeed[port_num] = value
    BrickPiUpdateValues()
    return
| apache-2.0 |
indictranstech/buyback-erp | erpnext/startup/notifications.py | 38 | 1142 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def get_notification_config():
    """Return notification count filters keyed by DocType.

    Each entry maps a DocType to the filter describing its "open"
    documents: pending workflow status, or draft ``docstatus`` 0.
    """
    open_status_doctypes = (
        "Support Ticket",
        "Customer Issue",
        "Task",
        "Lead",
        "Contact",
        "Leave Application",
        "Job Applicant",
    )
    draft_docstatus_doctypes = (
        "Opportunity",
        "Quotation",
        "Sales Order",
        "Journal Voucher",
        "Sales Invoice",
        "Purchase Invoice",
        "Purchase Receipt",
        "Delivery Note",
        "Stock Entry",
        "Material Request",
        "Purchase Order",
        "Production Order",
        "BOM",
        "Timesheet",
    )

    for_doctype = {name: {"status": "Open"} for name in open_status_doctypes}
    for_doctype.update((name, {"docstatus": 0}) for name in draft_docstatus_doctypes)

    # DocTypes with their own draft semantics.
    for_doctype["Expense Claim"] = {"approval_status": "Draft"}
    for_doctype["Time Log"] = {"status": "Draft"}
    for_doctype["Time Log Batch"] = {"status": "Draft"}

    return {"for_doctype": for_doctype}
Eibriel/scripts | blender/auto_import_vray_materials.py | 1 | 1523 | import os
import bpy
import json
import pprint
# Batch-import the per-material V-Ray .vrscene files exported for the
# current .blend, then re-link every object's material slots to the
# imported copies and normalize the imported material names.
D = bpy.data
C = bpy.context

# Base name of the current .blend file (no directory, no extension).
file = D.filepath
file = os.path.split( file )[1]
file = file.split('.')[0]
vrscene_directory = '//vraymaterials/{0}/'.format(file)

print (file)

material_tree = {}
files2open = []

# material_tree.json maps original material names to exported V-Ray
# node-group names (assumed layout — TODO confirm against the exporter).
material_tree_file = os.path.join(os.path.split(D.filepath)[0], 'vraymaterials', file, 'material_tree.json')
with open(material_tree_file, 'r') as jfile:
    jtext = jfile.read()
jdata = json.loads( jtext )

for mat in jdata:
    vrngname = jdata[mat].replace('.', '_')
    matname = '{0}.vrscene'.format(vrngname)
    #print (D.materials[matname])
    vrscene_path = os.path.join(os.path.split(D.filepath)[0], 'vraymaterials', file, matname)
    bpy.ops.vray.import_material(file_path=vrscene_path)
    # Put the imported material back into its original slot(s).
    for obj in D.objects:
        for mslot in obj.material_slots:
            if mslot and mslot.material and not mslot.material.is_library_indirect:
                if mslot.material.name == mat:
                    print ( "{0} -> {1}".format(mslot.material.name, D.materials[matname].name) )
                    mslot.material = D.materials[matname]

# Normalize imported names: drop the leading "MA" shim and the trailing
# ".vrscene" suffix added during export.
for obj in D.objects:
    for mslot in obj.material_slots:
        if mslot and mslot.material:
            if mslot.material.name[:5] == 'MAsh_':
                mslot.material.name = mslot.material.name[2:]
            if mslot.material.name[-8:] == '.vrscene':
                mslot.material.name = mslot.material.name[:-8]
ctk3b/msibi | msibi/tests/test_msibi.py | 2 | 1303 | import pytest
from msibi.optimize import MSIBI
from msibi.tests.test_pair import init_state
# Shared RDF/potential bin count used across these tests.
n_bins = 151


def test_msibi_init_single_cutoff():
    """With one cutoff, the potential cutoff mirrors the RDF cutoff."""
    opt = MSIBI(2.5, n_bins)
    assert opt.pot_cutoff == opt.rdf_cutoff
    assert opt.n_rdf_points == n_bins
    assert opt.rdf_n_bins == n_bins
    assert opt.r_switch == 14.6/6.0
    assert opt.dr == 0.1/6.0
    assert opt.smooth_rdfs is False
    assert opt.rdf_r_range.shape[0] == 2
    assert opt.pot_r.shape[0] == n_bins
def test_msibi_init_multiple_cutoff():
    """A distinct pot_cutoff changes r_switch and the potential grid size."""
    opt = MSIBI(2.5, n_bins, pot_cutoff=2.0)
    assert opt.pot_cutoff != opt.rdf_cutoff
    assert opt.n_rdf_points == n_bins
    assert opt.rdf_n_bins == n_bins
    assert opt.r_switch == 11.6/6.0
    assert opt.dr == 0.1/6.0
    assert opt.smooth_rdfs is False
    assert opt.rdf_r_range.shape[0] == 2
    assert opt.pot_r.shape[0] != n_bins
    # pot grid is pot_cutoff / dr + 1 points for these inputs.
    assert opt.pot_r.shape[0] == 121
def test_msibi_optimize_states():
    """Smoke test: optimize() runs with zero iterations on one state."""
    pair, state0, rdf = init_state(0)
    opt = MSIBI(2.5, n_bins, pot_cutoff=2.5)
    opt.optimize([state0], [pair], n_iterations=0, engine='hoomd')
def test_rdf_length():
    """A target RDF whose length mismatches n_rdf_points must be rejected."""
    pair, state0, rdf = init_state(0)
    opt = MSIBI(2.5, n_bins + 1, pot_cutoff=2.5)
    with pytest.raises(ValueError):
        opt.optimize([state0], [pair], n_iterations=0, engine='hoomd')
| mit |
tailorian/Sick-Beard | lib/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
    """Prober for escape-sequence encodings.

    Runs one coding state machine per candidate charset (HZ-GB-2312 and
    the ISO-2022 CN/JP/KR family) over the input and reports the first
    machine that conclusively recognizes its encoding.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # One state machine per escape-encoded charset we can detect.
        self._mCodingSM = [
            CodingStateMachine(HZSMModel),
            CodingStateMachine(ISO2022CNSMModel),
            CodingStateMachine(ISO2022JPSMModel),
            CodingStateMachine(ISO2022KRSMModel)
        ]
        self.reset()

    def reset(self):
        """Reactivate and reset every state machine for a fresh buffer."""
        CharSetProber.reset(self)
        for codingSM in self._mCodingSM:
            if not codingSM:
                continue
            codingSM.active = True
            codingSM.reset()
        # Count of machines still in the running.
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        # Name reported by the winning state machine, or None.
        return self._mDetectedCharset

    def get_confidence(self):
        # Escape sequences are essentially unambiguous once matched.
        if self._mDetectedCharset:
            return 0.99
        else:
            return 0.00

    def feed(self, aBuf):
        """Feed bytes to every active machine.

        A machine reaching the error state is deactivated; when all
        machines fail the prober reports eNotMe.  A machine reaching
        eItsMe fixes the detected charset and ends detection.
        """
        for c in aBuf:
            # PY3K: aBuf is a byte array, so c is an int, not a byte
            for codingSM in self._mCodingSM:
                if not codingSM:
                    continue
                if not codingSM.active:
                    continue
                codingState = codingSM.next_state(wrap_ord(c))
                if codingState == constants.eError:
                    codingSM.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif codingState == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = codingSM.get_coding_state_machine()  # nopep8
                    return self.get_state()

        return self.get_state()
| gpl-3.0 |
stevekew/oakleydb | infra/oakleydbdal/mappingdal.py | 1 | 2432 | from core.logger import Logger
import time
class MappingDal(object):
    """Data-access layer for the ``familystylemap`` table.

    Rows are valid when ``validfrom`` is in the past and ``validto`` is
    either the MySQL zero-date sentinel or in the future.  All methods
    now release pooled connections and close cursors in ``finally``
    blocks, so a failing query no longer leaks a connection.
    """

    def __init__(self, connection_pool):
        self.logger = Logger(self.__class__.__name__).get()
        self.connection_pool = connection_pool

    @staticmethod
    def _utc_now():
        """Current UTC time formatted for the DB's DATETIME columns."""
        return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())

    def style_family_mapping_exists(self, style_id, family_id):
        """Return True if a currently-valid style/family mapping exists."""
        return self.get_style_family_mapping_id(style_id, family_id) != -1

    def get_style_family_mapping_id(self, style_id, family_id):
        """Return the id of the valid mapping for the pair, or -1 if none."""
        query = ("SELECT id, styleid, familyid FROM familystylemap "
                 "WHERE styleid = %s "
                 "AND familyid = %s "
                 "AND validfrom < %s "
                 "AND ((validto = '0000-00-00 00:00:00') OR (validto >= %s))")

        now = self._utc_now()
        mapping_id = -1

        cnx = self.connection_pool.get_connection()
        try:
            cursor = cnx.cursor()
            try:
                data = (style_id, family_id, now, now)
                self.logger.debug(
                    "Getting mapping with query [%s] and data [%s]",
                    query, data)
                cursor.execute(query, data)
                for (c_id, c_styleid, c_familyid) in cursor:
                    if c_styleid == style_id and c_familyid == family_id:
                        mapping_id = c_id
            finally:
                cursor.close()
        finally:
            self.connection_pool.release_connection(cnx)

        return mapping_id

    def get_last_style_family_mapping_id(self):
        """Return the highest mapping id in the table, or -1 when empty."""
        style_query = "SELECT MAX(id) FROM familystylemap"

        ret_id = -1
        cnx = self.connection_pool.get_connection()
        try:
            cursor = cnx.cursor()
            try:
                cursor.execute(style_query)
                for c_id in cursor:
                    # MAX(id) yields a single-column row; NULL when empty.
                    if c_id is not None and c_id[0] is not None:
                        ret_id = int(c_id[0])
            finally:
                cursor.close()
        finally:
            self.connection_pool.release_connection(cnx)

        return ret_id

    def insert_style_family_mapping(self, style_id, family_id, source_id):
        """Insert a mapping valid from now and return its new row id."""
        query = ("INSERT INTO familystylemap "
                 "(styleid, familyid, sourceid, validfrom) "
                 "VALUES (%s, %s, %s, %s)")

        now = self._utc_now()
        cnx = self.connection_pool.get_connection()
        try:
            cursor = cnx.cursor()
            try:
                data = (style_id, family_id, source_id, now)
                cursor.execute(query, data)
                cnx.commit()
                mapping_id = int(cursor.lastrowid)
            finally:
                cursor.close()
        finally:
            self.connection_pool.release_connection(cnx)

        return mapping_id
| mpl-2.0 |
artscoop/django-extensions | django_extensions/management/commands/export_emails.py | 28 | 5239 | from csv import writer
from optparse import make_option
from sys import stdout
import six
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand, CommandError
from django_extensions.compat import get_user_model
from django_extensions.management.utils import signalcommand
# Supported export formats; each name matches a Command method below.
FORMATS = [
    'address',
    'emails',
    'google',
    'outlook',
    'linkedin',
    'vcard',
]


def full_name(first_name, last_name, username, **extra):
    """Return "first last" from the non-empty parts, or ``username``.

    Extra keyword values (e.g. the 'email' column) are accepted and
    ignored so a user-values dict can be splatted straight in.  Uses a
    ``u""`` literal instead of the previous runtime ``six.u()`` call —
    equivalent on all supported interpreters (py2.7 / py3.3+).
    """
    name = u" ".join(part for part in (first_name, last_name) if part)
    return name or username
class Command(BaseCommand):
    """Management command: export user email addresses in several formats.

    The output format name (``--format``) selects one of the exporter
    methods below via ``getattr``; each method takes the user queryset
    and an open output stream.
    """
    option_list = BaseCommand.option_list + (
        make_option('--group', '-g', action='store', dest='group', default=None,
                    help='Limit to users which are part of the supplied group name'),
        make_option('--format', '-f', action='store', dest='format', default=FORMATS[0],
                    help="output format. May be one of '" + "', '".join(FORMATS) + "'."),
    )

    help = ("Export user email address list in one of a number of formats.")
    args = "[output file]"
    label = 'filename to save to'

    can_import_settings = True
    encoding = 'utf-8'  # RED_FLAG: add as an option -DougN

    @signalcommand
    def handle(self, *args, **options):
        """Validate arguments, build the user queryset and dispatch.

        Writes to the optional positional output file, or stdout when
        omitted or given as '-'.
        """
        if len(args) > 1:
            raise CommandError("extra arguments supplied")
        group = options['group']
        # Reject unknown group names with a helpful list of valid ones.
        if group and not Group.objects.filter(name=group).count() == 1:
            names = six.u("', '").join(g['name'] for g in Group.objects.values('name')).encode('utf-8')
            if names:
                names = "'" + names + "'."
            raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
        if len(args) and args[0] != '-':
            outfile = open(args[0], 'w')
        else:
            outfile = stdout

        User = get_user_model()
        qs = User.objects.all().order_by('last_name', 'first_name', 'username', 'email')
        if group:
            qs = qs.filter(groups__name=group).distinct()
        # Exporters only need these four columns.
        qs = qs.values('last_name', 'first_name', 'username', 'email')
        # Dispatch to the exporter method named by --format.
        getattr(self, options['format'])(qs, outfile)

    def address(self, qs, out):
        """simple single entry per line in the format of:
        "full name" <my@address.com>;
        """
        out.write(six.u("\n").join('"%s" <%s>;' % (full_name(**ent), ent['email'])
                                   for ent in qs).encode(self.encoding))
        out.write("\n")

    def emails(self, qs, out):
        """simpler single entry with email only in the format of:
        my@address.com,
        """
        out.write(six.u(",\n").join(ent['email'] for ent in qs).encode(self.encoding))
        out.write("\n")

    def google(self, qs, out):
        """CSV format suitable for importing into google GMail
        """
        csvf = writer(out)
        csvf.writerow(['Name', 'Email'])
        for ent in qs:
            csvf.writerow([full_name(**ent).encode(self.encoding),
                           ent['email'].encode(self.encoding)])

    def outlook(self, qs, out):
        """CSV format suitable for importing into outlook
        """
        csvf = writer(out)
        columns = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address', 'E-mail 3 Address',
                   'Mobile Phone', 'Pager', 'Company', 'Job Title', 'Home Phone', 'Home Phone 2',
                   'Home Fax', 'Home Address', 'Business Phone', 'Business Phone 2',
                   'Business Fax', 'Business Address', 'Other Phone', 'Other Fax', 'Other Address']
        csvf.writerow(columns)
        # Only name and email are populated; the rest stay blank.
        empty = [''] * (len(columns) - 2)
        for ent in qs:
            csvf.writerow([full_name(**ent).encode(self.encoding),
                           ent['email'].encode(self.encoding)] + empty)

    def linkedin(self, qs, out):
        """CSV format suitable for importing into linkedin Groups.
        perfect for pre-approving members of a linkedin group.
        """
        csvf = writer(out)
        csvf.writerow(['First Name', 'Last Name', 'Email'])
        for ent in qs:
            csvf.writerow([ent['first_name'].encode(self.encoding),
                           ent['last_name'].encode(self.encoding),
                           ent['email'].encode(self.encoding)])

    def vcard(self, qs, out):
        """Export one vCard per user (requires the python-vobject package)."""
        try:
            import vobject
        except ImportError:
            print(self.style.ERROR("Please install python-vobject to use the vcard export format."))
            import sys
            sys.exit(1)
        for ent in qs:
            card = vobject.vCard()
            card.add('fn').value = full_name(**ent)
            if not ent['last_name'] and not ent['first_name']:
                # fallback to fullname, if both first and lastname are not declared
                card.add('n').value = vobject.vcard.Name(full_name(**ent))
            else:
                card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
            emailpart = card.add('email')
            emailpart.value = ent['email']
            # INTERNET marks this as an email address (vCard TYPE param).
            emailpart.type_param = 'INTERNET'
            out.write(card.serialize().encode(self.encoding))
| mit |
vitaly-krugl/nupic | examples/prediction/category_prediction/clean.py | 9 | 1148 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
def cleanup(filename):
    """Remove ``filename`` if present; a missing file is not an error.

    Uses EAFP instead of the previous exists-then-remove sequence, which
    raced with concurrent deletion.  Errors other than "no such file"
    (permissions, it's a directory, ...) still propagate.
    """
    import errno
    try:
        os.remove(filename)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
if __name__ == "__main__":
    # Remove artifacts left behind by previous example runs.
    cleanup("tokens.txt")
    cleanup("results.csv")
| agpl-3.0 |
TestInABox/openstackinabox | openstackinabox/tests/models/keystone/test_model.py | 2 | 32148 | import mock
import ddt
import six
from openstackinabox.tests.base import TestBase
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.model import (
schema,
KeystoneModel
)
@ddt.ddt
class TestKeystoneModel(TestBase):
    """Unit tests for KeystoneModel setup, child models and token checks."""

    def setUp(self):
        # initialize=False: tests control database initialization themselves.
        super(TestKeystoneModel, self).setUp(initialize=False)
        self.model = KeystoneModel
        self.db = self.master_model.database

    def tearDown(self):
        super(TestKeystoneModel, self).tearDown()

    def test_initialize_db_schema(self):
        """Schema setup executes every DDL statement and commits once."""
        db_cursor = mock.MagicMock()
        db_execute = mock.MagicMock()
        db_commit = mock.MagicMock()

        db_instance = mock.MagicMock()
        db_instance.cursor.return_value = db_cursor
        db_instance.commit = db_commit
        db_cursor.execute = db_execute

        self.model.initialize_db_schema(db_instance)
        self.assertTrue(db_instance.cursor.called)
        self.assertTrue(db_execute.called)
        self.assertTrue(db_commit.called)
        self.assertEqual(db_execute.call_count, len(schema))
        for s in schema:
            db_execute.assert_any_call(s)

    def test_get_child_models(self):
        """get_child_models instantiates one of each declared child model."""
        master = 'alpha'
        db = 'omega'
        child_models = self.model.get_child_models(master, db)
        self.assertEqual(len(child_models), len(self.model.CHILD_MODELS))

        def assert_has_instance(model_name, model_class):
            # Pass if any returned child is an instance of model_class.
            for cm_name, cm_instance in six.iteritems(child_models):
                if isinstance(cm_instance, model_class):
                    return
            self.assertFalse(
                True,
                msg="instance of {0} ({1}) not in list".format(
                    model_name,
                    model_class
                )
            )

        for child_model_name, child_model_type in six.iteritems(
            self.model.CHILD_MODELS
        ):
            assert_has_instance(child_model_name, child_model_type)

    def test_initialization(self):
        """init_database populates the admin role/tenant/user/token."""
        # Before initialization every bootstrap id is unset.
        self.assertIsNone(self.master_model.roles.admin_role_id)
        self.assertIsNone(self.master_model.roles.viewer_role_id)
        self.assertIsNone(self.master_model.tenants.admin_tenant_id)
        self.assertIsNone(self.master_model.users.admin_user_id)
        self.assertIsNone(self.master_model.tokens.admin_token)

        self.master_model.init_database()

        self.assertIsNotNone(self.master_model.roles.admin_role_id)
        self.assertIsNotNone(self.master_model.roles.viewer_role_id)
        self.assertIsNotNone(self.master_model.tenants.admin_tenant_id)
        self.assertIsNotNone(self.master_model.users.admin_user_id)
        self.assertIsNotNone(self.master_model.tokens.admin_token)

        # The admin token must be linked to the admin tenant and user.
        token_data = self.master_model.tokens.get_by_user_id(
            user_id=self.master_model.users.admin_user_id
        )
        self.assertEqual(
            token_data['tenant_id'],
            self.master_model.tenants.admin_tenant_id
        )
        self.assertEqual(
            token_data['user_id'],
            self.master_model.users.admin_user_id
        )
        self.assertEqual(
            token_data['token'],
            self.master_model.tokens.admin_token
        )
        self.assertFalse(token_data['revoked'])

    def test_properties(self):
        """Each convenience property exposes the matching child model."""
        self.master_model.init_database()
        self.assertEqual(
            self.master_model.child_models['users'],
            self.master_model.users
        )
        self.assertEqual(
            self.master_model.child_models['tenants'],
            self.master_model.tenants
        )
        self.assertEqual(
            self.master_model.child_models['tokens'],
            self.master_model.tokens
        )
        self.assertEqual(
            self.master_model.child_models['roles'],
            self.master_model.roles
        )
        self.assertEqual(
            self.master_model.child_models['services'],
            self.master_model.services
        )
        self.assertEqual(
            self.master_model.child_models['endpoints'],
            self.master_model.endpoints
        )

    @ddt.data(
        0,
        1
    )
    def test_validate_token_admin(self, extra_role_count):
        """Admin validation requires a known token AND the admin role.

        Parameterized with extra non-admin roles to show they do not
        grant admin access on their own.
        """
        self.master_model.init_database()

        # Unknown token is rejected outright.
        with self.assertRaises(exceptions.KeystoneInvalidTokenError):
            self.master_model.validate_token_admin('foobar')

        tenant_id = self.master_model.tenants.add(
            tenant_name='foo',
            description='bar',
            enabled=True
        )
        user_id = self.master_model.users.add(
            tenant_id=tenant_id,
            username='bar',
            email='foo@bar',
            password='bar',
            apikey='foo',
            enabled=True
        )
        self.master_model.tokens.add(
            tenant_id=tenant_id,
            user_id=user_id,
            token='foobar'
        )

        # Known token but no admin role: still rejected.
        with self.assertRaises(exceptions.KeystoneInvalidTokenError):
            self.master_model.validate_token_admin('foobar')

        role_names = [
            'role_{0}'.format(x)
            for x in range(extra_role_count)
        ]
        role_data = [
            {
                'name': role_name,
                'id': self.master_model.roles.add(role_name)
            }
            for role_name in role_names
        ]
        for role in role_data:
            self.master_model.roles.add_user_role_by_id(
                tenant_id=tenant_id,
                user_id=user_id,
                role_id=role['id']
            )

        # Granting the admin role makes validation succeed.
        self.master_model.roles.add_user_role_by_id(
            tenant_id=tenant_id,
            user_id=user_id,
            role_id=self.master_model.roles.admin_role_id
        )

        validation_user_data = self.master_model.validate_token_admin('foobar')
        self.assertEqual(validation_user_data['tenantid'], tenant_id)
        self.assertEqual(validation_user_data['userid'], user_id)
        self.assertEqual(validation_user_data['token'], 'foobar')

    def test_validate_token_service_admin(self):
        """Only the bootstrap service-admin token passes service validation."""
        self.master_model.init_database()

        tenant_id = self.master_model.tenants.add(
            tenant_name='foo',
            description='bar',
            enabled=True
        )
        user_id = self.master_model.users.add(
            tenant_id=tenant_id,
            username='bar',
            email='foo@bar',
            password='bar',
            apikey='foo',
            enabled=True
        )
        self.master_model.tokens.add(
            tenant_id=tenant_id,
            user_id=user_id,
            token='foobar'
        )
        self.master_model.roles.add_user_role_by_id(
            tenant_id=tenant_id,
            user_id=user_id,
            role_id=self.master_model.roles.admin_role_id
        )

        # A tenant-admin token is NOT a service-admin token.
        with self.assertRaises(exceptions.KeystoneInvalidTokenError):
            self.master_model.validate_token_service_admin('foobar')

        user_data = self.master_model.validate_token_service_admin(
            self.master_model.tokens.admin_token
        )
        self.assertEqual(
            user_data['tenantid'],
            self.master_model.tenants.admin_tenant_id
        )
        self.assertEqual(
            user_data['userid'],
            self.master_model.users.admin_user_id
        )
        self.assertEqual(
            user_data['token'],
            self.master_model.tokens.admin_token
        )
@ddt.ddt
class TestKeystoneModelServiceCatalog(TestBase):
def setUp(self):
super(TestKeystoneModelServiceCatalog, self).setUp(initialize=False)
self.model = KeystoneModel
self.db = self.master_model.database
self.master_model.init_database()
self.token = 'f1gur3f0ll0w$f4$h10n'
self.tenant_info = {
'name': 'foo',
'description': 'bar',
'enabled': True
}
self.user_info = {
'username': 'bar',
'email': 'foo@bar',
'password': 'b4R',
'apikey': 'foo',
'enabled': True
}
self.tenant_id = self.master_model.tenants.add(
tenant_name=self.tenant_info['name'],
description=self.tenant_info['description'],
enabled=self.tenant_info['enabled']
)
self.user_id = self.master_model.users.add(
tenant_id=self.tenant_id,
username=self.user_info['username'],
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=self.user_info['enabled']
)
self.master_model.tokens.add(
tenant_id=self.tenant_id,
user_id=self.user_id,
token=self.token
)
self.user_data = self.master_model.users.get_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id
)
self.token_data = self.master_model.tokens.validate_token(
self.token
)
def tearDown(self):
super(TestKeystoneModelServiceCatalog, self).tearDown()
def generate_roles(self, role_count):
role_names = [
'role_{0}'.format(x)
for x in range(role_count)
]
role_data = [
{
'name': role_name,
'id': self.master_model.roles.add(role_name)
}
for role_name in role_names
]
for role in role_data:
self.master_model.roles.add_user_role_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
role_id=role['id']
)
return (
role_names,
role_data,
)
def generate_services(
self, service_count, endpoint_count, endpoint_url_count,
has_region=True, has_version_info=True, has_version_list=True,
has_version_id=True
):
services = {
'service_{0}'.format(sn): {
'description': 'test service {0}'.format(sn),
'endpoints': [
{
'name': 'endpoint_{0}'.format(epn),
'region': 'r{0}'.format(epn) if has_region else None,
'version_info': (
'version.info' if has_version_info else None
),
'version_list': (
'version.list' if has_version_list else None
),
'version_id': str(epn) if has_version_id else None,
'urls': [
{
'name': 'url_{0}'.format(urln),
'url': 'ur.l/{0}'.format(urln)
}
for urln in range(endpoint_url_count)
]
}
for epn in range(endpoint_count)
]
}
for sn in range(service_count)
}
for service_name, service_info in six.iteritems(services):
services[service_name]['id'] = self.master_model.services.add(
service_name,
service_info['description']
)
for endpoint_info in service_info['endpoints']:
endpoint_info['id'] = self.master_model.endpoints.add(
services[service_name]['id'],
endpoint_info['region'],
endpoint_info['version_info'],
endpoint_info['version_list'],
endpoint_info['version_id']
)
for endpoint_url_info in endpoint_info['urls']:
endpoint_url_info['id'] = (
self.master_model.endpoints.add_url(
endpoint_info['id'],
endpoint_url_info['name'],
endpoint_url_info['url']
)
)
return services
def check_service_catalog_auth_section(self, auth_entry):
self.assertEqual(auth_entry['id'], self.token_data['token'])
self.assertEqual(auth_entry['expires'], self.token_data['expires'])
self.assertEqual(auth_entry['tenant']['id'], self.tenant_id)
self.assertEqual(
auth_entry['tenant']['name'], self.user_data['username']
)
def check_service_catalog_user_entry(
self, role_count, role_names, role_data, user_entry
):
self.assertEqual(user_entry['id'], self.user_id)
self.assertEqual(user_entry['name'], self.user_data['username'])
self.assertEqual(len(user_entry['roles']), role_count)
def assertRoleInList(role_id, role_name):
for role in role_data:
if role['id'] == role_id and role['name'] == role_name:
# found it
return
# failed to find it, so assert
self.assertFalse(
True,
msg=(
'Unable to find role ({0} - {1}) in role_data'.format(
role_id,
role_name
)
)
)
for role_entry in user_entry['roles']:
assertRoleInList(
role_entry['id'],
role_entry['name']
)
def check_service_catalog_services(self, services, services_entries):
self.assertEqual(len(services), len(services_entries))
for service_info in services_entries:
self.assertIn(service_info['name'], services)
self.assertEqual(
service_info['type'],
services[service_info['name']]['description']
)
self.assertEqual(
len(service_info['endpoints']),
len(services[service_info['name']]['endpoints'])
)
for endpoint_info in service_info['endpoints']:
found_endpoint = False
for endpoint_data in (
services[service_info['name']]['endpoints']
):
if (
endpoint_info['region'] == endpoint_data['region'] and
endpoint_info['versionId'] == endpoint_data[
'version_id'] and
endpoint_info['versionList'] == endpoint_data[
'version_list'] and
endpoint_info['versionInfo'] == endpoint_data[
'version_info']
):
for url_data in endpoint_data['urls']:
self.assertIn(url_data['name'], endpoint_info)
self.assertEqual(
url_data['url'],
endpoint_info[url_data['name']]
)
found_endpoint = True
self.assertTrue(
found_endpoint,
msg=(
"Unable to find endpoint data: {0}, {1}".format(
endpoint_data,
endpoint_info
)
)
)
def check_service_catalog(
self, role_count, role_names, role_data, services, service_catalog,
):
self.check_service_catalog_auth_section(
service_catalog['token']
)
self.check_service_catalog_user_entry(
role_count, role_names, role_data, service_catalog['user']
)
self.check_service_catalog_services(
services, service_catalog['serviceCatalog']
)
def test_service_catalog_auth_entry(self):
self.assertEqual(self.token_data['token'], self.token)
auth_entry = self.master_model.get_auth_token_entry(
self.token_data,
self.user_data
)
self.check_service_catalog_auth_section(auth_entry)
@ddt.data(
0,
1,
10,
20
)
def test_service_catalog_user_entry(self, role_count):
role_names, role_data = self.generate_roles(role_count)
user_entry = self.master_model.get_auth_user_entry(
self.user_data
)
self.check_service_catalog_user_entry(
role_count, role_names, role_data, user_entry
)
@ddt.data(
(0, 0, 0, True, True, True, True),
(1, 0, 0, True, True, True, True),
(1, 1, 0, True, True, True, True),
(5, 4, 0, True, True, True, True),
(5, 10, 0, True, True, True, True),
(0, 0, 1, True, True, True, True),
(1, 0, 1, True, True, True, True),
(1, 1, 1, True, True, True, True),
(5, 4, 1, True, True, True, True),
(5, 10, 1, True, True, True, True),
(1, 3, 2, False, True, True, True),
(1, 3, 2, True, False, True, True),
(1, 3, 2, True, True, False, True),
# TODO: Fix the below test cases
# (1, 3, 2, True, True, True, False),
# (1, 3, 2, False, False, False, False),
)
@ddt.unpack
def test_service_catalog_services_entry(
self, service_count, endpoint_count, endpoint_url_count,
has_region, has_version_info, has_version_list, has_version_id
):
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count,
has_region, has_version_info, has_version_list, has_version_id
)
services_entries = self.master_model.get_auth_service_catalog(
self.user_data
)
self.check_service_catalog_services(services, services_entries)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_service_catalog_services_entry_2(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
service_catalog = self.master_model.get_service_catalog(
self.token_data,
self.user_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_password_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
password_data = {
'username': '43failme',
'password': self.user_info['password']
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUserError):
password_data = {
'username': self.user_info['username'],
'password': '$$$$'
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidPasswordError):
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password'] + 'a'
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
password_data = {
'username': self.user_info['username'] + 'a',
'password': self.user_info['password']
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidPasswordError):
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password'] + 'a'
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneDisabledUserError):
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=False
)
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password']
}
self.master_model.password_authenticate(
password_data
)
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=True
)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_password_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password']
}
service_catalog = self.master_model.password_authenticate(
password_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_apikey_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
apikey_data = {
'username': '43failme',
'apiKey': self.user_info['password']
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUserError):
apikey_data = {
'username': self.user_info['username'],
'apiKey': 9392
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidApiKeyError):
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey'] + 'a'
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
apikey_data = {
'username': self.user_info['username'] + 'a',
'apiKey': self.user_info['apikey']
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidApiKeyError):
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey'] + 'a'
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneDisabledUserError):
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=False
)
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey']
}
self.master_model.apikey_authenticate(
apikey_data
)
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=True
)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_apikey_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey']
}
service_catalog = self.master_model.apikey_authenticate(
apikey_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_tenant_id_token_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantId': self.tenant_id,
'token': {
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantId': self.tenant_id
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantId': 'aphrodite',
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneInvalidTokenError):
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token + 'a'
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneTenantError):
token_data = {
'tenantId': 93920395,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneTenantError):
self.master_model.tenants.update_status(
tenant_id=self.tenant_id,
enabled=False
)
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
self.master_model.tenants.update_status(
tenant_id=self.tenant_id,
enabled=True
)
new_tenant_id = self.master_model.tenants.add(
tenant_name='krash-kourse',
description='breaking things',
)
new_user_id = self.master_model.users.add(
tenant_id=new_tenant_id,
username='krispy',
email='kri@spy',
password='$py',
apikey='kryme',
enabled=True
)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
token_data = {
'tenantId': new_tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
self.master_model.tokens.add(
tenant_id=new_tenant_id,
user_id=new_user_id
)
token_data = {
'tenantId': new_tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneDisabledUserError):
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=False
)
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=True
)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_tenant_id_token_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token
}
}
service_catalog = self.master_model.tenant_id_token_auth(
token_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_tenant_name_token_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantName': self.tenant_info['name'],
'token': {
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantName': self.tenant_info['name']
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantName': self.tenant_info['name'] + 'x',
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_tenant_name_token_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
token_data = {
'tenantName': self.tenant_info['name'],
'token': {
'id': self.token
}
}
service_catalog = self.master_model.tenant_name_token_auth(
token_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
| apache-2.0 |
byllyfish/pylibofp | zof/logging.py | 1 | 2698 | import logging.config
import os
import warnings
import sys
import traceback
# Sentinel logfile value meaning "log to stderr only".
EXT_STDERR = 'ext://stderr'
# Timestamped single-line record format shared by all handlers.
DEFAULT_FORMATTER = logging.Formatter(
    '%(asctime)s.%(msecs)03d %(name)-6s %(levelname)-8s %(message)s',
    '%b %d %H:%M:%S')
# Module-level stderr handler; init_logging raises its level to CRITICAL
# when a log file is also configured.
STDERR_HANDLER = logging.StreamHandler(sys.stderr)
STDERR_HANDLER.setFormatter(DEFAULT_FORMATTER)
# Guard so handlers are only attached once (see init_logging).
_LOGGING_INITED = False
def init_logging(loglevel, logfile=EXT_STDERR):
    """Set up logging.

    This method attaches log handlers to the root logger:
    - a stderr handler (only if the root logger has no other handlers)
    - a file handler if specified

    When a logfile is specified, the stderr handler will only log critical
    events.

    This routine enables asyncio debug mode if `loglevel` is 'debug'.
    Calling it again only adjusts the log level; handlers are attached
    exactly once.
    """
    global _LOGGING_INITED  # pylint: disable=global-statement
    if _LOGGING_INITED:
        # Make sure we only initialize logging once.
        set_loglevel(loglevel)
        return
    _LOGGING_INITED = True
    _make_default_handlers(logfile)
    set_loglevel(loglevel)
    # Route warnings.warn() output through the logging system and show
    # every occurrence (not just the first per location).
    logging.captureWarnings(True)
    warnings.simplefilter('always')
    if loglevel.lower() == 'debug':
        # Make sure that asyncio debugging is enabled.
        # NOTE(review): setting the env var here presumably only takes
        # effect if asyncio has not created its event loop yet — confirm.
        os.environ['PYTHONASYNCIODEBUG'] = '1'
        # Log stack traces with all warnings.
        # Wrap the original formatter so each warning also carries the
        # call stack that produced it (minus this frame).
        _formatwarning = warnings.formatwarning
        def _formatwarning_tb(*args, **kwargs):
            out = _formatwarning(*args, **kwargs)
            out += ''.join(traceback.format_stack()[:-1])
            return out
        warnings.formatwarning = _formatwarning_tb
    else:
        # When loglevel > debug, ignore ImportWarning's from uvloop.
        warnings.filterwarnings("ignore", category=ImportWarning)
    # asyncio is chatty at INFO; keep it at WARNING and above.
    asyncio_logger = logging.getLogger('asyncio')
    asyncio_logger.setLevel('WARNING')
def set_loglevel(loglevel):
    """Set this package's logger to *loglevel* (case-insensitive name)."""
    logging.getLogger(__package__).setLevel(loglevel.upper())
def _make_default_handlers(logfile):
    """Attach the default handlers to the root logger.

    A stderr handler is attached only when the root logger has no
    handlers yet.  When a real *logfile* is given, a rotating file
    handler is attached as well and stderr is restricted to critical
    events.
    """
    root = logging.getLogger()
    if not root.hasHandlers():
        root.addHandler(STDERR_HANDLER)
    wants_logfile = bool(logfile) and logfile != EXT_STDERR
    if wants_logfile:
        handler = _make_logfile_handler(logfile)
        handler.setFormatter(DEFAULT_FORMATTER)
        root.addHandler(handler)
        # With a log file present, only critical events go to stderr.
        STDERR_HANDLER.setLevel('CRITICAL')
def _make_logfile_handler(logfile):
"""Return log file handler."""
return logging.handlers.RotatingFileHandler(
logfile, maxBytes=2**20, backupCount=10, encoding='utf8')
| mit |
Barmaley-exe/scikit-learn | sklearn/ensemble/partial_dependence.py | 36 | 14909 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.

    Parameters
    ----------
    X : ndarray
        The data

    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.

    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.

    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')

    # The empirical percentiles cover every column at once, so compute
    # them a single time instead of once per feature as before (the call
    # was loop-invariant).
    emp_percentiles = mquantiles(X, prob=percentiles, axis=0)

    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution use unique vals
            axis = uniques
        else:
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)

    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).

    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.

    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.

    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    # Validate the estimator: must be a fitted gradient boosting model.
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # Exactly one of ``grid`` and ``X`` must be provided.
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()
    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))
    if X is not None:
        # Derive the evaluation grid from the training data percentiles.
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)
    # C-contiguous DTYPE array is required by the Cython tree traversal.
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]
    # One PDP row per class (n_trees_per_stage == 1 for regression and
    # binary classification); accumulate tree contributions in place.
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])
    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.

    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.

    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.

    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.

    n_cols : int
        The number of columns in the grid plot (default: 3).

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the PDP axes.

    grid_resolution : int, default=100
        The number of equally spaced points on the axes.

    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.

    verbose : int
        Verbose output during PD computations. Defaults to 0.

    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.

    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.

    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.

    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.

    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # matplotlib is imported lazily so the module can be imported without
    # a plotting backend installed.
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter

    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)

    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0

    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()

    def convert_feature(fx):
        # Map a feature name to its index; pass indices through.
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')

        tmp_features.append(fxs)

    features = tmp_features

    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)

    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution)
        for fxs in features)

    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    # One subplot per requested feature (or feature pair).
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(axes) == 1:
            # one-way PDP: simple line plot
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
| bsd-3-clause |
MotoG3/android_kernel_motorola_msm8916 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";

# Optional single argument restricts the report to one command name.
for_comm = None

if len(sys.argv) > 2:
	sys.exit(usage)

if len(sys.argv) > 1:
	for_comm = sys.argv[1]

# Map of syscall id -> invocation count (autodict from perf's Core lib).
syscalls = autodict()
def trace_begin():
	# Called by perf before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf when tracing ends; emit the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Handler for the raw_syscalls:sys_enter tracepoint; counts one
	# invocation of syscall ``id``, optionally filtered by command name.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit: autodict auto-created a nested node, for which +=
		# raises TypeError; start the count at 1 instead.
		syscalls[id] = 1
def print_syscall_totals():
	# Print the per-syscall totals as a table, most frequent first.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events:\n\n",

	print "%-40s  %10s\n" % ("event", "count"),
	print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "-----------"),

	# Sort by (count, id) descending; lambda tuple unpacking is Python 2.
	for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
				  reverse = True):
		print "%-40s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
andymckay/addons-server | src/olympia/api/tests/test_models.py | 9 | 2249 | import mock
from olympia.amo.tests import TestCase
from olympia.users.models import UserProfile
from ..models import APIKey, SYMMETRIC_JWT_TYPE
class TestAPIKey(TestCase):
    """Tests for the APIKey model's JWT credential handling."""

    fixtures = ['base/addon_3615', 'base/users']

    def setUp(self):
        super(TestAPIKey, self).setUp()
        self.user = UserProfile.objects.get(email='del@icio.us')

    def test_new_jwt_credentials(self):
        """New credentials are active symmetric-JWT keys for the user."""
        credentials = APIKey.new_jwt_credentials(self.user)
        assert credentials.user == self.user
        assert credentials.type == SYMMETRIC_JWT_TYPE
        assert credentials.key
        assert credentials.secret
        assert credentials.is_active

    def test_string_representation(self):
        """str() exposes the key and user but never the secret."""
        credentials = APIKey.new_jwt_credentials(self.user)
        str_creds = str(credentials)
        assert credentials.key in str_creds
        assert credentials.secret not in str_creds
        assert str(credentials.user) in str_creds

    def test_generate_new_unique_keys(self):
        """Successive credentials get distinct keys."""
        last_key = None
        for _ in range(3):
            credentials = APIKey.new_jwt_credentials(self.user)
            assert credentials.key != last_key
            last_key = credentials.key

    def test_too_many_tries_at_finding_a_unique_key(self):
        """get_unique_key gives up with RuntimeError after max_tries."""
        # Renamed from ``max`` to avoid shadowing the builtin.
        max_tries = 3
        # Make APIKey.objects.filter().exists() always return True.
        patch = mock.patch('olympia.api.models.APIKey.objects.filter')
        mock_filter = patch.start()
        self.addCleanup(patch.stop)
        mock_filter.return_value.exists.return_value = True

        with self.assertRaises(RuntimeError):
            for _ in range(max_tries + 1):
                APIKey.get_unique_key('key-prefix-', max_tries=max_tries)

    def test_generate_secret(self):
        assert APIKey.generate_secret(32)  # check for exceptions

    def test_generated_secret_must_be_long_enough(self):
        with self.assertRaises(ValueError):
            APIKey.generate_secret(31)

    def test_hide_inactive_jwt_keys(self):
        """get_jwt_key ignores keys that were marked inactive."""
        active_key = APIKey.new_jwt_credentials(self.user)
        inactive_key = APIKey.new_jwt_credentials(self.user)
        inactive_key.update(is_active=False)
        fetched_key = APIKey.get_jwt_key(user=self.user)
        assert fetched_key == active_key
| bsd-3-clause |
infowantstobeseen/pyglet-darwincore | contrib/scene2d/tests/scene2d/HEX_FLAT_MOUSE.py | 29 | 1990 | #!/usr/bin/env python
'''Testing mouse interaction
The cell the mouse is hovering over should highlight in red.
Clicking in a cell should highliht that cell green. Clicking again will
clear the highlighting.
Clicking on the ball sprite should highlight it and not underlying cells.
You may press the arrow keys to scroll the focus around the map (this
will move the map eventually)
Press escape or close the window to finish the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
from render_base import RenderBase
from scene2d import Tile, Sprite
from pyglet.event import event
from scene2d.event import for_cells, for_sprites
from scene2d.image import TintEffect
from scene2d.debug import gen_hex_map
class RectFlatMouseTest(RenderBase):
    # NOTE(review): the class is named Rect* but this test builds a hex
    # map (gen_hex_map) — presumably copied from the RECT test; confirm.
    def test_main(self):
        # Build a 10x10 hex map with 32px cells and route window events
        # to the view.
        self.init_window(256, 256)
        self.set_map(gen_hex_map([[{}]*10]*10, 32))
        self.w.push_handlers(self.view)
        self.view.allow_oob = False

        @event(self.view)
        @for_cells()
        def on_mouse_enter(cells):
            # Tint hovered cells red and remember the effect so it can be
            # removed on mouse leave.
            for cell in cells:
                e = TintEffect((1, .5, .5, 1))
                cell.properties['hover'] = e
                cell.add_effect(e)

        @event(self.view)
        @for_cells()
        def on_mouse_leave(cells):
            for cell in cells:
                cell.remove_effect(cell.properties['hover'])

        @event(self.view)
        @for_cells()
        @for_sprites()
        def on_mouse_press(objs, x, y, button, modifiers):
            # Toggle a green tint on the clicked cell or sprite.
            for obj in objs:
                if 'clicked' in obj.properties:
                    obj.remove_effect(obj.properties['clicked'])
                    del obj.properties['clicked']
                else:
                    e = TintEffect((.5, 1, .5, 1))
                    obj.properties['clicked'] = e
                    obj.add_effect(e)
                return

        self.show_focus()
        self.run_test()
if __name__ == '__main__':
    # Run the interactive test when executed directly.
    unittest.main()
| bsd-3-clause |
CPqD/RouteFlow | pox/pox/lib/util.py | 21 | 12304 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions
"""
from __future__ import print_function
import traceback
import struct
import sys
import os
import time
import socket
#FIXME: ugh, why can't I make importing pox.core work here?
import logging
log = logging.getLogger("util")
class DirtyList (list):
    """
    A list that (shallowly) tracks whether it may have been modified.

    Any potentially mutating operation sets .dirty to True.  If .callback
    is set, it is invoked as callback(reason, key, value) first; if it
    returns True the change is considered handled and .dirty is left
    unchanged.
    """
    #TODO: right now the callback may be called more often than needed
    #      and it may not be called with good names/parameters.
    #      All you can really rely on is that it will be called in
    #      some way if something may have changed.

    def __init__ (self, *args, **kw):
        list.__init__(self, *args, **kw)
        self.dirty = False    # True if the list may have been changed
        self.callback = None  # Optional change-notification callable

    def __setslice__ (self, k, v):
        # Python 2 only; Python 3 routes slice assignment to __setitem__.
        #TODO: actually check for change
        self._smudge('__setslice__', k, v)
        list.__setslice__(self, k, v)

    def __delslice__ (self, k):
        # Python 2 only; Python 3 routes slice deletion to __delitem__.
        #TODO: actually check for change
        self._smudge('__delslice__', k, None)
        list.__delslice__(self, k)

    def append (self, v):
        self._smudge('append', None, v)
        list.append(self, v)

    def extend (self, v):
        self._smudge('extend', None, v)
        list.extend(self, v)

    def insert (self, i, v):
        # Fixed: previously referenced an undefined name ('k') and called
        # list.extend() instead of list.insert().
        self._smudge('insert', i, v)
        list.insert(self, i, v)

    def pop (self, i=-1):
        self._smudge('pop', i, None)
        # Fixed: propagate the popped value, matching list.pop().
        return list.pop(self, i)

    def remove (self, v):
        # Only smudge when the value is actually present (list.remove will
        # raise ValueError otherwise, leaving the list unchanged).
        if v in self:
            self._smudge('remove', None, v)
        list.remove(self, v)

    def reverse (self):
        if len(self):
            self._smudge('reverse', None, None)
        list.reverse(self)

    def sort (self, *arg, **kw):
        #TODO: check for changes?
        self._smudge('sort', None, None)
        list.sort(self, *arg, **kw)

    def __setitem__ (self, k, v):
        if isinstance(k, slice):
            #TODO: actually check for change
            self._smudge('__setitem__slice', k, v)
        elif self[k] != v:
            self._smudge('__setitem__', k, v)
        list.__setitem__(self, k, v)
        # Fixed: removed stray 'assert good' (undefined name -> NameError).

    def __delitem__ (self, k):
        list.__delitem__(self, k)
        if isinstance(k, slice):
            #TODO: actually check for change
            # Fixed: there is no deleted value available here (previously
            # referenced an undefined name 'v').
            self._smudge('__delitem__slice', k, None)
        else:
            self._smudge('__delitem__', k, None)

    def _smudge (self, reason, k, v):
        """Mark the list dirty unless the callback handles the change."""
        if self.callback:
            if self.callback(reason, k, v) is not True:
                self.dirty = True
        else:
            self.dirty = True
class DirtyDict (dict):
    """
    A dict that tracks whether values have been changed shallowly.

    If you set a callback, it will be called when the value changes, and
    passed three values: "add"/"modify"/"delete", key, value
    """

    def __init__ (self, *args, **kw):
        dict.__init__(self, *args, **kw)
        self.dirty = False    # Has (possibly) been modified?
        self.callback = None  # Optional change-notification callable

    def _smudge (self, reason, k, v):
        # Mark modified unless the callback explicitly claims to have
        # handled the change by returning True.
        handler = self.callback
        if not handler or handler(reason, k, v) is not True:
            self.dirty = True

    def __setitem__ (self, k, v):
        if k not in self:
            self._smudge('__setitem__add', k, v)
        elif self[k] != v:
            self._smudge('__setitem__modify', k, v)
        super(DirtyDict, self).__setitem__(k, v)

    def __delitem__ (self, k):
        self._smudge('__delitem__', k, None)
        super(DirtyDict, self).__delitem__(k)
def set_extend (l, index, item, emptyValue = None):
    """
    Adds item to the list l at position index.  If index is beyond the end
    of the list, it will pad the list out until it's large enough, using
    emptyValue for the new entries.
    """
    if index >= len(l):
        # Fixed: was len(self), which is undefined in this function.
        l += ([emptyValue] * (index - len(l) + 1))
    l[index] = item
def str_to_dpid (s):
    """
    Convert a DPID in the canonical string form into a long int.

    The canonical form is dash-separated hex pairs for the low 48 bits,
    optionally followed by "|n" where n is the decimal top 16 bits.
    """
    text = s
    if text.lower().startswith("0x"):
        text = text[2:]
    parts = text.replace("-", "").split("|", 2)

    low = int(parts[0], 16)
    high = 0
    if low > 0xffFFffFFffFF:
        # Hex part was wider than 48 bits; carry the excess into the
        # high 16 bits.
        high = low >> 48
        low &= 0xffFFffFFffFF
    if len(parts) == 2:
        # An explicit "|n" suffix overrides the high bits.
        high = int(parts[1])

    return (high << 48) | low

strToDPID = str_to_dpid
def dpid_to_str (dpid, alwaysLong = False):
    """
    Convert a DPID from a long/int into the canonical string form.

    The low six bytes are rendered as dash-separated hex pairs; a nonzero
    high 16 bits (or alwaysLong=True) appends a decimal "|n" suffix.

    NOTE(review): Python 2 only as written -- relies on the `long` builtin
    and on ord() over the characters of a packed string; confirm before
    using on Python 3.
    """
    if type(dpid) is long or type(dpid) is int:
        # Not sure if this is right
        dpid = struct.pack('!Q', dpid)
    assert len(dpid) == 8
    # Dash-separated hex pairs for the lower six bytes.
    r = '-'.join(['%02x' % (ord(x),) for x in dpid[2:]])
    if alwaysLong or dpid[0:2] != (b'\x00'*2):
        # Append the top 16 bits as a decimal "|n" suffix.
        r += '|' + str(struct.unpack('!H', dpid[0:2])[0])
    return r

dpidToStr = dpid_to_str # Deprecated
def assert_type(name, obj, types, none_ok=True):
    """
    Assert that a parameter is of a given type.

    Raises an AssertionError with a descriptive error msg if not.

    name: name of the parameter for error messages
    obj: parameter value to be checked
    types: type or list or tuple of types that is acceptable
    none_ok: whether 'None' is an ok value
    """
    if obj is None:
        if not none_ok:
            raise AssertionError("%s may not be None" % name)
        return True

    if not isinstance(types, (tuple, list)):
        types = [ types ]
    if isinstance(obj, tuple(types)):
        return True

    # Build a descriptive failure message including the caller's location.
    allowed_types = "|".join(str(cls) for cls in types)
    frames = traceback.extract_stack()
    stack_msg = "Function call %s() in %s:%d" % (frames[-2][2],
                                                 frames[-3][0], frames[-3][1])
    type_msg = ("%s must be instance of %s (but is %s)"
                % (name, allowed_types, str(type(obj))))
    raise AssertionError(stack_msg + ": " + type_msg)
def initHelper (obj, kw):
    """
    Inside a class's __init__, this will copy keyword arguments to fields
    of the same name.  See libopenflow for an example.

    Raises TypeError for any keyword that does not correspond to an
    already-existing attribute on obj.
    """
    # items() (rather than the Python-2-only iteritems()) works on both
    # Python 2 and Python 3.
    for k,v in kw.items():
        if not hasattr(obj, k):
            raise TypeError(obj.__class__.__name__ + " constructor got "
                + "unexpected keyword argument '" + k + "'")
        setattr(obj, k, v)
def makePinger ():
    """
    A pinger is basically a thing to let you wake a select().

    On Unix systems, this makes a pipe pair.  But on Windows, select() only
    works with sockets, so it makes a pair of connected sockets.

    Returns an object exposing ping()/pong()/pongAll()/fileno().
    """
    class PipePinger (object):
        # Pinger backed by an OS pipe (POSIX path).
        def __init__ (self, pair):
            self._w = pair[1]  # write end (ping side)
            self._r = pair[0]  # read end (pong side)
            assert os is not None

        def ping (self):
            if os is None: return #TODO: Is there a better fix for this?
            # NOTE(review): writes a str -- on Python 3 os.write() requires
            # bytes, so this looks Python 2 specific; confirm before porting.
            os.write(self._w, ' ')

        def fileno (self):
            return self._r

        def pongAll (self):
            #TODO: make this actually read all
            os.read(self._r, 1024)

        def pong (self):
            os.read(self._r, 1)

        def __del__ (self):
            # Best-effort close of both ends; descriptors may already be
            # invalid at interpreter shutdown.
            try:
                os.close(self._w)
            except:
                pass
            try:
                os.close(self._r)
            except:
                pass

        def __repr__ (self):
            return "<%s %i/%i>" % (self.__class__.__name__, self._w, self._r)

    class SocketPinger (object):
        # Pinger backed by a connected localhost socket pair (Windows path,
        # where select() only accepts sockets).
        def __init__ (self, pair):
            self._w = pair[1]
            self._r = pair[0]
        def ping (self):
            self._w.send(' ')
        def pong (self):
            self._r.recv(1)
        def pongAll (self):
            #TODO: make this actually read all
            self._r.recv(1024)
        def fileno (self):
            return self._r.fileno()
        def __repr__ (self):
            return "<%s %s/%s>" % (self.__class__.__name__, self._w, self._r)

    #return PipePinger((os.pipe()[0],os.pipe()[1])) # To test failure case

    if os.name == "posix":
        return PipePinger(os.pipe())

    #TODO: clean up sockets?
    localaddress = '127.127.127.127'
    startPort = 10000

    import socket
    import select

    def tryConnect ():
        # One attempt at building a connected localhost socket pair.
        l = socket.socket()
        l.setblocking(0)

        # Find a free port to listen on, scanning upward from startPort.
        port = startPort
        while True:
            try:
                l.bind( (localaddress, port) )
                break
            except:
                port += 1
                if port - startPort > 1000:
                    raise RuntimeError("Could not find a free socket")
        l.listen(0)

        r = socket.socket()
        try:
            r.connect((localaddress, port))
        except:
            import traceback
            ei = sys.exc_info()
            ei = traceback.format_exception_only(ei[0], ei[1])
            ei = ''.join(ei).strip()
            log.warning("makePinger: connect exception:\n" + ei)
            return False

        # Wait (up to 2s) for the listening socket to become readable.
        rlist, wlist,elist = select.select([l], [], [l], 2)
        if len(elist):
            log.warning("makePinger: socket error in select()")
            return False
        if len(rlist) == 0:
            log.warning("makePinger: socket didn't connect")
            return False

        try:
            w, addr = l.accept()
        except:
            return False

        #w.setblocking(0)
        if addr != r.getsockname():
            # Something else connected to our listener first.
            log.info("makePinger: pair didn't connect to each other!")
            return False

        r.setblocking(1)

        # Turn off Nagle
        r.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        return (r, w)

    # Try a few times
    for i in range(0, 3):
        result = tryConnect()
        if result is not False:
            return SocketPinger(result)

    raise RuntimeError("Could not allocate a local socket pair")
def str_to_bool (s):
    """
    Given a string, parses out whether it is meant to be True or not
    """
    text = str(s).lower() # Make sure
    truthy = ('true', 't', 'yes', 'y', 'on', 'enable', 'enabled', 'ok',
              'okay', '1', 'allow', 'allowed')
    if text in truthy:
        return True
    # Otherwise treat any nonzero integer (decimal or 0x-prefixed hex)
    # as True.
    base = 10
    if text.startswith("0x"):
        text = text[2:]
        base = 16
    try:
        return int(text, base) != 0
    except ValueError:
        pass
    return False
def hexdump (data):
    """
    Return a classic hex dump of data: one line per 16 bytes, formatted as
    "offset: hex bytes  ASCII" with non-printable bytes shown as '.'.

    data may be a str or a sequence of byte values (ints).
    """
    if isinstance(data, str):
        data = [ord(c) for c in data]
    o = ""
    def chunks (data, length):
        # Yield successive slices of the given length.
        # Fixed: range() instead of the Python-2-only xrange().
        return (data[i:i+length] for i in range(0, len(data), length))
    def filt (c):
        # Printable ASCII passes through; everything else becomes '.'.
        if c >= 32 and c <= 126: return chr(c)
        return '.'
    for i,chunk in enumerate(chunks(data,16)):
        if i > 0: o += "\n"
        o += "%04x: " % (i * 16,)
        l = ' '.join("%02x" % (c,) for c in chunk)
        l = "%-48s" % (l,)
        # Insert an extra space between the two groups of eight bytes.
        l = l[:3*8-1] + " " + l[3*8:]
        t = ''.join([filt(x) for x in chunk])
        l += ' %-16s' % (t,)
        o += l
    return o
def connect_socket_with_backoff (address, port, max_backoff_seconds=32):
    '''
    Connect to the given address and port.  If the connection attempt fails,
    exponentially back off, up to the max backoff.

    Return the connected socket, or raise RuntimeError if the connection
    was unsuccessful by the time the backoff reached max_backoff_seconds.
    '''
    backoff_seconds = 1  # doubles after each failed attempt
    sock = None
    print("connect_socket_with_backoff(address=%s, port=%d)"
          % (address, port), file=sys.stderr)
    while True:
        try:
            sock = socket.socket()
            sock.connect( (address, port) )
            break
        except socket.error as e:
            print("%s. Backing off %d seconds ..." % (str(e), backoff_seconds),
                  file=sys.stderr)
            if backoff_seconds >= max_backoff_seconds:
                raise RuntimeError("Could not connect to controller %s:%d"
                                   % (address, port))
            else:
                time.sleep(backoff_seconds)
                backoff_seconds <<= 1  # exponential backoff: 1, 2, 4, ...
    return sock
# Types considered "scalar" (simple, non-composite) values.
# NOTE(review): Python 2 only as written -- `long` and `basestring` do not
# exist on Python 3; confirm before porting.
_scalar_types = (int, long, basestring, float, bool)

def is_scalar (v):
    """Return True if v is a simple scalar value (number, string, bool)."""
    return isinstance(v, _scalar_types)
def fields_of (obj, primitives_only=False,
               primitives_and_composites_only=False, allow_caps=False):
    """
    Returns key/value pairs of things that seem like public fields of an object.

    primitives_only: only include scalar values (see _scalar_types)
    primitives_and_composites_only: also allow set/dict/list values
    allow_caps: include ALL_CAPS names (normally treated as constants
                and skipped)

    NOTE(review): Python 2 only as written -- uses `long` and `basestring`.
    """
    #NOTE: The above docstring isn't split into two lines on purpose.
    r = {}
    for k in dir(obj):
        if k.startswith('_'): continue  # skip private/dunder names
        v = getattr(obj, k)
        if hasattr(v, '__call__'): continue  # skip methods/callables
        if not allow_caps and k.upper() == k: continue  # skip ALL_CAPS
        if primitives_only:
            if not isinstance(v, _scalar_types):
                continue
        elif primitives_and_composites_only:
            if not isinstance(v, (int, long, basestring, float, bool, set,
                                  dict, list)):
                continue
        #r.append((k,v))
        r[k] = v
    return r
if __name__ == "__main__":
    #TODO: move to tests?
    # Ad hoc smoke test: prints each value passed to the DirtyList callback,
    # then the resulting list.
    def cb (t,k,v): print(v)
    l = DirtyList([10,20,30,40,50])
    l.callback = cb
    l.append(3)
    print(l)
| apache-2.0 |
donvel/rep | comm/migrations/0026_auto_20150226_1439.py | 1 | 1194 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """
    Sets Polish verbose names and name-based default ordering on the
    branch, customtextconfig, and diocese models; makes Service.full_name
    unique; and adds a (report, config) unique-together constraint on
    CustomText.
    """

    dependencies = [
        ('comm', '0025_auto_20150224_1330'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='branch',
            options={'ordering': ['name'], 'verbose_name': 'filia', 'verbose_name_plural': 'filie'},
        ),
        migrations.AlterModelOptions(
            name='customtextconfig',
            options={'ordering': ['-display_priority'], 'verbose_name': 'typ pola', 'verbose_name_plural': 'typy p\xf3l'},
        ),
        migrations.AlterModelOptions(
            name='diocese',
            options={'ordering': ['name'], 'verbose_name': 'diecezja', 'verbose_name_plural': 'diecezje'},
        ),
        migrations.AlterField(
            model_name='service',
            name='full_name',
            field=models.CharField(unique=True, max_length=100, verbose_name='pe\u0142na nazwa'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='customtext',
            unique_together=set([('report', 'config')]),
        ),
    ]
| mit |
veronicagg/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureSpecials/setup.py | 14 | 1166 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
# Distribution (package) name.
NAME = "autorestazurespecialparameterstestclient"
# Version string; mirrors the target service api-version date.
VERSION = "2015-07-01-preview"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools

# Runtime dependencies.
REQUIRES = ["msrestazure>=0.4.7"]

setup(
    name=NAME,
    version=VERSION,
    description="AutoRestAzureSpecialParametersTestClient",
    author_email="",
    url="",
    keywords=["Swagger", "AutoRestAzureSpecialParametersTestClient"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
Test Infrastructure for AutoRest
"""
)
| mit |
USC-ICT/gift-integration-demo | GiftDemo/Assets/StreamingAssets/SB/locomotion-ChrBrad-state-StartingLeft.py | 1 | 2156 | # state ChrMarineStartingLeft
# autogenerated by SmartBody
# Builds a 1D "start walking while turning left" blend state from three
# motions (straight, 90-degree left, 180-degree left), parameterized by turn
# angle, with four sets of correspondence points aligning the motions.
stateManager = scene.getStateManager()

stateChrMarineStartingLeft = stateManager.getState("ChrMarineStartingLeft")
# Guard so the state (and its motions) is only built once.
if (stateChrMarineStartingLeft is None):
    stateChrMarineStartingLeft = stateManager.createState1D("ChrMarineStartingLeft")
    stateChrMarineStartingLeft.setBlendSkeleton("ChrBrad.sk")

    motions = StringVec()
    motions.append("ChrBrad_ChrMarine@Idle01_ToWalk01")
    motions.append("ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01")
    motions.append("ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01")

    # Blend parameter: turn angle in degrees (negative = left turn).
    paramsX = DoubleVec()
    paramsX.append(0) # ChrBrad_ChrMarine@Idle01_ToWalk01 X
    paramsX.append(-90) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01 X
    paramsX.append(-180) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01 X
    for i in range(0, len(motions)):
        stateChrMarineStartingLeft.addMotion(motions[i], paramsX[i])

    # Correspondence point set 0 (times in seconds for each motion).
    points0 = DoubleVec()
    points0.append(0) # ChrBrad_ChrMarine@Idle01_ToWalk01 0
    points0.append(0) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01 0
    points0.append(0) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01 0
    stateChrMarineStartingLeft.addCorrespondencePoints(motions, points0)
    # Correspondence point set 1.
    points1 = DoubleVec()
    points1.append(0.73) # ChrBrad_ChrMarine@Idle01_ToWalk01 1
    points1.append(1.42) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01 1
    points1.append(1.37) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01 1
    stateChrMarineStartingLeft.addCorrespondencePoints(motions, points1)
    # Correspondence point set 2.
    points2 = DoubleVec()
    points2.append(1.32) # ChrBrad_ChrMarine@Idle01_ToWalk01 2
    points2.append(2.08) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01 2
    points2.append(1.94) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01 2
    stateChrMarineStartingLeft.addCorrespondencePoints(motions, points2)
    # Correspondence point set 3.
    points3 = DoubleVec()
    points3.append(1.56) # ChrBrad_ChrMarine@Idle01_ToWalk01 3
    points3.append(2.43) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn90Lf01 3
    points3.append(2.46) # ChrBrad_ChrMarine@Idle01_ToWalk01_Turn180Lf01 3
    stateChrMarineStartingLeft.addCorrespondencePoints(motions, points3)
| bsd-3-clause |
thc202/zaproxy | docker/zap-api-scan.py | 6 | 25656 | #!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a full scan against an API defined by OpenAPI/Swagger, SOAP
# or GraphQL using ZAP.
#
# It can either be run 'standalone', in which case depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# It currently support APIS defined by:
# OpenAPI/Swagger URL
# OpenAPI/Swagger file
# SOAP URL
# SOAP File
# GraphQL URL
# GraphQL File
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
# By default the active scan rules run are hardcoded in the API-Minimal.policy
# file but you can change them by supplying a configuration file with the rules
# you dont want to be run set to IGNORE.
import getopt
import json
import logging
import os
import os.path
import subprocess
import sys
import time
from datetime import datetime
from six.moves.urllib.parse import urljoin
from zapv2 import ZAPv2
from zap_common import *
class NoUrlsException(Exception):
    """Raised when importing the API definition yields no URLs to scan."""
config_dict = {}        # rule id -> configured action (WARN/FAIL/INFO/IGNORE)
config_msg = {}         # rule id -> optional custom message from the config
out_of_scope_dict = {}  # rule id -> URL patterns considered out of scope
min_level = 0           # minimum index into zap_conf_lvls to report

# Scan rules that aren't really relevant, e.g. the examples rules in the alpha set
ignore_scan_rules = ['-1', '50003', '60000', '60001']

# Scan rules that are being addressed
in_progress_issues = {}

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Hide "Starting new HTTP connection" messages
logging.getLogger("requests").setLevel(logging.WARNING)
def usage():
    """Print the command-line usage/help text for zap-api-scan.py to stdout."""
    print('Usage: zap-api-scan.py -t <target> -f <format> [options]')
    print(' -t target target API definition, OpenAPI or SOAP, local file or URL, e.g. https://www.example.com/openapi.json')
    print(' or target endpoint URL, GraphQL, e.g. https://www.example.com/graphql')
    print(' -f format openapi, soap, or graphql')
    print('Options:')
    print(' -h print this help message')
    print(' -c config_file config file to use to INFO, IGNORE or FAIL warnings')
    print(' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings')
    print(' -g gen_file generate default config file(all rules set to WARN)')
    print(' -r report_html file to write the full ZAP HTML report')
    print(' -w report_md file to write the full ZAP Wiki(Markdown) report')
    print(' -x report_xml file to write the full ZAP XML report')
    print(' -J report_json file to write the full ZAP JSON document')
    print(' -a include the alpha passive scan rules as well')
    print(' -d show debug messages')
    print(' -P specify listen port')
    print(' -D delay in seconds to wait for passive scanning ')
    print(' -i default rules not in the config file to INFO')
    print(' -I do not return failure on warning')
    print(' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs')
    print(' -n context_file context file which will be loaded prior to scanning the target')
    print(' -p progress_file progress file which specifies issues that are being addressed')
    print(' -s short output format - dont show PASSes or example URLs')
    print(' -S safe mode this will skip the active scan and perform a baseline scan')
    print(' -T max time in minutes to wait for ZAP to start and the passive scan to run')
    print(' -U user username to use for authenticated scans - must be defined in the given context file')
    print(' -O the hostname to override in the (remote) OpenAPI spec')
    print(' -z zap_options ZAP command line options e.g. -z "-config aaa=bbb -config ccc=ddd"')
    print(' --hook path to python file that define your custom hooks')
    print(' --schema GraphQL schema location, URL or file, e.g. https://www.example.com/schema.graphqls')
    print('')
    print('For more details see https://www.zaproxy.org/docs/docker/api-scan/')
def main(argv):
global min_level
global in_progress_issues
cid = ''
context_file = ''
progress_file = ''
config_file = ''
config_url = ''
generate = ''
port = 0
detailed_output = True
report_html = ''
report_md = ''
report_xml = ''
report_json = ''
target = ''
target_url = ''
host_override = ''
format = ''
zap_alpha = False
baseline = False
info_unspecified = False
base_dir = ''
zap_ip = 'localhost'
zap_options = ''
delay = 0
timeout = 0
ignore_warn = False
hook_file = None
schema = ''
schema_url = ''
user = ''
pass_count = 0
warn_count = 0
fail_count = 0
info_count = 0
ignore_count = 0
warn_inprog_count = 0
fail_inprog_count = 0
exception_raised = False
try:
opts, args = getopt.getopt(argv, "t:f:c:u:g:m:n:r:J:w:x:l:hdaijSp:sz:P:D:T:IO:U:", ["hook=", "schema="])
except getopt.GetoptError as exc:
logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg)
usage()
sys.exit(3)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit(0)
elif opt == '-t':
target = arg
logging.debug('Target: ' + target)
elif opt == '-f':
format = arg
elif opt == '-c':
config_file = arg
elif opt == '-u':
config_url = arg
elif opt == '-g':
generate = arg
elif opt == '-d':
logging.getLogger().setLevel(logging.DEBUG)
elif opt == '-P':
port = int(arg)
elif opt == '-D':
delay = int(arg)
elif opt == '-n':
context_file = arg
elif opt == '-p':
progress_file = arg
elif opt == '-r':
report_html = arg
elif opt == '-J':
report_json = arg
elif opt == '-w':
report_md = arg
elif opt == '-x':
report_xml = arg
elif opt == '-a':
zap_alpha = True
elif opt == '-i':
info_unspecified = True
elif opt == '-I':
ignore_warn = True
elif opt == '-l':
try:
min_level = zap_conf_lvls.index(arg)
except ValueError:
logging.warning('Level must be one of ' + str(zap_conf_lvls))
usage()
sys.exit(3)
elif opt == '-z':
zap_options = arg
elif opt == '-s':
detailed_output = False
elif opt == '-S':
baseline = True
elif opt == '-T':
timeout = int(arg)
elif opt == '-O':
host_override = arg
elif opt == '-U':
user = arg
elif opt == '--hook':
hook_file = arg
elif opt == '--schema':
schema = arg
logging.debug('Schema: ' + schema)
check_zap_client_version()
load_custom_hooks(hook_file)
trigger_hook('cli_opts', opts)
# Check target supplied and ok
if len(target) == 0:
usage()
sys.exit(3)
if format != 'openapi' and format != 'soap' and format != 'graphql':
logging.warning('Format must be either \'openapi\', \'soap\', or \'graphql\'')
usage()
sys.exit(3)
if running_in_docker():
base_dir = '/zap/wrk/'
if config_file or generate or report_html or report_xml or report_json or report_md or progress_file or context_file:
# Check directory has been mounted
if not os.path.exists(base_dir):
logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
usage()
sys.exit(3)
if user and not context_file:
logging.warning('A context file must be specified (and include the user) if the user option is selected')
usage()
sys.exit(3)
target_file = ''
if target.startswith('http://') or target.startswith('https://'):
target_url = target
elif format == 'graphql':
logging.warning('Target must start with \'http://\' or \'https://\' and be a valid GraphQL endpoint.')
usage()
sys.exit(3)
else:
# assume its a file
if not os.path.exists(base_dir + target):
logging.warning('Target must either start with \'http://\' or \'https://\' or be a local file')
logging.warning('File does not exist: ' + base_dir + target)
usage()
sys.exit(3)
else:
target_file = target
schema_file = ''
if schema and format == 'graphql':
if schema.startswith('http://') or schema.startswith('https://'):
schema_url = schema
else:
# assume its a file
if not os.path.exists(base_dir + schema):
logging.warning('GraphQL schema must either start with \'http://\' or \'https://\' or be a local file')
logging.warning('File does not exist: ' + base_dir + schema)
usage()
sys.exit(3)
else:
schema_file = schema
# Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option
if port == 0:
port = get_free_port()
logging.debug('Using port: ' + str(port))
if config_file:
# load config file from filestore
with open(base_dir + config_file) as f:
try:
load_config(f, config_dict, config_msg, out_of_scope_dict)
except ValueError as e:
logging.warning("Failed to load config file " + base_dir + config_file + " " + str(e))
sys.exit(3)
elif config_url:
# load config file from url
try:
config_data = urlopen(config_url).read().decode('UTF-8').splitlines()
load_config(config_data, config_dict, config_msg, out_of_scope_dict)
except ValueError as e:
logging.warning("Failed to read configs from " + config_url + " " + str(e))
sys.exit(3)
except:
logging.warning('Failed to read configs from ' + config_url)
sys.exit(3)
if progress_file:
# load progress file from filestore
with open(base_dir + progress_file) as f:
progress = json.load(f)
# parse into something more useful...
# in_prog_issues = map of vulnid -> {object with everything in}
for issue in progress["issues"]:
if issue["state"] == "inprogress":
in_progress_issues[issue["id"]] = issue
if running_in_docker():
try:
params = [
'-addonupdate',
'-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container
if zap_alpha:
params.append('-addoninstall')
params.append('pscanrulesAlpha')
add_zap_options(params, zap_options)
start_zap(port, params)
except OSError:
logging.warning('Failed to start ZAP :(')
sys.exit(3)
else:
# Not running in docker, so start one
mount_dir = ''
if context_file:
mount_dir = os.path.dirname(os.path.abspath(context_file))
params = ['-addonupdate']
if (zap_alpha):
params.extend(['-addoninstall', 'pscanrulesAlpha'])
add_zap_options(params, zap_options)
try:
cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir)
zap_ip = ipaddress_for_cid(cid)
logging.debug('Docker ZAP IP Addr: ' + zap_ip)
# Copy across the files that may not be in all of the docker images
try:
subprocess.check_output(['docker', 'exec', '-t', cid, 'mkdir', '-p', '/home/zap/.ZAP_D/scripts/scripts/httpsender/'])
cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js', '/home/zap/.ZAP_D/')
cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js', '/home/zap/.ZAP_D/')
cp_to_docker(cid, 'policies/API-Minimal.policy', '/home/zap/.ZAP_D/')
if target_file:
cp_to_docker(cid, target_file, '/zap/')
except OSError:
logging.warning('Failed to copy one of the required files')
sys.exit(3)
except OSError:
logging.warning('Failed to start ZAP in docker :(')
sys.exit(3)
try:
zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})
wait_for_zap_start(zap, timeout * 60)
trigger_hook('zap_started', zap, target)
# Make suitable performance tweaks for running in this environment
zap_tune(zap)
trigger_hook('zap_tuned', zap)
if context_file:
# handle the context file, cant use base_dir as it might not have been set up
zap_import_context(zap, '/zap/wrk/' + os.path.basename(context_file))
if (user):
zap_set_scan_user(zap, user)
# Enable scripts
zap.script.load('Alert_on_HTTP_Response_Code_Errors.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js')
zap.script.enable('Alert_on_HTTP_Response_Code_Errors.js')
zap.script.load('Alert_on_Unexpected_Content_Types.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js')
zap.script.enable('Alert_on_Unexpected_Content_Types.js')
# Import the API defn
if format == 'openapi':
trigger_hook('importing_openapi', target_url, target_file)
if target_url:
logging.debug('Import OpenAPI URL ' + target_url)
res = zap.openapi.import_url(target, host_override)
urls = zap.core.urls()
if host_override:
target = urljoin(target_url, '//' + host_override)
logging.info('Using host override, new target: {0}'.format(target))
else:
logging.debug('Import OpenAPI File ' + target_file)
res = zap.openapi.import_file(base_dir + target_file)
urls = zap.core.urls()
if len(urls) > 0:
# Choose the first one - will be striping off the path below
target = urls[0]
logging.debug('Using target from imported file: {0}'.format(target))
logging.info('Number of Imported URLs: ' + str(len(urls)))
elif format == 'soap':
trigger_hook('importing_soap', target_url, target_file)
if target_url:
logging.debug('Import SOAP URL ' + target_url)
res = zap._request(zap.base + 'soap/action/importUrl/', {'url':target})
urls = zap.core.urls()
else:
logging.debug('Import SOAP File ' + target_file)
res = zap._request(zap.base + 'soap/action/importFile/', {'file': base_dir + target_file})
urls = zap.core.urls()
if len(urls) > 0:
# Choose the first one - will be striping off the path below
target = urls[0]
logging.debug('Using target from imported file: {0}'.format(target))
logging.info('Number of Imported URLs: ' + str(len(urls)))
elif format == 'graphql':
trigger_hook('importing_graphql', target, schema)
logging.debug('GraphQL Endpoint URL ' + target)
logging.info('Begin sending GraphQL requests...')
if schema:
logging.debug('Import GraphQL Schema ' + schema)
res = zap.graphql.import_file(target, base_dir + schema) if schema_file else zap.graphql.import_url(target, schema_url)
else:
res = zap.graphql.import_url(target)
logging.info('About ' + str(zap.core.number_of_messages()) + ' requests sent.')
urls = zap.core.urls()
logging.debug('Import warnings: ' + str(res))
if len(urls) == 0:
logging.warning('Failed to import any URLs')
# No point continue, there's nothing to scan.
raise NoUrlsException()
if target.count('/') > 2:
old_target = target
# The url can include a valid path, but always reset to scan the host
target = target[0:target.index('/', 8)+1]
logging.debug('Normalised target from {0} to {1}'.format(old_target, target))
# Wait for a delay if specified with -D option
if (delay):
start_scan = datetime.now()
while((datetime.now() - start_scan).seconds < delay ):
time.sleep(5)
logging.debug('Delay active scan ' + str(delay -(datetime.now() - start_scan).seconds) + ' seconds')
# Set up the scan policy
scan_policy = 'API-Minimal'
if config_dict:
# They have supplied a config file, use this to define the ascan rules
# Use the default one as the script might not have write access to the one just copied across
scan_policy = 'Default Policy'
zap.ascan.enable_all_scanners(scanpolicyname=scan_policy)
for scanner, state in config_dict.items():
if state == 'IGNORE':
# Dont bother checking the result - this will fail for pscan rules
zap.ascan.set_scanner_alert_threshold(id=scanner, alertthreshold='OFF', scanpolicyname=scan_policy)
if not baseline:
zap_active_scan(zap, target, scan_policy)
zap_wait_for_passive_scan(zap, timeout * 60)
# Print out a count of the number of urls
num_urls = len(zap.core.urls())
if num_urls == 0:
logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
else:
if detailed_output:
print('Total of ' + str(num_urls) + ' URLs')
alert_dict = zap_get_alerts(zap, target, ignore_scan_rules, out_of_scope_dict)
all_ascan_rules = zap.ascan.scanners('Default Policy')
all_pscan_rules = zap.pscan.scanners
all_dict = {}
for rule in all_pscan_rules:
plugin_id = rule.get('id')
if plugin_id in ignore_scan_rules:
continue
all_dict[plugin_id] = rule.get('name') + ' - Passive/' + rule.get('quality')
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in ignore_scan_rules:
continue
all_dict[plugin_id] = rule.get('name') + ' - Active/' + rule.get('quality')
if generate:
# Create the config file
with open(base_dir + generate, 'w') as f:
f.write('# zap-api-scan rule configuration file\n')
f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
f.write('# Active scan rules set to IGNORE will not be run which will speed up the scan\n')
f.write('# Only the rule identifiers are used - the names are just for info\n')
f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n')
for key, rule in sorted(all_dict.items()):
f.write(key + '\tWARN\t(' + rule + ')\n')
# print out the passing rules
pass_dict = {}
for rule in all_pscan_rules:
plugin_id = rule.get('id')
if plugin_id in ignore_scan_rules:
continue
if plugin_id not in alert_dict:
pass_dict[plugin_id] = rule.get('name')
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in ignore_scan_rules:
continue
if plugin_id not in alert_dict and not(plugin_id in config_dict and config_dict[plugin_id] == 'IGNORE'):
pass_dict[plugin_id] = rule.get('name')
if min_level == zap_conf_lvls.index("PASS") and detailed_output:
for key, rule in sorted(pass_dict.items()):
print('PASS: ' + rule + ' [' + key + ']')
pass_count = len(pass_dict)
if detailed_output:
# print out the ignored ascan rules(there will be no alerts for these as they were not run)
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in ignore_scan_rules:
continue
if plugin_id in config_dict and config_dict[plugin_id] == 'IGNORE':
print('SKIP: ' + rule.get('name') + ' [' + plugin_id + ']')
# print out the ignored rules
ignore_count, not_used = print_rules(zap, alert_dict, 'IGNORE', config_dict, config_msg, min_level,
inc_ignore_rules, True, detailed_output, {})
# print out the info rules
info_count, not_used = print_rules(zap, alert_dict, 'INFO', config_dict, config_msg, min_level,
inc_info_rules, info_unspecified, detailed_output, in_progress_issues)
# print out the warning rules
warn_count, warn_inprog_count = print_rules(zap, alert_dict, 'WARN', config_dict, config_msg, min_level,
inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues)
# print out the failing rules
fail_count, fail_inprog_count = print_rules(zap, alert_dict, 'FAIL', config_dict, config_msg, min_level,
inc_fail_rules, True, detailed_output, in_progress_issues)
if report_html:
# Save the report
write_report(base_dir + report_html, zap.core.htmlreport())
if report_json:
# Save the report
write_report(base_dir + report_json, zap.core.jsonreport())
if report_md:
# Save the report
write_report(base_dir + report_md, zap.core.mdreport())
if report_xml:
# Save the report
write_report(base_dir + report_xml, zap.core.xmlreport())
print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) +
'\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) +
'\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))
trigger_hook('zap_pre_shutdown', zap)
# Stop ZAP
zap.core.shutdown()
except UserInputException as e:
exception_raised = True
print("ERROR %s" % e)
except (NoUrlsException, ScanNotStartedException):
exception_raised = True
dump_log_file(cid)
except IOError as e:
exception_raised = True
print("ERROR %s" % e)
logging.warning('I/O error: ' + str(e))
dump_log_file(cid)
except:
exception_raised = True
print("ERROR " + str(sys.exc_info()[0]))
logging.warning('Unexpected error: ' + str(sys.exc_info()[0]))
dump_log_file(cid)
if not running_in_docker():
stop_docker(cid)
trigger_hook('pre_exit', fail_count, warn_count, pass_count)
if exception_raised:
sys.exit(3)
elif fail_count > 0:
sys.exit(1)
elif (not ignore_warn) and warn_count > 0:
sys.exit(2)
elif pass_count > 0:
sys.exit(0)
else:
sys.exit(3)
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
m4rx9/rna-pdb-tools | rna_tools/tools/rna_filter/rna_filter.py | 2 | 11906 | #!/usr/bin/env python
"""rna_filter.py - calculate distances based on given restrants on PDB files or SimRNA trajectories.
Changes: weight is always 1 (at least for now). ,>,=,>=,<= .
[PREVIOUS DOCUMENTATION - TO BE REMOVED]
rna_filter.py -s 4gxy_rpr.pdb -r rp06_MohPairs.rfrestrs
d:A5-A42 100.0 measured: 26.7465763417 [x]
d:A11-A26 100.0 measured: 19.2863696104 [x]
[mm] rp06$ git:(master) $ rna_filter.py -s 4gxy_rpr.pdb -r rp06_MohPairs.rfrestrs
d:A5-A42 100.0 measured: 26.7465763417 [x]
d:A11-A26 100.0 measured: 19.2863696104 [x]
Traceback (most recent call last):
File "/home/magnus/work-src/rna-pdb-tools/bin/rna_filter.py", line 270, in <module>
calc_scores_for_pdbs(args.structures, restraints, args.verbose)
File "/home/magnus/work-src/rna-pdb-tools/bin/rna_filter.py", line 221, in calc_scores_for_pdbs
dist = get_distance(residues[h[0]]['mb'], residues[h[1]]['mb'])
KeyError: 'A24'
correct, there is no A24 in this structure:
The format of restraints::
(d:A1-A2 < 10.0 1) = if distance between A1 and A2 lower than 10.0, score it with 1
Usage::
$ python rna_filter.py -r test_data/restraints.txt -s test_data/CG.pdb
d:A1-A2 10.0 measured: 6.58677550096 [x]
test_data/CG.pdb 1.0 1 out of 1
# $ python rna_filter.py -r test_data/restraints.txt -t test_data/CG.trafl
(d:A1-A2 < 10.0 1)|(d:A2-A1 <= 10 1)
restraints [('A1', 'A2', '<', '10.0', '1'), ('A2', 'A1', '<=', '10', '1')]
Frame #1 e:1252.26
mb for A1 [ 54.729 28.9375 41.421 ]
mb for A2 [ 55.3425 35.3605 42.7455]
d:A1-A2 6.58677550096
mb for A2 [ 55.3425 35.3605 42.7455]
mb for A1 [ 54.729 28.9375 41.421 ]
d:A2-A1 6.58677550096
# this ^ is off right now
"""
from __future__ import print_function
from rna_tools.tools.rna_calc_rmsd.lib.rmsd.calculate_rmsd import get_coordinates
from rna_tools.tools.extra_functions.select_fragment import select_pdb_fragment_pymol_style, select_pdb_fragment
from rna_tools.tools.simrna_trajectory.simrna_trajectory import SimRNATrajectory
import argparse
import re
import numpy as np
import os
import logging
# Root-logger setup: one stream handler with a timestamped format.
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)-15s %(filename)s::%(funcName)s::%(message)s')
handler.setFormatter(formatter)
# NOTE: attach the handler exactly once; the previous code called
# addHandler() twice, so every log record was emitted twice.
logger.addHandler(handler)
logger.setLevel(logging.INFO)
class RNAFilterErrorInRestraints(Exception):
    """Raised when the restraints file is empty or a line cannot be parsed."""
    pass
def parse_logic(restraints_fn, verbose):
    """Parse restraints written in the single-line "logic" format.

    Args:
        restraints_fn (string): path to a file with restraints in the right
            format, e.g. ``(d:A1-A2 < 10.0 1)|(d:A2-A1 <= 10 1)``;
            lines starting with ``#`` are ignored
        verbose (bool): be verbose?

    Returns:
        list: restraints as tuples of strings,
        e.g. ``[('A1', 'A2', '<', '10.0', '1'), ('A2', 'A1', '<=', '10', '1')]``
    """
    txt = ''
    with open(restraints_fn) as f:
        for l in f:
            if not l.startswith('#'):
                txt += l.strip()
    if verbose:
        logger.info(txt)
    # Raw string literal so \s, \d, \( etc. reach the regex engine verbatim
    # (non-raw regex literals rely on deprecated invalid-escape behavior).
    restraints = re.findall(
        r'\(d:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\<|\<\=)\s*(?P<distance>[\d\.]+)\s+(?P<weight>.+?)\)', txt)
    return restraints
def parse_logic_newlines(restraints_fn, offset=0, verbose=False):
    """Parse restraints given one per line.

    Args:
        restraints_fn (string): path to a file with restraints in the right
            format (see below)
        offset (int): value added to both residue numbers, e.g. to adjust the
            restraints to the numbering used in the PDB files
        verbose (bool): be verbose?

    Format::

        # ignore comments
        d:Y23-Y69 < 25.0
        d:Y22-Y69 < 25.0
        # d:<chain><resi_A>-<resi_B> <operator> <distance>; one restraint per line

    Raises:
        RNAFilterErrorInRestraints: if a non-comment, non-empty line does not
            parse, or if no restraints were found at all.

    Returns:
        list: restraints as lists ``[start, end, operator, distance, weight]``,
        e.g. ``[['A9', 'A41', '<', 10.0, 1]]`` (weight is currently always 1)
    """
    restraints = []
    with open(restraints_fn) as f:
        for l in f:
            if l.strip():
                if not l.startswith('#'):
                    if verbose:
                        logger.info(l)
                    # Raw string literal so the escapes reach the regex engine
                    # verbatim.
                    restraint = re.findall(
                        r'd:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\>|\<|\<\=)\s*(?P<distance>[\d\.]+)', l)
                    if restraint:
                        # findall returns e.g. [('Y23', 'Y69', '<', '25.0')];
                        # convert the numeric parts and apply the offset.
                        start = restraint[0][0][0] + str(int(restraint[0][0][1:]) + offset)
                        end = restraint[0][1][0] + str(int(restraint[0][1][1:]) + offset)
                        operator = restraint[0][2]
                        distance = float(restraint[0][3])
                        weight = 1  # fixed for now; per-restraint weights are not configurable
                        restraints.append([start, end, operator, distance, weight])
                    else:
                        raise RNAFilterErrorInRestraints('Please check the format of your restraints!')
    if len(restraints) == 0:
        raise RNAFilterErrorInRestraints('Please check the format of your restraints!')
    return restraints  # e.g. [['A9', 'A41', '<', 10.0, 1], ['A10', 'A16', '<', 10.0, 1]]
def get_distance(a, b):
    """Return the Euclidean distance between points *a* and *b* (numpy arrays)."""
    delta = np.asarray(a) - np.asarray(b)
    return np.sqrt((delta * delta).sum())
def parse_pdb(pdb_fn, selection):
    """Read ATOM records from *pdb_fn* and return their coordinates.

    Only residues present in *selection* (a mapping chain id -> residue
    numbers) are kept; a falsy selection yields an empty result.

    Returns a dict keyed by ``'<chain><resi>'`` (e.g. ``'A9'``), each value
    mapping an atom name to a numpy array ``[x, y, z]``, e.g.::

        {'A9': {'P': array([53.57, 23.268, 39.971]), "C1'": array([...]), ...}}
    """
    coords = {}
    with open(pdb_fn) as handle:
        for record in handle:
            if not record.startswith("ATOM"):
                continue
            # Fixed-column PDB ATOM record layout (0-indexed slices).
            chain_id = record[21]
            resi = int(record[22: 26])
            atom_name = record[12: 16].strip()
            if not selection:
                continue
            if chain_id not in selection or resi not in selection[chain_id]:
                continue
            xyz = np.asarray([record[30: 38], record[38: 46], record[46: 54]], dtype=float)
            coords.setdefault(chain_id + str(resi), {})[atom_name] = xyz
    return coords
def check_condition(condition, wight):
    """Return (True/False, score) for a single restraint condition.

    NOTE(review): not implemented yet -- the body is a stub and always
    returns None; `wight` is presumably a typo for `weight`.
    """
    pass
def get_residues(pdb_fn, restraints, verbose):
    """Collect the residues referenced by *restraints* and compute their 'mb' point.

    Args:
        pdb_fn (str): path to a PDB file
        restraints (list): parsed restraints, e.g. [['A9', 'A41', '<', 10.0, 1]]
        verbose (bool): log the computed middle-of-base points

    Returns:
        dict: output of parse_pdb() for the selected residues, with an extra
        'mb' entry per residue -- the base midpoint used for distance
        measurements.
    """
    residues = set()
    for h in restraints:
        a = h[0]
        b = h[1]
        # 'A19' -> 'A:19' so the ids can be fed to select_pdb_fragment()
        a = a[0] + ':' + a[1:]
        residues.add(a)  # A19
        b = b[0] + ':' + b[1:]
        residues.add(b)
    # e.g. set(['A:41', 'A:9', 'A:10', 'A:16'])
    selection = ','.join(residues)
    selection_parsed = select_pdb_fragment(selection, separator=",", splitting="[,:;]")
    residues = parse_pdb(pdb_fn, selection_parsed)
    # get mb: midpoint across the base, between two ring atoms
    for r in residues:
        if 'N9' in residues[r]:  # purines (A,G) have N9
            residues[r]['mb'] = residues[r]['N9'] - ((residues[r]['N9'] - residues[r]['C6']) / 2)
        else:  # pyrimidines (C,U): no N9, use N1/C4 instead
            residues[r]['mb'] = residues[r]['N1'] - ((residues[r]['N1'] - residues[r]['C4']) / 2)
    for r in residues:
        if verbose:
            logger.info(' '.join(['mb for ', str(r), str(residues[r]['mb'])]))
    return residues
def get_parser():
    """Build the argparse parser for the rna_filter command line."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    # the restraints file is the only required argument
    parser.add_argument('-r', "--restraints_fn",
                        dest="restraints_fn",
                        required=True,
                        help="""restraints_fn:
Format:
(d:A9-A41 < 10.0 1)|(d:A41-A9 <= 10 1)
""")
    parser.add_argument("-v", "--verbose",
                        action="store_true", help="be verbose")
    parser.add_argument('-s', dest="structures", help='structures',
                        nargs='+')  # , type=string)
    parser.add_argument(
        '--offset', help='use offset to adjust your restraints to numbering in PDB files, ade (1y26)'
        'pdb starts with 13, so offset is -12)', default=0, type=int)
    parser.add_argument('-t', dest="trajectory", help="SimRNA trajectory")
    return parser
def calc_scores_for_pdbs(pdb_files, restraints, verbose):
    """Score each PDB file against the restraints and print a CSV summary.

    Args:
        pdb_files (list): paths to PDB files
        restraints (list): parsed restraints, e.g. [['A1', 'A2', '<', 10.0, 1]]
        verbose (bool): also print a per-restraint report

    Prints one ``<basename>,<score>`` line per file, where score is the sum
    of the weights of fulfilled restraints divided by the number of
    restraints.
    """
    # Comparator lookup instead of eval(): safer, and it also supports the
    # '=' operator accepted by the restraints parser, for which
    # eval('dist = h[3]') raised a SyntaxError.
    comparators = {
        '<': lambda x, y: x < y,
        '<=': lambda x, y: x <= y,
        '>': lambda x, y: x > y,
        '>=': lambda x, y: x >= y,
        '=': lambda x, y: x == y,
    }
    print('fn, rst_score')
    for pdb_fn in pdb_files:
        if verbose:
            print(pdb_fn, end=",")
        score = 0
        residues = get_residues(pdb_fn, restraints, verbose)
        good_dists = 0
        for h in restraints:  # h = ['A1', 'A2', '<', 10.0, 1]
            dist = get_distance(residues[h[0]]['mb'], residues[h[1]]['mb'])
            ok = '[ ]'
            is_fulfiled = comparators[h[2]](dist, h[3])
            if is_fulfiled:
                score += h[4]
                ok = '[x]'
                good_dists += 1
            if verbose:
                print(' '.join([' d:' + h[0] + '-' + h[1] + ' ' + str(h[4]), 'measured:', str(dist), ok]))
        if verbose:
            print(pdb_fn, score / float(len(restraints)), good_dists, 'out of', len(restraints))
        print('%s,%f' % (os.path.basename(pdb_fn), score / float(len(restraints))))
def __filter_simrna_trajectory():
    """Report restraint distances for every frame of a SimRNA trajectory.

    Reads the trajectory given on the command line (``args.trajectory``) --
    frames are stored as pairs of lines (header, coordinates) -- and logs the
    measured distance for every restraint of every frame.

    NOTE: relies on the module-level ``args`` and ``restraints`` created in
    the ``__main__`` section; residue ids are assumed to be chain A.
    """
    # Use a context manager so the trajectory file is always closed.
    with open(args.trajectory) as trajectory_file:
        frame_no = 0
        lines = iter(trajectory_file)
        while True:
            try:
                header = next(lines).strip()  # py3: next(it), not it.next()
            except StopIteration:
                break
            frame_no += 1
            coords = next(lines).strip()
            traj = SimRNATrajectory()
            traj.load_from_string(frame_no, header + '\n' + coords)
            frame = traj.frames[0]
            print(frame_no)
            for h in restraints:
                a = int(h[0].replace('A', '')) - 1  # A1 -> 0 (Python-like indexing)
                b = int(h[1].replace('A', '')) - 1
                a_mb = frame.residues[a].get_center()
                b_mb = frame.residues[b].get_center()
                dist = get_distance(a_mb, b_mb)
                # was: ' '.join('...', dist) -- a TypeError; format explicitly
                logger.info(' d:A%s-A%s %s' % (a + 1, b + 1, dist))
# main entry point: parse the command line, load the restraints and score
# the given structures
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    restraints = parse_logic_newlines(args.restraints_fn, args.offset, args.verbose)
    if args.verbose:
        logger.info('restraints' + str(restraints))
    if args.structures:
        calc_scores_for_pdbs(args.structures, restraints, args.verbose)
    # Trajectory filtering is currently disabled (see __filter_simrna_trajectory):
    # if args.trajectory:
    #    __filter_simrna_trajectory()
| mit |
macs03/demo-cms | cms/lib/python2.7/site-packages/django/contrib/gis/gdal/geomtype.py | 219 | 3001 | from django.contrib.gis.gdal.error import OGRException
from django.utils import six
#### OGRGeomType ####
class OGRGeomType(object):
    "Encapsulates OGR Geometry Types."

    # High bit of a signed 32-bit int: OGR flags 2.5D (Z-aware) types with it.
    wkb25bit = -2147483648

    # Dictionary of acceptable OGRwkbGeometryType s and their string names.
    _types = {0 : 'Unknown',
              1 : 'Point',
              2 : 'LineString',
              3 : 'Polygon',
              4 : 'MultiPoint',
              5 : 'MultiLineString',
              6 : 'MultiPolygon',
              7 : 'GeometryCollection',
              100 : 'None',
              101 : 'LinearRing',
              1 + wkb25bit: 'Point25D',
              2 + wkb25bit: 'LineString25D',
              3 + wkb25bit: 'Polygon25D',
              4 + wkb25bit: 'MultiPoint25D',
              5 + wkb25bit : 'MultiLineString25D',
              6 + wkb25bit : 'MultiPolygon25D',
              7 + wkb25bit : 'GeometryCollection25D',
              }
    # Reverse type dictionary, keyed by lower-case of the name.
    _str_types = dict([(v.lower(), k) for k, v in _types.items()])

    def __init__(self, type_input):
        "Figures out the correct OGR Type based upon the input."
        # Accepts another OGRGeomType, a (case-insensitive) name, or an
        # OGR integer code.
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, six.string_types):
            type_input = type_input.lower()
            if type_input == 'geometry': type_input='unknown'
            num = self._str_types.get(type_input, None)
            if num is None:
                raise OGRException('Invalid OGR String Type "%s"' % type_input)
        elif isinstance(type_input, int):
            if not type_input in self._types:
                raise OGRException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')
        # Setting the OGR geometry type number.
        self.num = num

    def __str__(self):
        "Returns the value of the name property."
        return self.name

    def __eq__(self, other):
        """
        Does an equivalence test on the OGR type with the given
        other OGRGeomType, the short-hand string, or the integer.
        """
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        elif isinstance(other, six.string_types):
            return self.name.lower() == other.lower()
        elif isinstance(other, int):
            return self.num == other
        else:
            return False

    def __ne__(self, other):
        # Explicit __ne__ needed on Python 2, where it is not derived
        # from __eq__.
        return not (self == other)

    @property
    def name(self):
        "Returns a short-hand string form of the OGR Geometry type."
        return self._types[self.num]

    @property
    def django(self):
        "Returns the Django GeometryField for this OGR Type."
        s = self.name.replace('25D', '')
        if s in ('LinearRing', 'None'):
            return None
        elif s == 'Unknown':
            s = 'Geometry'
        return s + 'Field'
| mit |
superdesk/superdesk-core | superdesk/media/crop.py | 2 | 16898 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
from eve.utils import ParsedRequest
import superdesk
import logging
from copy import deepcopy
from flask import current_app as app
from superdesk import get_resource_service, config
from superdesk.errors import SuperdeskApiError
from superdesk.media.media_operations import crop_image, process_file_from_stream
from superdesk.upload import url_for_media
from superdesk.metadata.item import CONTENT_TYPE, ITEM_TYPE, MEDIA_TYPES, ASSOCIATIONS
from .renditions import _resize_image
logger = logging.getLogger(__name__)
class CropService:
    """Crop handling for picture items: validation of crop coordinates and
    point of interest, generation/storage of cropped renditions, and
    bookkeeping of the ``media_references`` collection."""

    # Cached items of the 'crop_sizes' vocabulary (lazily loaded in
    # get_crop_by_name). NOTE: class-level, so the cache is shared by all
    # instances.
    crop_sizes = []

    def validate_crop(self, original, updates, crop_name):
        """Validate crop info on media item.
        :param dict original: original item
        :param dict updates: updated renditions
        :param str crop_name: name of the crop
        :raises SuperdeskApiError.badRequestError:
            For following conditions:
            1) if type != picture
            2) if renditions are missing in the original image
            3) if original rendition is missing
            4) Crop name is invalid
        """
        # Check if type is picture
        if original[ITEM_TYPE] != CONTENT_TYPE.PICTURE:
            raise SuperdeskApiError.badRequestError(message="Only images can be cropped!")
        # Check if the renditions exists
        if not original.get("renditions"):
            raise SuperdeskApiError.badRequestError(message="Missing renditions!")
        # Check if the original rendition exists
        if not original.get("renditions").get("original"):
            raise SuperdeskApiError.badRequestError(message="Missing original rendition!")
        # Check if the crop name is valid
        crop = self.get_crop_by_name(crop_name)
        crop_data = updates.get("renditions", {}).get(crop_name, {})
        if not crop and "CropLeft" in crop_data:
            raise SuperdeskApiError.badRequestError(message="Unknown crop name! (name=%s)" % crop_name)
        self._validate_values(crop_data)
        self._validate_poi(original, updates, crop_name)
        self._validate_aspect_ratio(crop, crop_data)

    def _validate_values(self, crop):
        """Coerce crop coordinate/size fields to int (mutating *crop* in
        place), raising badRequestError for non-numeric values."""
        int_fields = ("CropLeft", "CropTop", "CropRight", "CropBottom", "width", "height")
        for field in int_fields:
            if field in crop:
                try:
                    crop[field] = int(crop[field])
                except (TypeError, ValueError):
                    raise SuperdeskApiError.badRequestError("Invalid value for %s in renditions" % field)

    def _validate_poi(self, original, updates, crop_name):
        """Validate the crop point of interest in the renditions dictionary for the given crop
        :param dict original: original item
        :param dict updates: updated renditions
        """
        renditions = original.get("renditions", {})
        updated_renditions = updates.get("renditions", {})
        original_image = deepcopy(renditions["original"])
        original_image.update(updated_renditions.get("original", {}))
        if "poi" in updates:
            # an incomplete poi (missing x or y) is dropped silently
            if "x" not in updates["poi"] or "y" not in updates["poi"]:
                del updates["poi"]
                return
            poi = updates["poi"]
        elif "poi" not in original:
            return
        else:
            if crop_name not in updated_renditions:
                return
            poi = original["poi"]
        crop_data = updated_renditions[crop_name] if crop_name in updated_renditions else renditions[crop_name]
        # poi x/y are relative (fractions of the image); convert to pixels
        # of the original image before comparing with the crop box.
        orig_poi_x = int(original_image["width"] * poi["x"])
        orig_poi_y = int(original_image["height"] * poi["y"])
        if (
            orig_poi_y < crop_data.get("CropTop", 0)
            or orig_poi_y > crop_data.get("CropBottom", original_image["height"])
            or orig_poi_x < crop_data.get("CropLeft", 0)
            or orig_poi_x > crop_data.get("CropRight", original_image["width"])
        ):
            raise SuperdeskApiError("Point of interest outside the crop %s limits" % crop_name)

    def _validate_aspect_ratio(self, crop, doc):
        """Checks if the aspect ratio is consistent with one in defined in spec
        :param crop: Spec parameters
        :param doc: Posted parameters
        :raises SuperdeskApiError.badRequestError:
        """
        if "CropLeft" not in doc:
            return
        width = doc["CropRight"] - doc["CropLeft"]
        height = doc["CropBottom"] - doc["CropTop"]
        if not (crop.get("width") or crop.get("height") or crop.get("ratio")):
            raise SuperdeskApiError.badRequestError(
                message="Crop data are missing. width, height or ratio need to be defined"
            )
        if crop.get("width") and crop.get("height"):
            expected_crop_width = int(crop["width"])
            expected_crop_height = int(crop["height"])
            if width < expected_crop_width or height < expected_crop_height:
                raise SuperdeskApiError.badRequestError(
                    message="Wrong crop size. Minimum crop size is {}x{}.".format(crop["width"], crop["height"])
                )
            # ratios compared rounded to one decimal place
            doc_ratio = round(width / height, 1)
            spec_ratio = round(expected_crop_width / expected_crop_height, 1)
            if doc_ratio != spec_ratio:
                raise SuperdeskApiError.badRequestError(message="Wrong aspect ratio!")
        elif crop.get("ratio"):
            ratio = crop.get("ratio")
            # ratio may be numeric or a string such as "16:9"
            if type(ratio) not in [int, float]:
                ratio = ratio.split(":")
                ratio = int(ratio[0]) / int(ratio[1])
            if abs((width / height) - ratio) > 0.1:
                raise SuperdeskApiError.badRequestError(
                    message="Ratio %s is not respected. We got %f" % (crop.get("ratio"), abs((width / height)))
                )

    def get_crop_by_name(self, crop_name):
        """Finds the crop in the list of crops by name
        :param crop_name: Crop name (matched case-insensitively)
        :return: Matching crop or None
        """
        # lazily load and cache the 'crop_sizes' vocabulary
        if not self.crop_sizes:
            self.crop_sizes = get_resource_service("vocabularies").find_one(req=None, _id="crop_sizes").get("items")
        if not self.crop_sizes:
            raise SuperdeskApiError.badRequestError(message="Crops sizes couldn't be loaded!")
        return next((c for c in self.crop_sizes if c.get("name", "").lower() == crop_name.lower()), None)

    def create_crop(self, original_image, crop_name, crop_data):
        """Create a new crop based on the crop co-ordinates
        :param original_image: original rendition to crop
        :param crop_name: Name of the crop
        :param crop_data: Crop details (CropLeft/CropTop/CropRight/CropBottom)
        :raises SuperdeskApiError.badRequestError
        :return dict: rendition
        """
        original_file = app.media.fetch_rendition(original_image)
        if not original_file:
            raise SuperdeskApiError.badRequestError("Original file couldn't be found")
        try:
            cropped, out = crop_image(original_file, crop_name, crop_data)
            crop = self.get_crop_by_name(crop_name)
            if not cropped:
                raise SuperdeskApiError.badRequestError("Saving crop failed.")
            # resize if needed
            if crop.get("width") or crop.get("height"):
                out, width, height = _resize_image(
                    out,
                    size=(crop.get("width"), crop.get("height")),
                    keepProportions=crop.get("keep_proportions", True),
                )
                crop["width"] = width
                crop["height"] = height
                out.seek(0)
            return self._save_cropped_image(out, original_image, crop_data)
        except SuperdeskApiError:
            raise
        except Exception as ex:
            # wrap any unexpected failure as a bad request
            raise SuperdeskApiError.badRequestError("Generating crop failed: {}".format(str(ex)))

    def _save_cropped_image(self, file_stream, original, doc):
        """Saves the cropped image and returns the crop dictionary
        :param file_stream: cropped image stream
        :param original: original rendition
        :param doc: crop data
        :return dict: Crop values
        :raises SuperdeskApiError.internalError
        """
        crop = {}
        try:
            file_name, content_type, metadata = process_file_from_stream(
                file_stream, content_type=original.get("mimetype")
            )
            file_stream.seek(0)
            file_id = app.media.put(
                file_stream, filename=file_name, content_type=content_type, resource="upload", metadata=metadata
            )
            crop["media"] = file_id
            crop["mimetype"] = content_type
            crop["href"] = url_for_media(file_id, content_type)
            crop["CropTop"] = doc.get("CropTop", None)
            crop["CropLeft"] = doc.get("CropLeft", None)
            crop["CropRight"] = doc.get("CropRight", None)
            crop["CropBottom"] = doc.get("CropBottom", None)
            return crop
        except Exception as ex:
            # best-effort cleanup of the stored file.
            # NOTE(review): if put() itself failed, file_id is unbound here
            # and the delete raises NameError, swallowed by the except below.
            try:
                app.media.delete(file_id)
            except Exception:
                pass
            raise SuperdeskApiError.internalError("Generating crop failed: {}".format(str(ex)), exception=ex)

    def _delete_crop_file(self, file_id):
        """Delete the crop file
        :param Object_id file_id: Object_Id of the file.
        """
        # best-effort: a failed delete is only logged
        try:
            app.media.delete(file_id)
        except Exception:
            logger.exception("Crop File cannot be deleted. File_Id {}".format(file_id))

    def create_multiple_crops(self, updates, original):
        """Create multiple crops based on the renditions.
        :param dict updates: update item
        :param dict original: original of the updated item
        """
        if original.get(ITEM_TYPE) != CONTENT_TYPE.PICTURE:
            return
        update_renditions = updates.get("renditions", {})
        renditions = deepcopy(original.get("renditions", {}))
        # keep renditions updates (urls may have changed)
        renditions.update(update_renditions)
        renditions = {k: renditions[k] for k in renditions if renditions[k]}
        if "original" in updates.get("renditions", {}):
            original_image = updates["renditions"]["original"]
        else:
            try:
                original_image = original["renditions"]["original"]
            except KeyError:
                return
        for key in [k for k in update_renditions if update_renditions[k]]:
            # only renditions matching a configured crop size are generated
            if not self.get_crop_by_name(key):
                continue
            original_crop = original.get("renditions", {}).get(key, {})
            fields = ("CropLeft", "CropTop", "CropRight", "CropBottom")
            crop_data = update_renditions.get(key, {})
            # re-crop only when coordinates changed and no media was uploaded
            if any(crop_data.get(name) != original_crop.get(name) for name in fields) and not crop_data.get("media"):
                rendition = self.create_crop(original_image, key, crop_data)
                renditions[key] = rendition
        poi = updates.get("poi")
        if poi:
            for crop_name in renditions:
                self._set_crop_poi(renditions, crop_name, poi)
        updates["renditions"] = renditions

    def _set_crop_poi(self, renditions, crop_name, poi):
        """Set the crop point of interest in the renditions dictionary for the given crop
        :param dict renditions: updated renditions
        :param string crop_name: the crop for which to set the poi
        :param dict poi: the point of interest dictionary
        """
        fields = ("CropLeft", "CropTop", "CropRight", "CropBottom")
        if "x" in poi and "y" in poi:
            original_image = renditions["original"]
            crop_data = renditions[crop_name]
            # translate the relative poi into pixels of the original image,
            # then into the crop's own coordinate system
            orig_poi_x = int(original_image["width"] * poi["x"])
            orig_poi_y = int(original_image["height"] * poi["y"])
            if any(name in crop_data for name in fields):
                crop_poi_x = orig_poi_x - crop_data.get("CropLeft", 0)
                crop_poi_y = orig_poi_y - crop_data.get("CropTop", 0)
            else:
                crop_poi_x = int(crop_data.get("width", original_image["width"]) * poi["x"])
                crop_poi_y = int(crop_data.get("height", original_image["height"]) * poi["y"])
            renditions[crop_name]["poi"] = {"x": crop_poi_x, "y": crop_poi_y}

    def validate_multiple_crops(self, updates, original):
        """Validate crops for the image
        :param dict updates: update item
        :param dict original: original of the updated item
        """
        renditions = updates.get("renditions", {})
        if not (renditions and original.get(ITEM_TYPE) == CONTENT_TYPE.PICTURE):
            return
        for key in [k for k in renditions if renditions[k]]:
            self.validate_crop(original, updates, key)

    def delete_replaced_crop_files(self, updates, original):
        """Delete the replaced crop files.
        :param dict updates: update item
        :param dict original: original of the updated item
        """
        update_renditions = updates.get("renditions", {})
        if original.get(ITEM_TYPE) == CONTENT_TYPE.PICTURE and update_renditions:
            renditions = original.get("renditions", {})
            for key in update_renditions:
                # a changed 'media' id means the old stored file is orphaned
                if self.get_crop_by_name(key) and update_renditions.get(key, {}).get("media") != renditions.get(
                    key, {}
                ).get("media"):
                    self._delete_crop_file(renditions.get(key, {}).get("media"))

    def update_media_references(self, updates, original, published=False):
        """Update the media references collection.
        When item (media item or associated media) is updated or created,
        media_references are created. These media_references are updated to published state
        once the item is published.
        :param dict updates: Updates of the item
        :param dict original: Original item
        :param boolean published: True if publishing the item else False
        """
        item_id = original.get(config.ID_FIELD)
        references = {}
        if updates.get("renditions", original.get("renditions", {})):
            references = {item_id: updates.get("renditions", original.get("renditions", {}))}
        if original.get(ITEM_TYPE) not in MEDIA_TYPES:
            # non-media item: reference the renditions of its associations
            associations = updates.get(ASSOCIATIONS) or original.get(ASSOCIATIONS)
            if not associations:
                return
            references = {
                assoc.get(config.ID_FIELD): assoc.get("renditions")
                for assoc in associations.values()
                if assoc and assoc.get("renditions")
            }
        if not references:
            return
        for assoc_id, renditions in references.items():
            associated_id = assoc_id if assoc_id != item_id else None
            for rendition in [r for r in renditions.values() if r]:
                if not rendition.get("media"):
                    continue
                media = str(rendition.get("media"))
                reference = get_resource_service("media_references").find_one(req=None, item_id=item_id, media_id=media)
                if not reference:
                    try:
                        get_resource_service("media_references").post(
                            [
                                {
                                    "item_id": item_id,
                                    "media_id": media,
                                    "associated_id": associated_id,
                                    "published": False,
                                }
                            ]
                        )
                    except Exception:
                        logger.exception("Failed to insert media reference item {} media {}".format(item_id, media))
        # item is published: flip all of its pending references
        if not published:
            return
        req = ParsedRequest()
        req.where = json.dumps({"item_id": item_id, "published": False})
        refs = list(get_resource_service("media_references").get(req=req, lookup=None))
        for ref in refs:
            try:
                get_resource_service("media_references").patch(ref.get(config.ID_FIELD), updates={"published": True})
            except Exception:
                logger.exception(
                    "Failed to update media "
                    "reference item {} media {}".format(ref.get("item_id"), ref.get("media_id"))
                )
| agpl-3.0 |
takis/django | django/conf/locale/hu/formats.py | 504 | 1117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'  # Hungarian convention: '.' as time separator (cf. '%H.%M' below)
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%Y.%m.%d.',  # '2006.10.25.'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S',  # '14.30.59'
    '%H.%M',  # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%Y.%m.%d. %H.%M.%S',  # '2006.10.25. 14.30.59'
    '%Y.%m.%d. %H.%M.%S.%f',  # '2006.10.25. 14.30.59.000200'
    '%Y.%m.%d. %H.%M',  # '2006.10.25. 14.30'
    '%Y.%m.%d.',  # '2006.10.25.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Lib/idlelib/AutoCompleteWindow.py | 81 | 17201 | """
An auto-completion window for IDLE, used by the AutoComplete extension
"""
from Tkinter import *
from idlelib.MultiCall import MC_SHIFT
from idlelib.AutoComplete import COMPLETE_FILES, COMPLETE_ATTRIBUTES
# Tk virtual event names and the concrete event sequences bound to them by
# AutoCompleteWindow (hide window, keypress handling, list interaction).
HIDE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-hide>>"
HIDE_SEQUENCES = ("<FocusOut>", "<ButtonPress>")
KEYPRESS_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keypress>>"
# We need to bind event beyond <Key> so that the function will be called
# before the default specific IDLE function
KEYPRESS_SEQUENCES = ("<Key>", "<Key-BackSpace>", "<Key-Return>", "<Key-Tab>",
                      "<Key-Up>", "<Key-Down>", "<Key-Home>", "<Key-End>",
                      "<Key-Prior>", "<Key-Next>")
KEYRELEASE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keyrelease>>"
KEYRELEASE_SEQUENCE = "<KeyRelease>"
LISTUPDATE_SEQUENCE = "<B1-ButtonRelease>"
WINCONFIG_SEQUENCE = "<Configure>"
DOUBLECLICK_SEQUENCE = "<B1-Double-ButtonRelease>"
class AutoCompleteWindow:
    """A floating Toplevel that lists possible completions for the text
    between self.startindex and the insertion cursor of a Text widget.

    The window tracks further typing, keeps the listbox selection in sync
    with the typed prefix, and writes the chosen completion back into the
    Text widget.  All event bindings are added in show_window() and removed
    again in hide_window().
    """

    def __init__(self, widget):
        # The widget (Text) on which we place the AutoCompleteWindow
        self.widget = widget
        # The widgets we create
        self.autocompletewindow = self.listbox = self.scrollbar = None
        # The default foreground and background of a selection. Saved because
        # they are changed to the regular colors of list items when the
        # completion start is not a prefix of the selected completion
        self.origselforeground = self.origselbackground = None
        # The list of completions
        self.completions = None
        # A list with more completions, or None
        self.morecompletions = None
        # The completion mode. Either AutoComplete.COMPLETE_ATTRIBUTES or
        # AutoComplete.COMPLETE_FILES
        self.mode = None
        # The current completion start, on the text box (a string)
        self.start = None
        # The index of the start of the completion
        self.startindex = None
        # The last typed start, used so that when the selection changes,
        # the new start will be as close as possible to the last typed one.
        self.lasttypedstart = None
        # Do we have an indication that the user wants the completion window
        # (for example, he clicked the list)
        self.userwantswindow = None
        # event ids
        self.hideid = self.keypressid = self.listupdateid = self.winconfigid \
                    = self.keyreleaseid = self.doubleclickid = None
        # Flag set if last keypress was a tab
        self.lastkey_was_tab = False

    def _change_start(self, newstart):
        """Replace the completion start in the Text widget with newstart,
        deleting/inserting only the suffix that actually differs."""
        min_len = min(len(self.start), len(newstart))
        i = 0
        while i < min_len and self.start[i] == newstart[i]:
            i += 1
        if i < len(self.start):
            self.widget.delete("%s+%dc" % (self.startindex, i),
                               "%s+%dc" % (self.startindex, len(self.start)))
        if i < len(newstart):
            self.widget.insert("%s+%dc" % (self.startindex, i),
                               newstart[i:])
        self.start = newstart

    def _binary_search(self, s):
        """Find the first index in self.completions where completions[i] is
        greater or equal to s, or the last index if there is no such
        one."""
        i = 0; j = len(self.completions)
        while j > i:
            m = (i + j) // 2
            if self.completions[m] >= s:
                j = m
            else:
                i = m + 1
        # Clamp so a valid listbox index is always returned.
        return min(i, len(self.completions)-1)

    def _complete_string(self, s):
        """Assuming that s is the prefix of a string in self.completions,
        return the longest string which is a prefix of all the strings which
        s is a prefix of them. If s is not a prefix of a string, return s."""
        first = self._binary_search(s)
        if self.completions[first][:len(s)] != s:
            # There is not even one completion which s is a prefix of.
            return s
        # Find the end of the range of completions where s is a prefix of.
        i = first + 1
        j = len(self.completions)
        while j > i:
            m = (i + j) // 2
            if self.completions[m][:len(s)] != s:
                j = m
            else:
                i = m + 1
        last = i-1
        if first == last: # only one possible completion
            return self.completions[first]
        # We should return the maximum prefix of first and last
        first_comp = self.completions[first]
        last_comp = self.completions[last]
        min_len = min(len(first_comp), len(last_comp))
        i = len(s)
        while i < min_len and first_comp[i] == last_comp[i]:
            i += 1
        return first_comp[:i]

    def _selection_changed(self):
        """Should be called when the selection of the Listbox has changed.
        Updates the Listbox display and calls _change_start."""
        cursel = int(self.listbox.curselection()[0])
        self.listbox.see(cursel)
        lts = self.lasttypedstart
        selstart = self.completions[cursel]
        if self._binary_search(lts) == cursel:
            newstart = lts
        else:
            # Use the longest common prefix of the typed start and the
            # selected completion as the new start.
            min_len = min(len(lts), len(selstart))
            i = 0
            while i < min_len and lts[i] == selstart[i]:
                i += 1
            newstart = selstart[:i]
        self._change_start(newstart)
        if self.completions[cursel][:len(self.start)] == self.start:
            # start is a prefix of the selected completion
            self.listbox.configure(selectbackground=self.origselbackground,
                                   selectforeground=self.origselforeground)
        else:
            # Visually de-emphasize the selection when it no longer matches
            # the typed prefix.
            self.listbox.configure(selectbackground=self.listbox.cget("bg"),
                                   selectforeground=self.listbox.cget("fg"))
            # If there are more completions, show them, and call me again.
            if self.morecompletions:
                self.completions = self.morecompletions
                self.morecompletions = None
                self.listbox.delete(0, END)
                for item in self.completions:
                    self.listbox.insert(END, item)
                self.listbox.select_set(self._binary_search(self.start))
                self._selection_changed()

    def show_window(self, comp_lists, index, complete, mode, userWantsWin):
        """Show the autocomplete list, bind events.
        If complete is True, complete the text, and if there is exactly one
        matching completion, don't open a list."""
        # Handle the start we already have
        self.completions, self.morecompletions = comp_lists
        self.mode = mode
        self.startindex = self.widget.index(index)
        self.start = self.widget.get(self.startindex, "insert")
        if complete:
            completed = self._complete_string(self.start)
            self._change_start(completed)
            i = self._binary_search(completed)
            if self.completions[i] == completed and \
               (i == len(self.completions)-1 or
                self.completions[i+1][:len(completed)] != completed):
                # There is exactly one matching completion
                return
        self.userwantswindow = userWantsWin
        self.lasttypedstart = self.start

        # Put widgets in place
        self.autocompletewindow = acw = Toplevel(self.widget)
        # Put it in a position so that it is not seen.
        acw.wm_geometry("+10000+10000")
        # Make it float
        acw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            acw.tk.call("::tk::unsupported::MacWindowStyle", "style", acw._w,
                        "help", "noActivates")
        except TclError:
            pass
        self.scrollbar = scrollbar = Scrollbar(acw, orient=VERTICAL)
        self.listbox = listbox = Listbox(acw, yscrollcommand=scrollbar.set,
                                         exportselection=False, bg="white")
        for item in self.completions:
            listbox.insert(END, item)
        self.origselforeground = listbox.cget("selectforeground")
        self.origselbackground = listbox.cget("selectbackground")
        scrollbar.config(command=listbox.yview)
        scrollbar.pack(side=RIGHT, fill=Y)
        listbox.pack(side=LEFT, fill=BOTH, expand=True)

        # Initialize the listbox selection
        self.listbox.select_set(self._binary_search(self.start))
        self._selection_changed()

        # bind events
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.keypressid = self.widget.bind(KEYPRESS_VIRTUAL_EVENT_NAME,
                                           self.keypress_event)
        for seq in KEYPRESS_SEQUENCES:
            self.widget.event_add(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
        self.keyreleaseid = self.widget.bind(KEYRELEASE_VIRTUAL_EVENT_NAME,
                                             self.keyrelease_event)
        self.widget.event_add(KEYRELEASE_VIRTUAL_EVENT_NAME,KEYRELEASE_SEQUENCE)
        self.listupdateid = listbox.bind(LISTUPDATE_SEQUENCE,
                                         self.listselect_event)
        self.winconfigid = acw.bind(WINCONFIG_SEQUENCE, self.winconfig_event)
        self.doubleclickid = listbox.bind(DOUBLECLICK_SEQUENCE,
                                          self.doubleclick_event)

    def winconfig_event(self, event):
        """Place the window near the completion start: below the current
        line when there is room, otherwise above it."""
        if not self.is_active():
            return
        # Position the completion list window
        text = self.widget
        text.see(self.startindex)
        x, y, cx, cy = text.bbox(self.startindex)
        acw = self.autocompletewindow
        acw_width, acw_height = acw.winfo_width(), acw.winfo_height()
        text_width, text_height = text.winfo_width(), text.winfo_height()
        new_x = text.winfo_rootx() + min(x, max(0, text_width - acw_width))
        new_y = text.winfo_rooty() + y
        if (text_height - (y + cy) >= acw_height # enough height below
                or y < acw_height): # not enough height above
            # place acw below current line
            new_y += cy
        else:
            # place acw above current line
            new_y -= acw_height
        acw.wm_geometry("+%d+%d" % (new_x, new_y))

    def hide_event(self, event):
        """Close the window on any of the HIDE_SEQUENCES events."""
        if not self.is_active():
            return
        self.hide_window()

    def listselect_event(self, event):
        """Mouse selection in the listbox: adopt the clicked completion."""
        if not self.is_active():
            return
        self.userwantswindow = True
        cursel = int(self.listbox.curselection()[0])
        self._change_start(self.completions[cursel])

    def doubleclick_event(self, event):
        # Put the selected completion in the text, and close the list
        cursel = int(self.listbox.curselection()[0])
        self._change_start(self.completions[cursel])
        self.hide_window()

    def keypress_event(self, event):
        """Handle a key press while the window is open.

        Returns "break" to stop Tk from further processing the key when it
        was fully handled here; returns None to let the event through.
        """
        if not self.is_active():
            return
        keysym = event.keysym
        if hasattr(event, "mc_state"):
            state = event.mc_state
        else:
            state = 0
        if keysym != "Tab":
            self.lastkey_was_tab = False
        if (len(keysym) == 1 or keysym in ("underscore", "BackSpace")
            or (self.mode == COMPLETE_FILES and keysym in
                ("period", "minus"))) \
           and not (state & ~MC_SHIFT):
            # Normal editing of text
            if len(keysym) == 1:
                self._change_start(self.start + keysym)
            elif keysym == "underscore":
                self._change_start(self.start + '_')
            elif keysym == "period":
                self._change_start(self.start + '.')
            elif keysym == "minus":
                self._change_start(self.start + '-')
            else:
                # keysym == "BackSpace"
                if len(self.start) == 0:
                    self.hide_window()
                    return
                self._change_start(self.start[:-1])
            self.lasttypedstart = self.start
            self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
            self.listbox.select_set(self._binary_search(self.start))
            self._selection_changed()
            return "break"
        elif keysym == "Return":
            self.hide_window()
            return
        elif (self.mode == COMPLETE_ATTRIBUTES and keysym in
              ("period", "space", "parenleft", "parenright", "bracketleft",
               "bracketright")) or \
             (self.mode == COMPLETE_FILES and keysym in
              ("slash", "backslash", "quotedbl", "apostrophe")) \
             and not (state & ~MC_SHIFT):
            # If start is a prefix of the selection, but is not '' when
            # completing file names, put the whole
            # selected completion. Anyway, close the list.
            cursel = int(self.listbox.curselection()[0])
            if self.completions[cursel][:len(self.start)] == self.start \
               and (self.mode == COMPLETE_ATTRIBUTES or self.start):
                self._change_start(self.completions[cursel])
            self.hide_window()
            return
        elif keysym in ("Home", "End", "Prior", "Next", "Up", "Down") and \
             not state:
            # Move the selection in the listbox
            self.userwantswindow = True
            cursel = int(self.listbox.curselection()[0])
            if keysym == "Home":
                newsel = 0
            elif keysym == "End":
                newsel = len(self.completions)-1
            elif keysym in ("Prior", "Next"):
                # Page size = number of visible rows in the listbox.
                jump = self.listbox.nearest(self.listbox.winfo_height()) - \
                       self.listbox.nearest(0)
                if keysym == "Prior":
                    newsel = max(0, cursel-jump)
                else:
                    assert keysym == "Next"
                    newsel = min(len(self.completions)-1, cursel+jump)
            elif keysym == "Up":
                newsel = max(0, cursel-1)
            else:
                assert keysym == "Down"
                newsel = min(len(self.completions)-1, cursel+1)
            self.listbox.select_clear(cursel)
            self.listbox.select_set(newsel)
            self._selection_changed()
            self._change_start(self.completions[newsel])
            return "break"
        elif (keysym == "Tab" and not state):
            if self.lastkey_was_tab:
                # two tabs in a row; insert current selection and close acw
                cursel = int(self.listbox.curselection()[0])
                self._change_start(self.completions[cursel])
                self.hide_window()
                return "break"
            else:
                # first tab; let AutoComplete handle the completion
                self.userwantswindow = True
                self.lastkey_was_tab = True
                return
        elif any(s in keysym for s in ("Shift", "Control", "Alt",
                                       "Meta", "Command", "Option")):
            # A modifier key, so ignore
            return
        else:
            # Unknown event, close the window and let it through.
            self.hide_window()
            return

    def keyrelease_event(self, event):
        """Close the window if the insert cursor moved by an event that the
        keypress handler did not see (e.g. a mouse click)."""
        if not self.is_active():
            return
        if self.widget.index("insert") != \
           self.widget.index("%s+%dc" % (self.startindex, len(self.start))):
            # If we didn't catch an event which moved the insert, close window
            self.hide_window()

    def is_active(self):
        """Return True while the completion window exists on screen."""
        return self.autocompletewindow is not None

    def complete(self):
        """Extend the start to the longest common prefix of the matches."""
        self._change_start(self._complete_string(self.start))
        # The selection doesn't change.

    def hide_window(self):
        """Unbind all events and destroy the window widgets (idempotent)."""
        if not self.is_active():
            return

        # unbind events
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None
        for seq in KEYPRESS_SEQUENCES:
            self.widget.event_delete(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(KEYPRESS_VIRTUAL_EVENT_NAME, self.keypressid)
        self.keypressid = None
        self.widget.event_delete(KEYRELEASE_VIRTUAL_EVENT_NAME,
                                 KEYRELEASE_SEQUENCE)
        self.widget.unbind(KEYRELEASE_VIRTUAL_EVENT_NAME, self.keyreleaseid)
        self.keyreleaseid = None
        self.listbox.unbind(LISTUPDATE_SEQUENCE, self.listupdateid)
        self.listupdateid = None
        self.autocompletewindow.unbind(WINCONFIG_SEQUENCE, self.winconfigid)
        self.winconfigid = None

        # destroy widgets
        self.scrollbar.destroy()
        self.scrollbar = None
        self.listbox.destroy()
        self.listbox = None
        self.autocompletewindow.destroy()
        self.autocompletewindow = None
| mit |
kreatorkodi/repository.torrentbr | script.video.F4mProxy/lib/flvlib/tags.py | 99 | 11473 | import os
import struct
import logging
from primitives import *
from constants import *
from astypes import MalformedFLV
from astypes import get_script_data_variable, make_script_data_variable
# Module-level logger for all tag-parsing diagnostics.
log = logging.getLogger('flvlib.tags')

# When True, any non-conformant value aborts parsing with MalformedFLV
# instead of being logged and skipped (see ensure()).
STRICT_PARSING = False
def strict_parser():
    """Return the current value of the module-level STRICT_PARSING flag.

    The flag is looked up at call time, not at import time, so callers can
    toggle flvlib.tags.STRICT_PARSING and have parsing behaviour change
    immediately.  (A plain global name reference is equivalent to the
    previous ``globals()['STRICT_PARSING']`` but idiomatic.)
    """
    return STRICT_PARSING
class EndOfTags(Exception):
    """Signals that the FLV stream contains no further tags."""
def ensure(value, expected, error_msg):
    """Validate a parsed field against its spec-mandated value.

    In strict mode a mismatch raises MalformedFLV(error_msg); otherwise it
    is only logged and parsing continues.
    """
    if value != expected:
        if strict_parser():
            raise MalformedFLV(error_msg)
        log.warning('Skipping non-conformant value in FLV file')
class Tag(object):
    """Base class for one FLV tag.

    parse() assumes the TagType byte has already been consumed by the
    caller (FLV.get_next_tag), reads the common tag header, delegates the
    body to parse_tag_content(), then validates PreviousTagSize.
    """

    def __init__(self, parent_flv, f):
        self.f = f
        self.parent_flv = parent_flv
        self.offset = None      # file offset of the TagType byte
        self.size = None        # DataSize field (body length in bytes)
        self.timestamp = None   # milliseconds, signed 32-bit extended

    def parse(self):
        """Parse the tag header, the body and the trailing PreviousTagSize."""
        f = self.f

        # -1 because the caller already consumed the TagType byte.
        self.offset = f.tell() - 1

        # DataSize
        self.size = get_ui24(f)

        # Timestamp + TimestampExtended
        self.timestamp = get_si32_extended(f)

        if self.timestamp < 0:
            log.warning("The tag at offset 0x%08X has negative timestamp: %d",
                        self.offset, self.timestamp)

        # StreamID
        stream_id = get_ui24(f)
        ensure(stream_id, 0, "StreamID non zero: 0x%06X" % stream_id)

        # The rest gets parsed in the subclass, it should move f to the
        # correct position to read PreviousTagSize
        self.parse_tag_content()

        previous_tag_size = get_ui32(f)
        ensure(previous_tag_size, self.size + 11,
               "PreviousTagSize of %d (0x%08X) "
               "not equal to actual tag size of %d (0x%08X)" %
               (previous_tag_size, previous_tag_size,
                self.size + 11, self.size + 11))

    def parse_tag_content(self):
        """Parse the tag body; subclasses override.  Must leave f positioned
        immediately after the body."""
        # By default just seek past the tag content
        self.f.seek(self.size, os.SEEK_CUR)
class AudioTag(Tag):
    """An FLV audio tag: decodes the SoundFormat/Rate/Size/Type flag byte
    and, for AAC, the extra AACPacketType byte."""

    def __init__(self, parent_flv, f):
        Tag.__init__(self, parent_flv, f)
        self.sound_format = None
        self.sound_rate = None
        self.sound_size = None
        self.sound_type = None
        self.aac_packet_type = None # always None for non-AAC tags

    def parse_tag_content(self):
        """Decode the audio flag byte(s), then skip the raw sound data."""
        f = self.f

        sound_flags = get_ui8(f)
        read_bytes = 1

        # Flag byte layout: FFFF RRSS T (format / rate / size / type).
        self.sound_format = (sound_flags & 0xF0) >> 4
        self.sound_rate = (sound_flags & 0xC) >> 2
        self.sound_size = (sound_flags & 0x2) >> 1
        self.sound_type = sound_flags & 0x1

        if self.sound_format == SOUND_FORMAT_AAC:
            # AAC packets can be sequence headers or raw data.
            # The former contain codec information needed by the decoder to be
            # able to interpret the rest of the data.
            self.aac_packet_type = get_ui8(f)
            read_bytes += 1
            # AAC always has sampling rate of 44 kHz
            ensure(self.sound_rate, SOUND_RATE_44_KHZ,
                   "AAC sound format with incorrect sound rate: %d" %
                   self.sound_rate)
            # AAC is always stereo
            ensure(self.sound_type, SOUND_TYPE_STEREO,
                   "AAC sound format with incorrect sound type: %d" %
                   self.sound_type)

        if strict_parser():
            try:
                sound_format_to_string[self.sound_format]
            except KeyError:
                raise MalformedFLV("Invalid sound format: %d",
                                   self.sound_format)
            try:
                (self.aac_packet_type and
                 aac_packet_type_to_string[self.aac_packet_type])
            except KeyError:
                raise MalformedFLV("Invalid AAC packet type: %d",
                                   self.aac_packet_type)

        # Skip the remaining (unparsed) sound payload.
        f.seek(self.size - read_bytes, os.SEEK_CUR)

    def __repr__(self):
        if self.offset is None:
            return "<AudioTag unparsed>"
        elif self.aac_packet_type is None:
            return ("<AudioTag at offset 0x%08X, time %d, size %d, %s>" %
                    (self.offset, self.timestamp, self.size,
                     sound_format_to_string.get(self.sound_format, '?')))
        else:
            return ("<AudioTag at offset 0x%08X, time %d, size %d, %s, %s>" %
                    (self.offset, self.timestamp, self.size,
                     sound_format_to_string.get(self.sound_format, '?'),
                     aac_packet_type_to_string.get(self.aac_packet_type, '?')))
class VideoTag(Tag):
    """An FLV video tag: decodes the FrameType/CodecID flag byte and, for
    H.264, the extra AVCPacketType byte."""

    def __init__(self, parent_flv, f):
        Tag.__init__(self, parent_flv, f)
        self.frame_type = None
        self.codec_id = None
        self.h264_packet_type = None # Always None for non-H.264 tags

    def parse_tag_content(self):
        """Decode the video flag byte(s), then skip the raw video data."""
        f = self.f

        video_flags = get_ui8(f)
        read_bytes = 1

        # Flag byte layout: FFFF CCCC (frame type / codec id).
        self.frame_type = (video_flags & 0xF0) >> 4
        self.codec_id = video_flags & 0xF

        if self.codec_id == CODEC_ID_H264:
            # H.264 packets can be sequence headers, NAL units or sequence
            # ends.
            self.h264_packet_type = get_ui8(f)
            read_bytes += 1

        if strict_parser():
            try:
                frame_type_to_string[self.frame_type]
            except KeyError:
                raise MalformedFLV("Invalid frame type: %d", self.frame_type)
            try:
                codec_id_to_string[self.codec_id]
            except KeyError:
                raise MalformedFLV("Invalid codec ID: %d", self.codec_id)
            try:
                (self.h264_packet_type and
                 h264_packet_type_to_string[self.h264_packet_type])
            except KeyError:
                raise MalformedFLV("Invalid H.264 packet type: %d",
                                   self.h264_packet_type)

        # Skip the remaining (unparsed) video payload.
        f.seek(self.size - read_bytes, os.SEEK_CUR)

    def __repr__(self):
        if self.offset is None:
            return "<VideoTag unparsed>"
        elif self.h264_packet_type is None:
            return ("<VideoTag at offset 0x%08X, time %d, size %d, %s (%s)>" %
                    (self.offset, self.timestamp, self.size,
                     codec_id_to_string.get(self.codec_id, '?'),
                     frame_type_to_string.get(self.frame_type, '?')))
        else:
            return ("<VideoTag at offset 0x%08X, "
                    "time %d, size %d, %s (%s), %s>" %
                    (self.offset, self.timestamp, self.size,
                     codec_id_to_string.get(self.codec_id, '?'),
                     frame_type_to_string.get(self.frame_type, '?'),
                     h264_packet_type_to_string.get(
                        self.h264_packet_type, '?')))
class ScriptTag(Tag):
    """An FLV script-data tag (e.g. onMetaData): a string name followed by
    one AS value, parsed via get_script_data_variable."""

    def __init__(self, parent_flv, f):
        Tag.__init__(self, parent_flv, f)
        self.name = None      # the variable name (a string)
        self.variable = None  # the decoded AS value

    def parse_tag_content(self):
        """Parse the script tag body into self.name / self.variable."""
        f = self.f

        # Here there's always a byte with the value of 0x02,
        # which means "string", although the spec says NOTHING
        # about it..
        value_type = get_ui8(f)
        ensure(value_type, 2, "The name of a script tag is not a string")

        # Need to pass the tag end offset, because apparently YouTube
        # doesn't give a *shit* about the FLV spec and just happily
        # ends the onMetaData tag after self.size bytes, instead of
        # ending it with the *required* 0x09 marker. Bastards!

        if strict_parser():
            # If we're strict, just don't pass this info
            tag_end = None
        else:
            # 11 = tag type (1) + data size (3) + timestamp (4) + stream id (3)
            tag_end = self.offset + 11 + self.size
            log.debug("max offset is 0x%08X", tag_end)

        self.name, self.variable = \
            get_script_data_variable(f, max_offset=tag_end)
        log.debug("A script tag with a name of %s and value of %r",
                  self.name, self.variable)

    def __repr__(self):
        if self.offset is None:
            return "<ScriptTag unparsed>"
        else:
            return ("<ScriptTag %s at offset 0x%08X, time %d, size %d>" %
                    (self.name, self.offset, self.timestamp, self.size))
# Maps the TagType byte read from the stream to the Tag subclass that
# knows how to parse that tag's body (see FLV.tag_type_to_class).
tag_to_class = {
    TAG_TYPE_AUDIO: AudioTag,
    TAG_TYPE_VIDEO: VideoTag,
    TAG_TYPE_SCRIPT: ScriptTag
}
class FLV(object):
    """A whole FLV file: parses the file header and iterates over its tags.

    The constructor takes an open, seekable, binary file object; nothing is
    read until parse_header()/iter_tags()/read_tags() is called.
    """

    def __init__(self, f):
        self.f = f
        self.version = None
        self.has_audio = None
        self.has_video = None
        self.tags = []

    def parse_header(self):
        """Read and validate the 9-byte FLV header plus PreviousTagSize0.

        Raises MalformedFLV on a bad signature; other non-conformant fields
        go through ensure() and so depend on STRICT_PARSING.
        """
        f = self.f
        f.seek(0)

        # FLV header
        header = f.read(3)
        if len(header) < 3:
            raise MalformedFLV("The file is shorter than 3 bytes")

        # Do this irrelevant of STRICT_PARSING, to catch bogus files
        if header != "FLV":
            raise MalformedFLV("File signature is incorrect: 0x%X 0x%X 0x%X" %
                               struct.unpack("3B", header))

        # File version
        self.version = get_ui8(f)
        log.debug("File version is %d", self.version)

        # TypeFlags
        flags = get_ui8(f)
        ensure(flags & 0xF8, 0,
               "First TypeFlagsReserved field non zero: 0x%X" % (flags & 0xF8))
        ensure(flags & 0x2, 0,
               "Second TypeFlagsReserved field non zero: 0x%X" % (flags & 0x2))
        self.has_audio = False
        self.has_video = False
        if flags & 0x4:
            self.has_audio = True
        if flags & 0x1:
            self.has_video = True
        log.debug("File %s audio",
                  (self.has_audio and "has") or "does not have")
        log.debug("File %s video",
                  (self.has_video and "has") or "does not have")

        header_size = get_ui32(f)
        log.debug("Header size is %d bytes", header_size)

        # Seek by the declared header size, not 9, to tolerate files with
        # extended headers.
        f.seek(header_size)
        tag_0_size = get_ui32(f)
        ensure(tag_0_size, 0, "PreviousTagSize0 non zero: 0x%08X" % tag_0_size)

    def iter_tags(self):
        """Yield Tag objects lazily until the end of the file."""
        self.parse_header()
        try:
            while True:
                tag = self.get_next_tag()
                yield tag
        except EndOfTags:
            pass

    def read_tags(self):
        """Parse the whole file eagerly into self.tags."""
        self.tags = list(self.iter_tags())

    def get_next_tag(self):
        """Parse and return the next tag; raises EndOfTags at EOF."""
        f = self.f

        try:
            tag_type = get_ui8(f)
        except EndOfFile:
            raise EndOfTags

        tag_klass = self.tag_type_to_class(tag_type)
        tag = tag_klass(self, f)

        tag.parse()

        return tag

    def tag_type_to_class(self, tag_type):
        """Map a TagType byte to its Tag subclass, or raise MalformedFLV."""
        try:
            return tag_to_class[tag_type]
        except KeyError:
            raise MalformedFLV("Invalid tag type: %d", tag_type)
def create_flv_tag(type, data, timestamp=0):
    """Serialize one complete FLV tag.

    Produces TagType + DataSize + Timestamp(+Extended) + StreamID(0),
    then the body, then the trailing PreviousTagSize field (body + 11
    header bytes).
    """
    payload_size = len(data)
    header = (struct.pack("B", type) + make_ui24(payload_size) +
              make_si32_extended(timestamp) + make_ui24(0))
    return header + data + make_ui32(payload_size + 11)
def create_script_tag(name, data, timestamp=0):
    """Build a script-data FLV tag: a 0x02 ("string") marker, the variable
    name and its AS-encoded value, wrapped in a standard tag."""
    body = make_ui8(2) + make_script_data_variable(name, data)
    return create_flv_tag(TAG_TYPE_SCRIPT, body, timestamp)
def create_flv_header(has_audio=True, has_video=True):
    """Build the 9-byte FLV file header (signature, version 1, TypeFlags,
    header size) followed by the mandatory PreviousTagSize0 of zero."""
    type_flags = (0x4 if has_audio else 0) | (0x1 if has_video else 0)
    return ''.join(['FLV', make_ui8(1), make_ui8(type_flags),
                    make_ui32(9), make_ui32(0)])
| gpl-2.0 |
dcramer/django-compositepks | django/core/serializers/pyyaml.py | 17 | 1755 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import yaml
from django.db import models
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
class Serializer(PythonSerializer):
    """
    Convert a queryset to YAML.
    """
    # Unlike the python serializer, this output format is meant for users.
    internal_use_only = False

    def handle_field(self, obj, field):
        """Serialize one model field, special-casing TimeField values."""
        # A nasty special case: base YAML doesn't support serialization of time
        # types (as opposed to dates or datetimes, which it does support). Since
        # we want to use the "safe" serializer for better interoperability, we
        # need to do something with those pesky times. Converting 'em to strings
        # isn't perfect, but it's better than a "!!python/time" type which would
        # halt deserialization under any other language.
        if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
            self._current[field.name] = str(getattr(obj, field.name))
        else:
            super(Serializer, self).handle_field(obj, field)

    def end_serialization(self):
        """Dump the accumulated objects to the stream as safe YAML."""
        # 'stream' and 'fields' are serializer options, not yaml.safe_dump
        # keyword arguments, so strip them before passing options through.
        self.options.pop('stream', None)
        self.options.pop('fields', None)
        yaml.safe_dump(self.objects, self.stream, **self.options)

    def getvalue(self):
        """Return everything written to the in-memory stream so far."""
        return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data.

    Yields deserialized objects produced by the python deserializer.
    """
    if isinstance(stream_or_string, basestring):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    # SECURITY NOTE(review): yaml.load without an explicit safe Loader can
    # construct arbitrary Python objects.  If this stream can ever carry
    # untrusted data, yaml.safe_load should be used instead -- confirm
    # callers before changing, as safe_load rejects python-specific tags.
    for obj in PythonDeserializer(yaml.load(stream)):
        yield obj
| bsd-3-clause |
ncultra/qemu | scripts/tracetool/backend/ftrace.py | 102 | 1351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ftrace built-in backend.
"""
__author__ = "Eiichi Tsukata <eiichi.tsukata.xh@hitachi.com>"
__copyright__ = "Copyright (C) 2013 Hitachi, Ltd."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
# Advertise this module to tracetool as a user-selectable backend.
PUBLIC = True
def generate_h_begin(events):
    """Emit the includes required once at the top of the generated header."""
    out('#include "trace/ftrace.h"',
        '#include "trace/control.h"',
        '')
def generate_h(event):
    """Emit the C body of one trace-point: format the event into a bounded
    buffer and write it to the ftrace trace_marker fd when enabled."""
    # Prepend ", " so the names can be appended directly after the format
    # string arguments; empty when the event takes no arguments.
    argnames = ", ".join(event.args.names())
    if len(event.args) > 0:
        argnames = ", " + argnames

    out('    {',
        '        char ftrace_buf[MAX_TRACE_STRLEN];',
        '        int unused __attribute__ ((unused));',
        '        int trlen;',
        '        if (trace_event_get_state(%(event_id)s)) {',
        '            trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
        '                             "%(name)s " %(fmt)s "\\n" %(argnames)s);',
        '            trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
        '            unused = write(trace_marker_fd, ftrace_buf, trlen);',
        '        }',
        '    }',
        name=event.name,
        args=event.args,
        event_id="TRACE_" + event.name.upper(),
        fmt=event.fmt.rstrip("\n"),
        argnames=argnames)
| gpl-2.0 |
NorfolkDataSci/presentations | 2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/nlp/nltk/classify/weka.py | 7 | 12625 | # Natural Language Toolkit: Interface to Weka Classsifiers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function
import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin
from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI
# Cached path to weka.jar; stays None until config_weka() locates it.
_weka_classpath = None
# Default filesystem locations searched for weka.jar (WEKAHOME, if set,
# is consulted first by config_weka()).
_weka_search = ['.',
                '/usr/share/weka',
                '/usr/local/share/weka',
                '/usr/lib/weka',
                '/usr/local/lib/weka',]
def config_weka(classpath=None):
    """Locate weka.jar and cache its path in the module-level _weka_classpath.

    :param classpath: explicit path to weka.jar; if None, the WEKAHOME
        environment variable and then the default _weka_search locations
        are tried.
    :raises LookupError: if weka.jar cannot be found anywhere.
    """
    global _weka_classpath

    # Make sure java's configured first.
    config_java()

    if classpath is not None:
        _weka_classpath = classpath

    if _weka_classpath is None:
        # Copy the default search path: the original code inserted WEKAHOME
        # into the shared module-level list itself, so every call with
        # WEKAHOME set prepended another entry to _weka_search.
        searchpath = list(_weka_search)
        if 'WEKAHOME' in os.environ:
            searchpath.insert(0, os.environ['WEKAHOME'])

        # Note: no break -- a later match overwrites an earlier one, so the
        # last directory in the search path wins (preserved behaviour).
        for path in searchpath:
            if os.path.exists(os.path.join(path, 'weka.jar')):
                _weka_classpath = os.path.join(path, 'weka.jar')
                version = _check_weka_version(_weka_classpath)
                if version:
                    print(('[Found Weka: %s (version %s)]' %
                           (_weka_classpath, version)))
                else:
                    print('[Found Weka: %s]' % _weka_classpath)

    if _weka_classpath is None:
        raise LookupError('Unable to find weka.jar! Use config_weka() '
                          'or set the WEKAHOME environment variable. '
                          'For more information about Weka, please see '
                          'http://www.cs.waikato.ac.nz/ml/weka/')
def _check_weka_version(jar):
try:
zf = zipfile.ZipFile(jar)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
try:
try:
return zf.read('weka/core/version.txt')
except KeyError:
return None
finally:
zf.close()
class WekaClassifier(ClassifierI):
    """A classifier backed by an external Weka model file.

    Classification shells out to the `java` command with weka.jar on the
    classpath; featuresets are exchanged through temporary ARFF files.
    """

    def __init__(self, formatter, model_filename):
        # formatter: ARFF_Formatter matching the model's training features.
        self._formatter = formatter
        # model_filename: path of the serialized Weka model on disk.
        self._model = model_filename

    def prob_classify_many(self, featuresets):
        """Return a probability distribution per featureset."""
        return self._classify_many(featuresets, ['-p', '0', '-distribution'])

    def classify_many(self, featuresets):
        """Return the single best label per featureset."""
        return self._classify_many(featuresets, ['-p', '0'])

    def _classify_many(self, featuresets, options):
        """Run Weka on the featuresets and parse its stdout."""
        # Make sure we can find java & weka.
        config_weka()
        temp_dir = tempfile.mkdtemp()
        try:
            # Write the test data file.
            test_filename = os.path.join(temp_dir, 'test.arff')
            self._formatter.write(test_filename, featuresets)

            # Call weka to classify the data.
            cmd = ['weka.classifiers.bayes.NaiveBayes',
                   '-l', self._model, '-T', test_filename] + options
            (stdout, stderr) = java(cmd, classpath=_weka_classpath,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)

            # Check if something went wrong:
            if stderr and not stdout:
                if 'Illegal options: -distribution' in stderr:
                    raise ValueError('The installed version of weka does '
                                     'not support probability distribution '
                                     'output.')
                else:
                    raise ValueError('Weka failed to generate output:\n%s'
                                     % stderr)

            # Parse weka's output.
            return self.parse_weka_output(stdout.decode(stdin.encoding).split('\n'))

        finally:
            # Always clean up the temporary ARFF directory.
            for f in os.listdir(temp_dir):
                os.remove(os.path.join(temp_dir, f))
            os.rmdir(temp_dir)

    def parse_weka_distribution(self, s):
        """Parse one '*'-marked probability column into a DictionaryProbDist."""
        probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
        probs = dict(zip(self._formatter.labels(), probs))
        return DictionaryProbDist(probs)

    def parse_weka_output(self, lines):
        """Parse Weka's stdout into labels or probability distributions.

        Handles the 'prediction' and 'distribution' tabular formats; raises
        ValueError for unrecognized output.
        """
        # Strip unwanted text from stdout
        for i,line in enumerate(lines):
            if line.strip().startswith("inst#"):
                lines = lines[i:]
                break

        if lines[0].split() == ['inst#', 'actual', 'predicted',
                                'error', 'prediction']:
            return [line.split()[2].split(':')[1]
                    for line in lines[1:] if line.strip()]
        elif lines[0].split() == ['inst#', 'actual', 'predicted',
                                  'error', 'distribution']:
            return [self.parse_weka_distribution(line.split()[-1])
                    for line in lines[1:] if line.strip()]

        # is this safe:?
        elif re.match(r'^0 \w+ [01]\.[0-9]* \?\s*$', lines[0]):
            return [line.split()[1] for line in lines if line.strip()]

        else:
            for line in lines[:10]:
                print(line)
            raise ValueError('Unhandled output format -- your version '
                             'of weka may not be supported.\n'
                             '  Header: %s' % lines[0])

    # [xx] full list of classifiers (some may be abstract?):
    # ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
    # DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
    # JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
    # LogisticBase, M5Base, MultilayerPerceptron,
    # MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
    # NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
    # PreConstructedLinearModel, Prism, RandomForest,
    # RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
    # RuleNode, SimpleLinearRegression, SimpleLogistic,
    # SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
    # VotedPerceptron, Winnow, ZeroR

    # Short user-facing names for the supported Weka classifier classes.
    _CLASSIFIER_CLASS = {
        'naivebayes': 'weka.classifiers.bayes.NaiveBayes',
        'C4.5': 'weka.classifiers.trees.J48',
        'log_regression': 'weka.classifiers.functions.Logistic',
        'svm': 'weka.classifiers.functions.SMO',
        'kstar': 'weka.classifiers.lazy.KStar',
        'ripper': 'weka.classifiers.rules.JRip',
        }

    @classmethod
    def train(cls, model_filename, featuresets,
              classifier='naivebayes', options=[], quiet=True):
        """Train a Weka model from featuresets and return a WekaClassifier.

        `classifier` may be a short name from _CLASSIFIER_CLASS or a full
        Weka java class name; the model is saved to model_filename.
        """
        # Make sure we can find java & weka.
        config_weka()

        # Build an ARFF formatter.
        formatter = ARFF_Formatter.from_train(featuresets)

        temp_dir = tempfile.mkdtemp()
        try:
            # Write the training data file.
            train_filename = os.path.join(temp_dir, 'train.arff')
            formatter.write(train_filename, featuresets)

            if classifier in cls._CLASSIFIER_CLASS:
                javaclass = cls._CLASSIFIER_CLASS[classifier]
            elif classifier in cls._CLASSIFIER_CLASS.values():
                javaclass = classifier
            else:
                raise ValueError('Unknown classifier %s' % classifier)

            # Train the weka model.
            cmd = [javaclass, '-d', model_filename, '-t', train_filename]
            cmd += list(options)
            if quiet:
                stdout = subprocess.PIPE
            else: stdout = None
            java(cmd, classpath=_weka_classpath, stdout=stdout)

            # Return the new classifier.
            return WekaClassifier(formatter, model_filename)

        finally:
            # Always clean up the temporary ARFF directory.
            for f in os.listdir(temp_dir):
                os.remove(os.path.join(temp_dir, f))
            os.rmdir(temp_dir)
class ARFF_Formatter:
    """
    Converts featuresets and labeled featuresets to ARFF-formatted
    strings, appropriate for input into Weka.

    Features and classes can be specified manually in the constructor, or may
    be determined from data using ``from_train``.
    """

    def __init__(self, labels, features):
        """
        :param labels: A list of all class labels that can be generated.
        :param features: A list of feature specifications, where
            each feature specification is a tuple (fname, ftype);
            and ftype is an ARFF type string such as NUMERIC or
            STRING.
        """
        self._labels = labels
        self._features = features

    def format(self, tokens):
        """Returns a string representation of ARFF output for the given data."""
        return self.header_section() + self.data_section(tokens)

    def labels(self):
        """Returns the list of classes."""
        return list(self._labels)

    def write(self, outfile, tokens):
        """Writes ARFF data to a file for the given data.

        Accepts either a filename or an open file-like object.  NOTE(review):
        the stream is closed in both cases, including caller-provided ones --
        preserved from the original behaviour.
        """
        if not hasattr(outfile, 'write'):
            outfile = open(outfile, 'w')
        outfile.write(self.format(tokens))
        outfile.close()

    @staticmethod
    def from_train(tokens):
        """
        Constructs an ARFF_Formatter instance with class labels and feature
        types determined from the given data. Handles boolean, numeric and
        string (note: not nominal) types.

        :raises ValueError: for unsupported feature value types, or when a
            feature appears with inconsistent types.
        """
        # Find the set of all attested labels.
        labels = set(label for (tok, label) in tokens)

        # Determine the types of all features.
        features = {}
        for tok, label in tokens:
            for (fname, fval) in tok.items():
                if issubclass(type(fval), bool):
                    ftype = '{True, False}'
                elif issubclass(type(fval), (compat.integer_types, float, bool)):
                    ftype = 'NUMERIC'
                elif issubclass(type(fval), compat.string_types):
                    ftype = 'STRING'
                elif fval is None:
                    continue # can't tell the type.
                else:
                    # Bug fix: the original formatted `ftype` here, which is
                    # unbound on the first feature (NameError) or stale from
                    # a previous iteration; report the offending value's type.
                    raise ValueError('Unsupported value type %r' % type(fval))

                if features.get(fname, ftype) != ftype:
                    raise ValueError('Inconsistent type for %s' % fname)
                features[fname] = ftype
        features = sorted(features.items())

        return ARFF_Formatter(labels, features)

    def header_section(self):
        """Returns an ARFF header as a string."""
        # Header comment.
        s = ('% Weka ARFF file\n' +
             '% Generated automatically by NLTK\n' +
             '%% %s\n\n' % time.ctime())

        # Relation name
        s += '@RELATION rel\n\n'

        # Input attribute specifications
        for fname, ftype in self._features:
            s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)

        # Label attribute specification
        s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))

        return s

    def data_section(self, tokens, labeled=None):
        """
        Returns the ARFF data section for the given data.

        :param tokens: a list of featuresets (dicts) or labelled featuresets
            which are tuples (featureset, label).
        :param labeled: Indicates whether the given tokens are labeled
            or not.  If None, then the tokens will be assumed to be
            labeled if the first token's value is a tuple or list.
        """
        # Check if the tokens are labeled or unlabeled.  If unlabeled,
        # then use 'None'
        if labeled is None:
            labeled = tokens and isinstance(tokens[0], (tuple, list))
        if not labeled:
            tokens = [(tok, None) for tok in tokens]

        # Data section
        s = '\n@DATA\n'
        for (tok, label) in tokens:
            for fname, ftype in self._features:
                s += '%s,' % self._fmt_arff_val(tok.get(fname))
            s += '%s\n' % self._fmt_arff_val(label)

        return s

    def _fmt_arff_val(self, fval):
        """Format one value for an ARFF @DATA row ('?' for missing)."""
        if fval is None:
            return '?'
        elif isinstance(fval, (bool, compat.integer_types)):
            return '%s' % fval
        elif isinstance(fval, float):
            return '%r' % fval
        else:
            return '%r' % fval
if __name__ == '__main__':
    # Demo: train a C4.5 classifier on the names corpus and report accuracy.
    from nltk.classify.util import names_demo, binary_names_demo_features
    def make_classifier(featuresets):
        # Model file is written to a fixed temp path for the demo.
        return WekaClassifier.train('/tmp/name.model', featuresets,
                                    'C4.5')
    classifier = names_demo(make_classifier, binary_names_demo_features)
| mit |
jalonsob/Informes | grimoirelib_alch/aux/reports.py | 4 | 3048 | #! /usr/bin/python
# -*- coding: utf-8 -*-
## Copyright (C) 2014 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
## Common code to easy the production of reports.
##
## Authors:
## Jesus M. Gonzalez-Barahona <jgb@bitergia.com>
##
from os.path import join
from jsonpickle import encode, set_encoder_options
import codecs
def produce_json (filename, data, compact = True):
    """Produce JSON content (data) into a file (filename).

    Parameters
    ----------

    filename: string
       Name of file to write the content.
    data: any
       Content to write in JSON format. It has to be ready to pack using
       jsonpickle.encode.
    compact: bool
       If True, write single-line JSON; otherwise pretty-print with
       sorted keys and 4-space indentation.

    """
    if compact:
        # Produce compact JSON output
        # NOTE(review): separators=(',', ': ') still emits a space after
        # each colon; fully compact JSON would be (',', ':') -- confirm
        # whether the extra space is intended before changing output.
        set_encoder_options('json', separators=(',', ': '),
                            ensure_ascii=False,
                            encoding="utf8")
    else:
        # Produce pretty JSON output
        set_encoder_options('json', sort_keys=True, indent=4,
                            separators=(',', ': '),
                            ensure_ascii=False,
                            encoding="utf8")
    # unpicklable=False emits plain JSON (no py/object type tags).
    data_json = encode(data, unpicklable=False)
    with codecs.open(filename, "w", "utf-8") as file:
        file.write(data_json)
def create_report (report_files, destdir):
    """Create report, by producing a collection of JSON files

    Parameters
    ----------

    report_files: dictionary
       Keys are the names of JSON files to produce, values are the
       data to include in those JSON files.
    destdir: str
       Name of the destination directory to write all JSON files

    """
    # NOTE: Python 2 print statements -- this module is Python 2 only.
    for file in report_files:
        print "Producing file: ", join (destdir, file)
        produce_json (join (destdir, file), report_files[file])
def add_report(report, to_add):
    """Merge the files (and their data) of *to_add* into *report*.

    Parameters
    ----------

    report: dictionary
       Base report. Keys are the names of JSON files to produce, values
       are the data to include in those JSON files.  Modified in place.
    to_add: dictionary
       Report to add. Same format as report. Keys in to_add replace the
       same keys in report, or are simply added to it.

    Returns
    -------
    The updated *report* dictionary.
    """
    # dict.update performs exactly the key-by-key overwrite the report
    # format requires.
    report.update(to_add)
    return report
| gpl-3.0 |
OriHoch/pysiogame | game_boards/game064.py | 1 | 18746 | # -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.simple_vector as sv
import pygame
import copy
import classes.board
import random
from math import pi,cos,acos,sin,asin,sqrt
class Board(gd.BoardGame):
    """'Set the clock' activity.

    A target time is shown as text and as HH:MM digits; the player drags
    the analogue clock's hands (or taps the dial) until the clock shows
    that time.  Levels differ in which minute/hour markings are visible
    (see the per-level `data` table in create_game_objects).
    """
    def __init__(self, mainloop, speaker, config, screen_w, screen_h):
        # lc.Level(self, mainloop, 12, 8): presumably (levels, games) --
        # TODO confirm against lc.Level's signature.
        self.level = lc.Level(self,mainloop,12,8)
        gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,19,10)
    def create_game_objects(self, level = 1):
        """Build all board objects for the current level: header labels,
        the HH:MM answer fields, the clock canvas and the initial hands."""
        self.vis_buttons = [1,1,1,1,1,1,1,0,0]
        self.mainloop.info.hide_buttonsa(self.vis_buttons)
        self.hand_id = 0  # 0 = nothing dragged, 1 = hour hand, 2 = minute hand
        self.hand_coords = [[],[]]
        self.board.draw_grid = False
        if self.mainloop.scheme is not None:
            color1 = self.mainloop.scheme.color1 #bright side of short hand
            color3 = self.mainloop.scheme.color3 #inner font color
            color5 = self.mainloop.scheme.color5 #dark side of short hand
            color7 = self.mainloop.scheme.color7 #inner circle filling
            color2 = self.mainloop.scheme.color2 #bright side of long hand
            color4 = self.mainloop.scheme.color4 #ex.hsv_to_rgb(170,255,255)#outer font color
            color6 = self.mainloop.scheme.color6 #dark side of long hand
            color8 = self.mainloop.scheme.color8 #outer circle filling
            color = self.mainloop.scheme.u_color
            white = self.mainloop.scheme.u_color
            gray = (100,100,100)
        else:
            color1 = ex.hsv_to_rgb(225,70,230)
            color3 = ex.hsv_to_rgb(225,255,255)
            color5 = ex.hsv_to_rgb(225,180,240)
            color7 = ex.hsv_to_rgb(225,10,255)
            color2 = ex.hsv_to_rgb(170,70,230)
            color4 = ex.hsv_to_rgb(170,255,255)
            color6 = ex.hsv_to_rgb(170,180,240)
            color8 = ex.hsv_to_rgb(170,10,255)
            color = (255,255,255)
            white = (255,255,255)
            gray = (100,100,100)
        self.colors = [color1,color2]
        self.colors2 = [color3,color4]
        self.colors3 = [color5,color6]
        self.colors4 = [color7,color8]
        # data = [x_count, y_count, show_outer_ring, show_minutes, show_24h,
        #         show_only_quarters_h, show_only_quarters_m,
        #         show_only_fives_m, show_only_spare_variable,
        #         show_highlight, show_hour_offset, games_per_lvl]
        # (indices 2..11 are unpacked into attributes below).
        if self.level.lvl == 1:
            data = [19,10,True,True,False,False,True,False,False,True,True,15]
            h_pool = range(1,13)
            m_pool = range(0,60,15)
        elif self.level.lvl == 2:
            data = [19,10,True,True,False,False,False,True,False,True,True,15]
            h_pool = range(1,13)
            m_pool = range(0,60,5)
        elif self.level.lvl == 3:
            data = [19,10,True,True,False,False,False,False,False,True,True,15]
            h_pool = range(1,13)
            m_pool = range(0,60)
        elif self.level.lvl == 4:
            data = [19,10,True,True,False,False,False,False,False,False,True,20]
            h_pool = range(1,13)
            m_pool = range(0,60)
        elif self.level.lvl == 5:
            data = [19,10,True,True,False,False,False,True,False,False,True,20]
            h_pool = range(1,13)
            m_pool = range(0,60)
        elif self.level.lvl == 6:
            data = [19,10,True,True,False,False,True,False,False,False,True,20]
            h_pool = range(1,13)
            m_pool = range(0,60)
        elif self.level.lvl == 7:
            data = [19,10,True,False,False,False,False,False,False,False,True,25]
            h_pool = range(1,13)
            m_pool = range(0,60)
        elif self.level.lvl == 8:
            data = [19,10,True,False,False,True,False,False,False,False,True,25]
            h_pool = range(1,13)
            m_pool = range(0,60)
        # Higher levels are worth more points.
        self.points = self.level.lvl // 2 + 1
        #visual display properties
        self.show_outer_ring = data[2]
        self.show_minutes = data[3]
        self.show_24h = data[4]
        self.show_only_quarters_h = data[5]
        self.show_only_quarters_m = data[6]
        self.show_only_fives_m = data[7]
        self.show_only_spare_variable = data[8]
        self.show_highlight = data[9]
        self.show_hour_offset = data[10]
        self.level.games_per_lvl = data[11]
        # Pick the target time [hour, minute] from the level's pools.
        tt = [random.choice(h_pool), random.choice(m_pool)]
        self.target_time = tt
        self.text_string = self.lang.time2str(tt[0],tt[1])
        # The clock always starts at 6:00.
        self.time = [6,0]
        self.tm = self.time[:]
        self.digits = ["0","1","2","3","4","5","6","7","8","9"]
        x_count = self.get_x_count(data[1],even=False)
        if x_count > data[0]:
            data[0] = x_count
        self.font_size = 0
        self.data = data
        self.layout.update_layout(data[0],data[1])
        scale = self.layout.scale
        self.board.level_start(data[0],data[1],self.layout.scale)
        self.size = self.board.scale*10
        ans_offset = 10+(data[0]-15)//2
        self.board.add_unit(10,0,data[0]-10,2,classes.board.Label,self.lang.d["Set_clock"],white,"",2)
        self.board.units[-1].font_color = gray
        self.board.add_unit(10,4,data[0]-10,2,classes.board.Label,self.lang.d["Set_clock_instr"],white,"",2)
        self.board.units[-1].font_color = gray
        # Target time rendered as "HH : MM" in three separate labels.
        self.board.add_unit(ans_offset,2,2,1,classes.board.Label,"%02d" % self.target_time[0],white,"",0)
        self.ans_h = self.board.units[-1]
        self.board.add_unit(ans_offset+2,2,1,1,classes.board.Label,":",white,"",0)
        self.board.add_unit(ans_offset+3,2,2,1,classes.board.Label,"%02d" % self.target_time[1],white,"",0)
        self.ans_m = self.board.units[-1]
        self.ans_h.align = 2
        self.ans_m.align = 1
        self.ans_h.immobilize()
        self.ans_m.immobilize()
        self.ans_h.font_color = color3
        self.ans_m.font_color = color4
        self.center = [self.size//2,self.size//2]
        # The clock face is painted onto this 10x10-cell Ship unit.
        self.board.add_unit(0,0,10,10,classes.board.Ship,"",white,"",self.font_size)
        self.clock_canvas = self.board.ships[-1]
        self.board.active_ship = self.clock_canvas.unit_id
        self.clock_canvas.font = self.clock_canvas.board.font_sizes[2]
        self.clock_canvas.font2 = self.clock_canvas.board.font_sizes[7]
        self.clock_canvas.font3 = self.clock_canvas.board.font_sizes[26]
        self.clock_canvas.immobilize()
        self.board.add_unit(10,3,data[0]-10,1,classes.board.Letter,self.text_string,white,"",4)
        self.board.ships[-1].immobilize()
        self.board.ships[-1].font_color = gray
        if self.lang.lang in ["ru","he"]:
            # These languages need a separate spoken form of the time.
            spk_txt = self.lang.time2spk(tt[0],tt[1])
            self.board.ships[-1].speaker_val = spk_txt
            self.board.ships[-1].speaker_val_update = False
        self.canvas = pygame.Surface([self.size, self.size-1])
        if self.mainloop.scheme is not None:
            self.canvas.fill(self.mainloop.scheme.u_color)
        else:
            self.canvas.fill((255,255,255))
        self.hands_vars()
        self.draw_hands()
        self.clock_canvas.hidden_value = [2,3]#numbers[i]
        self.clock_canvas.font_color = color2
        self.clock_canvas.painting = self.canvas.copy()
    def hands_vars(self):
        """Pre-compute the angular steps and hand radii used by draw_hands."""
        numbers = [2,2]
        self.angle_step_12 = 2*pi/12  # one hour step, radians
        self.angle_step_60 = 2*pi/60  # one minute step, radians
        self.angle_start= -pi/2  # 12 o'clock points straight up
        angle_arc_start = -pi/2
        self.r = self.size//3+self.size//10
        # rs = [hour-hand length, minute-hand length, hour-ring radius]
        self.rs = [self.r*0.6, self.r*0.85,self.r*0.6]
    def draw_hands(self):
        """Render the full clock face (rings, markings, digits) and both
        hands for the current self.time onto self.canvas."""
        if self.show_hour_offset:
            # Shift the hour hand proportionally to the minutes past the hour.
            a1 = self.angle_start + (2*pi/12)*self.time[0] + (self.angle_step_12*(2*pi/60)*self.time[1])/(2*pi)
        else:
            a1 = self.angle_start + (2*pi/12)*self.time[0]
        a2 = self.angle_start + (2*pi/60)*self.time[1]
        self.angles = [a1,a2]
        rs = self.rs
        time = self.time
        if self.show_outer_ring:
            pygame.draw.circle(self.canvas,self.colors4[1],self.center,int(rs[1]+10*self.layout.scale/62),0)
            pygame.draw.circle(self.canvas,self.colors2[1],self.center,int(rs[1]+10*self.layout.scale/62),1)
        pygame.draw.circle(self.canvas,self.colors4[0],self.center,int(rs[2]+10*self.layout.scale/62),0)
        pygame.draw.circle(self.canvas,self.colors2[0],self.center,int(rs[2]+10*self.layout.scale/62),1)
        if self.show_outer_ring:
            # Minute ring: digits and/or tick marks for the 60 minutes.
            for i in range(60):
                val = str(i+1)
                if self.show_only_quarters_m:
                    if (i+1)%15 != 0:
                        val = ""
                elif self.show_only_fives_m:
                    if (i+1)%5 != 0:
                        val = ""
                if i == 59:
                    val = "0"
                a = self.angle_start + self.angle_step_60*(i+1)
                if self.show_minutes:
                    font_size = self.clock_canvas.font3.size(val)
                    # Highlight the digit matching the current minute.
                    if not self.show_highlight or (i+1 == time[1] or (time[1] == 0 and i==59)):
                        text = self.clock_canvas.font3.render("%s" % (val), 1, self.colors2[1])
                    else:
                        text = self.clock_canvas.font3.render("%s" % (val), 1, self.colors[1])
                    x3=(rs[1]+30*self.layout.scale/62+font_size[1]//2)*cos(a)+self.center[0] - font_size[0] / 2
                    y3=(rs[1]+30*self.layout.scale/62+font_size[1]//2)*sin(a)+self.center[1] - font_size[1] / 2
                    self.canvas.blit(text, (x3,y3))
                    if self.show_only_quarters_m or self.show_only_fives_m:
                        # Longer marks at quarters, medium at fives.
                        if (i+1)%15 == 0:
                            marklen = 30*self.layout.scale/62
                        elif (i+1)%5 == 0:
                            marklen = 25*self.layout.scale/62
                        else:
                            marklen = 15*self.layout.scale/62
                    else:
                        marklen = 25*self.layout.scale/62
                else:
                    if (i+1)%15 == 0:
                        marklen = 30*self.layout.scale/62
                    elif (i+1)%5 == 0:
                        marklen = 25*self.layout.scale/62
                    else:
                        marklen = 15*self.layout.scale/62
                if self.show_outer_ring:
                    x1=(rs[1]+10*self.layout.scale/62)*cos(a)+self.center[0]
                    y1=(rs[1]+10*self.layout.scale/62)*sin(a)+self.center[1]
                    x2=(rs[1]+marklen)*cos(a)+self.center[0]
                    y2=(rs[1]+marklen)*sin(a)+self.center[1]
                    pygame.draw.aaline(self.canvas, self.colors2[1], [x1,y1],[x2,y2])
        # Hour ring: 12 tick marks and digits, optionally 13-24 inside.
        for i in range(12):
            val = str(i+1)
            if self.show_only_quarters_h:
                if (i+1)%3 != 0:
                    val = ""
            a = self.angle_start + self.angle_step_12*(i+1)
            x1=(rs[2]+10*self.layout.scale/62)*cos(a)+self.center[0]
            y1=(rs[2]+10*self.layout.scale/62)*sin(a)+self.center[1]
            x2=(rs[2]+20*self.layout.scale/62)*cos(a)+self.center[0]
            y2=(rs[2]+20*self.layout.scale/62)*sin(a)+self.center[1]
            pygame.draw.aaline(self.canvas, self.colors2[0], [x1,y1],[x2,y2])
            font_size = self.clock_canvas.font.size(val)
            if not self.show_highlight or i+1 == time[0]:
                text = self.clock_canvas.font.render("%s" % (val), 1, self.colors2[0])
            else:
                text = self.clock_canvas.font.render("%s" % (val), 1, self.colors[0])
            x3=(rs[2]+20*self.layout.scale/62+font_size[1]//2)*cos(a)+self.center[0] - font_size[0] / 2
            y3=(rs[2]+20*self.layout.scale/62+font_size[1]//2)*sin(a)+self.center[1] - font_size[1] / 2
            self.canvas.blit(text, (x3,y3))
            if self.show_24h:
                # 24-hour digits drawn on an inner ring; 24 shown as 0.
                if i+13 == 24:
                    val = "0"
                    v = 0
                else:
                    val = str(i+13)
                    v = i + 13
                font_size = self.clock_canvas.font2.size(val)
                if not self.show_highlight or v == time[0]:
                    text = self.clock_canvas.font2.render("%s" % (val), 1, self.colors2[0])
                else:
                    text = self.clock_canvas.font2.render("%s" % (val), 1, self.colors[0])
                x3=(rs[0]-10*self.layout.scale/62+font_size[1]//2)*cos(a)+self.center[0] - font_size[0] / 2
                y3=(rs[0]-10*self.layout.scale/62+font_size[1]//2)*sin(a)+self.center[1] - font_size[1] / 2
                self.canvas.blit(text, (x3,y3))
        # Each hand is a quadrilateral: tail, side, tip, side.
        hand_width = [self.r//14,self.r//18]
        start_offset = [self.size//10,self.size//12]
        for i in range(2):
            #angle for line
            angle = self.angles[i]#angle_start + angle_step*i
            x0=self.center[0] - start_offset[i]*cos(angle)
            y0=self.center[1] - start_offset[i]*sin(angle)
            # Calculate the x,y for the end point
            x1=rs[i]*cos(angle)+self.center[0]
            y1=rs[i]*sin(angle)+self.center[1]
            x2=hand_width[i]*cos(angle-pi/2)+self.center[0]
            y2=hand_width[i]*sin(angle-pi/2)+self.center[1]
            x3=hand_width[i]*cos(angle+pi/2)+self.center[0]
            y3=hand_width[i]*sin(angle+pi/2)+self.center[1]
            points = [[x0,y0],[x2,y2],[x1,y1],[x3,y3]]
            shadow = [[x0,y0],[x2,y2],[x1,y1]]
            # Saved for hit-testing in is_contained().
            self.hand_coords[i] = points
            pygame.draw.polygon(self.canvas, self.colors[i], points, 0)
            pygame.draw.polygon(self.canvas, self.colors3[i], shadow, 0)
            # Draw the line from the center to the calculated end point
            line_through = [[x0,y0],[x1,y1]]
            pygame.draw.aalines(self.canvas, self.colors2[i], True, points)
            pygame.draw.aalines(self.canvas, self.colors2[i], True, line_through)
        # Centre hub.
        pygame.draw.circle(self.canvas,self.colors[0],self.center,self.size//50,0)
        pygame.draw.circle(self.canvas,self.colors2[0],self.center,self.size//50,1)
        pygame.draw.circle(self.canvas,self.colors2[0],self.center,self.size//70,1)
        self.clock_canvas.update_me = True
        self.mainloop.redraw_needed[0] = True
    def vector_len(self,v):
        """Return the Euclidean length of the 2-D vector v."""
        return sqrt(v[0]**2 + v[1]**2)
    def scalar_product(self,v1,v2):
        """Return the dot product of v1 and v2."""
        return sum([v1[i]*v2[i] for i in range(len(v1))])
    def angle(self,v1,v2):
        """Return the cosine of the angle between v1 and v2 (note: the
        cosine, not the angle itself, despite the name)."""
        return self.scalar_product(v1,v2)/(self.vector_len(v1)*self.vector_len(v2))
    def is_contained(self, pos, coords_id = 0):
        """Return True if pos lies inside the quadrilateral of hand
        coords_id (0 = hour hand, 1 = minute hand), by comparing the
        angular spread of pos against the hand's two opposite corners."""
        v0 = sv.Vector2.from_points(self.hand_coords[coords_id][2], self.hand_coords[coords_id][1])
        v1 = sv.Vector2.from_points(self.hand_coords[coords_id][0], self.hand_coords[coords_id][1])
        v2 = sv.Vector2.from_points(self.hand_coords[coords_id][2], self.hand_coords[coords_id][3])
        v3 = sv.Vector2.from_points(self.hand_coords[coords_id][0], self.hand_coords[coords_id][3])
        v4 = sv.Vector2.from_points(pos, self.hand_coords[coords_id][1])
        v5 = sv.Vector2.from_points(pos, self.hand_coords[coords_id][3])
        a1 = 1 - self.angle(v0,v1) #corner 1
        a2 = 1 - self.angle(v2,v3) #corner 2
        a3 = 1 - self.angle(v0,v4)#point to arm1 of corner1
        a4 = 1 - self.angle(v1,v4)#point to arm2 of corner1
        a5 = 1 - self.angle(v2,v5)#point to arm1 of corner2
        a6 = 1 - self.angle(v3,v5)#point to arm2 of corner2
        if (a3+a4) < a1 and (a5+a6) < a2:
            return True
        return False
    def current_angle(self, pos,r):
        """Return the clockwise angle (radians, 0 at 12 o'clock) of pos
        around the clock centre; r is the distance from the centre.
        Since cosa/sina are components of a unit vector, the four
        quadrant branches below are exhaustive."""
        cosa = (pos[0] - self.center[0]) / r
        sina = (pos[1] - self.center[1]) / r
        if 0 <= cosa <= 1 and -1 <= sina <= 0:
            angle = pi/2 - acos(cosa)
        elif 0 <= cosa <= 1 and 0 <= sina <= 1:
            angle = acos(cosa)+pi/2 #ok
        elif -1 <= cosa <= 0 and 0 <= sina <= 1:
            angle = acos(cosa)+ pi/2 #ok
        elif -1 <= cosa <= 0 and -1 <= sina <= 0:
            angle = 2*pi+ pi/2 - acos(cosa)
        return angle
    def handle(self,event):
        """Translate mouse events into hand drags / dial taps and update
        self.time accordingly, redrawing when it changed."""
        gd.BoardGame.handle(self, event) #send event handling up
        self.tm = self.time[:]
        if event.type == pygame.MOUSEMOTION and self.hand_id > 0:
            # Dragging a hand: convert pointer position to a time value.
            pos = [event.pos[0]-self.layout.game_left,event.pos[1]-self.layout.top_margin]
            r = self.vector_len([pos[0]-self.center[0], pos[1] - self.center[1]])
            if r == 0: r = 0.1
            if self.hand_id == 1:
                h = (self.current_angle(pos, r)) / self.angle_step_12
                if int(h) == 0:
                    self.tm[0] = 12
                else:
                    self.tm[0] = int(h)
            elif self.hand_id == 2:
                m = (self.current_angle(pos, r)) / self.angle_step_60
                self.tm[1] = int(m)
                # Crossing 12 with the minute hand rolls the hour over.
                if 0 <= self.tm[1] < 5 and 55 <= self.time[1] <= 59:
                    if self.tm[0] == 12:
                        self.tm[0] = 1
                    else:
                        self.tm[0] += 1
                elif 0 <= self.time[1] < 5 and 55 <= self.tm[1] <= 59:
                    if self.tm[0] == 1:
                        self.tm[0] = 12
                    else:
                        self.tm[0] -= 1
        elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            active = self.board.active_ship
            pos = [event.pos[0]-self.layout.game_left,event.pos[1]-self.layout.top_margin]
            if active == 0:
                r = self.vector_len([pos[0]-self.center[0], pos[1] - self.center[1]])
                if r == 0: r = 0.1
                self.hand_id = 0
                # Clicking a hand starts a drag; clicking the dial jumps
                # the hour (inner area) or minute (outer area) directly.
                if self.is_contained(pos, coords_id = 0):
                    self.hand_id = 1
                    #print("activated: %d" % self.hand_id)
                elif self.is_contained(pos, coords_id = 1):
                    self.hand_id = 2
                    #print("activated: %d" % self.hand_id)
                elif self.rs[0]*1.1 > r:
                    h = (self.current_angle(pos, r)) / self.angle_step_12
                    if int(h) == 0:
                        h = 12
                    self.tm[0] = int(h)
                else:
                    m = (self.current_angle(pos, r)) / self.angle_step_60
                    self.tm[1] = int(m)
        elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
            self.hand_id = 0
        if self.tm != self.time:
            self.time = self.tm[:]
            self.draw_hands()
            self.clock_canvas.painting = self.canvas.copy()
    def update(self,game):
        """Repaint the screen each frame; the base class draws the board."""
        game.fill((255,255,255))
        gd.BoardGame.update(self, game) #rest of painting done by parent
    def check_result(self):
        """Compare the set time with the target; award points or retry
        (each failed attempt costs one point, floored at zero)."""
        if self.time == self.target_time:
            self.update_score(self.points)
            self.level.next_board()
        else:
            if self.points > 0:
                self.points -= 1
            self.level.try_again()
| gpl-3.0 |
fau-amos-2014-team-2/root | phonegap/node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 63 | 7421 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# Maps GYP target types to the file extension their output gets on Windows.
_TARGET_TYPE_EXT = {
  'executable': '.exe',
  'shared_library': '.dll'
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
  """Add a shard number to the end of a target.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  # A shard name is just the ordinary suffixed name with the number as text.
  shard_suffix = str(number)
  return _SuffixName(name, shard_suffix)
def ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Targets with an 'msvs_shard' count N are replaced by N copies named
  foo_0..foo_{N-1}, each receiving every Nth source file; dependencies on
  sharded targets are expanded to all of their shards.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Gather the targets to shard, and how many pieces.
  targets_to_shard = {}
  for t in target_dicts:
    shards = int(target_dicts[t].get('msvs_shard', 0))
    if shards:
      targets_to_shard[t] = shards
  # Shard target_list.
  new_target_list = []
  for t in target_list:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        new_target_list.append(_ShardName(t, i))
    else:
      new_target_list.append(t)
  # Shard target_dict.
  new_target_dicts = {}
  for t in target_dicts:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        name = _ShardName(t, i)
        # Shallow copy: the 'sources' key is replaced below, so only it
        # becomes shard-specific; other values stay shared.
        new_target_dicts[name] = copy.copy(target_dicts[t])
        new_target_dicts[name]['target_name'] = _ShardName(
             new_target_dicts[name]['target_name'], i)
        sources = new_target_dicts[name].get('sources', [])
        new_sources = []
        # Round-robin the sources across the shards.
        for pos in range(i, len(sources), targets_to_shard[t]):
          new_sources.append(sources[pos])
        new_target_dicts[name]['sources'] = new_sources
    else:
      new_target_dicts[t] = target_dicts[t]
  # Shard dependencies.
  for t in new_target_dicts:
    dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
    new_dependencies = []
    for d in dependencies:
      if d in targets_to_shard:
        for i in range(targets_to_shard[d]):
          new_dependencies.append(_ShardName(d, i))
      else:
        new_dependencies.append(d)
    new_target_dicts[t]['dependencies'] = new_dependencies
  return (new_target_list, new_target_dicts)
def InsertLargePdbShims(target_list, target_dicts, vars):
  """Insert a shim target that forces the linker to use 4KB pagesize PDBs.

  This is a workaround for targets with PDBs greater than 1GB in size, the
  limit for the 1KB pagesize PDBs created by the linker by default.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    Tuple of the shimmed version of the inputs.
  """
  # Determine which targets need shimming.
  targets_to_shim = []
  for t in target_dicts:
    target_dict = target_dicts[t]
    # We only want to shim targets that have msvs_large_pdb enabled.
    if not int(target_dict.get('msvs_large_pdb', 0)):
      continue
    # This is intended for executable, shared_library and loadable_module
    # targets where every configuration is set up to produce a PDB output.
    # If any of these conditions is not true then the shim logic will fail
    # below.
    targets_to_shim.append(t)
  large_pdb_shim_cc = _GetLargePdbShimCcPath()
  for t in targets_to_shim:
    target_dict = target_dicts[t]
    target_name = target_dict.get('target_name')
    # Carry over only the configuration-related keys into the shim targets.
    base_dict = _DeepCopySomeKeys(target_dict,
          ['configurations', 'default_configuration', 'toolset'])
    # This is the dict for copying the source file (part of the GYP tree)
    # to the intermediate directory of the project. This is necessary because
    # we can't always build a relative path to the shim source file (on Windows
    # GYP and the project may be on different drives), and Ninja hates absolute
    # paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYPs tree).
    copy_suffix = '_large_pdb_copy'
    copy_target_name = target_name + '_' + copy_suffix
    full_copy_target_name = _SuffixName(t, copy_suffix)
    shim_cc_basename = os.path.basename(large_pdb_shim_cc)
    shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
    shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
    copy_dict = copy.deepcopy(base_dict)
    copy_dict['target_name'] = copy_target_name
    copy_dict['type'] = 'none'
    copy_dict['sources'] = [ large_pdb_shim_cc ]
    copy_dict['copies'] = [{
      'destination': shim_cc_dir,
      'files': [ large_pdb_shim_cc ]
    }]
    # This is the dict for the PDB generating shim target. It depends on the
    # copy target.
    shim_suffix = '_large_pdb_shim'
    shim_target_name = target_name + '_' + shim_suffix
    full_shim_target_name = _SuffixName(t, shim_suffix)
    shim_dict = copy.deepcopy(base_dict)
    shim_dict['target_name'] = shim_target_name
    shim_dict['type'] = 'static_library'
    shim_dict['sources'] = [ shim_cc_path ]
    shim_dict['dependencies'] = [ full_copy_target_name ]
    # Set up the shim to output its PDB to the same location as the final linker
    # target.
    for config in shim_dict.get('configurations').itervalues():
      msvs = config.setdefault('msvs_settings')
      linker = msvs.pop('VCLinkerTool')  # We want to clear this dict.
      pdb_path = linker.get('ProgramDatabaseFile')
      compiler = msvs.setdefault('VCCLCompilerTool', {})
      compiler.setdefault('DebugInformationFormat', '3')
      compiler.setdefault('ProgramDataBaseFileName', pdb_path)
    # Add the new targets.
    target_list.append(full_copy_target_name)
    target_list.append(full_shim_target_name)
    target_dicts[full_copy_target_name] = copy_dict
    target_dicts[full_shim_target_name] = shim_dict
    # Update the original target to depend on the shim target.
    target_dict.setdefault('dependencies', []).append(full_shim_target_name)
  # (Fixed: the return line previously had stray non-Python text appended,
  # which made the module unparseable.)
  return (target_list, target_dicts)
golden1232004/webrtc_new | chromium/src/build/vs_toolchain.py | 1 | 14210 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import json
import os
import pipes
import shutil
import subprocess
import sys
# Directory containing this script (<checkout>/build) and the checkout root.
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
# JSON file describing the downloaded depot_tools toolchain, if any.
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
# gyp becomes importable only after the sys.path tweak above.
import gyp
# Use MSVS2015 as the default toolchain.
CURRENT_DEFAULT_TOOLCHAIN_VERSION = '2015'
def SetEnvironmentAndGetRuntimeDllDirs():
  """Sets up os.environ to use the depot_tools VS toolchain with gyp, and
  returns the location of the VS runtime DLLs so they can be copied into
  the output directory after gyp generation.

  Returns None when the depot_tools toolchain is not in use (in that case
  a system-installed Visual Studio is detected instead).
  """
  vs_runtime_dll_dirs = None
  depot_tools_win_toolchain = \
      bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
  # When running on a non-Windows host, only do this if the SDK has explicitly
  # been downloaded before (in which case json_data_file will exist).
  if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
      and depot_tools_win_toolchain):
    if ShouldUpdateToolchain():
      Update()
    with open(json_data_file, 'r') as tempf:
      toolchain_data = json.load(tempf)
    toolchain = toolchain_data['path']
    version = toolchain_data['version']
    win_sdk = toolchain_data.get('win_sdk')
    if not win_sdk:
      # Older toolchain JSON files used the 'win8sdk' key.
      win_sdk = toolchain_data['win8sdk']
    wdk = toolchain_data['wdk']
    # TODO(scottmg): The order unfortunately matters in these. They should be
    # split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
    # below). http://crbug.com/345992
    vs_runtime_dll_dirs = toolchain_data['runtime_dirs']
    os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
    os.environ['GYP_MSVS_VERSION'] = version
    # We need to make sure windows_sdk_path is set to the automated
    # toolchain values in GYP_DEFINES, but don't want to override any
    # other values there.
    gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
    gyp_defines_dict['windows_sdk_path'] = win_sdk
    os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
        for k, v in gyp_defines_dict.iteritems())
    os.environ['WINDOWSSDKDIR'] = win_sdk
    os.environ['WDK_DIR'] = wdk
    # Include the VS runtime in the PATH in case it's not machine-installed.
    runtime_path = os.path.pathsep.join(vs_runtime_dll_dirs)
    os.environ['PATH'] = runtime_path + os.path.pathsep + os.environ['PATH']
  elif sys.platform == 'win32' and not depot_tools_win_toolchain:
    # System-installed Visual Studio: detect it rather than downloading.
    if not 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
      os.environ['GYP_MSVS_OVERRIDE_PATH'] = DetectVisualStudioPath()
    if not 'GYP_MSVS_VERSION' in os.environ:
      os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
  return vs_runtime_dll_dirs
def _RegistryGetValueUsingWinReg(key, value):
  """Use the _winreg module to obtain the value of a registry key.

  Args:
    key: The registry key, e.g. 'HKLM\\Software\\...'.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.  Throws
    ImportError if _winreg is unavailable (i.e. on non-Windows Pythons).
  """
  import _winreg
  try:
    root, subkey = key.split('\\', 1)
    assert root == 'HKLM'  # Only need HKLM for now.
    with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
      return _winreg.QueryValueEx(hkey, value)[0]
  except WindowsError:
    # Missing key/value (or access failure) is reported as None.
    return None
def _RegistryGetValue(key, value):
  """Read registry |value| under |key|, failing loudly when the _winreg
  module is unavailable (non-Windows Python)."""
  try:
    result = _RegistryGetValueUsingWinReg(key, value)
  except ImportError:
    raise Exception('The python library _winreg not found.')
  return result
def GetVisualStudioVersion():
  """Return the GYP_MSVS_VERSION of Visual Studio in use.

  Falls back to CURRENT_DEFAULT_TOOLCHAIN_VERSION when the environment
  variable is not set.
  """
  env_version = os.environ.get('GYP_MSVS_VERSION')
  if env_version is not None:
    return env_version
  return CURRENT_DEFAULT_TOOLCHAIN_VERSION
def DetectVisualStudioPath():
  """Return path to the GYP_MSVS_VERSION of Visual Studio, detected via
  the registry InstallDir key (raises if unsupported or not installed).
  """
  # Note that this code is used from
  # build/toolchain/win/setup_toolchain.py as well.
  version_as_year = GetVisualStudioVersion()
  year_to_version = {
      '2013': '12.0',
      '2015': '14.0',
  }
  if version_as_year not in year_to_version:
    raise Exception(('Visual Studio version %s (from GYP_MSVS_VERSION)'
                     ' not supported. Supported versions are: %s') % (
                       version_as_year, ', '.join(year_to_version.keys())))
  version = year_to_version[version_as_year]
  # Check both native and Wow6432Node (32-bit view) registry locations.
  keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
          r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version]
  for key in keys:
    path = _RegistryGetValue(key, 'InstallDir')
    if not path:
      continue
    # InstallDir points at Common7\IDE; go two levels up to the VS root.
    path = os.path.normpath(os.path.join(path, '..', '..'))
    return path
  raise Exception(('Visual Studio Version %s (from GYP_MSVS_VERSION)'
                   ' not found.') % (version_as_year))
def _VersionNumber():
  """Gets the standard version number ('120', '140', etc.) based on
  GYP_MSVS_VERSION."""
  year_to_number = {
      '2013': '120',
      '2015': '140',
  }
  vs_version = GetVisualStudioVersion()
  try:
    return year_to_number[vs_version]
  except KeyError:
    raise ValueError('Unexpected GYP_MSVS_VERSION')
def _CopyRuntimeImpl(target, source, verbose=True):
  """Copy |source| to |target| if it doesn't already exist or if it needs to be
  updated (comparing last modified time as an approximate float match as for
  some reason the values tend to differ by ~1e-07 despite being copies of the
  same file... https://crbug.com/603603).
  """
  # Only copy when the destination directory exists; mtimes closer than
  # 10ms are treated as identical to avoid needless re-copies.
  if (os.path.isdir(os.path.dirname(target)) and
      (not os.path.isfile(target) or
       abs(os.stat(target).st_mtime - os.stat(source).st_mtime) >= 0.01)):
    if verbose:
      print 'Copying %s to %s...' % (source, target)
    if os.path.exists(target):
      # Remove first so copy2 never fails on a read-only destination.
      os.unlink(target)
    shutil.copy2(source, target)
def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
  """Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
  exist, but the target directory does exist."""
  # dll_pattern is e.g. 'msvc%s120.dll'; 'p' -> msvcp..., 'r' -> msvcr...
  for file_part in ('p', 'r'):
    dll = dll_pattern % file_part
    target = os.path.join(target_dir, dll)
    source = os.path.join(source_dir, dll)
    _CopyRuntimeImpl(target, source)
def _CopyRuntime2015(target_dir, source_dir, dll_pattern, suffix):
  """Copy the msvcp, vccorlib and vcruntime runtime DLLs plus the UCRT
  DLLs, only if the target doesn't exist, but the target directory does
  exist.

  Arguments:
    target_dir: Output directory receiving the DLLs.
    source_dir: Toolchain runtime directory to copy from.
    dll_pattern: e.g. '%s140.dll' / '%s140d.dll'.
    suffix: '.dll' or 'd.dll' (debug), used for ucrtbase.
  """
  # Fixed: removed leftover debug `print` statements and a hard-coded
  # source_dir = "C:\Windows\System32" override that discarded the
  # caller-supplied toolchain directory and copied the machine-installed
  # DLLs instead.
  for file_part in ('msvcp', 'vccorlib', 'vcruntime'):
    dll = dll_pattern % file_part
    target = os.path.join(target_dir, dll)
    source = os.path.join(source_dir, dll)
    _CopyRuntimeImpl(target, source)
  # Copy the UCRT forwarding DLLs quietly -- there are dozens of them, so
  # per-file logging would be noisy.
  ucrt_src_dir = os.path.join(source_dir, 'api-ms-win-*.dll')
  for ucrt_src_file in glob.glob(ucrt_src_dir):
    file_part = os.path.basename(ucrt_src_file)
    ucrt_dst_file = os.path.join(target_dir, file_part)
    _CopyRuntimeImpl(ucrt_dst_file, ucrt_src_file, False)
  _CopyRuntimeImpl(os.path.join(target_dir, 'ucrtbase' + suffix),
                   os.path.join(source_dir, 'ucrtbase' + suffix))
def _CopyRuntime(target_dir, source_dir, target_cpu, debug):
  """Copy the VS runtime DLLs, only if the target doesn't exist, but the target
  directory does exist. Handles VS 2013 and VS 2015."""
  # Debug runtimes carry a 'd' suffix (e.g. msvcp140d.dll).
  suffix = "d.dll" if debug else ".dll"
  if GetVisualStudioVersion() == '2015':
    _CopyRuntime2015(target_dir, source_dir, '%s140' + suffix, suffix)
  else:
    _CopyRuntime2013(target_dir, source_dir, 'msvc%s120' + suffix)
  # Copy the PGO runtime library to the release directories.
  if not debug and os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
    pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
                                       'VC', 'bin')
    pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
    pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
    if target_cpu == "x86":
      source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
      if os.path.exists(source_x86):
        _CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll), source_x86)
    elif target_cpu == "x64":
      source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
      if os.path.exists(source_x64):
        _CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll),
                         source_x64)
    else:
      raise NotImplementedError("Unexpected target_cpu value:" + target_cpu)
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
  """Copies the VS runtime DLLs from the given |runtime_dirs| to the output
  directory so that even if not system-installed, built binaries are likely to
  be able to run.

  This needs to be run after gyp has been run so that the expected target
  output directories are already created.

  This is used for the GYP build and gclient runhooks.
  """
  x86, x64 = runtime_dirs
  out_debug = os.path.join(output_dir, 'Debug')
  out_release = os.path.join(output_dir, 'Release')
  out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
  out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
  # gyp does not create the nacl64 subdirectories; make them when the parent
  # configuration directory already exists.
  for parent_dir, nacl64_dir in ((out_debug, out_debug_nacl64),
                                 (out_release, out_release_nacl64)):
    if os.path.exists(parent_dir) and not os.path.exists(nacl64_dir):
      os.makedirs(nacl64_dir)
  # (destination, source runtime dir, cpu, debug?) for every output flavor.
  copy_plan = (
      (out_debug, x86, "x86", True),
      (out_release, x86, "x86", False),
      (os.path.join(output_dir, 'Debug_x64'), x64, "x64", True),
      (os.path.join(output_dir, 'Release_x64'), x64, "x64", False),
      (out_debug_nacl64, x64, "x64", True),
      (out_release_nacl64, x64, "x64", False),
  )
  for dest_dir, src_dir, cpu, dbg in copy_plan:
    _CopyRuntime(dest_dir, src_dir, cpu, debug=dbg)
def CopyDlls(target_dir, configuration, target_cpu):
  """Copy the VS runtime DLLs into the requested directory as needed.

  configuration is one of 'Debug' or 'Release'.
  target_cpu is one of 'x86' or 'x64'.

  The debug configuration gets both the debug and release DLLs; the
  release config only the latter.

  This is used for the GN build.
  """
  runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
  if not runtime_dll_dirs:
    # Not using a packaged toolchain; nothing to copy.
    return
  x64_dir, x86_dir = runtime_dll_dirs
  source_dir = x64_dir if target_cpu == 'x64' else x86_dir
  # Release runtimes are always required; debug builds additionally need the
  # debug-suffixed variants.
  _CopyRuntime(target_dir, source_dir, target_cpu, debug=False)
  if configuration == 'Debug':
    _CopyRuntime(target_dir, source_dir, target_cpu, debug=True)
def _GetDesiredVsToolchainHashes():
  """Load a list of SHA1s corresponding to the toolchains that we want installed
  to build with."""
  vs2015_hash = '95ddda401ec5678f15eeed01d2bee08fcbc5ee97'  # Update 2.
  vs2013_hash = '03a4e939cd325d6bc5216af41b92d02dda1366a6'
  selected = vs2015_hash if GetVisualStudioVersion() == '2015' else vs2013_hash
  return [selected]
def ShouldUpdateToolchain():
  """Check if the toolchain should be upgraded."""
  # A missing .json means no toolchain has ever been installed.
  if not os.path.exists(json_data_file):
    return True
  with open(json_data_file, 'r') as json_file:
    installed_version = json.load(json_file)['version']
  # A mismatch between the environment-selected version and the one recorded
  # in the json file means the toolchain should be updated.
  return installed_version != GetVisualStudioVersion()
def Update(force=False):
  """Requests an update of the toolchain to the specific hashes we have at
  this revision. The update outputs a .json of the various configuration
  information required to pass to gyp which we use in |GetToolchainDir()|.

  force is either False (default) or the literal command-line string
  '--force'; any other value is rejected. Returns a process exit code.
  """
  # main() forwards raw argv entries, so the only accepted truthy value is
  # the literal '--force' flag string.
  if force != False and force != '--force':
    print >>sys.stderr, 'Unknown parameter "%s"' % force
    return 1
  # An existing .json also forces a refresh, so that a version switch in the
  # environment is picked up even without an explicit --force.
  if force == '--force' or os.path.exists(json_data_file):
    force = True
  # DEPOT_TOOLS_WIN_TOOLCHAIN=0 opts out of the automatic toolchain.
  depot_tools_win_toolchain = \
      bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
  if ((sys.platform in ('win32', 'cygwin') or force) and
      depot_tools_win_toolchain):
    import find_depot_tools
    depot_tools_path = find_depot_tools.add_depot_tools_to_path()
    # Necessary so that get_toolchain_if_necessary.py will put the VS toolkit
    # in the correct directory.
    os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
    get_toolchain_args = [
        sys.executable,
        os.path.join(depot_tools_path,
                     'win_toolchain',
                     'get_toolchain_if_necessary.py'),
        '--output-json', json_data_file,
      ] + _GetDesiredVsToolchainHashes()
    if force:
      get_toolchain_args.append('--force')
    subprocess.check_call(get_toolchain_args)
  return 0
def NormalizePath(path):
  """Return *path* with every trailing backslash removed."""
  return path.rstrip("\\")
def GetToolchainDir():
  """Gets location information about the current toolchain (must have been
  previously updated by 'update'). This is used for the GN build.

  Prints GN-consumable key = "value" lines to stdout.
  """
  runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
  # If WINDOWSSDKDIR is not set, search the default SDK path and set it.
  if not 'WINDOWSSDKDIR' in os.environ:
    default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\10'
    if os.path.isdir(default_sdk_path):
      os.environ['WINDOWSSDKDIR'] = default_sdk_path
  # Emit the toolchain layout for GN; paths are normalized to drop trailing
  # backslashes so they can be embedded in GN strings safely.
  print '''vs_path = "%s"
sdk_path = "%s"
vs_version = "%s"
wdk_dir = "%s"
runtime_dirs = "%s"
''' % (
      NormalizePath(os.environ['GYP_MSVS_OVERRIDE_PATH']),
      NormalizePath(os.environ['WINDOWSSDKDIR']),
      GetVisualStudioVersion(),
      NormalizePath(os.environ.get('WDK_DIR', '')),
      os.path.pathsep.join(runtime_dll_dirs or ['None']))
def main():
  """Dispatch to the sub-command named in sys.argv[1]; returns an exit code."""
  # Map of sub-command name to handler; each handler receives the remaining
  # command-line arguments verbatim.
  commands = {
      'update': Update,
      'get_toolchain_dir': GetToolchainDir,
      'copy_dlls': CopyDlls,
  }
  if len(sys.argv) < 2 or sys.argv[1] not in commands:
    print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
    return 1
  # Forward the rest of argv to the chosen command and surface its result.
  return commands[sys.argv[1]](*sys.argv[2:])
if __name__ == '__main__':
  sys.exit(main())
| gpl-3.0 |
gavares/bazel | tools/android/proguard_whitelister.py | 26 | 2262 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks for proguard configuration rules that cannot be combined across libs.
The only valid proguard arguments for a library are -keep, -assumenosideeffects,
and -dontnote and -dontwarn and -checkdiscard when they are provided with
arguments.
"""
import re
import sys
from third_party.py import gflags
# Command-line flags: the proguard config to check and where to write the
# validated copy.
gflags.DEFINE_string('path', None, 'Path to the proguard config to validate')
gflags.DEFINE_string('output', None, 'Where to put the validated config')
FLAGS = gflags.FLAGS
# Matches a '#' comment through its trailing newline (or end of input) so
# comments can be stripped before validation.
PROGUARD_COMMENTS_PATTERN = '#.*(\n|$)'
def main():
  """Validates the config at FLAGS.path and writes it to FLAGS.output."""
  with open(FLAGS.path) as config_file:
    contents = config_file.read()
  bad_args = Validate(contents)
  if bad_args:
    raise RuntimeError('Invalid proguard config parameters: '
                       + str(bad_args))
  # Prepend a provenance marker so merged configs can be traced back.
  annotated = ('# Merged from %s \n' % FLAGS.path) + contents
  with open(FLAGS.output, 'w+') as out_file:
    out_file.write(annotated)
def Validate(config):
  """Checks the config for illegal arguments.

  Args:
    config: The contents of a proguard configuration file, as a string.

  Returns:
    A list of the invalid directive names (each prefixed with '-'); empty
    when every directive is safe to merge across libraries.
  """
  # Strip '#' comments (through the trailing newline) so commented-out rules
  # are not flagged. Use a raw-string pattern instead of the module-level
  # non-raw constant; the two match identically ('\n' vs regex escape).
  config = re.sub(r'#.*(\n|$)', '', config)
  # Every proguard directive begins with '-', so splitting on it yields one
  # chunk per directive (plus a leading empty string).
  invalid_configs = []
  for chunk in config.split('-'):
    chunk = chunk.strip()
    if not chunk:
      continue
    # -keep* (keepattributes, keepclassmembers, ...), -checkdiscard and
    # -assumenosideeffects are always allowed.
    if chunk.startswith(('checkdiscard', 'keep', 'assumenosideeffects')):
      continue
    tokens = chunk.split()
    # -dontnote/-dontwarn are only allowed when given an argument; a bare
    # directive would silence warnings globally for every merged library.
    if tokens[0] in ('dontnote', 'dontwarn') and len(tokens) > 1:
      continue
    invalid_configs.append('-' + tokens[0])
  return invalid_configs
if __name__ == '__main__':
  # gflags requires explicit parsing of argv before flags can be read.
  FLAGS(sys.argv)
  main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.