repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
trondeau/gnuradio | gr-analog/python/analog/fm_demod.py | 49 | 4405 | #
# Copyright 2006,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from fm_emph import fm_deemph
from math import pi
try:
from gnuradio import analog
except ImportError:
import analog_swig as analog
class fm_demod_cf(gr.hier_block2):
    """
    Generalized FM demodulation block with deemphasis and audio
    filtering.

    This block demodulates a band-limited, complex down-converted FM
    channel into the original baseband signal, optionally applying
    deemphasis. Low pass filtering is done on the resultant signal. It
    produces an output float stream in the range of [-1.0, +1.0].

    Args:
        channel_rate: incoming sample rate of the FM baseband (integer)
        audio_decim: input to output decimation rate (integer)
        deviation: maximum FM deviation (default = 5000) (float)
        audio_pass: audio low pass filter passband frequency (float)
        audio_stop: audio low pass filter stop frequency (float)
        gain: gain applied to audio output (default = 1.0) (float)
        tau: deemphasis time constant (default = 75e-6), specify tau=0.0 to prevent deemphasis (float)
    """
    def __init__(self, channel_rate, audio_decim, deviation,
                 audio_pass, audio_stop, gain=1.0, tau=75e-6):
        gr.hier_block2.__init__(self, "fm_demod_cf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float))      # Output signature
        # Quadrature demod gain: maps a frequency offset of +/-deviation Hz
        # to roughly +/-1.0 at the demod output.
        k = channel_rate/(2*pi*deviation)
        QUAD = analog.quadrature_demod_cf(k)
        # Audio low-pass taps computed by optfir at the channel rate; the
        # fir_filter below also decimates by audio_decim.
        audio_taps = filter.optfir.low_pass(
            gain,          # Filter gain
            channel_rate,  # Sample rate
            audio_pass,    # Audio passband
            audio_stop,    # Audio stopband
            0.1,           # Passband ripple
            60             # Stopband attenuation
        )
        LPF = filter.fir_filter_fff(audio_decim, audio_taps)
        if tau is not None and tau > 0.0: # None should be deprecated someday
            DEEMPH = fm_deemph(channel_rate, tau)
            self.connect(self, QUAD, DEEMPH, LPF, self)
        else:
            # tau of 0.0 (or None) disables the deemphasis stage entirely
            self.connect(self, QUAD, LPF, self)
class demod_20k0f3e_cf(fm_demod_cf):
    """
    NBFM demodulation block, 20 KHz channels

    This block demodulates a complex, downconverted, narrowband FM
    channel conforming to 20K0F3E emission standards, outputting
    floats in the range [-1.0, +1.0].

    Args:
        channel_rate: incoming sample rate of the FM baseband (integer)
        audio_decim: input to output decimation rate (integer)
    """
    def __init__(self, channel_rate, audio_decim):
        # Fixed NBFM parameters; gain and tau keep fm_demod_cf's defaults.
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             5000,  # Deviation
                             3000,  # Audio passband frequency
                             4500)  # Audio stopband frequency
class demod_200kf3e_cf(fm_demod_cf):
    """
    WFM demodulation block, mono.

    This block demodulates a complex, downconverted, wideband FM
    channel conforming to 200KF3E emission standards, outputting
    floats in the range [-1.0, +1.0].

    Args:
        channel_rate: incoming sample rate of the FM baseband (integer)
        audio_decim: input to output decimation rate (integer)
    """
    def __init__(self, channel_rate, audio_decim):
        # Fixed WFM (broadcast) parameters; tau keeps fm_demod_cf's default.
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             75000, # Deviation
                             15000, # Audio passband
                             16000, # Audio stopband
                             20.0)  # Audio gain
| gpl-3.0 |
gagoel/freebase-python | appengine_stubs/cookielib.py | 4 | 64124 | """HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
import re, urlparse, copy, time
import urllib_stub as urllib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from calendar import timegm
debug = False # set to True to enable debugging via the logging module
logger = None
def _debug(*args):
    """Forward a debug message to the lazily-created module logger.

    Does nothing unless the module-level ``debug`` flag is true.
    """
    global logger
    if not debug:
        return
    if not logger:
        # create the logger on first use so importing this module does not
        # pull in the logging package unnecessarily
        import logging
        logger = logging.getLogger("cookielib")
    return logger.debug(*args)
DEFAULT_HTTP_PORT = '80'
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
def _warn_unhandled_exception():
    """Issue a warning carrying the current traceback.

    Called from the few catch-all except: clauses in this module so that
    input that is bad in unexpected ways gets noticed instead of being
    silently swallowed.
    """
    import warnings, traceback, StringIO
    buf = StringIO.StringIO()
    traceback.print_exc(None, buf)
    warnings.warn("cookielib bug!\n%s" % buf.getvalue(), stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
    """Convert a UTC time tuple to seconds since the epoch.

    Returns None instead of raising when any field of *tt* falls outside
    the representable range (pre-1970 year, bogus month/day, etc.).
    """
    year, month, mday, hour, min, sec = tt[:6]
    in_range = (year >= EPOCH_YEAR and
                1 <= month <= 12 and
                1 <= mday <= 31 and
                0 <= hour <= 24 and
                0 <= min <= 59 and
                0 <= sec <= 61)
    return timegm(tt) if in_range else None
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
MONTHS_LOWER = []
for month in MONTHS: MONTHS_LOWER.append(month.lower())
def time2isoz(t=None):
    """Format *t*, seconds since the epoch (default: now), as UTC text.

    The returned string looks like "YYYY-MM-DD hh:mm:ssZ", e.g.
    "1994-11-24 08:49:37Z"; the trailing Z marks Universal Time (GMT).
    """
    tt = time.gmtime(time.time() if t is None else t)
    return "%04d-%02d-%02d %02d:%02d:%02dZ" % tuple(tt[:6])
def time2netscape(t=None):
    """Return a string representing time in seconds since epoch, t.

    If the function is called without an argument, it will use the current
    time.

    The format of the returned string is like this:

    Wed, DD-Mon-YYYY HH:MM:SS GMT
    """
    if t is None:
        t = time.time()
    year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
    # Bug fix: the comma after the weekday is part of the Netscape cookie
    # date format documented above (and emitted by Python 3's
    # http.cookiejar.time2netscape); the original format string dropped it.
    return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % (
        DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
    """Map a timezone specifier string to its offset from UTC in seconds.

    Accepts the UTC aliases listed in UTC_ZONES (offset 0) and numeric
    specs like "-0800" or "+01:00".  Returns None for anything it does
    not recognize.
    """
    if tz in UTC_ZONES:
        return 0
    m = TIMEZONE_RE.search(tz)
    if not m:
        return None
    offset = 3600 * int(m.group(2))
    if m.group(3):
        offset += 60 * int(m.group(3))
    if m.group(1) == '-':
        offset = -offset
    return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
    """Build seconds-since-epoch from already-split date/time field strings.

    Clock fields (hr/min/sec) may be None and default to 0; tz is a
    timezone specifier string or None (treated as UTC).  Returns None if
    the month is unrecognized, the resulting tuple is out of range, or
    the timezone string is unknown.
    """
    # translate month name to number
    # month numbers start with 1 (January)
    try:
        mon = MONTHS_LOWER.index(mon.lower())+1
    except ValueError:
        # maybe it's already a number
        try:
            imon = int(mon)
        except ValueError:
            return None
        if 1 <= imon <= 12:
            mon = imon
        else:
            return None

    # make sure clock elements are defined
    if hr is None: hr = 0
    if min is None: min = 0
    if sec is None: sec = 0

    yr = int(yr)
    day = int(day)
    hr = int(hr)
    min = int(min)
    sec = int(sec)

    if yr < 1000:
        # find "obvious" year: choose the century that places the 2-digit
        # year within +/-50 years of the current date
        cur_yr = time.localtime(time.time())[0]
        m = cur_yr % 100
        tmp = yr
        yr = yr + cur_yr - m
        m = m - tmp
        if abs(m) > 50:
            if m > 0: yr = yr + 100
            else: yr = yr - 100

    # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
    t = _timegm((yr, mon, day, hr, min, sec, tz))

    if t is not None:
        # adjust time using timezone string, to get absolute time since epoch
        if tz is None:
            tz = "UTC"
        tz = tz.upper()
        offset = offset_from_tz_string(tz)
        if offset is None:
            return None
        t = t - offset

    return t
STRICT_DATE_RE = re.compile(
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
WEEKDAY_RE = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
LOOSE_HTTP_DATE_RE = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
    """Returns time in seconds since epoch of time represented by a string.

    Return value is an integer.

    None is returned if the format of str is unrecognized, the time is outside
    the representable range, or the timezone string is not recognized.  If the
    string contains no timezone, UTC is assumed.

    The timezone in the string may be numerical (like "-0800" or "+0100") or a
    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
    timezone strings equivalent to UTC (zero offset) are known to the function.

    The function loosely parses the following formats:

    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)

    The parser ignores leading and trailing whitespace.  The time may be
    absent.

    If the year is given with only 2 digits, the function will select the
    century that makes the year closest to the current date.
    """
    # fast exit for strictly conforming string
    m = STRICT_DATE_RE.search(text)
    if m:
        # fully specified RFC 1123 date: convert directly, no tz handling
        # needed (the regex only admits "GMT")
        g = m.groups()
        mon = MONTHS_LOWER.index(g[1].lower()) + 1
        tt = (int(g[2]), mon, int(g[0]),
              int(g[3]), int(g[4]), float(g[5]))
        return _timegm(tt)

    # No, we need some messy parsing...

    # clean up
    text = text.lstrip()
    text = WEEKDAY_RE.sub("", text, 1)  # Useless weekday

    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7

    # loose regexp parse
    m = LOOSE_HTTP_DATE_RE.search(text)
    if m is not None:
        day, mon, yr, hr, min, sec, tz = m.groups()
    else:
        return None  # bad format

    # _str2time handles month names, 2-digit years and tz offsets
    return _str2time(day, mon, yr, hr, min, sec, tz)
ISO_DATE_RE = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
    """
    As for http2time, but parses the ISO 8601 formats:

    1994-02-03 14:15:29 -0100    -- ISO 8601 format
    1994-02-03 14:15:29          -- zone is optional
    1994-02-03                   -- only date
    1994-02-03T14:15:29          -- Use T as separator
    19940203T141529Z             -- ISO 8601 compact format
    19940203                     -- only date
    """
    m = ISO_DATE_RE.search(text.lstrip())
    if m is None:
        return None  # bad format
    # XXX the trailing fragment of the timezone group is ignored here: is
    # that the right thing to do?
    yr, mon, day, hr, min, sec, tz, _ = m.groups()
    return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
    """Return the subject string of *match* with the matched span removed."""
    subject = match.string
    start, end = match.span(0)
    return subject[:start] + subject[end:]
HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE = re.compile(r"\\(.)")
def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.

    The function knows how to deal with ",", ";" and "=" as well as quoted
    values after "=".  A list of space separated tokens are parsed as if they
    were separated by ";".

    If the header_values passed as argument contains multiple values, then they
    are treated as if they were a single value separated by comma ",".

    This means that this function is useful for parsing header fields that
    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
    the requirement for tokens).

      headers           = #header
      header            = (token | parameter) *( [";"] (token | parameter))

      token             = 1*<any CHAR except CTLs or separators>
      separators        = "(" | ")" | "<" | ">" | "@"
                        | "," | ";" | ":" | "\" | <">
                        | "/" | "[" | "]" | "?" | "="
                        | "{" | "}" | SP | HT

      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
      qdtext            = <any TEXT except <">>
      quoted-pair       = "\" CHAR

      parameter         = attribute "=" value
      attribute         = token
      value             = token | quoted-string

    Each header is represented by a list of key/value pairs.  The value for a
    simple token (not part of a parameter) is None.  Syntactically incorrect
    headers will not necessarily be parsed as you would want.

    This is easier to describe with some examples:

    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]
    """
    assert not isinstance(header_values, basestring)
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        # consume the text left-to-right: each regex match is removed from
        # the front of `text` via unmatched() until nothing remains
        while text:
            m = HEADER_TOKEN_RE.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = HEADER_QUOTED_VALUE_RE.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    # undo backslash-escaping inside the quoted string
                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
                else:
                    m = HEADER_VALUE_RE.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = value.rstrip()
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif text.lstrip().startswith(","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = text.lstrip()[1:]
                if pairs: result.append(pairs)
                pairs = []
            else:
                # skip junk
                non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
                # if nothing was removed we would loop forever, which would
                # indicate a bug in this parser
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs: result.append(pairs)
    return result
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
def join_header_words(lists):
    """Do the inverse (almost) of the conversion done by split_header_words.

    Takes a list of lists of (key, value) pairs and produces a single header
    value.  Attribute values are quoted if needed.

    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
    'text/plain; charset="iso-8859/1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
    'text/plain, charset="iso-8859/1"'
    """
    headers = []
    for pairs in lists:
        attrs = []
        for key, val in pairs:
            if val is None:
                # a bare token, no "=value" part
                attrs.append(key)
                continue
            if not re.search(r"^\w+$", val):
                # non-word characters present: quote, escaping " and \
                val = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", val)
                val = '"%s"' % val
            attrs.append("%s=%s" % (key, val))
        if attrs:
            headers.append("; ".join(attrs))
    return ", ".join(headers)
def parse_ns_headers(ns_headers):
    """Ad-hoc parser for Netscape protocol cookie-attributes.

    The old Netscape cookie format for Set-Cookie can for instance contain
    an unquoted "," in the expires field, so we have to use this ad-hoc
    parser instead of split_header_words.

    XXX This may not make the best possible effort to parse all the crap
    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
    parser is probably better, so could do worse than following that if
    this ever gives any trouble.

    Currently, this is also used for parsing RFC 2109 cookies.
    """
    known_attrs = ("expires", "domain", "path", "secure",
                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
                   "port", "max-age")

    result = []
    for ns_header in ns_headers:
        pairs = []
        version_set = False
        for ii, param in enumerate(re.split(r";\s*", ns_header)):
            param = param.rstrip()
            if param == "": continue
            if "=" not in param:
                # a lone attribute such as "secure" (or a valueless "expires")
                k, v = param, None
            else:
                k, v = re.split(r"\s*=\s*", param, 1)
                k = k.lstrip()
            if ii != 0:
                # everything after the first item is a cookie-attribute
                lc = k.lower()
                if lc in known_attrs:
                    k = lc
                if k == "version":
                    # This is an RFC 2109 cookie.
                    version_set = True
                # Bug fix: guard against a bare "expires" attribute with no
                # value -- the original code called v.startswith() on None
                # and raised AttributeError.
                if k == "expires" and v is not None:
                    # convert expires date to seconds since epoch
                    if v.startswith('"'): v = v[1:]
                    if v.endswith('"'): v = v[:-1]
                    v = http2time(v)  # None if invalid
            pairs.append((k, v))

        if pairs:
            if not version_set:
                pairs.append(("version", "0"))
            result.append(pairs)

    return result
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
    """Return True if text is a host domain name.

    XXX
    This may well be wrong.  Which RFC is HDN defined in, if any (for
    the purposes of RFC 2965)?
    For the current implementation, what about IPv6?  Remember to look
    at other uses of IPV4_RE also, if change this.
    """
    if not text:
        return False
    if IPV4_RE.search(text):
        return False
    # a leading or trailing dot disqualifies the name
    return not (text.startswith(".") or text.endswith("."))
def domain_match(A, B):
    """Return True if domain A domain-matches domain B, according to RFC 2965.

    A and B may be host domain names or IP addresses.  The comparison is
    case-insensitive.  Per RFC 2965 section 1, A matches B when either the
    strings compare equal, or A has the form NB where N is a non-empty name
    string and B begins with a dot (so x.y.com matches .Y.com but not
    Y.com).  Note the operation is not commutative: a.b.c.com matches
    .c.com, but not the reverse.
    """
    # If A or B are IP addresses, only the direct string-compare part of
    # the domain-match algorithm applies.
    A, B = A.lower(), B.lower()
    if A == B:
        return True
    if not is_HDN(A):
        return False
    pos = A.rfind(B)
    if pos <= 0:
        # either B is not a substring of A (pos == -1), or the name
        # prefix N would be empty (pos == 0)
        return False
    if not B.startswith("."):
        return False
    return is_HDN(B[1:])
def liberal_is_HDN(text):
    """Return True if text is sort-of-like a host domain name.

    For accepting/blocking domains: anything that is not an IPv4-looking
    string passes.
    """
    return not IPV4_RE.search(text)
def user_domain_match(A, B):
    """For blocking/accepting domains.

    A and B may be host domain names or IP addresses.
    """
    A, B = A.lower(), B.lower()
    if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
        # at least one side looks like an IP address: only exact
        # equality counts
        return A == B
    if B.startswith("."):
        # a leading dot means "this domain and any subdomain"
        return A.endswith(B)
    return A == B
cut_port_re = re.compile(r":\d+$")
def request_host(request):
    """Return request-host, as defined by RFC 2965.

    Variation from RFC: returned value is lowercased, for convenient
    comparison.
    """
    host = urlparse.urlparse(request.get_full_url())[1]
    if not host:
        # no netloc in the URL: fall back on the Host header
        host = request.get_header("Host", "")
    # remove any :port suffix before lowercasing
    return cut_port_re.sub("", host, 1).lower()
def eff_request_host(request):
    """Return a tuple (request-host, effective request-host name).

    As defined by RFC 2965, except both are lowercased.
    """
    req_host = request_host(request)
    erhn = req_host
    # a dotless host that is not an IP gets ".local" appended per RFC 2965
    if "." not in req_host and not IPV4_RE.search(req_host):
        erhn = req_host + ".local"
    return req_host, erhn
def request_path(request):
    """request-URI, as defined by RFC 2965."""
    url = request.get_full_url()
    path, parameters, query, frag = urlparse.urlparse(url)[2:]
    if parameters:
        # keep the ;parameters segment attached to the path
        path = "%s;%s" % (path, parameters)
    req_path = urlparse.urlunparse(
        ("", "", escape_path(path), "", query, frag))
    if not req_path.startswith("/"):
        # fix bad RFC 2396 absoluteURI
        req_path = "/" + req_path
    return req_path
def request_port(request):
    """Return the request's port as a string.

    Falls back to DEFAULT_HTTP_PORT when the host carries no explicit
    port; returns None when an explicit port is present but non-numeric.
    """
    host = request.get_host()
    _, sep, port = host.partition(':')
    if not sep:
        return DEFAULT_HTTP_PORT
    try:
        int(port)
    except ValueError:
        _debug("nonnumeric port: '%s'", port)
        return None
    return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
    """Re-emit the %xx escape captured by *match* with uppercase hex digits."""
    return "%" + match.group(1).upper()
def escape_path(path):
    """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
    # There's no knowing what character encoding was used to create URLs
    # containing %-escapes, but since we have to pick one to escape invalid
    # path characters, we pick UTF-8, as recommended in the HTML 4.0
    # specification:
    # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
    # And here, kind of: draft-fielding-uri-rfc2396bis-03
    # (And in draft IRI specification: draft-duerst-iri-05)
    # (And here, for new URI schemes: RFC 2718)
    if isinstance(path, unicode):
        path = path.encode("utf-8")
    # Bug fix: this read "urlib.quote" (a NameError at runtime); the module
    # is imported above as "import urllib_stub as urllib".
    path = urllib.quote(path, HTTP_PATH_SAFE)
    path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
    return path
def reach(h):
    """Return reach of host h, as defined by RFC 2965, section 1.

    The reach R of a host name H is defined as follows:

       *  If

          -  H is the host domain name of a host; and,

          -  H has the form A.B; and

          -  A has no embedded (that is, interior) dots; and

          -  B has at least one embedded dot, or B is the string "local".
             then the reach of H is .B.

       *  Otherwise, the reach of H is H.

    >>> reach("www.acme.com")
    '.acme.com'
    >>> reach("acme.com")
    'acme.com'
    >>> reach("acme.local")
    '.local'
    """
    dot = h.find(".")
    if dot >= 0:
        suffix = h[dot+1:]
        if is_HDN(h) and (suffix.find(".") >= 0 or suffix == "local"):
            return "." + suffix
    return h
def is_third_party(request):
    """
    RFC 2965, section 3.3.6:

        An unverifiable transaction is to a third-party host if its request-
        host U does not domain-match the reach R of the request-host O in the
        origin transaction.
    """
    origin_reach = reach(request.get_origin_req_host())
    return not domain_match(request_host(request), origin_reach)
class Cookie:
    """HTTP Cookie.

    This class represents both Netscape and RFC 2965 cookies.

    This is deliberately a very simple class.  It just holds attributes.  It's
    possible to construct Cookie instances that don't comply with the cookie
    standards.  CookieJar.make_cookies is the factory function for Cookie
    objects -- it deals with cookie parsing, supplying defaults, and
    normalising to the representation used in this class.  CookiePolicy is
    responsible for checking them to see whether they should be accepted from
    and returned to the server.

    Note that the port may be present in the headers, but unspecified ("Port"
    rather than "Port=80", for example); if this is the case, port is None.
    """

    def __init__(self, version, name, value,
                 port, port_specified,
                 domain, domain_specified, domain_initial_dot,
                 path, path_specified,
                 secure,
                 expires,
                 discard,
                 comment,
                 comment_url,
                 rest,
                 rfc2109=False,
                 ):
        # coerce the numeric-ish fields up front so later comparisons
        # (e.g. expires <= now) behave predictably
        if version is not None: version = int(version)
        if expires is not None: expires = int(expires)
        if port is None and port_specified is True:
            raise ValueError("if port is None, port_specified must be false")

        self.version = version
        self.name = name
        self.value = value
        self.port = port
        self.port_specified = port_specified
        # normalise case, as per RFC 2965 section 3.3.3
        self.domain = domain.lower()
        self.domain_specified = domain_specified
        # Sigh.  We need to know whether the domain given in the
        # cookie-attribute had an initial dot, in order to follow RFC 2965
        # (as clarified in draft errata).  Needed for the returned $Domain
        # value.
        self.domain_initial_dot = domain_initial_dot
        self.path = path
        self.path_specified = path_specified
        self.secure = secure
        self.expires = expires
        self.discard = discard
        self.comment = comment
        self.comment_url = comment_url
        self.rfc2109 = rfc2109

        # non-standard cookie-attributes live in this dict; copied so the
        # caller's mapping is not shared with this instance
        self._rest = copy.copy(rest)

    def has_nonstandard_attr(self, name):
        # True if a non-standard cookie-attribute of this name was seen
        return name in self._rest
    def get_nonstandard_attr(self, name, default=None):
        return self._rest.get(name, default)
    def set_nonstandard_attr(self, name, value):
        self._rest[name] = value

    def is_expired(self, now=None):
        # expires is seconds-since-epoch; None means no expiry was given
        if now is None: now = time.time()
        if (self.expires is not None) and (self.expires <= now):
            return True
        return False

    def __str__(self):
        if self.port is None: p = ""
        else: p = ":"+self.port
        limit = self.domain + p + self.path
        if self.value is not None:
            namevalue = "%s=%s" % (self.name, self.value)
        else:
            namevalue = self.name
        return "<Cookie %s for %s>" % (namevalue, limit)

    def __repr__(self):
        args = []
        for name in ("version", "name", "value",
                     "port", "port_specified",
                     "domain", "domain_specified", "domain_initial_dot",
                     "path", "path_specified",
                     "secure", "expires", "discard", "comment", "comment_url",
                     ):
            attr = getattr(self, name)
            args.append("%s=%s" % (name, repr(attr)))
        args.append("rest=%s" % repr(self._rest))
        args.append("rfc2109=%s" % repr(self.rfc2109))
        return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
    """Defines which cookies get accepted from and returned to server.

    May also modify cookies, though this is probably a bad idea.

    The subclass DefaultCookiePolicy defines the standard rules for Netscape
    and RFC 2965 cookies -- override that if you want a customised policy.
    """
    def set_ok(self, cookie, request):
        """Return true if (and only if) cookie should be accepted from server.

        Currently, pre-expired cookies never get this far -- the CookieJar
        class deletes such cookies itself.
        """
        raise NotImplementedError()

    def return_ok(self, cookie, request):
        """Return true if (and only if) cookie should be returned to server."""
        raise NotImplementedError()

    def domain_return_ok(self, domain, request):
        """Return false if cookies should not be returned, given cookie domain.
        """
        # liberal default; subclasses narrow this down
        return True

    def path_return_ok(self, path, request):
        """Return false if cookies should not be returned, given cookie path.
        """
        # liberal default; subclasses narrow this down
        return True
class DefaultCookiePolicy(CookiePolicy):
    """Implements the standard rules for accepting and returning cookies."""

    # Bit flags controlling how strictly Netscape-cookie domains are
    # checked; OR them together and pass as strict_ns_domain.
    DomainStrictNoDots = 1
    DomainStrictNonDomain = 2
    DomainRFC2965Match = 4

    DomainLiberal = 0
    DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
    def __init__(self,
                 blocked_domains=None, allowed_domains=None,
                 netscape=True, rfc2965=False,
                 rfc2109_as_netscape=None,
                 hide_cookie2=False,
                 strict_domain=False,
                 strict_rfc2965_unverifiable=True,
                 strict_ns_unverifiable=False,
                 strict_ns_domain=DomainLiberal,
                 strict_ns_set_initial_dollar=False,
                 strict_ns_set_path=False,
                 ):
        """Constructor arguments should be passed as keyword arguments only."""
        self.netscape = netscape
        self.rfc2965 = rfc2965
        self.rfc2109_as_netscape = rfc2109_as_netscape
        self.hide_cookie2 = hide_cookie2
        self.strict_domain = strict_domain
        self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
        self.strict_ns_unverifiable = strict_ns_unverifiable
        self.strict_ns_domain = strict_ns_domain
        self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
        self.strict_ns_set_path = strict_ns_set_path

        # the block list is always stored as a tuple (possibly empty)
        if blocked_domains is not None:
            self._blocked_domains = tuple(blocked_domains)
        else:
            self._blocked_domains = ()

        # the allow list may be None, meaning "allow every domain"
        if allowed_domains is not None:
            allowed_domains = tuple(allowed_domains)
        self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override .set_ok(), be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
_debug(" Set-Cookie2 without version attribute (%s=%s)",
cookie.name, cookie.value)
return False
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.is_unverifiable() and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
_debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
    def set_ok_domain(self, cookie, request):
        """Check the cookie's domain against the user block/allow lists and
        the (possibly strict) Netscape / RFC 2965 domain-matching rules."""
        if self.is_blocked(cookie.domain):
            _debug(" domain %s is in user block-list", cookie.domain)
            return False
        if self.is_not_allowed(cookie.domain):
            _debug(" domain %s is not in user allow-list", cookie.domain)
            return False
        if cookie.domain_specified:
            req_host, erhn = eff_request_host(request)
            domain = cookie.domain
            if self.strict_domain and (domain.count(".") >= 2):
                # XXX This should probably be compared with the Konqueror
                # (kcookiejar.cpp) and Mozilla implementations, but it's a
                # losing battle.
                i = domain.rfind(".")
                j = domain.rfind(".", 0, i)
                if j == 0:  # domain like .foo.bar
                    tld = domain[i+1:]
                    sld = domain[j+1:i]
                    # heuristic list of second-level labels under 2-letter
                    # country-code TLDs (e.g. ".co.uk") that are too broad
                    # to set cookies for
                    if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
                       "gov", "mil", "int", "aero", "biz", "cat", "coop",
                       "info", "jobs", "mobi", "museum", "name", "pro",
                       "travel", "eu") and len(tld) == 2:
                        # domain like .co.uk
                        _debug(" country-code second level domain %s", domain)
                        return False
            if domain.startswith("."):
                undotted_domain = domain[1:]
            else:
                undotted_domain = domain
            embedded_dots = (undotted_domain.find(".") >= 0)
            if not embedded_dots and domain != ".local":
                _debug(" non-local domain %s contains no embedded dot",
                       domain)
                return False
            if cookie.version == 0:
                # Netscape rule: the effective request-host (with an added
                # initial dot if necessary) must end with the domain
                if (not erhn.endswith(domain) and
                    (not erhn.startswith(".") and
                     not ("."+erhn).endswith(domain))):
                    _debug(" effective request-host %s (even with added "
                           "initial dot) does not end end with %s",
                           erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainRFC2965Match)):
                # RFC 2965 rule (optionally also applied to Netscape cookies)
                if not domain_match(erhn, domain):
                    _debug(" effective request-host %s does not domain-match "
                           "%s", erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainStrictNoDots)):
                # the part of the host before the domain may not itself
                # contain a dot (unless the host is an IP address)
                host_prefix = req_host[:-len(domain)]
                if (host_prefix.find(".") >= 0 and
                    not IPV4_RE.search(req_host)):
                    _debug(" host prefix %s for domain %s contains a dot",
                           host_prefix, domain)
                    return False
        return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
_debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
_debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.is_unverifiable() and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
_debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
_debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
_debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
_debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
req_host, erhn = eff_request_host(request)
if not req_host.startswith("."):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
if not (req_host.endswith(domain) or erhn.endswith(domain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
_debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
_debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
_debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
    """Return adict's values as a list, ordered by sorted key.

    Rewritten without ``adict.keys().sort()`` / list-returning ``map()``:
    the original worked only on Python 2 (dict views have no .sort() and
    map() is lazy on Python 3).  The sorted()-based form returns exactly
    the same list on Python 2 and also works on Python 3.
    """
    return [adict[key] for key in sorted(adict)]
def deepvalues(mapping):
    """Iterates over nested mapping, depth-first, in sorted order by key."""
    values = vals_sorted_by_key(mapping)
    for obj in values:
        # `mapping` is reused here as a flag: True iff obj is itself a
        # mapping (and so was recursed into rather than yielded).
        mapping = False
        try:
            # Duck-type test: anything exposing an .items attribute is
            # treated as a nested mapping.
            obj.items
        except AttributeError:
            pass
        else:
            mapping = True
            for subobj in deepvalues(obj):
                yield subobj
        if not mapping:
            # Leaf value: yield it directly.
            yield obj
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
class Absent: pass  # the class object itself is the sentinel; never instantiated
class CookieJar:
    """Collection of HTTP cookies.
    You may not need to know about this class: try
    urllib2.build_opener(HTTPCookieProcessor).open(url).
    """
    # Matches characters that force quoting of a cookie value in the
    # Cookie header (see _cookie_attrs).
    non_word_re = re.compile(r"\W")
    # Matches characters that must be backslash-escaped inside a quoted
    # cookie value.
    quote_re = re.compile(r"([\"\\])")
    strict_domain_re = re.compile(r"\.?[^.]*")
    domain_re = re.compile(r"[^.]*")
    dots_re = re.compile(r"^\.+")
    # First-line signature of LWP cookie save files (presumably used by
    # the FileCookieJar subclasses imported at the bottom of this module).
    magic_re = r"^\#LWP-Cookies-(\d+\.\d+)"
    def __init__(self, policy=None):
        if policy is None:
            policy = DefaultCookiePolicy()
        self._policy = policy
        self._cookies_lock = _threading.RLock()
        # Cookies are stored as a three-level dict:
        # domain -> path -> name -> Cookie (see set_cookie).
        self._cookies = {}
    def set_policy(self, policy):
        # Replace the CookiePolicy used by this jar.
        self._policy = policy
    def _cookies_for_domain(self, domain, request):
        # Return the cookies stored under *domain* that the policy allows
        # to be returned for *request*.
        cookies = []
        if not self._policy.domain_return_ok(domain, request):
            return []
        _debug("Checking %s for cookies to return", domain)
        cookies_by_path = self._cookies[domain]
        for path in cookies_by_path.keys():
            if not self._policy.path_return_ok(path, request):
                continue
            cookies_by_name = cookies_by_path[path]
            for cookie in cookies_by_name.values():
                if not self._policy.return_ok(cookie, request):
                    _debug(" not returning cookie")
                    continue
                _debug(" it's a match")
                cookies.append(cookie)
        return cookies
    def _cookies_for_request(self, request):
        """Return a list of cookies to be returned to server."""
        cookies = []
        for domain in self._cookies.keys():
            cookies.extend(self._cookies_for_domain(domain, request))
        return cookies
    def _cookie_attrs(self, cookies):
        """Return a list of cookie-attributes to be returned to server.
        like ['foo="bar"; $Path="/"', ...]
        The $Version attribute is also added when appropriate (currently only
        once per request).
        """
        # add cookies in order of most specific (ie. longest) path first
        def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
        cookies.sort(decreasing_size)
        version_set = False
        attrs = []
        for cookie in cookies:
            # set version of Cookie header
            # XXX
            # What should it be if multiple matching Set-Cookie headers have
            # different versions themselves?
            # Answer: there is no answer; was supposed to be settled by
            # RFC 2965 errata, but that may never appear...
            version = cookie.version
            if not version_set:
                # $Version is emitted at most once, before the first cookie.
                version_set = True
                if version > 0:
                    attrs.append("$Version=%s" % version)
            # quote cookie value if necessary
            # (not for Netscape protocol, which already has any quotes
            # intact, due to the poorly-specified Netscape Cookie: syntax)
            if ((cookie.value is not None) and
                self.non_word_re.search(cookie.value) and version > 0):
                value = self.quote_re.sub(r"\\\1", cookie.value)
            else:
                value = cookie.value
            # add cookie-attributes to be returned in Cookie header
            if cookie.value is None:
                attrs.append(cookie.name)
            else:
                attrs.append("%s=%s" % (cookie.name, value))
            if version > 0:
                if cookie.path_specified:
                    attrs.append('$Path="%s"' % cookie.path)
                if cookie.domain.startswith("."):
                    domain = cookie.domain
                    # strip the dot we added if the server didn't send one
                    if (not cookie.domain_initial_dot and
                        domain.startswith(".")):
                        domain = domain[1:]
                    attrs.append('$Domain="%s"' % domain)
                if cookie.port is not None:
                    p = "$Port"
                    if cookie.port_specified:
                        p = p + ('="%s"' % cookie.port)
                    attrs.append(p)
        return attrs
    def add_cookie_header(self, request):
        """Add correct Cookie: header to request (urllib2.Request object).
        The Cookie2 header is also added unless policy.hide_cookie2 is true.
        """
        _debug("add_cookie_header")
        # NOTE(review): the lock is not released in a finally block, so an
        # exception below leaks the (re-entrant) lock -- confirm intended.
        self._cookies_lock.acquire()
        self._policy._now = self._now = int(time.time())
        cookies = self._cookies_for_request(request)
        attrs = self._cookie_attrs(cookies)
        if attrs:
            if not request.has_header("Cookie"):
                request.add_unredirected_header(
                    "Cookie", "; ".join(attrs))
        # if necessary, advertise that we know RFC 2965
        if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
            not request.has_header("Cookie2")):
            for cookie in cookies:
                if cookie.version != 1:
                    request.add_unredirected_header("Cookie2", '$Version="1"')
                    break
        self._cookies_lock.release()
        self.clear_expired_cookies()
    def _normalized_cookie_tuples(self, attrs_set):
        """Return list of tuples containing normalised cookie information.
        attrs_set is the list of lists of key,value pairs extracted from
        the Set-Cookie or Set-Cookie2 headers.
        Tuples are name, value, standard, rest, where name and value are the
        cookie name and value, standard is a dictionary containing the standard
        cookie-attributes (discard, secure, version, expires or max-age,
        domain, path and port) and rest is a dictionary containing the rest of
        the cookie-attributes.
        """
        cookie_tuples = []
        boolean_attrs = "discard", "secure"
        value_attrs = ("version",
                       "expires", "max-age",
                       "domain", "path", "port",
                       "comment", "commenturl")
        for cookie_attrs in attrs_set:
            # first key/value pair is always the cookie name and value
            name, value = cookie_attrs[0]
            # Build dictionary of standard cookie-attributes (standard) and
            # dictionary of other cookie-attributes (rest).
            # Note: expiry time is normalised to seconds since epoch.  V0
            # cookies should have the Expires cookie-attribute, and V1 cookies
            # should have Max-Age, but since V1 includes RFC 2109 cookies (and
            # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
            # accept either (but prefer Max-Age).
            max_age_set = False
            bad_cookie = False
            standard = {}
            rest = {}
            for k, v in cookie_attrs[1:]:
                lc = k.lower()
                # don't lose case distinction for unknown fields
                if lc in value_attrs or lc in boolean_attrs:
                    k = lc
                if k in boolean_attrs and v is None:
                    # boolean cookie-attribute is present, but has no value
                    # (like "discard", rather than "port=80")
                    v = True
                if k in standard:
                    # only first value is significant
                    continue
                if k == "domain":
                    if v is None:
                        _debug(" missing value for domain attribute")
                        bad_cookie = True
                        break
                    # RFC 2965 section 3.3.3
                    v = v.lower()
                if k == "expires":
                    if max_age_set:
                        # Prefer max-age to expires (like Mozilla)
                        continue
                    if v is None:
                        _debug(" missing or invalid value for expires "
                               "attribute: treating as session cookie")
                        continue
                if k == "max-age":
                    max_age_set = True
                    try:
                        v = int(v)
                    except ValueError:
                        _debug(" missing or invalid (non-numeric) value for "
                               "max-age attribute")
                        bad_cookie = True
                        break
                    # convert RFC 2965 Max-Age to seconds since epoch
                    # XXX Strictly you're supposed to follow RFC 2616
                    # age-calculation rules.  Remember that zero Max-Age
                    # is a request to discard (old and new) cookie, though.
                    k = "expires"
                    v = self._now + v
                if (k in value_attrs) or (k in boolean_attrs):
                    if (v is None and
                        k not in ("port", "comment", "commenturl")):
                        _debug(" missing value for %s attribute" % k)
                        bad_cookie = True
                        break
                    standard[k] = v
                else:
                    rest[k] = v
            if bad_cookie:
                continue
            cookie_tuples.append((name, value, standard, rest))
        return cookie_tuples
    def _cookie_from_cookie_tuple(self, tup, request):
        # standard is dict of standard cookie-attributes, rest is dict of the
        # rest of them
        name, value, standard, rest = tup
        # Absent (the class object) distinguishes "attribute not present"
        # from "present with value None".
        domain = standard.get("domain", Absent)
        path = standard.get("path", Absent)
        port = standard.get("port", Absent)
        expires = standard.get("expires", Absent)
        # set the easy defaults
        version = standard.get("version", None)
        if version is not None: version = int(version)
        secure = standard.get("secure", False)
        # (discard is also set if expires is Absent)
        discard = standard.get("discard", False)
        comment = standard.get("comment", None)
        comment_url = standard.get("commenturl", None)
        # set default path
        if path is not Absent and path != "":
            path_specified = True
            path = escape_path(path)
        else:
            path_specified = False
            # default path is the request path, truncated at the last "/"
            path = request_path(request)
            i = path.rfind("/")
            if i != -1:
                if version == 0:
                    # Netscape spec parts company from reality here
                    path = path[:i]
                else:
                    path = path[:i+1]
            if len(path) == 0: path = "/"
        # set default domain
        domain_specified = domain is not Absent
        # but first we have to remember whether it starts with a dot
        domain_initial_dot = False
        if domain_specified:
            domain_initial_dot = bool(domain.startswith("."))
        if domain is Absent:
            req_host, erhn = eff_request_host(request)
            domain = erhn
        elif not domain.startswith("."):
            domain = "."+domain
        # set default port
        port_specified = False
        if port is not Absent:
            if port is None:
                # Port attr present, but has no value: default to request port.
                # Cookie should then only be sent back on that port.
                port = request_port(request)
            else:
                port_specified = True
                port = re.sub(r"\s+", "", port)
        else:
            # No port attr present.  Cookie can be sent back on any port.
            port = None
        # set default expires and discard
        if expires is Absent:
            expires = None
            discard = True
        elif expires <= self._now:
            # Expiry date in past is request to delete cookie.  This can't be
            # in DefaultCookiePolicy, because can't delete cookies there.
            try:
                self.clear(domain, path, name)
            except KeyError:
                pass
            _debug("Expiring cookie, domain='%s', path='%s', name='%s'",
                   domain, path, name)
            return None
        return Cookie(version,
                      name, value,
                      port, port_specified,
                      domain, domain_specified, domain_initial_dot,
                      path, path_specified,
                      secure,
                      expires,
                      discard,
                      comment,
                      comment_url,
                      rest)
    def _cookies_from_attrs_set(self, attrs_set, request):
        # Turn parsed header attribute lists into Cookie objects, dropping
        # any that normalisation rejected or that were delete requests.
        cookie_tuples = self._normalized_cookie_tuples(attrs_set)
        cookies = []
        for tup in cookie_tuples:
            cookie = self._cookie_from_cookie_tuple(tup, request)
            if cookie: cookies.append(cookie)
        return cookies
    def _process_rfc2109_cookies(self, cookies):
        # Downgrade RFC 2109 (version 1, Set-Cookie) cookies to Netscape
        # cookies when the policy asks for it (or when RFC 2965 is off).
        rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
        if rfc2109_as_ns is None:
            rfc2109_as_ns = not self._policy.rfc2965
        for cookie in cookies:
            if cookie.version == 1:
                cookie.rfc2109 = True
                if rfc2109_as_ns:
                    # treat 2109 cookies as Netscape cookies rather than
                    # as RFC2965 cookies
                    cookie.version = 0
    def make_cookies(self, response, request):
        """Return sequence of Cookie objects extracted from response object."""
        # get cookie-attributes for RFC 2965 and Netscape protocols
        headers = response.info()
        rfc2965_hdrs = headers.getheaders("Set-Cookie2")
        ns_hdrs = headers.getheaders("Set-Cookie")
        rfc2965 = self._policy.rfc2965
        netscape = self._policy.netscape
        if ((not rfc2965_hdrs and not ns_hdrs) or
            (not ns_hdrs and not rfc2965) or
            (not rfc2965_hdrs and not netscape) or
            (not netscape and not rfc2965)):
            return []  # no relevant cookie headers: quick exit
        try:
            cookies = self._cookies_from_attrs_set(
                split_header_words(rfc2965_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            cookies = []
        if ns_hdrs and netscape:
            try:
                # RFC 2109 and Netscape cookies
                ns_cookies = self._cookies_from_attrs_set(
                    parse_ns_headers(ns_hdrs), request)
            except Exception:
                _warn_unhandled_exception()
                ns_cookies = []
            self._process_rfc2109_cookies(ns_cookies)
            # Look for Netscape cookies (from Set-Cookie headers) that match
            # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
            # For each match, keep the RFC 2965 cookie and ignore the Netscape
            # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
            # bundled in with the Netscape cookies for this purpose, which is
            # reasonable behaviour.
            if rfc2965:
                lookup = {}
                for cookie in cookies:
                    lookup[(cookie.domain, cookie.path, cookie.name)] = None
                def no_matching_rfc2965(ns_cookie, lookup=lookup):
                    key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
                    return key not in lookup
                ns_cookies = filter(no_matching_rfc2965, ns_cookies)
            if ns_cookies:
                cookies.extend(ns_cookies)
        return cookies
    def set_cookie_if_ok(self, cookie, request):
        """Set a cookie if policy says it's OK to do so."""
        # NOTE(review): lock release is not in a finally block here either.
        self._cookies_lock.acquire()
        self._policy._now = self._now = int(time.time())
        if self._policy.set_ok(cookie, request):
            self.set_cookie(cookie)
        self._cookies_lock.release()
    def set_cookie(self, cookie):
        """Set a cookie, without checking whether or not it should be set."""
        c = self._cookies
        self._cookies_lock.acquire()
        try:
            # create the domain -> path -> name nesting on demand
            if cookie.domain not in c: c[cookie.domain] = {}
            c2 = c[cookie.domain]
            if cookie.path not in c2: c2[cookie.path] = {}
            c3 = c2[cookie.path]
            c3[cookie.name] = cookie
        finally:
            self._cookies_lock.release()
    def extract_cookies(self, response, request):
        """Extract cookies from response, where allowable given the request."""
        _debug("extract_cookies: %s", response.info())
        self._cookies_lock.acquire()
        self._policy._now = self._now = int(time.time())
        for cookie in self.make_cookies(response, request):
            if self._policy.set_ok(cookie, request):
                _debug(" setting cookie: %s", cookie)
                self.set_cookie(cookie)
        self._cookies_lock.release()
    def clear(self, domain=None, path=None, name=None):
        """Clear some cookies.
        Invoking this method without arguments will clear all cookies.  If
        given a single argument, only cookies belonging to that domain will be
        removed.  If given two arguments, cookies belonging to the specified
        path within that domain are removed.  If given three arguments, then
        the cookie with the specified name, path and domain is removed.
        Raises KeyError if no matching cookie exists.
        """
        if name is not None:
            if (domain is None) or (path is None):
                raise ValueError(
                    "domain and path must be given to remove a cookie by name")
            del self._cookies[domain][path][name]
        elif path is not None:
            if domain is None:
                raise ValueError(
                    "domain must be given to remove cookies by path")
            del self._cookies[domain][path]
        elif domain is not None:
            del self._cookies[domain]
        else:
            self._cookies = {}
    def clear_session_cookies(self):
        """Discard all session cookies.
        Note that the .save() method won't save session cookies anyway, unless
        you ask otherwise by passing a true ignore_discard argument.
        """
        self._cookies_lock.acquire()
        for cookie in self:
            if cookie.discard:
                self.clear(cookie.domain, cookie.path, cookie.name)
        self._cookies_lock.release()
    def clear_expired_cookies(self):
        """Discard all expired cookies.
        You probably don't need to call this method: expired cookies are never
        sent back to the server (provided you're using DefaultCookiePolicy),
        this method is called by CookieJar itself every so often, and the
        .save() method won't save expired cookies anyway (unless you ask
        otherwise by passing a true ignore_expires argument).
        """
        self._cookies_lock.acquire()
        now = time.time()
        for cookie in self:
            if cookie.is_expired(now):
                self.clear(cookie.domain, cookie.path, cookie.name)
        self._cookies_lock.release()
    def __iter__(self):
        # Iterate over every stored Cookie, depth-first through the
        # domain/path/name nesting.
        return deepvalues(self._cookies)
    def __len__(self):
        """Return number of contained cookies."""
        i = 0
        for cookie in self: i = i + 1
        return i
    def __repr__(self):
        r = []
        for cookie in self: r.append(repr(cookie))
        return "<%s[%s]>" % (self.__class__, ", ".join(r))
    def __str__(self):
        r = []
        for cookie in self: r.append(str(cookie))
        return "<%s[%s]>" % (self.__class__, ", ".join(r))
# derives from IOError for backwards-compatibility with Python 2.4.0
class LoadError(IOError): pass  # raised by FileCookieJar.load()/revert() on unloadable files
class FileCookieJar(CookieJar):
    """CookieJar that can be loaded from and saved to a file."""
    def __init__(self, filename=None, delayload=False, policy=None):
        """
        Cookies are NOT loaded from the named file until either the .load() or
        .revert() method is called.
        """
        CookieJar.__init__(self, policy)
        if filename is not None:
            # Duck-type "string-like" check: concatenation with "" raises
            # for non-string objects.
            try:
                filename+""
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # `except TypeError:` would be the precise form.
            except:
                raise ValueError("filename must be string-like")
        self.filename = filename
        # Hint for subclasses that support lazy loading; not used here.
        self.delayload = bool(delayload)
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Save cookies to a file."""
        # Abstract: concrete file formats are implemented by subclasses.
        raise NotImplementedError()
    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        f = open(filename)
        try:
            # _really_load is provided by the concrete subclass.
            self._really_load(f, filename, ignore_discard, ignore_expires)
        finally:
            f.close()
    def revert(self, filename=None,
               ignore_discard=False, ignore_expires=False):
        """Clear all cookies and reload cookies from a saved file.
        Raises LoadError (or IOError) if reversion is not successful; the
        object's state will not be altered if this happens.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        # NOTE(review): on a failed load the old state is restored and the
        # exception re-raised, but the lock release below is then skipped
        # (no finally) -- confirm whether that leak is acceptable.
        self._cookies_lock.acquire()
        old_state = copy.deepcopy(self._cookies)
        self._cookies = {}
        try:
            self.load(filename, ignore_discard, ignore_expires)
        except (LoadError, IOError):
            self._cookies = old_state
            raise
        self._cookies_lock.release()
from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
from _MozillaCookieJar import MozillaCookieJar
| bsd-2-clause |
SerpentCS/odoo | addons/account_followup/__init__.py | 436 | 1098 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dirkjot/kivy | kivy/modules/console.py | 11 | 34163 | # coding=utf-8
"""
Console
=======
.. versionadded:: 1.9.1
Reboot of the old inspector, designed to be modular and keep concerns separated.
It also have a addons architecture that allow you to add a button, panel, or
more in the Console itself.
.. warning::
This module works, but might fail in some cases. Please contribute!
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation::
python main.py -m console
Mouse navigation
----------------
When "Select" button is activated, you can:
- tap once on a widget to select it without leaving inspect mode
- double tap on a widget to select and leave inspect mode (then you can
manipulate the widget again)
Keyboard navigation
-------------------
- "Ctrl + e": toggle console
- "Escape": cancel widget lookup, then hide inspector view
- "Top": select the parent widget
- "Down": select the first children of the current selected widget
- "Left": select the previous following sibling
- "Right": select the next following sibling
Additional information
----------------------
Some properties can be edited live. However, due to the delayed usage of
some properties, it might crash if you don't handle all the cases.
Addons
------
Addons must be added to `Console.addons` before the first Clock tick of the
application, or before the create_console is called. You cannot add addons on
the fly currently. Addons are quite cheap until the Console is activated. Panels
are even cheaper: nothing is done until the user selects them.
By default, we provide multiple addons activated by default:
- ConsoleAddonFps: display the FPS at the top-right
- ConsoleAddonSelect: activate the selection mode
- ConsoleAddonBreadcrumb: display the hierarchy of the current widget at the
bottom
- ConsoleAddonWidgetTree: panel to display the widget tree of the application
- ConsoleAddonWidgetPanel: panel to display the properties of the selected
widget
If you need to add custom widget in the Console, please use either
:class:`ConsoleButton`, :class:`ConsoleToggleButton` or :class:`ConsoleLabel`
An addon must inherit from the :class:`ConsoleAddon` class.
For example, here is a simple addon for displaying the FPS at the top/right
of the Console::
from kivy.modules.console import Console, ConsoleAddon
class ConsoleAddonFps(ConsoleAddon):
def init(self):
self.lbl = ConsoleLabel(text="0 Fps")
self.console.add_toolbar_widget(self.lbl, right=True)
def activate(self):
Clock.schedule_interval(self.update_fps, 1 / 2.)
def deactivated(self):
Clock.unschedule(self.update_fps)
def update_fps(self, *args):
fps = Clock.get_fps()
self.lbl.text = "{} Fps".format(int(fps))
Console.register_addon(ConsoleAddonFps)
You can create addons that add panels. Panel activation/deactivation is not
tied to the addon activation/deactivation, but in some cases, you can use the
same callback for deactivating the addon and the panel. Here is a simple About
panel addon::
from kivy.modules.console import Console, ConsoleAddon, ConsoleLabel
class ConsoleAddonAbout(ConsoleAddon):
def init(self):
self.console.add_panel("About", self.panel_activate,
self.panel_deactivate)
def panel_activate(self):
self.console.bind(widget=self.update_content)
self.update_content()
def panel_deactivate(self):
self.console.unbind(widget=self.update_content)
def deactivate(self):
self.panel_deactivate()
def update_content(self, *args):
widget = self.console.widget
if not widget:
return
text = "Selected widget is: {!r}".format(widget)
lbl = ConsoleLabel(text=text)
self.console.set_content(lbl)
Console.register_addon(ConsoleAddonAbout)
"""
__all__ = ("start", "stop", "create_console", "Console", "ConsoleAddon",
"ConsoleButton", "ConsoleToggleButton", "ConsoleLabel")
import kivy
kivy.require('1.0.9')
import weakref
from functools import partial
from itertools import chain
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.image import Image
from kivy.uix.treeview import TreeViewNode, TreeView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.modalview import ModalView
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix
from kivy.graphics.context_instructions import Transform
from kivy.graphics.transformation import Matrix
from kivy.properties import (ObjectProperty, BooleanProperty, ListProperty,
NumericProperty, StringProperty, OptionProperty,
ReferenceListProperty, AliasProperty,
VariableListProperty)
from kivy.graphics.texture import Texture
from kivy.clock import Clock
from kivy.lang import Builder
Builder.load_string("""
<Console>:
size_hint: (1, None) if self.mode == "docked" else (None, None)
height: dp(250)
canvas:
Color:
rgb: .185, .18, .18
Rectangle:
size: self.size
Color:
rgb: .3, .3, .3
Rectangle:
pos: 0, self.height - dp(48)
size: self.width, dp(48)
GridLayout:
cols: 1
id: layout
GridLayout:
id: toolbar
rows: 1
height: "48dp"
size_hint_y: None
padding: "4dp"
spacing: "4dp"
RelativeLayout:
id: content
<ConsoleAddonSeparator>:
size_hint_x: None
width: "10dp"
<ConsoleButton,ConsoleToggleButton,ConsoleLabel>:
size_hint_x: None
width: self.texture_size[0] + dp(20)
<ConsoleAddonBreadcrumbView>:
size_hint_y: None
height: "48dp"
canvas:
Color:
rgb: .3, .3, .3
Rectangle:
size: self.size
ScrollView:
id: sv
do_scroll_y: False
GridLayout:
id: stack
rows: 1
size_hint_x: None
width: self.minimum_width
padding: "4dp"
spacing: "4dp"
<TreeViewProperty>:
height: max(dp(48), max(lkey.texture_size[1], ltext.texture_size[1]))
Label:
id: lkey
text: root.key
text_size: (self.width, None)
width: 150
size_hint_x: None
Label:
id: ltext
text: [repr(getattr(root.widget, root.key, '')), root.refresh][0]\
if root.widget else ''
text_size: (self.width, None)
<ConsoleAddonWidgetTreeView>:
ScrollView:
scroll_type: ['bars', 'content']
bar_width: 10
ConsoleAddonWidgetTreeImpl:
id: widgettree
hide_root: True
size_hint: None, None
height: self.minimum_height
width: max(self.parent.width, self.minimum_width)
selected_widget: root.widget
on_select_widget: root.console.highlight_widget(args[1])
<-TreeViewWidget>:
height: self.texture_size[1] + sp(4)
size_hint_x: None
width: self.texture_size[0] + sp(4)
canvas.before:
Color:
rgba: self.color_selected if self.is_selected else (0, 0, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Color:
rgba: 1, 1, 1, int(not self.is_leaf)
Rectangle:
source:
('atlas://data/images/defaulttheme/tree_%s' %
('opened' if self.is_open else 'closed'))
size: 16, 16
pos: self.x - 20, self.center_y - 8
canvas:
Color:
rgba:
(self.disabled_color if self.disabled else
(self.color if not self.markup else (1, 1, 1, 1)))
Rectangle:
texture: self.texture
size: self.texture_size
pos:
(int(self.center_x - self.texture_size[0] / 2.),
int(self.center_y - self.texture_size[1] / 2.))
""")
def ignore_exception(f):
    """Decorator: call *f*, returning None instead of propagating errors.

    Fixes two issues with the original:
    - the bare ``except:`` also swallowed ``KeyboardInterrupt`` and
      ``SystemExit``; only ``Exception`` subclasses are ignored now.
    - the wrapper now carries *f*'s name/docstring via functools.wraps,
      which keeps tracebacks and introspection useful.
    """
    from functools import wraps

    @wraps(f)
    def f2(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            # Deliberate best-effort semantics: any failure yields None.
            return None
    return f2
class TreeViewProperty(BoxLayout, TreeViewNode):
    # Name of the inspected widget's property shown on this tree row.
    key = ObjectProperty(None, allownone=True)
    # Toggled to force the kv binding on the value label to re-evaluate.
    refresh = BooleanProperty(False)
    # weakref.ref to the inspected widget (cleared once it is collected).
    widget_ref = ObjectProperty(None, allownone=True)
    def _get_widget(self):
        """Dereference widget_ref, clearing it if the widget has died."""
        wr = self.widget_ref
        if wr is None:
            return None
        wr = wr()
        if wr is None:
            self.widget_ref = None
            return None
        return wr
    widget = AliasProperty(_get_widget, None, bind=('widget_ref', ))
class ConsoleButton(Button):
    """Button specialized for the Console"""
    # Auto-sizing style comes from the shared kv rule above.
    pass
class ConsoleToggleButton(ToggleButton):
    """ToggleButton specialized for the Console"""
    # Auto-sizing style comes from the shared kv rule above.
    pass
class ConsoleLabel(Label):
    """LabelButton specialized for the Console"""
    # Auto-sizing style comes from the shared kv rule above.
    pass
class ConsoleAddonSeparator(Widget):
    """Fixed-width (10dp, per the kv rule above) spacer between toolbar items."""
    pass
class ConsoleAddon(object):
    """Base class for implementing addons"""
    #: Console instance
    console = None
    def __init__(self, console):
        super(ConsoleAddon, self).__init__()
        self.console = console
        # Subclasses do their setup in init(), called once per addon.
        self.init()
    def init(self):
        """Method called when the addon is instantiated by the Console
        """
        pass
    def activate(self):
        """Method called when the addon is activated by the console
        (when the console is displayed)"""
        pass
    def deactivate(self):
        """Method called when the addon is deactivated by the console
        (when the console is hidden)
        """
        pass
class ConsoleAddonMode(ConsoleAddon):
    """Addon adding a "Docked" toggle button to the toolbar.

    Note: the button's state is not bound to anything here.
    """
    def init(self):
        btn = ConsoleToggleButton(text=u"Docked")
        self.console.add_toolbar_widget(btn)
class ConsoleAddonSelect(ConsoleAddon):
    """Addon adding the "Select" toggle that drives the inspect mode."""
    def init(self):
        self.btn = ConsoleToggleButton(text=u"Select")
        self.btn.bind(state=self.on_button_state)
        self.console.add_toolbar_widget(self.btn)
        self.console.bind(inspect_enabled=self.on_inspect_enabled)
    def on_inspect_enabled(self, instance, value):
        # Keep the button visual in sync when inspect mode changes elsewhere.
        self.btn.state = "down" if value else "normal"
    def on_button_state(self, instance, value):
        # Button press drives the console's inspect mode.
        self.console.inspect_enabled = (value == "down")
class ConsoleAddonFps(ConsoleAddon):
    """Addon displaying the current FPS at the top-right of the toolbar."""
    def init(self):
        self.lbl = ConsoleLabel(text="0 Fps")
        self.console.add_toolbar_widget(self.lbl, right=True)
    def activate(self):
        # Refresh the label twice a second while the console is shown.
        Clock.schedule_interval(self.update_fps, 1 / 2.)
    def deactivate(self):
        # FIX: this hook was previously named `deactivated`, which does not
        # match the ConsoleAddon.deactivate() lifecycle hook, so the FPS
        # timer was never unscheduled when the console was hidden.
        Clock.unschedule(self.update_fps)
    # Backward-compatible alias for any caller using the old (misspelled)
    # name.
    deactivated = deactivate
    def update_fps(self, *args):
        fps = Clock.get_fps()
        self.lbl.text = "{} Fps".format(int(fps))
class ConsoleAddonBreadcrumbView(RelativeLayout):
    # Currently selected widget whose ancestry is displayed.
    widget = ObjectProperty(None, allownone=True)
    # Buttons of the current breadcrumb, root-first.
    # NOTE(review): mutable class attribute shared by all instances until
    # first reassignment in on_widget -- confirm a single instance is assumed.
    parents = []
    def on_widget(self, instance, value):
        stack = self.ids.stack
        # determine if we can just highlight the current one
        # or if we need to rebuild the breadcrumb
        prefs = [btn.widget_ref() for btn in self.parents]
        if value in prefs:
            # ok, so just toggle this one instead.
            index = prefs.index(value)
            for btn in self.parents:
                btn.state = "normal"
            self.parents[index].state = "down"
            return
        # we need to rebuild the breadcrumb.
        stack.clear_widgets()
        if not value:
            return
        widget = value
        parents = []
        # Walk up the widget tree; the root widget is its own parent, which
        # terminates the loop.
        while True:
            btn = ConsoleButton(text=widget.__class__.__name__)
            btn.widget_ref = weakref.ref(widget)
            btn.bind(on_release=self.highlight_widget)
            parents.append(btn)
            if widget == widget.parent:
                break
            widget = widget.parent
        # parents was collected leaf-first; add root-first to the stack.
        for btn in reversed(parents):
            stack.add_widget(btn)
        self.ids.sv.scroll_x = 1
        self.parents = parents
        # `btn` is the last button created, i.e. the selected (leaf) widget.
        btn.state = "down"
    def highlight_widget(self, instance):
        # Clicking a breadcrumb button re-selects that ancestor.
        self.console.widget = instance.widget_ref()
class ConsoleAddonBreadcrumb(ConsoleAddon):
    """Addon showing the selected widget's ancestry at the bottom."""
    def init(self):
        self.view = ConsoleAddonBreadcrumbView()
        self.view.console = self.console
        self.console.ids.layout.add_widget(self.view)
    def activate(self):
        # Track selection changes only while the console is visible.
        self.console.bind(widget=self.update_content)
        self.update_content()
    def deactivate(self):
        self.console.unbind(widget=self.update_content)
    def update_content(self, *args):
        # Forward the selection to the breadcrumb view.
        self.view.widget = self.console.widget
class ConsoleAddonWidgetPanel(ConsoleAddon):
    """'Properties' panel: lists every Kivy property of the selected widget
    and builds an appropriate editor for the selected property."""

    def init(self):
        self.console.add_panel("Properties", self.panel_activate,
                               self.deactivate)

    def panel_activate(self):
        self.console.bind(widget=self.update_content)
        self.update_content()

    def deactivate(self):
        self.console.unbind(widget=self.update_content)

    def update_content(self, *args):
        """Rebuild the property tree for the currently selected widget."""
        widget = self.console.widget
        if not widget:
            return

        from kivy.uix.scrollview import ScrollView
        self.root = root = BoxLayout()
        self.sv = sv = ScrollView(scroll_type=["bars", "content"])
        treeview = TreeView(hide_root=True, size_hint_y=None)
        treeview.bind(minimum_height=treeview.setter("height"))
        keys = list(widget.properties().keys())
        keys.sort()
        node = None
        wk_widget = weakref.ref(widget)
        for key in keys:
            text = '%s' % key
            node = TreeViewProperty(text=text, key=key, widget_ref=wk_widget)
            node.bind(is_selected=self.show_property)
            try:
                # NOTE(review): update_node_content is not defined in this
                # class in this file -- presumably provided elsewhere;
                # confirm.
                widget.bind(**{
                    key: partial(self.update_node_content, weakref.ref(node))
                })
            except Exception:
                # Not every property supports bind(); skip those.
                # BUG FIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit.
                pass
            treeview.add_node(node)

        root.add_widget(sv)
        sv.add_widget(treeview)
        self.console.set_content(root)

    def show_property(self, instance, value, key=None, index=-1, *l):
        # normal call: (tree node, focus, )
        # nested call: (widget, prop value, prop key, index in dict/list)
        if value is False:
            return

        console = self.console
        content = None
        if key is None:
            # normal call
            nested = False
            widget = instance.widget
            key = instance.key
            prop = widget.property(key)
            value = getattr(widget, key)
        else:
            # nested call, we might edit subvalue
            nested = True
            widget = instance
            prop = None

        dtype = None
        if isinstance(prop, AliasProperty) or nested:
            # trying to resolve type dynamically
            # BUG FIX: was `type(value) in (str, str)` -- a leftover from
            # the Python 2 `(str, unicode)` pair that checked `str` twice.
            if type(value) is str:
                dtype = 'string'
            elif type(value) in (int, float):
                dtype = 'numeric'
            elif type(value) in (tuple, list):
                dtype = 'list'

        if isinstance(prop, NumericProperty) or dtype == 'numeric':
            content = TextInput(text=str(value) or '', multiline=False)
            content.bind(
                text=partial(self.save_property_numeric, widget, key, index))
        elif isinstance(prop, StringProperty) or dtype == 'string':
            content = TextInput(text=value or '', multiline=True)
            content.bind(
                text=partial(self.save_property_text, widget, key, index))
        elif (isinstance(prop, ListProperty) or
              isinstance(prop, ReferenceListProperty) or
              isinstance(prop, VariableListProperty) or dtype == 'list'):
            content = GridLayout(cols=1, size_hint_y=None)
            content.bind(minimum_height=content.setter('height'))
            for i, item in enumerate(value):
                button = Button(text=repr(item), size_hint_y=None, height=44)
                if isinstance(item, Widget):
                    # Widget element: jump to it in the inspector.
                    button.bind(on_release=partial(console.highlight_widget,
                                                   item, False))
                else:
                    # Plain value: recurse into a nested editor for it.
                    button.bind(on_release=partial(self.show_property, widget,
                                                   item, key, i))
                content.add_widget(button)
        elif isinstance(prop, OptionProperty):
            content = GridLayout(cols=1, size_hint_y=None)
            content.bind(minimum_height=content.setter('height'))
            for option in prop.options:
                button = ToggleButton(
                    text=option,
                    state='down' if option == value else 'normal',
                    group=repr(content.uid),
                    size_hint_y=None,
                    height=44)
                button.bind(
                    on_press=partial(self.save_property_option, widget, key))
                content.add_widget(button)
        elif isinstance(prop, ObjectProperty):
            if isinstance(value, Widget):
                content = Button(text=repr(value))
                content.bind(
                    on_release=partial(console.highlight_widget, value))
            elif isinstance(value, Texture):
                content = Image(texture=value)
            else:
                content = Label(text=repr(value))
        elif isinstance(prop, BooleanProperty):
            state = 'down' if value else 'normal'
            content = ToggleButton(text=key, state=state)
            content.bind(on_release=partial(self.save_property_boolean, widget,
                                            key, index))

        self.root.clear_widgets()
        self.root.add_widget(self.sv)
        if content:
            self.root.add_widget(content)

    @ignore_exception
    def save_property_numeric(self, widget, key, index, instance, value):
        # index >= 0 means we edit one element of a list-like property.
        if index >= 0:
            getattr(widget, key)[index] = float(instance.text)
        else:
            setattr(widget, key, float(instance.text))

    @ignore_exception
    def save_property_text(self, widget, key, index, instance, value):
        if index >= 0:
            getattr(widget, key)[index] = instance.text
        else:
            setattr(widget, key, instance.text)

    @ignore_exception
    def save_property_boolean(self, widget, key, index, instance, ):
        value = instance.state == 'down'
        if index >= 0:
            getattr(widget, key)[index] = value
        else:
            setattr(widget, key, value)

    @ignore_exception
    def save_property_option(self, widget, key, instance, *l):
        setattr(widget, key, instance.text)
class TreeViewWidget(Label, TreeViewNode):
    # Node of the widget-tree panel; `widget` holds a (proxy) reference to
    # the widget this node represents.
    widget = ObjectProperty(None)
class ConsoleAddonWidgetTreeImpl(TreeView):
    """TreeView specialization for browsing the widget hierarchy.

    Dispatches a custom `on_select_widget` event when the user picks a node.
    """

    # Widget currently highlighted in the tree (None when nothing is
    # selected).
    selected_widget = ObjectProperty(None, allownone=True)

    __events__ = ('on_select_widget', )

    def __init__(self, **kwargs):
        super(ConsoleAddonWidgetTreeImpl, self).__init__(**kwargs)
        # Trigger coalesces several scroll requests into a single
        # _update_scroll call on the next frame.
        self.update_scroll = Clock.create_trigger(self._update_scroll)

    def find_node_by_widget(self, widget):
        # Linear scan over all nodes, skipping the root. Nodes hold proxy
        # refs, so comparing against a dead widget raises ReferenceError.
        for node in self.iterate_all_nodes():
            if not node.parent_node:
                continue
            try:
                if node.widget == widget:
                    return node
            except ReferenceError:
                pass
        return None

    def update_selected_widget(self, widget):
        # Select the node matching `widget` and open every ancestor so the
        # selection becomes visible.
        if widget:
            node = self.find_node_by_widget(widget)
            if node:
                # select_widget=False: the selection originated outside the
                # tree, so do not re-dispatch on_select_widget.
                self.select_node(node, False)
                while node and isinstance(node, TreeViewWidget):
                    if not node.is_open:
                        self.toggle_node(node)
                    node = node.parent_node

    def on_selected_widget(self, inst, widget):
        if widget:
            self.update_selected_widget(widget)
            self.update_scroll()

    def select_node(self, node, select_widget=True):
        super(ConsoleAddonWidgetTreeImpl, self).select_node(node)
        if select_widget:
            try:
                self.dispatch("on_select_widget", node.widget.__self__)
            except ReferenceError:
                # Widget behind the node is already gone; nothing to select.
                pass

    def on_select_widget(self, widget):
        # Default handler for the custom event; listeners bind to it.
        pass

    def _update_scroll(self, *args):
        # Scroll so the selected node is visible; assumes self.parent
        # provides scroll_to() (a ScrollView) -- confirm in the kv rule.
        node = self._selected_node
        if not node:
            return
        self.parent.scroll_to(node)
class ConsoleAddonWidgetTreeView(RelativeLayout):
    """View for the "Tree" panel: a live tree mirroring the widget
    hierarchy rooted at the Window."""

    # Widget whose selection should be reflected in the tree.
    widget = ObjectProperty(None, allownone=True)
    # Root tree node representing the Window; created lazily in
    # update_widget_tree().
    _window_node = None

    def _update_widget_tree_node(self, node, widget, is_open=False):
        """Sync one tree node with `widget`'s children.

        Every child node is removed and re-added (reusing the node object
        when its widget is still alive) so the tree order follows
        widget.children. Returns the (node, child) pairs whose own
        children still need syncing.
        """
        tree = self.ids.widgettree
        update_nodes = []
        nodes = {}
        for cnode in node.nodes[:]:
            try:
                nodes[cnode.widget] = cnode
            except ReferenceError:
                # widget no longer exists, just remove it
                pass
            tree.remove_node(cnode)
        for child in widget.children:
            if isinstance(child, Console):
                # Never show the console itself in the inspected tree.
                continue
            if child in nodes:
                cnode = tree.add_node(nodes[child], node)
            else:
                cnode = tree.add_node(
                    TreeViewWidget(text=child.__class__.__name__,
                                   widget=child.proxy_ref,
                                   is_open=is_open), node)
            update_nodes.append((cnode, child))
        return update_nodes

    def update_widget_tree(self, *args):
        """Rebuild the whole tree breadth-first from the Window down, then
        re-select the current widget."""
        win = self.console.win
        if not self._window_node:
            self._window_node = self.ids.widgettree.add_node(
                TreeViewWidget(text="Window",
                               widget=win,
                               is_open=True))

        nodes = self._update_widget_tree_node(self._window_node, win,
                                              is_open=True)
        # Breadth-first descent: each pass syncs one more level.
        while nodes:
            ntmp = nodes[:]
            nodes = []
            for node in ntmp:
                nodes += self._update_widget_tree_node(*node)

        self.ids.widgettree.update_selected_widget(self.widget)
class ConsoleAddonWidgetTree(ConsoleAddon):
    """Addon providing the "Tree" panel: a refreshable view of the whole
    widget hierarchy."""

    def init(self):
        self.content = None
        self.console.add_panel("Tree", self.panel_activate, self.deactivate,
                               self.panel_refresh)

    def panel_activate(self):
        self.console.bind(widget=self.update_content)
        self.update_content()

    def deactivate(self):
        if self.content:
            # Drop references so the view does not keep widgets alive.
            self.content.widget = None
            self.content.console = None
        self.console.unbind(widget=self.update_content)

    def update_content(self, *args):
        # Lazily build the tree view, then sync it with the selection.
        if not self.content:
            view = ConsoleAddonWidgetTreeView()
            view.console = self.console
            self.content = view
        self.content.widget = self.console.widget
        self.content.update_widget_tree()
        self.console.set_content(self.content)

    def panel_refresh(self):
        if self.content:
            self.content.update_widget_tree()
class Console(RelativeLayout):
    """Console interface

    This widget is created by create_console(), when the module is loaded.
    During that time, you can add addons on the console to extend the
    functionalities, or add your own application stats / debugging module.
    """

    #: Array of addons that will be created at Console creation
    addons = [  # ConsoleAddonMode,
        ConsoleAddonSelect, ConsoleAddonFps, ConsoleAddonWidgetPanel,
        ConsoleAddonWidgetTree, ConsoleAddonBreadcrumb]

    #: Display mode of the Console, either docked at the bottom, or as a
    #: floating window.
    mode = OptionProperty("docked", options=["docked", "floated"])

    #: Current widget being selected
    widget = ObjectProperty(None, allownone=True)

    #: Indicate if the inspector inspection is enabled. If yes, the next
    #: touch down will select the widget under the touch
    inspect_enabled = BooleanProperty(False)

    #: True if the Console is activated (shown)
    activated = BooleanProperty(False)

    def __init__(self, **kwargs):
        # The target Window is supplied by create_console(); pop it before
        # the superclass consumes **kwargs.
        self.win = kwargs.pop('win', None)
        super(Console, self).__init__(**kwargs)
        self.avoid_bring_to_top = False
        with self.canvas.before:
            # Red translucent overlay drawn over the selected widget,
            # transformed into window space each frame.
            self.gcolor = Color(1, 0, 0, .25)
            PushMatrix()
            self.gtransform = Transform(Matrix())
            self.grect = Rectangle(size=(0, 0))
            PopMatrix()
        Clock.schedule_interval(self.update_widget_graphics, 0)

        # Instantiate all addons
        self._toolbar = {"left": [], "panels": [], "right": []}
        self._addons = []
        self._panel = None
        for addon in self.addons:
            instance = addon(self)
            self._addons.append(instance)
        self._init_toolbar()
        # Select the first panel
        self._panel = self._toolbar["panels"][0]
        self._panel.state = "down"
        self._panel.cb_activate()

    def _init_toolbar(self):
        # Toolbar layout: left widgets | panel buttons | spacer + right
        # widgets, with a separator after each group but the last.
        toolbar = self.ids.toolbar
        for key in ("left", "panels", "right"):
            if key == "right":
                # Spacer pushes the right-hand widgets to the far edge.
                toolbar.add_widget(Widget())
            for el in self._toolbar[key]:
                toolbar.add_widget(el)
            if key != "right":
                toolbar.add_widget(ConsoleAddonSeparator())

    @classmethod
    def register_addon(cls, addon):
        # Class-level registration: the addon will be instantiated for
        # every Console created afterwards.
        cls.addons.append(addon)

    def add_toolbar_widget(self, widget, right=False):
        """Add a widget in the top left toolbar of the Console.

        Use `right=True` if you wanna add the widget at the right instead.

        NOTE(review): this appends to the staging dict, not to ids.toolbar,
        so widgets added after __init__ only show up on the next
        _init_toolbar() call -- confirm that is intended.
        """
        key = "right" if right else "left"
        self._toolbar[key].append(widget)

    def remove_toolbar_widget(self, widget):
        """Remove a widget from the toolbar
        """
        self.ids.toolbar.remove_widget(widget)

    def add_panel(self, name, cb_activate, cb_deactivate, cb_refresh=None):
        """Add a new panel in the Console.

        - `cb_activate` is a callable that will be called when the panel is
          activated by the user.
        - `cb_deactivate` is a callable that will be called when the panel
          is deactivated or when the console will hide.
        - `cb_refresh` is an optional callable that is called if the user
          clicks again on the button displaying the panel.

        When activated, it's up to the panel to display a content in the
        Console by using :meth:`set_content`.
        """
        btn = ConsoleToggleButton(text=name)
        btn.cb_activate = cb_activate
        btn.cb_deactivate = cb_deactivate
        btn.cb_refresh = cb_refresh
        btn.bind(on_press=self._activate_panel)
        self._toolbar["panels"].append(btn)

    def _activate_panel(self, instance):
        # Switch panels, or refresh the current one when its toggle button
        # is pressed a second time.
        if self._panel != instance:
            self._panel.cb_deactivate()
            self._panel.state = "normal"
            self.ids.content.clear_widgets()
            self._panel = instance
            self._panel.cb_activate()
            self._panel.state = "down"
        else:
            # Keep the toggle visually down and optionally refresh.
            self._panel.state = "down"
            if self._panel.cb_refresh:
                self._panel.cb_refresh()

    def set_content(self, content):
        """Replace the Console content with a new one.
        """
        self.ids.content.clear_widgets()
        self.ids.content.add_widget(content)

    def on_touch_down(self, touch):
        # In inspect mode, a (left) touch no child consumed selects the
        # widget under it; a double tap leaves inspect mode.
        ret = super(Console, self).on_touch_down(touch)
        if (('button' not in touch.profile or touch.button == 'left') and
                not ret and self.inspect_enabled):
            self.highlight_at(*touch.pos)
            if touch.is_double_tap:
                self.inspect_enabled = False
            ret = True
        else:
            ret = self.collide_point(*touch.pos)
        return ret

    def on_touch_move(self, touch):
        # Dragging in inspect mode keeps updating the selection.
        ret = super(Console, self).on_touch_move(touch)
        if not ret and self.inspect_enabled:
            self.highlight_at(*touch.pos)
            ret = True
        return ret

    def on_touch_up(self, touch):
        # Swallow the touch so it does not reach the app underneath.
        ret = super(Console, self).on_touch_up(touch)
        if not ret and self.inspect_enabled:
            ret = True
        return ret

    def on_window_children(self, win, children):
        # Keep the console the top-most window child. The flag guards
        # against re-entrancy: remove/add below re-triggers this callback.
        if self.avoid_bring_to_top:
            return
        self.avoid_bring_to_top = True
        win.remove_widget(self)
        win.add_widget(self)
        self.avoid_bring_to_top = False

    def highlight_at(self, x, y):
        """Select a widget from a x/y window coordinate.
        This is mostly used internally when Select mode is activated
        """
        widget = None
        # reverse the loop - look at children on top first and
        # modalviews before others
        win_children = self.win.children
        children = chain((c for c in reversed(win_children)
                          if isinstance(c, ModalView)),
                         (c for c in reversed(win_children)
                          if not isinstance(c, ModalView)))
        for child in children:
            if child is self:
                continue
            widget = self.pick(child, x, y)
            if widget:
                break
        self.highlight_widget(widget)

    def highlight_widget(self, widget, *largs):
        # no widget to highlight, reduce rectangle to 0, 0
        self.widget = widget
        if not widget:
            self.grect.size = 0, 0

    def update_widget_graphics(self, *l):
        # Scheduled every frame: keep the highlight rectangle glued to the
        # selected widget, in window coordinates.
        if not self.activated:
            return
        if self.widget is None:
            self.grect.size = 0, 0
            return
        self.grect.size = self.widget.size
        matrix = self.widget.get_window_matrix()
        if self.gtransform.matrix.get() != matrix.get():
            self.gtransform.matrix = matrix

    def pick(self, widget, x, y):
        """Pick a widget at x/y, given a root `widget`
        """
        ret = None
        # try to filter widgets that are not visible (invalid inspect target)
        if (hasattr(widget, 'visible') and not widget.visible):
            return ret
        if widget.collide_point(x, y):
            ret = widget
            x2, y2 = widget.to_local(x, y)
            # reverse the loop - look at children on top first
            for child in reversed(widget.children):
                ret = self.pick(child, x2, y2) or ret
        return ret

    def on_activated(self, instance, activated):
        if activated:
            self._activate_console()
        else:
            self._deactivate_console()

    def _activate_console(self):
        if not self in self.win.children:
            self.win.add_widget(self)
        self.y = 0
        for addon in self._addons:
            addon.activate()
        Logger.info('Console: console activated')

    def _deactivate_console(self):
        for addon in self._addons:
            addon.deactivate()
        self.grect.size = 0, 0
        # Slide the console off-screen instead of removing it from the
        # window.
        self.y = -self.height
        self.widget = None
        self.inspect_enabled = False
        #self.win.remove_widget(self)
        # NOTE(review): _window_node looks like it belongs to the widget
        # tree view, not to Console -- this assignment appears to be a
        # leftover; confirm before relying on it.
        self._window_node = None
        Logger.info('Console: console deactivated')

    def keyboard_shortcut(self, win, scancode, *largs):
        # Ctrl+E (scancode 101 == ord('e')) toggles the console. Escape
        # (27) first leaves inspect mode, then closes the console. Arrow
        # keys walk the widget tree relative to the current selection.
        modifiers = largs[-1]
        if scancode == 101 and modifiers == ['ctrl']:
            self.activated = not self.activated
            if self.activated:
                self.inspect_enabled = True
            return True

        elif scancode == 27:
            if self.inspect_enabled:
                self.inspect_enabled = False
                return True
            if self.activated:
                self.activated = False
                return True

        if not self.activated or not self.widget:
            return

        if scancode == 273:  # top
            self.widget = self.widget.parent

        elif scancode == 274:  # down
            filtered_children = [c for c in self.widget.children
                                 if not isinstance(c, Console)]
            if filtered_children:
                self.widget = filtered_children[0]

        elif scancode == 276:  # left
            parent = self.widget.parent
            filtered_children = [c for c in parent.children
                                 if not isinstance(c, Console)]
            index = filtered_children.index(self.widget)
            index = max(0, index - 1)
            self.widget = filtered_children[index]

        elif scancode == 275:  # right
            parent = self.widget.parent
            filtered_children = [c for c in parent.children
                                 if not isinstance(c, Console)]
            index = filtered_children.index(self.widget)
            index = min(len(filtered_children) - 1, index + 1)
            self.widget = filtered_children[index]
def create_console(win, ctx, *l):
    """Instantiate the Console for *win*, store it on *ctx* and wire the
    window events it listens to."""
    console = Console(win=win)
    ctx.console = console
    win.bind(children=console.on_window_children,
             on_keyboard=console.keyboard_shortcut)
def start(win, ctx):
    """Create an Console instance attached to the *ctx* and bound to the
    Windows :meth:`~kivy.core.window.WindowBase.on_keyboard` event for
    capturing the keyboard shortcut.

    :Parameters:
        `win`: A :class:`Window <kivy.core.window.WindowBase>`
            The application Window to bind to.
        `ctx`: A :class:`~kivy.uix.widget.Widget` or subclass
            The Widget to be inspected.
    """
    # Defer creation to the next Clock tick so the Window is fully set up.
    callback = partial(create_console, win, ctx)
    Clock.schedule_once(callback)
def stop(win, ctx):
    """Stop and unload any active Inspectors for the given *ctx*."""
    console = getattr(ctx, "console", None)
    if console is not None:
        win.unbind(children=console.on_window_children,
                   on_keyboard=console.keyboard_shortcut)
        win.remove_widget(console)
        del ctx.console
| mit |
dougmolineux/APE_Server | deps/js/src/tests/manifest.py | 7 | 5940 | # Library for JSTest manifests.
#
# This includes classes for representing and parsing JS manifests.
import os, re, sys
from subprocess import *
from tests import TestCase
class XULInfo:
    """Characteristics of a XUL build: XPCOM ABI, target OS, and whether
    it is a debug build."""

    def __init__(self, abi, os, isdebug):
        self.abi = abi
        self.os = os
        self.isdebug = isdebug

    def as_js(self):
        """Return JS that when executed sets up variables so that JS expression
        predicates on XUL build info evaluate properly."""
        return 'var xulRuntime = { OS: "%s", XPCOMABI: "%s", shell: true }; var isDebugBuild=%s;' % (
            self.os,
            self.abi,
            str(self.isdebug).lower())

    @classmethod
    def create(cls, jsdir):
        """Create a XULInfo based on the current platform's characteristics."""

        # Our strategy is to find the autoconf.mk generated for the build and
        # read the values from there.

        # Find config/autoconf.mk by walking up from the JS shell directory.
        dir = jsdir
        while True:
            path = os.path.join(dir, 'config/autoconf.mk')
            if os.path.isfile(path):
                break
            if os.path.dirname(dir) == dir:
                # Reached the filesystem root without finding the file.
                print "Can't find config/autoconf.mk on a directory containing the JS shell (searched from %s)"%jsdir
                sys.exit(1)
            dir = os.path.dirname(dir)

        # Read the values.
        val_re = re.compile(r'(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)')
        kw = {}
        for line in open(path):
            m = val_re.match(line)
            if m:
                key, val = m.groups()
                val = val.rstrip()
                if key == 'TARGET_XPCOM_ABI':
                    kw['abi'] = val
                if key == 'OS_TARGET':
                    kw['os'] = val
                if key == 'MOZ_DEBUG':
                    kw['isdebug'] = (val == '1')
        return cls(**kw)
class XULInfoTester:
    """Evaluate XUL predicate expressions by running them through a JS
    shell, memoizing each result."""

    def __init__(self, xulinfo, js_bin):
        self.js_prolog = xulinfo.as_js()
        self.js_bin = js_bin
        # Maps JS expr to evaluation result.
        self.cache = {}

    def test(self, cond):
        """Test a XUL predicate condition against this local info."""
        # Only True/False are ever cached, so membership is a safe check.
        if cond in self.cache:
            return self.cache[cond]
        cmd = [self.js_bin, '-e', self.js_prolog, '-e', 'print(!!(%s))' % cond]
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if out in ('true\n', 'true\r\n'):
            ans = True
        elif out in ('false\n', 'false\r\n'):
            ans = False
        else:
            raise Exception("Failed to test XUL condition '%s'"%cond)
        self.cache[cond] = ans
        return ans
class NullXULInfoTester:
    """Can be used to parse manifests without a JS shell."""

    def test(self, cond):
        # With no shell available, report every condition as unmet.
        return False
def parse(filename, xul_tester, reldir = ''):
    """Parse a jstests manifest into a list of TestCase objects.

    Each non-comment line is either an `include` directive, a browser-only
    `url-prefix` directive (ignored here), or a test entry made of flag
    keywords followed by `script <name>`.

    Keyword arguments:
    filename -- the manifest file to read
    xul_tester -- object with a test(cond) method for XUL predicates
    reldir -- directory prefix applied to script paths (grows on include)
    """
    ans = []
    comment_re = re.compile(r'#.*')
    dir = os.path.dirname(filename)

    try:
        f = open(filename)
    except IOError:
        print "warning: include file not found: '%s'"%filename
        return ans

    for line in f:
        # Strip trailing comments and tokenize.
        sline = comment_re.sub('', line)
        parts = sline.split()
        if len(parts) == 0:
            # line is empty or just a comment, skip
            pass
        elif parts[0] == 'include':
            # Recursively parse the included manifest, tracking its reldir.
            include_file = parts[1]
            include_reldir = os.path.join(reldir, os.path.dirname(include_file))
            ans += parse(os.path.join(dir, include_file), xul_tester, include_reldir)
        elif parts[0] == 'url-prefix':
            # Doesn't apply to shell tests
            pass
        else:
            # Test entry: accumulate flags until all tokens are consumed.
            # (Flag names are forwarded to TestCase; presumably enable=run,
            # expect=should pass, random=nondeterministic.)
            script = None
            enable = True
            expect = True
            random = False
            slow = False
            pos = 0
            while pos < len(parts):
                if parts[pos] == 'fails':
                    expect = False
                    pos += 1
                elif parts[pos] == 'skip':
                    expect = enable = False
                    pos += 1
                elif parts[pos] == 'random':
                    random = True
                    pos += 1
                elif parts[pos].startswith('fails-if'):
                    # fails-if(COND): expected failure when COND holds.
                    cond = parts[pos][len('fails-if('):-1]
                    if xul_tester.test(cond):
                        expect = False
                    pos += 1
                elif parts[pos].startswith('asserts-if'):
                    # This directive means we may flunk some number of
                    # NS_ASSERTIONs in the browser. For the shell, ignore it.
                    pos += 1
                elif parts[pos].startswith('skip-if'):
                    cond = parts[pos][len('skip-if('):-1]
                    if xul_tester.test(cond):
                        expect = enable = False
                    pos += 1
                elif parts[pos].startswith('random-if'):
                    cond = parts[pos][len('random-if('):-1]
                    if xul_tester.test(cond):
                        random = True
                    pos += 1
                elif parts[pos] == 'script':
                    script = parts[pos+1]
                    pos += 2
                elif parts[pos] == 'slow':
                    slow = True
                    pos += 1
                elif parts[pos] == 'silentfail':
                    # silentfails use tons of memory, and Darwin doesn't support ulimit.
                    if xul_tester.test("xulRuntime.OS == 'Darwin'"):
                        expect = enable = False
                    pos += 1
                else:
                    print 'warning: invalid manifest line element "%s"'%parts[pos]
                    pos += 1
            assert script is not None
            ans.append(TestCase(os.path.join(reldir, script),
                                enable, expect, random, slow))
    return ans
| gpl-2.0 |
srm912/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
# Column indices of the start/end timestamp and start/end energy fields in
# a Heartbeat log line.
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1

# Profiler whose heartbeats carry the energy measurements used for power.
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'

# Per-trial summary file name and the line numbers of the time/energy/power
# totals inside it.
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
    """Attach some text labels.
    """
    for rect in rects:
        # Center the (currently empty) label just above each bar.
        label_x = rect.get_x() + rect.get_width() / 2.
        label_y = 1.05 * rect.get_height()
        ax.text(label_x, label_y, '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
    """Plot the raw totals for a configuration.

    Keyword arguments:
    config -- configuration name
    plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
    max_time, max_time_std, max_energy, max_energy_std -- single values
    normalize -- True/False
    """
    plot_data = sorted(plot_data)
    keys = [p for (p, tt, tts, te, tes) in plot_data]
    total_times = [tt for (p, tt, tts, te, tes) in plot_data]
    total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
    total_energies = [te for (p, tt, tts, te, tes) in plot_data]
    total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]

    fig, ax1 = plt.subplots()
    ind = np.arange(len(keys))  # the x locations for the groups
    width = 0.35  # the width of the bars

    # add some text for labels, title and axes ticks
    ax1.set_title('Time/Energy Data for Configuration ' + config)
    ax1.set_xticks(ind + width)
    ax1.set_xticklabels(keys, rotation=45)
    fig.set_tight_layout(True)
    fig.set_size_inches(len(plot_data) / 1.5, 8)

    # Energy gets its own y-axis sharing the x-axis.
    ax2 = ax1.twinx()

    # Normalize
    # NOTE(review): the in-place divisions below rebind the Python lists to
    # numpy arrays (list / numpy scalar yields an ndarray).
    if normalize:
        total_times_std /= np.sum(total_times)
        total_times /= np.sum(total_times)
        total_energies_std /= np.sum(total_energies)
        total_energies /= np.sum(total_energies)
        ax1.set_ylabel('Time (Normalized)')
        ax2.set_ylabel('Energy (Normalized)')
    else:
        # set time in us instead of ns
        total_times_std /= np.array(1000000.0)
        total_times /= np.array(1000000.0)
        total_energies_std /= np.array(1000000.0)
        total_energies /= np.array(1000000.0)
        ax1.set_ylabel('Time (ms)')
        ax2.set_ylabel('Energy (Joules)')

    rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
    rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
    ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])

    # set axis
    # Use shared maxima so plots are comparable across configurations.
    x1, x2, y1, y2 = plt.axis()
    if normalize:
        ax1.set_ylim(ymin=0, ymax=1)
        ax2.set_ylim(ymin=0, ymax=1)
    else:
        ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
        ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))

    autolabel(rects1, ax1)
    autolabel(rects2, ax2)

    # plt.show()
    plt.savefig(path.join(output_dir, config + ".png"))
    plt.close(fig)
def create_raw_total_data(config_data):
    """Get the raw data to plot for a configuration
    Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
    Keyword arguments:
    config_data -- (trial, trial_data)
    """
    # Heartbeat counts can differ between trials, so gather the per-trial
    # totals first and average afterwards.
    times_by_profiler = {}
    energies_by_profiler = {}
    for (trial, trial_data) in config_data:
        for (profiler, ts, te, es, ee) in trial_data:
            times_by_profiler.setdefault(profiler, []).append(np.sum(te - ts))
            energies_by_profiler.setdefault(profiler, []).append(np.sum(ee - es))
    # Mean and stddev of the per-trial time and energy totals.
    return [(profiler,
             np.mean(times_by_profiler[profiler]),
             np.std(times_by_profiler[profiler]),
             np.mean(energies_by_profiler[profiler]),
             np.std(energies_by_profiler[profiler]))
            for profiler in times_by_profiler.keys()]
def plot_all_raw_totals(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
    os.makedirs(raw_total_norm_out_dir)
    raw_total_out_dir = path.join(output_dir, 'raw_totals')
    os.makedirs(raw_total_out_dir)

    # (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
    raw_totals_data = [(config, create_raw_total_data(config_data))
                       for (config, config_data) in config_list]

    mean_times = []
    mean_times_std = []
    mean_energies = []
    mean_energies_std = []
    for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
        for (p, tt, tts, te, tes) in profiler_tup:
            mean_times.append(tt)
            mean_times_std.append(tts)
            mean_energies.append(te)
            mean_energies_std.append(tes)
    # get consistent max time/energy values across plots
    max_t = np.max(mean_times)
    max_t_std = np.max(mean_times_std)
    max_e = np.max(mean_energies)
    max_e_std = np.max(mean_energies_std)

    # Plain for-loop instead of list comprehensions evaluated only for
    # their side effects (one normalized and one raw plot per config).
    for (config, plot_data) in raw_totals_data:
        plot_raw_totals(config, plot_data, max_t, max_t_std, max_e, max_e_std,
                        raw_total_norm_out_dir, True)
        plot_raw_totals(config, plot_data, max_t, max_t_std, max_e, max_e_std,
                        raw_total_out_dir, False)
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
    """Plot time series for a single trial.

    Keyword arguments:
    config -- the config name
    trial -- the trial name
    trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
    max_end_time -- single value to use as max X axis value (for consistency across trials)
    output_dir -- the output directory
    """
    # TODO: Some profilers may have parallel tasks - need to identify this on plots
    max_end_time = max_end_time / 1000000.0
    trial_data = sorted(trial_data)

    fig, ax1 = plt.subplots()
    keys = [p for (p, ts, te, es, ee) in trial_data]
    # add some text for labels, title and axes ticks
    ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
    ax1.set_xlabel('Time (ms)')
    ax1.grid(True)
    width = 8  # the width of the bars
    ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
    ax1.set_yticklabels(keys)
    ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
    ax1.set_xlim(xmin=0, xmax=max_end_time)
    fig.set_tight_layout(True)
    fig.set_size_inches(16, len(trial_data) / 3)

    # One horizontal bar row per profiler, one segment per heartbeat.
    i = 10
    for (p, ts, te, es, ee) in trial_data:
        # BUG FIX (portability): range() instead of the Python-2-only
        # xrange(); identical behavior under Python 2.
        xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0)
                   for j in range(len(ts))]
        ax1.broken_barh(xranges, (i - 0.5 * width, width))
        i += 10

    # place a vbar at the final time for this trial
    # (list comprehension instead of map/filter: same result on Python 2,
    # and np.max would choke on a Python 3 map iterator)
    last_profiler_times = [np.nanmax(te) for (p, ts, te, es, ee) in trial_data
                           if len(te) > 0]
    plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')

    # Overlay instantaneous power from the energy profiler's heartbeats.
    power_times = []
    power_values = []
    for (p, ts, te, es, ee) in trial_data:
        if p == ENERGY_PROFILER_NAME:
            power_times = te / 1000000.0
            power_values = (ee - es) / ((te - ts) / 1000.0)
    ax2 = ax1.twinx()
    ax2.set_xlim(xmin=0, xmax=max_end_time)
    ax2.set_ylim(ymin=0, ymax=max_power)
    ax2.set_ylabel('Power (Watts)')
    ax2.plot(power_times, power_values, color='r')

    # plt.show()
    plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
    plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
    """Compute power from start and end energy and times.
    Return: power values
    """
    # Energy delta divided by the elapsed time (converted to ms-based
    # units, exactly as the original formula).
    energy_delta = ee - es
    time_delta = (te - ts) / 1000.0
    return energy_delta / time_delta
def plot_all_time_series(config_list, output_dir):
    """Plot per-trial time series charts of profiler activity and power.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    time_series_out_dir = path.join(output_dir, 'time_series')
    os.makedirs(time_series_out_dir)

    # First pass: compute shared axis maxima so trials are comparable.
    max_end_times = []
    max_power_values = []
    for (c, cd) in config_list:
        for (t, td) in cd:
            # Latest heartbeat end time in this trial, ignoring profilers
            # with no heartbeats. (Python 2 map/filter returning lists.)
            trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
            max_end_times.append(np.nanmax(trial_max_end_times))
            for (p, ts, te, es, ee) in td:
                # We only care about the energy profiler (others aren't reliable for instant power anyway)
                if p == ENERGY_PROFILER_NAME and len(te) > 0:
                    max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
    max_time = np.nanmax(max_end_times)
    max_power = np.nanmax(np.array(max_power_values)) * 1.2  # leave a little space at the top

    # Second pass: one plot per trial (comprehension used for side effect).
    for (config, config_data) in config_list:
        [plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
         for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
    """Read a heartbeat log file.

    Return: (profiler name, [start times], [end times], [start energies], [end energies])

    Keyword arguments:
    profiler_hb_log -- the file to read
    """
    with warnings.catch_warnings():
        try:
            warnings.simplefilter("ignore")
            # Load only the four needed columns; ndmin=1 keeps the result
            # array-shaped even for a single-row file.
            time_start, time_end, energy_start, energy_end = \
                np.loadtxt(profiler_hb_log,
                           dtype=np.dtype('uint64'),
                           skiprows=1,
                           usecols=(HB_LOG_IDX_START_TIME,
                                    HB_LOG_IDX_END_TIME,
                                    HB_LOG_IDX_START_ENERGY,
                                    HB_LOG_IDX_END_ENERGY),
                           unpack=True,
                           ndmin=1)
        except ValueError:
            # Empty/malformed log: treat as "no heartbeats".
            time_start, time_end, energy_start, energy_end = [], [], [], []
    # Profiler name is embedded in the file name: <prefix>-<name>.<ext>
    name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
    return (name,
            np.atleast_1d(time_start),
            np.atleast_1d(time_end),
            np.atleast_1d(energy_start),
            np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
    """Process trial directory.

    Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
    Time and energy are normalized to 0 start values.

    Keyword arguments:
    trial_dir -- the directory for this trial
    """
    # Read every *.log heartbeat file in the trial directory.
    # (Python 2 map/filter: log_data must be a list since it is iterated
    # more than once below.)
    log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
                   filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))

    # Find the earliest timestamps and energy readings
    min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
    min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))

    # Normalize timing/energy data to start values of 0
    return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
    """Process a configuration directory.

    Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]

    Keyword arguments:
    config_dir -- the directory for this configuration - contains subdirectories for each trial
    """
    results = []
    for trial_dir in os.listdir(config_dir):
        trial_path = path.join(config_dir, trial_dir)
        results.append((trial_dir, process_trial_dir(trial_path)))
    return results
def process_logs(log_dir):
    """Process log directory.

    Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]

    Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
    """
    results = []
    for config_dir in os.listdir(log_dir):
        # Directory names look like "<prefix>_<config>"; keep the config
        # part for the returned key.
        config_name = config_dir.split('_')[1]
        results.append((config_name,
                        process_config_dir(path.join(log_dir, config_dir))))
    return results
def find_best_executions(log_dir):
    """Get the best time, energy, and power from the characterization summaries.

    Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))

    Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
    """
    # Sentinel: no real run should exceed this value in any metric.
    DEFAULT = ('', '', 1000000000.0)
    min_time = DEFAULT
    min_energy = DEFAULT
    min_power = DEFAULT
    for config_dir in os.listdir(log_dir):
        for trial_dir in os.listdir(path.join(log_dir, config_dir)):
            with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
                lines = s.readlines()
                # Summary lines are "label: value"; parse the value part.
                time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
                energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
                power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
                if time < min_time[2]:
                    min_time = (config_dir, trial_dir, time)
                if energy < min_energy[2]:
                    min_energy = (config_dir, trial_dir, energy)
                # BUG FIX: originally compared the float against the whole
                # tuple ("power < min_power"), which in Python 2 is always
                # True, so min_power was overwritten on every iteration.
                if power < min_power[2]:
                    min_power = (config_dir, trial_dir, power)
    return (min_time, min_energy, min_power)
def main():
    """This script processes the log files from the "characterize.py" script and produces visualizations.
    """
    # Default log directory
    directory = 'heartbeat_logs'
    # Default output directory
    output_dir = 'plots'
    # Default android
    android = False
    # Parsing the input of the script
    parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
    parser.add_argument("-d", "--directory",
                        default=directory,
                        help="Heartbeat log directory \"-d heartbeat_logs\"")
    parser.add_argument("-o", "--output",
                        default=output_dir,
                        help="Specify the log output directory, for example \"-o plots\"")
    parser.add_argument("--android",
                        action="store_true",
                        dest="android",
                        default=False,
                        help="Specify if processing results from Android")
    args = parser.parse_args()
    # argparse already applied the defaults above, so these re-assignments
    # only matter when the user supplied explicit values.
    if args.directory:
        directory = args.directory
    if args.output:
        output_dir = args.output
    if args.android:
        android = args.android
    # Refuse to run without input, and refuse to overwrite existing output.
    if not os.path.exists(directory):
        print "Input directory does not exist: " + directory
        sys.exit(1)
    if os.path.exists(output_dir):
        print "Output directory already exists: " + output_dir
        sys.exit(1)
    res = process_logs(directory)
    if not android:
        # NOTE(review): the best-execution search is skipped for Android runs,
        # presumably because those lack summary files -- confirm.
        best = find_best_executions(directory)
        print 'Best time:', best[0]
        print 'Best energy:', best[1]
        print 'Best power:', best[2]
    os.makedirs(output_dir)
    plot_all_raw_totals(res, output_dir)
    plot_all_time_series(res, output_dir)


if __name__ == "__main__":
    main()
| mpl-2.0 |
cloudera/hue | apps/sqoop/src/sqoop/tests.py | 2 | 7006 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from nose.tools import assert_true, assert_equal
from nose.plugins.skip import SkipTest
from django.urls import reverse
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group, grant_access
from useradmin.models import User
from sqoop.client.link import Link
from sqoop.client.job import Job
from sqoop.test_base import SqoopServerProvider
LOG = logging.getLogger(__name__)
class TestSqoopServerBase(SqoopServerProvider):
    """Shared fixture for sqoop tests: logs in a test user and provides
    helpers to create/delete links and jobs against the sqoop server."""

    @classmethod
    def setup_class(cls):
        SqoopServerProvider.setup_class()
        cls.client = make_logged_in_client(username='test', is_superuser=False)
        cls.user = User.objects.get(username='test')
        add_to_group('test')
        grant_access("test", "test", "sqoop")

    def create_link(self, name='test1', connector_id=1):
        """Create a link on the server, filling in inputs from LINK_CONFIG_VALUES."""
        link = Link(name, connector_id)
        link.linkConfig = self.client.get_connectors()[0].link_config
        for _config in link.linkConfig:
            for _input in _config.inputs:
                if _input.name not in LINK_CONFIG_VALUES:
                    LOG.warning("Link config input mapping %s does not exist. Maybe it's new?" % _input.name)
                elif LINK_CONFIG_VALUES[_input.name]:
                    _input.value = LINK_CONFIG_VALUES[_input.name]
        return self.client.create_link(link)

    def create_job(self, name="test1", from_link_id=1, to_link_id=2, from_connector_id=1, to_connector_id=2):
        """Create a job on the server, filling in FROM/TO/driver config inputs
        from the module-level *_CONFIG_VALUES mappings."""
        job = Job(name, from_link_id, to_link_id, from_connector_id, to_connector_id)
        job.driver_config = self.client.get_driver().job_config
        job.from_config = self.client.get_connectors()[0].job_config['FROM']
        job.to_config = self.client.get_connectors()[0].job_config['TO']
        for _from_config in job.from_config:
            for _input in _from_config.inputs:
                if _input.name not in FROM_JOB_CONFIG_VALUES:
                    LOG.warning("From Job config input mapping %s does not exist. Maybe it's new?" % _input.name)
                elif FROM_JOB_CONFIG_VALUES[_input.name]:
                    _input.value = FROM_JOB_CONFIG_VALUES[_input.name]
        for _to_config in job.to_config:
            for _input in _to_config.inputs:
                if _input.name not in TO_JOB_CONFIG_VALUES:
                    # BUG FIX: the message had no %s placeholder, so the '%'
                    # interpolation below raised TypeError whenever it ran.
                    LOG.warning("To Job config input mapping %s does not exist. Maybe it's new?" % _input.name)
                elif TO_JOB_CONFIG_VALUES[_input.name]:
                    _input.value = TO_JOB_CONFIG_VALUES[_input.name]
        for _driver_config in job.driver_config:
            for _input in _driver_config.inputs:
                if _input.name not in DRIVER_CONFIG_VALUES:
                    # BUG FIX: same missing-%s problem as the TO-config branch.
                    LOG.warning("Driver Job config input mapping %s does not exist. Maybe it's new?" % _input.name)
                elif DRIVER_CONFIG_VALUES[_input.name]:
                    _input.value = DRIVER_CONFIG_VALUES[_input.name]
        return self.client.create_job(job)

    def delete_sqoop_object(self, obj):
        """Delete a Link or Job on the server, dispatching on its type."""
        if isinstance(obj, Link):
            self.client.delete_link(obj)
        elif isinstance(obj, Job):
            self.client.delete_job(obj)

    def delete_sqoop_objects(self, objects):
        """Delete each object in *objects* in order."""
        for obj in objects:
            self.delete_sqoop_object(obj)
class TestWithSqoopServer(TestSqoopServerBase):

    def test_list_jobs(self):
        """The jobs endpoint returns a JSON object containing a 'jobs' key."""
        response = self.client.get(reverse('sqoop:jobs'))
        payload = json.loads(response.content)
        assert_true('jobs' in payload, payload)
class TestSqoopClientLinks(TestSqoopServerBase):
    """Round-trip tests for link create/get/update/delete."""

    def setUp(self):
        raise SkipTest() # These tests are outdated

    def test_link(self):
        link3 = None
        try:
            # Create
            link = self.create_link(name='link1')
            link2 = self.client.get_link(link.id)
            assert_true(link2.id)
            assert_equal(link.name, link2.name)
            # Update
            link2.name = 'link-new-1'
            self.client.update_link(link2)
            link3 = self.client.get_link(link2.id)
            assert_true(link3.id)
            # BUG FIX: the original compared link3.name to itself, which is
            # vacuously true; the intent is to verify the rename round-trips.
            assert_equal(link2.name, link3.name)
        finally:
            # Only the re-fetched link is deleted; link/link2 refer to the
            # same server object.
            if link3:
                self.client.delete_link(link3)

    def test_get_links(self):
        link = None
        try:
            link = self.create_link(name='link2')
            links = self.client.get_links()
            assert_true(len(links) > 0)
        finally:
            if link:
                self.client.delete_link(link)
class TestSqoopClientJobs(TestSqoopServerBase):
    """Round-trip tests for job create/get/update and listing."""

    def setUp(self):
        raise SkipTest() # These tests are outdated

    def test_job(self):
        """Create a job between two fresh links, fetch it, and rename it."""
        removable = []
        # Create
        from_link = self.create_link(name='link3from')
        to_link = self.create_link(name='link3to')
        try:
            removable.append(from_link)
            removable.append(to_link)
            job = self.create_job("job1", from_link_id=from_link.id, to_link_id=to_link.id)
            # Insert at the front so the job is deleted before the links it
            # references.
            removable.insert(0, job)
            assert_true(job.id)
            job2 = self.client.get_job(job.id)
            assert_true(job2.id)
            assert_equal(job.id, job2.id)
            # Update
            job.name = 'job-new-1'
            job3 = self.client.update_job(job)
            assert_equal(job.name, job3.name)
        finally:
            self.delete_sqoop_objects(removable)

    def test_get_jobs(self):
        """A freshly created job appears in the server's job listing."""
        removable = []
        from_link = self.create_link(name='link4from')
        to_link = self.create_link(name='link4to')
        try:
            removable.append(from_link)
            removable.append(to_link)
            job = self.create_job("job2", from_link_id=from_link.id, to_link_id=to_link.id)
            # Job first so cleanup removes it before its links.
            removable.insert(0, job)
            assert_true(job.id)
            jobs = self.client.get_jobs()
            assert_true(len(jobs) > 0)
        finally:
            self.delete_sqoop_objects(removable)
# Test fixture data: input values keyed by sqoop config-input name.
# A value of None means "leave the server-provided default in place"
# (see the `elif ...VALUES[_input.name]:` checks in TestSqoopServerBase).

# Inputs for link configs (JDBC connection details).
LINK_CONFIG_VALUES = {
    'linkConfig.jdbcDriver': 'org.apache.derby.jdbc.EmbeddedDriver',
    'linkConfig.String': 'jdbc%3Aderby%3A%2Ftmp%2Ftest',
    'linkConfig.username': 'abe',
    'linkConfig.password': 'test',
    'linkConfig.jdbcProperties': None
}

# Inputs for the FROM side of a job (what to read from the database).
FROM_JOB_CONFIG_VALUES = {
    'fromJobConfig.schemaName': None,
    'fromJobConfig.tableName': 'test',
    'fromJobConfig.sql': None,
    'fromJobConfig.columns': 'name',
    'fromJobConfig.partitionColumn': 'id',
    'fromJobConfig.boundaryQuery': None,
    'fromJobConfig.allowNullValueInPartitionColumn': None
}

# Inputs for the TO side of a job (where to write the output).
TO_JOB_CONFIG_VALUES = {
    'toJobConfig.outputFormat': 'TEXT_FILE',
    'toJobConfig.outputDirectory': '/tmp/test.out',
    'toJobConfig.storageType': 'HDFS'
}

# Inputs for the driver (parallelism settings).
DRIVER_CONFIG_VALUES = {
    'throttlingConfig.numExtractor': '3',
    'throttlingConfig.numLoaders': '3'
}
| apache-2.0 |
Flimm/linkchecker | third_party/dnspython/dns/rdataset.py | 9 | 11684 | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
# Raised by Rdataset.add() when a SIG/RRSIG's covered type conflicts with
# the rdataset's existing covers value.
class DifferingCovers(dns.exception.DNSException):
    """Raised if an attempt is made to add a SIG/RRSIG whose covered type
    is not the same as that of the other rdatas in the rdataset."""
    pass
# Raised by Rdataset.add() when the rdata's class or type does not match
# the rdataset's.
class IncompatibleTypes(dns.exception.DNSException):
    """Raised if an attempt is made to add rdata of an incompatible type."""
    pass
class Rdataset(dns.set.Set):
    """A DNS rdataset.

    @ivar rdclass: The class of the rdataset
    @type rdclass: int
    @ivar rdtype: The type of the rdataset
    @type rdtype: int
    @ivar covers: The covered type. Usually this value is
    dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
    dns.rdatatype.RRSIG, then the covers value will be the rdata
    type the SIG/RRSIG covers. The library treats the SIG and RRSIG
    types as if they were a family of
    types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
    easier to work with than if RRSIGs covering different rdata
    types were aggregated into a single RRSIG rdataset.
    @type covers: int
    @ivar ttl: The DNS TTL (Time To Live) value
    @type ttl: int
    """

    __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']

    def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
        """Create a new rdataset of the specified class and type.

        @see: the description of the class instance variables for the
        meaning of I{rdclass} and I{rdtype}"""
        super(Rdataset, self).__init__()
        self.rdclass = rdclass
        self.rdtype = rdtype
        self.covers = covers
        self.ttl = 0

    def _clone(self):
        # Copy the rdataset-specific attributes onto the clone produced by
        # the base Set class.
        obj = super(Rdataset, self)._clone()
        obj.rdclass = self.rdclass
        obj.rdtype = self.rdtype
        obj.covers = self.covers
        obj.ttl = self.ttl
        return obj

    def update_ttl(self, ttl):
        """Set the TTL of the rdataset to be the lesser of the set's current
        TTL or the specified TTL. If the set contains no rdatas, set the TTL
        to the specified TTL.

        @param ttl: The TTL
        @type ttl: int"""
        if len(self) == 0:
            self.ttl = ttl
        elif ttl < self.ttl:
            self.ttl = ttl

    def add(self, rd, ttl=None):
        """Add the specified rdata to the rdataset.

        If the optional I{ttl} parameter is supplied, then
        self.update_ttl(ttl) will be called prior to adding the rdata.

        @param rd: The rdata
        @type rd: dns.rdata.Rdata object
        @param ttl: The TTL
        @type ttl: int"""
        #
        # If we're adding a signature, do some special handling to
        # check that the signature covers the same type as the
        # other rdatas in this rdataset. If this is the first rdata
        # in the set, initialize the covers field.
        #
        if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
            raise IncompatibleTypes
        if not ttl is None:
            self.update_ttl(ttl)
        if self.rdtype == dns.rdatatype.RRSIG or \
           self.rdtype == dns.rdatatype.SIG:
            covers = rd.covers()
            if len(self) == 0 and self.covers == dns.rdatatype.NONE:
                self.covers = covers
            elif self.covers != covers:
                raise DifferingCovers
        # Singleton types may only hold one rdata, so adding a new one
        # replaces the current contents.
        if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
            self.clear()
        super(Rdataset, self).add(rd)

    def union_update(self, other):
        # Merge TTLs (keep the smaller) before merging the rdatas.
        self.update_ttl(other.ttl)
        super(Rdataset, self).union_update(other)

    def intersection_update(self, other):
        self.update_ttl(other.ttl)
        super(Rdataset, self).intersection_update(other)

    def update(self, other):
        """Add all rdatas in other to self.

        @param other: The rdataset from which to update
        @type other: dns.rdataset.Rdataset object"""
        self.update_ttl(other.ttl)
        super(Rdataset, self).update(other)

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'

    def __str__(self):
        return self.to_text()

    def __hash__(self):
        # Combine the identifying triple with the base set's hash of the
        # contained rdatas.
        return hash((self.rdclass, self.rdtype, self.covers)) + \
               super(Rdataset, self).__hash__()

    def __eq__(self, other):
        """Two rdatasets are equal if they have the same class, type, and
        covers, and contain the same rdata.

        @rtype: bool"""
        if not isinstance(other, Rdataset):
            return False
        if self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype or \
           self.covers != other.covers:
            return False
        return super(Rdataset, self).__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_text(self, name=None, origin=None, relativize=True,
                override_rdclass=None, **kw):
        """Convert the rdataset into DNS master file format.

        @see: L{dns.name.Name.choose_relativity} for more information
        on how I{origin} and I{relativize} determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        to_text() method.

        @param name: If name is not None, emit a RRs with I{name} as
        the owner name.
        @type name: dns.name.Name object
        @param origin: The origin for relative names, or None.
        @type origin: dns.name.Name object
        @param relativize: True if names should names be relativized
        @type relativize: bool"""
        if not name is None:
            name = name.choose_relativity(origin, relativize)
            ntext = str(name)
            pad = ' '
        else:
            ntext = ''
            pad = ''
        s = StringIO.StringIO()
        if not override_rdclass is None:
            rdclass = override_rdclass
        else:
            rdclass = self.rdclass
        if len(self) == 0:
            #
            # Empty rdatasets are used for the question section, and in
            # some dynamic updates, so we don't need to print out the TTL
            # (which is meaningless anyway).
            #
            print >> s, '%s%s%s %s' % (ntext, pad,
                                       dns.rdataclass.to_text(rdclass),
                                       dns.rdatatype.to_text(self.rdtype))
        else:
            for rd in self:
                print >> s, '%s%s%d %s %s %s' % \
                      (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
                       dns.rdatatype.to_text(self.rdtype),
                       rd.to_text(origin=origin, relativize=relativize, **kw))
        #
        # We strip off the final \n for the caller's convenience in printing
        #
        return s.getvalue()[:-1]

    def to_wire(self, name, file, compress=None, origin=None,
                override_rdclass=None, want_shuffle=True):
        """Convert the rdataset to wire format.

        @param name: The owner name of the RRset that will be emitted
        @type name: dns.name.Name object
        @param file: The file to which the wire format data will be appended
        @type file: file
        @param compress: The compression table to use; the default is None.
        @type compress: dict
        @param origin: The origin to be appended to any relative names when
        they are emitted. The default is None.
        @returns: the number of records emitted
        @rtype: int
        """
        if not override_rdclass is None:
            rdclass = override_rdclass
            want_shuffle = False
        else:
            rdclass = self.rdclass
        file.seek(0, 2)
        if len(self) == 0:
            # Emit a single record with zero TTL and zero RDLENGTH
            # (question-section / dynamic-update style).
            name.to_wire(file, compress, origin)
            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
            file.write(stuff)
            return 1
        else:
            if want_shuffle:
                l = list(self)
                random.shuffle(l)
            else:
                l = self
            for rd in l:
                name.to_wire(file, compress, origin)
                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
                                    self.ttl, 0)
                file.write(stuff)
                start = file.tell()
                rd.to_wire(file, compress, origin)
                end = file.tell()
                assert end - start < 65536
                # Back-patch the 16-bit RDLENGTH placeholder (the trailing 0
                # packed above) now that the rdata's wire size is known, then
                # return to the end of the file.
                file.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                file.write(stuff)
                file.seek(0, 2)
            return len(self)

    def match(self, rdclass, rdtype, covers):
        """Returns True if this rdataset matches the specified class, type,
        and covers"""
        if self.rdclass == rdclass and \
           self.rdtype == rdtype and \
           self.covers == covers:
            return True
        return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified list of rdatas in text format.

    @rtype: dns.rdataset.Rdataset object
    """
    # Accept class/type either as mnemonic strings or as numeric codes.
    if isinstance(rdclass, (str, unicode)):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    rdataset = Rdataset(rdclass, rdtype)
    rdataset.update_ttl(ttl)
    for text in text_rdatas:
        rdataset.add(dns.rdata.from_text(rdataset.rdclass, rdataset.rdtype, text))
    return rdataset
def from_text(rdclass, rdtype, ttl, *text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified rdatas in text format.

    @rtype: dns.rdataset.Rdataset object
    """
    # Varargs convenience wrapper around from_text_list().
    return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.

    The rdataset's class and type are taken from the first rdata.

    @rtype: dns.rdataset.Rdataset object
    """
    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    r = None
    for rd in rdatas:
        if r is None:
            r = Rdataset(rd.rdclass, rd.rdtype)
            r.update_ttl(ttl)
            # (removed a dead "first_time = False" assignment that was never
            # read anywhere in the function)
        r.add(rd)
    return r
def from_rdata(ttl, *rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified rdata objects.

    @rtype: dns.rdataset.Rdataset object
    """
    # Varargs convenience wrapper around from_rdata_list().
    return from_rdata_list(ttl, rdatas)
| gpl-2.0 |
garg10may/youtube-dl | youtube_dl/downloader/common.py | 95 | 13848 | from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
decodeArgument,
format_bytes,
timeconvert,
)
class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    ratelimit:          Download speed limit, in bytes/sec.
    retries:            Number of times to retry for HTTP error 5xx
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
    noprogress:         Do not print the progress bar.
    logtostderr:        Log messages to stderr instead of stdout.
    consoletitle:       Display progress in console window's titlebar.
    nopart:             Do not use temporary .part files.
    updatetime:         Use the Last-modified header to set output file timestamps.
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
    external_downloader_args:  A list of additional command-line arguments for the
                        external downloader.

    Subclasses of this one must re-define the real_download method.
    """

    _TEST_FILE_SIZE = 10241
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params
        self.add_progress_hook(self.report_progress)

    @staticmethod
    def format_seconds(seconds):
        """Format a duration as MM:SS or HH:MM:SS; '--:--:--' above 99 hours."""
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        """Estimate remaining seconds from bytes downloaded so far, or None."""
        if total is None:
            return None
        if now is None:
            now = time.time()
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        """Pick the next read size: roughly the measured rate, clamped to
        [bytes/2, min(bytes*2, 4MB)]."""
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        # An empty suffix indexes 'b' (0) -> multiplier 1.
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        # NOTE(review): delegates to to_screen, not an stderr-specific sink;
        # looks intentional upstream but confirm before relying on it.
        self.ydl.to_screen(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, now, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        if now is None:
            now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            # Sleep just long enough that elapsed time matches the limit.
            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == '-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + '.part'

    def undo_temp_name(self, filename):
        if filename.endswith('.part'):
            return filename[:-len('.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error('unable to rename file: %s' % compat_str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen('[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        fullmsg = '[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                # Pad with spaces so a shorter line fully overwrites the
                # previous one when using carriage-return updates.
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += ' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = '\r'
            else:
                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title('youtube-dl ' + msg)

    def report_progress(self, s):
        """Progress hook: render a status dict as a one-line progress report."""
        if s['status'] == 'finished':
            if self.params.get('noprogress', False):
                self.to_screen('[download] Download completed')
            else:
                s['_total_bytes_str'] = format_bytes(s['total_bytes'])
                if s.get('elapsed') is not None:
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
                else:
                    msg_template = '100%% of %(_total_bytes_str)s'
                self._report_progress_status(
                    msg_template % s, is_last_line=True)

        if self.params.get('noprogress'):
            return

        if s['status'] != 'downloading':
            return

        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
        else:
            if s.get('downloaded_bytes') == 0:
                s['_percent_str'] = self.format_percent(0)
            else:
                s['_percent_str'] = 'Unknown %'

        if s.get('speed') is not None:
            s['_speed_str'] = self.format_speed(s['speed'])
        else:
            s['_speed_str'] = 'Unknown speed'

        if s.get('total_bytes') is not None:
            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
        elif s.get('total_bytes_estimate') is not None:
            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
        else:
            if s.get('downloaded_bytes') is not None:
                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
                if s.get('elapsed'):
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
                else:
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
            else:
                # BUG FIX: the template contained a stray '%' ("...s % at ..."),
                # which made the 'msg_template % s' interpolation below raise
                # ValueError whenever this branch was reached.
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'

        self._report_progress_status(msg_template % s)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen('[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen('[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        nooverwrites_and_exists = (
            self.params.get('nooverwrites', False) and
            os.path.exists(encodeFilename(filename))
        )

        continuedl_and_exists = (
            self.params.get('continuedl', True) and
            os.path.isfile(encodeFilename(filename)) and
            not self.params.get('nopart', False)
        )

        # Check file already present
        # NOTE(review): 'and' binds tighter than 'or' here, so
        # continuedl_and_exists alone triggers the skip even for '-';
        # looks intentional upstream but confirm.
        if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        sleep_interval = self.params.get('sleep_interval')
        if sleep_interval:
            self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
            time.sleep(sleep_interval)

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        # See YoutubeDl.py (search for progress_hooks) for a description of
        # this interface
        self._progress_hooks.append(ph)

    def _debug_cmd(self, args, exe=None):
        if not self.params.get('verbose', False):
            return

        str_args = [decodeArgument(a) for a in args]

        if exe is None:
            exe = os.path.basename(str_args[0])

        try:
            import pipes
            shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
        except ImportError:
            shell_quote = repr
        self.to_screen('[debug] %s command line: %s' % (
            exe, shell_quote(str_args)))
flacjacket/sympy | sympy/mpmath/tests/test_matrices.py | 11 | 5643 | from sympy.mpmath import *
def test_matrix_basic():
    """Construction, element access, arithmetic, repr round-trips and shapes."""
    A1 = matrix(3)
    for i in range(3):
        A1[i,i] = 1
    assert A1 == eye(3)
    assert A1 == matrix(A1)
    A2 = matrix(3, 2)
    # A fresh matrix stores no elements (sparse internal dict).
    assert not A2._matrix__data
    A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert list(A3) == list(range(1, 10))
    A3[1,1] = 0
    # Assigning zero removes the entry from the sparse storage.
    assert not (1, 1) in A3._matrix__data
    A4 = matrix([[1, 2, 3], [4, 5, 6]])
    A5 = matrix([[6, -1], [3, 2], [0, -3]])
    assert A4 * A5 == matrix([[12, -6], [39, -12]])
    assert A1 * A3 == A3 * A1 == A3
    # Incompatible shapes must raise on multiplication.
    try:
        A2 * A2
        assert False
    except ValueError:
        pass
    l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]]
    A6 = matrix(l)
    assert A6.tolist() == l
    # repr() must round-trip through eval().
    assert A6 == eval(repr(A6))
    A6 = matrix(A6, force_type=float)
    assert A6 == eval(repr(A6))
    assert A6*1j == eval(repr(A6*1j))
    assert A3 * 10 == 10 * A3 == A6
    assert A2.rows == 3
    assert A2.cols == 2
    A3.rows = 2
    A3.cols = 2
    # Shrinking the shape discards out-of-range stored entries.
    assert len(A3._matrix__data) == 3
    assert A4 + A4 == 2*A4
    try:
        A4 + A2
    except ValueError:
        pass
    assert sum(A1 - A1) == 0
    A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]])
    x = matrix([10, -10])
    assert A7*x == matrix([-10, -10, -10, -10])
    A8 = ones(5)
    assert sum((A8 + 1) - (2 - zeros(5))) == 0
    assert (1 + ones(4)) / 2 - 1 == zeros(4)
    assert eye(3)**10 == eye(3)
    # Powers of a non-square matrix are undefined.
    try:
        A7**2
        assert False
    except ValueError:
        pass
    A9 = randmatrix(3)
    A10 = matrix(A9)
    A9[0,0] = -100
    # matrix(A9) must produce an independent copy, not a view.
    assert A9 != A10
    assert nstr(A9)
def test_matrix_slices():
    """Slice reads and slice assignment (matrix- and scalar-valued)."""
    A = matrix([ [1, 2, 3],
                 [4, 5 ,6],
                 [7, 8 ,9]])
    V = matrix([1,2,3,4,5])

    # Get slice
    assert A[:,:] == A
    assert A[:,1] == matrix([[2],[5],[8]])
    assert A[2,:] == matrix([[7, 8 ,9]])
    assert A[1:3,1:3] == matrix([[5,6],[8,9]])
    assert V[2:4] == matrix([3,4])
    # Out-of-range column slice must raise.
    try:
        A6 = A[:,1:6]
        assert False
    except IndexError:
        pass

    # Assign slice with matrix
    A1 = matrix(3)
    A1[:,:] = A
    assert A1[:,:] == matrix([[1, 2, 3],
                              [4, 5 ,6],
                              [7, 8 ,9]])
    A1[0,:] = matrix([[10, 11, 12]])
    assert A1 == matrix([ [10, 11, 12],
                          [4, 5 ,6],
                          [7, 8 ,9]])
    A1[:,2] = matrix([[13], [14], [15]])
    assert A1 == matrix([ [10, 11, 13],
                          [4, 5 ,14],
                          [7, 8 ,15]])
    A1[:2,:2] = matrix([[16, 17], [18 , 19]])
    assert A1 == matrix([ [16, 17, 13],
                          [18, 19 ,14],
                          [7, 8 ,15]])
    V[1:3] = 10
    assert V == matrix([1,10,10,4,5])
    # Shape-mismatched assignment must raise.
    try:
        A1[2,:] = A[:,1]
        assert False
    except ValueError:
        pass
    # Out-of-range target slice must raise.
    try:
        A1[2,1:20] = A[:,:]
        assert False
    except IndexError:
        pass

    # Assign slice with scalar
    A1[:,2] = 10
    assert A1 == matrix([ [16, 17, 10],
                          [18, 19 ,10],
                          [7, 8 ,10]])
    A1[:,:] = 40
    for x in A1:
        assert x == 40
def test_matrix_power():
    """Integer powers of a square matrix, including negative exponents."""
    M = matrix([[1, 2], [3, 4]])
    assert M**2 == M*M
    assert M**3 == M*M*M
    # Negative exponents invert the matrix first.
    assert M**-1 == inverse(M)
    assert M**-2 == inverse(M*M)
def test_matrix_transform():
    """Transpose, row swapping, and extending a matrix with a column."""
    A = matrix([[1, 2], [3, 4], [5, 6]])
    assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]])
    swap_row(A, 1, 2)
    assert A == matrix([[1, 2], [5, 6], [3, 4]])
    # swap_row also accepts a plain list.
    l = [1, 2]
    swap_row(l, 0, 1)
    assert l == [2, 1]
    assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]])
def test_matrix_conjugate():
    """Check elementwise conjugation and the Hermitian transpose (.H)."""
    A = matrix([[1 + j, 0], [2, j]])
    assert A.conjugate() == matrix([[mpc(1, -1), 0], [2, mpc(0, -1)]])
    assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2],
                                                [0, mpc(0, -1)]])
def test_matrix_creation():
    """Check the matrix factory helpers: diag, ones, zeros, randmatrix, hilbert."""
    assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    A1 = ones(2, 3)
    assert A1.rows == 2 and A1.cols == 3
    for a in A1:
        assert a == 1
    A2 = zeros(3, 2)
    assert A2.rows == 3 and A2.cols == 2
    for a in A2:
        assert a == 0
    # Two random matrices should (with overwhelming probability) differ
    assert randmatrix(10) != randmatrix(10)
    one = mpf(1)
    assert hilbert(3) == matrix([[one, one/2, one/3],
                                 [one/2, one/3, one/4],
                                 [one/3, one/4, one/5]])
def test_norms():
    """Check matrix norms (1, inf, Frobenius) and vector p-norms."""
    # matrix norms
    A = matrix([[1, -2], [-3, -1], [2, 1]])
    assert mnorm(A,1) == 6
    assert mnorm(A,inf) == 4
    assert mnorm(A,'F') == sqrt(20)
    # vector norms
    assert norm(-3) == 3
    x = [1, -2, 7, -12]
    assert norm(x, 1) == 22
    # Non-integer results are compared after rounding to 10 decimal places
    assert round(norm(x, 2), 10) == 14.0712472795
    assert round(norm(x, 10), 10) == 12.0054633727
    assert norm(x, inf) == 12
def test_vector():
    """Check column-vector construction, indexing, mutation and products."""
    x = matrix([0, 1, 2, 3, 4])
    assert x == matrix([[0], [1], [2], [3], [4]])
    assert x[3] == 3
    # Only 4 entries are stored for 5 elements -- presumably the internal
    # dict does not store zero values; verify against the matrix class.
    assert len(x._matrix__data) == 4
    assert list(x) == list(range(5))
    x[0] = -10
    x[4] = 0
    assert x[0] == -10
    assert len(x) == len(x.T) == 5
    # Inner product: x.T * x yields a 1x1 matrix
    assert x.T*x == matrix([[114]])
def test_matrix_copy():
    """Verify that copy() produces an independent matrix."""
    original = ones(6)
    duplicate = original.copy()
    assert original == duplicate
    # Mutating the copy must not affect the source matrix
    duplicate[0, 0] = 0
    assert original != duplicate
def test_matrix_numpy():
    """Check construction of an mpmath matrix from a numpy.matrix (optional dep)."""
    from sympy.external import import_module
    numpy = import_module('numpy')
    if not numpy:
        # numpy not installed -- silently skip the test
        return
    l = [[1, 2], [3, 4], [5, 6]]
    # NOTE(review): numpy.matrix is deprecated in modern numpy; consider
    # numpy.array if this test is ever updated.
    a = numpy.matrix(l)
    assert matrix(l) == matrix(a)
| bsd-3-clause |
def-/commandergenius | project/jni/python/src/Lib/test/test_htmlparser.py | 56 | 10661 | """Tests for HTMLParser.py."""
import HTMLParser
import pprint
import unittest
from test import test_support
class EventCollector(HTMLParser.HTMLParser):
    """HTMLParser subclass that records every handler call as an event tuple.

    Each handler appends a tuple whose first element names the event kind
    ("starttag", "data", "comment", ...); tests compare the recorded list
    against an expected list.
    """

    def __init__(self):
        # Recorded events; self.append is bound once for brevity in handlers
        self.events = []
        self.append = self.events.append
        HTMLParser.HTMLParser.__init__(self)

    def get_events(self):
        # Normalize the list of events so that buffer artefacts don't
        # separate runs of contiguous characters.
        L = []
        prevtype = None
        for event in self.events:
            type = event[0]
            if type == prevtype == "data":
                # Merge consecutive "data" events into one
                L[-1] = ("data", L[-1][1] + event[1])
            else:
                L.append(event)
            prevtype = type
        self.events = L
        return L

    # structure markup

    def handle_starttag(self, tag, attrs):
        self.append(("starttag", tag, attrs))

    def handle_startendtag(self, tag, attrs):
        self.append(("startendtag", tag, attrs))

    def handle_endtag(self, tag):
        self.append(("endtag", tag))

    # all other markup

    def handle_comment(self, data):
        self.append(("comment", data))

    def handle_charref(self, data):
        self.append(("charref", data))

    def handle_data(self, data):
        self.append(("data", data))

    def handle_decl(self, data):
        self.append(("decl", data))

    def handle_entityref(self, data):
        self.append(("entityref", data))

    def handle_pi(self, data):
        self.append(("pi", data))

    def unknown_decl(self, decl):
        self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
    """EventCollector that additionally records the raw start-tag text."""

    def handle_starttag(self, tag, attrs):
        EventCollector.handle_starttag(self, tag, attrs)
        # Record the exact source text of the tag for round-trip checks
        self.append(("starttag_text", self.get_starttag_text()))
class TestCaseBase(unittest.TestCase):
    """Shared helpers for feeding markup to a collector and checking events."""

    def _run_check(self, source, expected_events, collector=EventCollector):
        """Feed *source* (a string or list of chunks) and compare events."""
        parser = collector()
        # Feeding chunk-by-chunk also exercises the parser's buffering
        for s in source:
            parser.feed(s)
        parser.close()
        events = parser.get_events()
        if events != expected_events:
            self.fail("received events did not match expected events\n"
                      "Expected:\n" + pprint.pformat(expected_events) +
                      "\nReceived:\n" + pprint.pformat(events))

    def _run_check_extra(self, source, events):
        """Like _run_check but also collects raw start-tag text."""
        self._run_check(source, events, EventCollectorExtra)

    def _parse_error(self, source):
        """Assert that parsing *source* raises HTMLParseError."""
        def parse(source=source):
            parser = HTMLParser.HTMLParser()
            parser.feed(source)
            parser.close()
        self.assertRaises(HTMLParser.HTMLParseError, parse)
class HTMLParserTestCase(TestCaseBase):
    """Behavioral tests for HTMLParser.HTMLParser event dispatch."""

    def test_processing_instruction_only(self):
        self._run_check("<?processing instruction>", [
            ("pi", "processing instruction"),
            ])
        self._run_check("<?processing instruction ?>", [
            ("pi", "processing instruction ?"),
            ])

    def test_simple_html(self):
        # NOTE(review): the markup below appears to have had its entity and
        # character references decoded at some point (e.g. a literal space
        # and a literal quote character where the expected events still list
        # ("charref", "32") and ("charref", "x201C")) -- verify this literal
        # against the upstream test_htmlparser.py before trusting failures.
        self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity; 
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
“
<!--comment2a-- --comment2b--><!>
</Html>
""", [
    ("data", "\n"),
    ("decl", "DOCTYPE html PUBLIC 'foo'"),
    ("data", "\n"),
    ("starttag", "html", []),
    ("entityref", "entity"),
    ("charref", "32"),
    ("data", "\n"),
    ("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
    ("data", "\n"),
    ("starttag", "img", [("src", "Bar"), ("ismap", None)]),
    ("data", "sample\ntext\n"),
    ("charref", "x201C"),
    ("data", "\n"),
    ("comment", "comment2a-- --comment2b"),
    ("data", "\n"),
    ("endtag", "html"),
    ("data", "\n"),
    ])

    def test_unclosed_entityref(self):
        self._run_check("&entityref foo", [
            ("entityref", "entityref"),
            ("data", " foo"),
            ])

    def test_doctype_decl(self):
        inside = """\
DOCTYPE html [
  <!ELEMENT html - O EMPTY>
  <!ATTLIST html
      version CDATA #IMPLIED
      profile CDATA 'DublinCore'>
  <!NOTATION datatype SYSTEM 'http://xml.python.org/notations/python-module'>
  <!ENTITY myEntity 'internal parsed entity'>
  <!ENTITY anEntity SYSTEM 'http://xml.python.org/entities/something.xml'>
  <!ENTITY % paramEntity 'name|name|name'>
  %paramEntity;
  <!-- comment -->
]"""
        self._run_check("<!%s>" % inside, [
            ("decl", inside),
            ])

    def test_bad_nesting(self):
        # Strangely, this *is* supposed to test that overlapping
        # elements are allowed.  HTMLParser is more geared toward
        # lexing the input than parsing the structure.
        self._run_check("<a><b></a></b>", [
            ("starttag", "a", []),
            ("starttag", "b", []),
            ("endtag", "a"),
            ("endtag", "b"),
            ])

    def test_bare_ampersands(self):
        self._run_check("this text & contains & ampersands &", [
            ("data", "this text & contains & ampersands &"),
            ])

    def test_bare_pointy_brackets(self):
        self._run_check("this < text > contains < bare>pointy< brackets", [
            ("data", "this < text > contains < bare>pointy< brackets"),
            ])

    def test_attr_syntax(self):
        output = [
          ("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
          ]
        self._run_check("""<a b='v' c="v" d=v e>""", output)
        self._run_check("""<a  b = 'v' c = "v" d = v e>""", output)
        self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
        self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)

    def test_attr_values(self):
        self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
                        [("starttag", "a", [("b", "xxx\n\txxx"),
                                            ("c", "yyy\t\nyyy"),
                                            ("d", "\txyz\n")])
                         ])
        self._run_check("""<a b='' c="">""", [
            ("starttag", "a", [("b", ""), ("c", "")]),
            ])
        # Regression test for SF patch #669683.
        self._run_check("<e a=rgb(1,2,3)>", [
            ("starttag", "e", [("a", "rgb(1,2,3)")]),
            ])
        # Regression test for SF bug #921657.
        self._run_check("<a href=mailto:xyz@example.com>", [
            ("starttag", "a", [("href", "mailto:xyz@example.com")]),
            ])

    def test_attr_entity_replacement(self):
        # NOTE(review): the attribute value below looks entity-decoded
        # (originally likely &amp;&gt;&lt;&quot;&apos;) -- confirm upstream.
        self._run_check("""<a b='&><"''>""", [
            ("starttag", "a", [("b", "&><\"'")]),
            ])

    def test_attr_funky_names(self):
        self._run_check("""<a a.b='v' c:d=v e-f=v>""", [
            ("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")]),
            ])

    def test_illegal_declarations(self):
        self._parse_error('<!spacer type="block" height="25">')

    def test_starttag_end_boundary(self):
        self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
        self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])

    def test_buffer_artefacts(self):
        # Feed the same markup split at every possible boundary; the
        # normalized event stream must be identical in each case.
        output = [("starttag", "a", [("b", "<")])]
        self._run_check(["<a b='<'>"], output)
        self._run_check(["<a ", "b='<'>"], output)
        self._run_check(["<a b", "='<'>"], output)
        self._run_check(["<a b=", "'<'>"], output)
        self._run_check(["<a b='<", "'>"], output)
        self._run_check(["<a b='<'", ">"], output)
        output = [("starttag", "a", [("b", ">")])]
        self._run_check(["<a b='>'>"], output)
        self._run_check(["<a ", "b='>'>"], output)
        self._run_check(["<a b", "='>'>"], output)
        self._run_check(["<a b=", "'>'>"], output)
        self._run_check(["<a b='>", "'>"], output)
        self._run_check(["<a b='>'", ">"], output)
        output = [("comment", "abc")]
        self._run_check(["", "<!--abc-->"], output)
        self._run_check(["<", "!--abc-->"], output)
        self._run_check(["<!", "--abc-->"], output)
        self._run_check(["<!-", "-abc-->"], output)
        self._run_check(["<!--", "abc-->"], output)
        self._run_check(["<!--a", "bc-->"], output)
        self._run_check(["<!--ab", "c-->"], output)
        self._run_check(["<!--abc", "-->"], output)
        self._run_check(["<!--abc-", "->"], output)
        self._run_check(["<!--abc--", ">"], output)
        self._run_check(["<!--abc-->", ""], output)

    def test_starttag_junk_chars(self):
        self._parse_error("</>")
        self._parse_error("</$>")
        self._parse_error("</")
        self._parse_error("</a")
        self._parse_error("<a<a>")
        self._parse_error("</a<a>")
        self._parse_error("<!")
        self._parse_error("<a $>")
        self._parse_error("<a")
        self._parse_error("<a foo='bar'")
        self._parse_error("<a foo='bar")
        self._parse_error("<a foo='>'")
        self._parse_error("<a foo='>")
        self._parse_error("<a foo=>")

    def test_declaration_junk_chars(self):
        self._parse_error("<!DOCTYPE foo $ >")

    def test_startendtag(self):
        self._run_check("<p/>", [
            ("startendtag", "p", []),
            ])
        self._run_check("<p></p>", [
            ("starttag", "p", []),
            ("endtag", "p"),
            ])
        self._run_check("<p><img src='foo' /></p>", [
            ("starttag", "p", []),
            ("startendtag", "img", [("src", "foo")]),
            ("endtag", "p"),
            ])

    def test_get_starttag_text(self):
        s = """<foo:bar   \n   one="1"\ttwo=2   >"""
        self._run_check_extra(s, [
            ("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
            ("starttag_text", s)])

    def test_cdata_content(self):
        # Inside <script>, markup-like text must come through as raw data.
        # NOTE(review): "¬-an-entity-ref;" looks like a decoded
        # "&not-an-entity-ref;" -- input and expectation were decoded
        # consistently, so the check still matches itself.
        s = """<script> <!-- not a comment --> ¬-an-entity-ref; </script>"""
        self._run_check(s, [
            ("starttag", "script", []),
            ("data", " <!-- not a comment --> ¬-an-entity-ref; "),
            ("endtag", "script"),
            ])
        s = """<script> <not a='start tag'> </script>"""
        self._run_check(s, [
            ("starttag", "script", []),
            ("data", " <not a='start tag'> "),
            ("endtag", "script"),
            ])

    def test_entityrefs_in_attributes(self):
        # NOTE(review): the euro sign here looks like a decoded &euro;
        # reference -- confirm against the upstream test file.
        self._run_check("<html foo='€&aa&unsupported;'>", [
                ("starttag", "html", [("foo", u"\u20AC&aa&unsupported;")])
                ])
def test_main():
    """Entry point used by Python 2's regrtest machinery."""
    test_support.run_unittest(HTMLParserTestCase)

if __name__ == "__main__":
    test_main()
| lgpl-2.1 |
JStheguy/tgstation | tools/midi2piano/midi2piano.py | 89 | 9532 | """
This module allows user to convert MIDI melodies to SS13 sheet music ready
for copy-and-paste
"""
from functools import reduce
import midi as mi
import easygui as egui
import pyperclip as pclip
LINE_LENGTH_LIM = 50
LINES_LIMIT = 200
TICK_LAG = 0.5
OVERALL_IMPORT_LIM = 2*LINE_LENGTH_LIM*LINES_LIMIT
END_OF_LINE_CHAR = """
""" # BYOND can't parse \n and I am forced to define my own NEWLINE char
OCTAVE_TRANSPOSE = 0 # Change here to transpose melodies by octaves
FLOAT_PRECISION = 2 # Change here to allow more or less numbers after dot in floats
OCTAVE_KEYS = 12
HIGHEST_OCTAVE = 8
time_quanta = 100 * TICK_LAG
"""
class Meta():
version = 1.0
integer = 1
anti_integer = -1
maximum = 1000
epsilon = 0.51
delta_epsilon = -0.1
integral = []
tensor = [[],[],[]]
o_complexity = epsilon**2
random_variance = 0.01
"""
# UTILITY FUNCTIONS
def condition(event):
    """
    Return True when the given MIDI event is meaningful for conversion.

    Percussion track names are rejected outright; only 'note' events are kept.
    """
    kind = event[0]
    if kind == 'track_name' and event[2] == 'Drums':  # Percussion
        return False
    return kind == 'note'  # Only thing that matters
def notenum2string(num, accidentals, octaves):
    """
    This function converts given notenum to SS13 note according to previous
    runs expressed using _accidentals_ and _octaves_

    Returns [note_text, new_accidentals, new_octaves].  note_text is ""
    (and the state lists unchanged) when the note is out of playable range.
    The caller threads the returned state lists into the next call, so the
    sticky-accidental / sticky-octave notation stays consistent.
    """
    names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
    # Maps note index -> position in the accidentals state list
    convert_table = {1:0, 3:1, 6:2, 8:3, 10:4}     # sharp notes
    inclusion_table = {0:0, 2:1, 5:2, 7:3, 9:4}    # naturals that can carry a sharp
    num += OCTAVE_KEYS * OCTAVE_TRANSPOSE
    octave = int(num / OCTAVE_KEYS)
    if octave < 1 or octave > HIGHEST_OCTAVE:
        # Out of range: emit nothing and leave the sticky state untouched
        return ["", accidentals, octaves]
    # Copy the state lists so the caller's previous state is never mutated
    accidentals = accidentals.copy()
    octaves = octaves.copy()
    output_octaves = list(octaves)
    name_indx = num % OCTAVE_KEYS
    accidental = (len(names[name_indx]) == 2)
    output_octaves[name_indx] = octave
    add_n = False
    if accidental:
        # Sharps stay in force until explicitly naturalised
        accidentals[convert_table[name_indx]] = True
    else:
        if name_indx in inclusion_table:
            # A natural after a sharp needs an explicit "n" marker
            add_n = accidentals[inclusion_table[name_indx]]
            accidentals[inclusion_table[name_indx]] = False
    return [
        (
            names[name_indx]+
            ("n" if add_n else "")+
            # Octave digit is only written when it changes for this note name
            str((octave if octave != octaves[name_indx] else ""))
        ),
        accidentals,
        output_octaves
    ]
def dur2mod(dur, bpm_mod=1.0):
    """
    Return the duration modifier that follows '/' in a note, as a compact
    string (trailing zeros and a trailing dot are stripped).
    """
    scaled = round(bpm_mod / dur, FLOAT_PRECISION)
    return str(scaled).rstrip('0').rstrip('.')
# END OF UTILITY FUNCTIONS
# CONVERSION FUNCTIONS
def obtain_midi_file():
    """
    Ask the user to select a MIDI file and return its raw bytes.

    Returns None when the user cancels the file dialog.
    """
    file = egui.fileopenbox(msg='Choose MIDI file to convert',
                            title='MIDI file selection',
                            filetypes=[['*.mid', 'MID files']])
    if not file:
        return None
    # Use a context manager so the file handle is always closed; the old
    # open(...).read() leaked the handle until garbage collection.
    with open(file, mode='rb') as midi_handle:
        return midi_handle.read()
def midi2score_without_ticks(midi_file):
    """
    Transforms aforementioned file into a score, truncates it and returns it

    The opus is converted to millisecond timing first, so every event time
    is in ms and the leading ticks entry carries no information.
    """
    opus = mi.midi2opus(midi_file)
    opus = mi.to_millisecs(opus)
    score = mi.opus2score(opus)
    return score[1:]  # Ticks don't matter anymore, it is always 1000
def filter_events_from_score(score):
    """
    Return a new score keeping, per track, only the events accepted by
    condition() (i.e. non-percussion note events).
    """
    return [[event for event in track if condition(event)]
            for track in score]
def filter_empty_tracks(score):
    """
    Return a new score with all empty tracks removed.
    """
    return [track for track in score if track]
def filter_start_time_and_note_num(score):
    """
    Return a new score where each event is reduced to [start_time, note_num]
    (fields 1 and 4 of the original 'note' event).
    """
    return [[[event[1], event[4]] for event in track]
            for track in score]
def merge_events(score):
    """
    Merge all tracks together into one flat event list and return it.
    """
    # A flattening comprehension replaces reduce(lambda a, b: a + b, ...):
    # it is linear instead of quadratic, and it also handles an empty score
    # (reduce with no initializer raises TypeError on an empty sequence).
    return [event for track in score for event in track]
def sort_score_by_event_times(score):
    """
    Return the events sorted by start time.

    sorted() is stable, so events with equal start times keep their original
    relative order -- identical behaviour to the old index-sort indirection,
    just without building a parallel index list.
    """
    return sorted(score, key=lambda event: event[0])
def convert_into_delta_times(score):
    """
    Transform each event's start_time into the delta to the next event and
    return the new score.  The final note gets a fixed 1-second duration.
    """
    # Pair every event with its successor to compute the gap between them.
    deltas = [[nxt[0] - cur[0], cur[1]]
              for cur, nxt in zip(score, score[1:])]
    deltas.append([1000, score[-1][1]])  # 1 s tail note
    return deltas
def perform_roundation(score):
    """
    Round each delta time to the nearest multiple of the time quantum, since
    BYOND cannot represent durations finer than that; return the new score.
    """
    return [[time_quanta * round(delta / time_quanta), note]
            for delta, note in score]
def obtain_common_duration(score):
    """
    Return the most frequent non-zero duration throughout the whole melody.

    Ties are resolved in favour of the duration that appears first.
    """
    from collections import Counter  # local import keeps this fix self-contained
    # Parse durations and filter out 0s
    durs = [event[0] for event in score if event[0]]
    # BUG FIX: the old code zipped *all* durations against the per-unique
    # counts (zip(durs, counter)), mis-pairing values with counts whenever
    # the order of first appearances diverged from the raw sequence, e.g.
    # [100, 100, 200, 200, 200] wrongly returned 100.  Counter.most_common
    # is stable for ties (insertion order), matching the intended behaviour.
    return Counter(durs).most_common(1)[0][0]
def reduce_score_to_chords(score):
    """
    Reform the score into a chord-duration list:
        [[chord_notes], duration_of_chord]
    and return it.  Events with a zero delta are accumulated into the
    current chord; a non-zero delta closes the chord.  Trailing notes with
    no closing duration are dropped, matching the original behaviour.
    """
    chords = []
    pending_notes = []
    for event in score:
        pending_notes.append(event[1])
        if event[0] != 0:
            # Non-zero delta: this duration closes the current chord
            chords.append([pending_notes, event[0]])
            pending_notes = []
    return chords
def obtain_sheet_music(score, most_frequent_dur):
    """
    Build the unformatted sheet-music string from the chord score.

    Notes within a chord are joined with '-'; a '/<mod>' suffix is written
    only when the chord duration differs from the melody's most common one.
    """
    parts = []
    octaves = [3] * 12
    accidentals = [False] * 7
    for notes, duration in score:
        last = len(notes) - 1
        for pos, note_num in enumerate(notes):
            note_str, accidentals, octaves = notenum2string(note_num, accidentals, octaves)
            parts.append(note_str)
            if pos != last:
                parts.append('-')
        if duration != most_frequent_dur:  # the most common duration is implicit
            parts.append('/')
            parts.append(dur2mod(duration, most_frequent_dur))
        parts.append(',')
    return ''.join(parts)
def explode_sheet_music(sheet_music):
    """
    Splits unformatted sheet music into formated lines of LINE_LEN_LIM
    and such and returns a list of such lines

    Each returned element is a single "note," token; newline markers are
    appended to the last token of a full line.  Output stops once
    LINES_LIMIT lines would be exceeded.
    """
    split_music = sheet_music.split(',')
    # Re-attach the comma separator to every token
    split_music = list(map(lambda note: note+',', split_music))
    split_list = []
    counter = 0        # characters accumulated on the current line
    line_counter = 1   # 1-based count of emitted lines
    for note in split_music:
        if line_counter > LINES_LIMIT-1:
            break
        if counter+len(note) > LINE_LENGTH_LIM-2:
            # Close the current line: drop its trailing comma, add newline
            # NOTE(review): if the very first token already exceeds the
            # limit, split_list[-1] would raise IndexError -- confirm input
            # tokens are always shorter than LINE_LENGTH_LIM-2.
            split_list[-1] = split_list[-1].rstrip(',')
            split_list[-1] += END_OF_LINE_CHAR
            counter = 0
            line_counter += 1
        split_list.append(note)
        counter += len(note)
    return split_list
def finalize_sheet_music(split_music, most_frequent_dur):
    """
    Recreate the final sheet music from the exploded tokens, prepend the
    BPM header, truncate to the import limit and return it.
    """
    # str.join is linear; the old repeated "+=" concatenation was quadratic.
    sheet_music = "".join(split_music)
    sheet_music = sheet_music.rstrip(',')  # Trim the last ,
    # BPM derived from the most common duration (ms per beat -> beats/min)
    sheet_music = "BPM: " + str(int(60000 / most_frequent_dur)) + END_OF_LINE_CHAR + sheet_music
    # A slice already clamps to the string length, so min() was redundant.
    return sheet_music[:OVERALL_IMPORT_LIM]
# END OF CONVERSION FUNCTIONS
def main_cycle():
    """
    Activate the script: repeatedly ask for a MIDI file, run the conversion
    pipeline, and place the resulting sheet music on the clipboard.  Returns
    when the user cancels the file dialog.
    """
    while True:
        midi_file = obtain_midi_file()
        if not midi_file:
            return  # Cancel
        # Conversion pipeline: MIDI -> filtered events -> delta times ->
        # chords -> formatted sheet music
        score = midi2score_without_ticks(midi_file)
        score = filter_events_from_score(score)
        score = filter_start_time_and_note_num(score)
        score = filter_empty_tracks(score)
        score = merge_events(score)
        score = sort_score_by_event_times(score)
        score = convert_into_delta_times(score)
        score = perform_roundation(score)
        most_frequent_dur = obtain_common_duration(score)
        score = reduce_score_to_chords(score)
        sheet_music = obtain_sheet_music(score, most_frequent_dur)
        split_music = explode_sheet_music(sheet_music)
        sheet_music = finalize_sheet_music(split_music, most_frequent_dur)
        # Result is delivered via the clipboard, ready to paste in-game
        pclip.copy(sheet_music)

main_cycle()
| agpl-3.0 |
wtsi-hgi/cookie-monster | cookiemonster/tests/elmo/test_elmo.py | 1 | 8322 | """
HTTP API Test
=============
High-level testing of the HTTP API.
Legalese
--------
Copyright (c) 2016 Genome Research Ltd.
Author: Christopher Harrison <ch12@sanger.ac.uk>
This file is part of Cookie Monster.
Cookie Monster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
from unittest.mock import MagicMock
from cookiemonster.tests._utils.docker_couchdb import CouchDBContainer
from cookiemonster.tests._utils.docker_helpers import get_open_port
import json
from typing import Any
from time import sleep
from datetime import datetime, timedelta, timezone
from http.client import HTTPConnection, HTTPResponse
from hgicommon.collections import Metadata
from cookiemonster.common.models import Enrichment, Cookie
from cookiemonster.common.helpers import EnrichmentJSONDecoder
from cookiemonster.cookiejar import BiscuitTin
from cookiemonster.elmo import HTTP_API, APIDependency
def _decode_json_response(r:HTTPResponse) -> Any:
""" Decode JSON HTTP response """
charset = r.getheader('charset', 'utf-8')
if r.headers.get_content_type() != 'application/json':
return None
return json.loads(r.read().decode(charset))
class TestElmo(unittest.TestCase):
    """End-to-end tests of the Cookie Monster HTTP API against a real
    CouchDB container and a live HTTP listener."""

    def setUp(self):
        """
        Test set up:

        * Build, if necessary, and start a CouchDB container and
          connect as a BiscuitTin instance
        * Start the HTTP API service on a free port, with the necessary
          dependencies injected
        * Create an HTTP client connection to the API service
        """
        self.couchdb_container = CouchDBContainer()

        # Configuration for Cookie Jar
        self.HOST = self.couchdb_container.couchdb_fqdn
        self.DB   = 'elmo-test'

        self.jar = BiscuitTin(self.HOST, self.DB, 1, timedelta(0))

        # Configuration for HTTP service
        self.API_PORT = get_open_port()

        self.api = HTTP_API()
        self.api.inject(APIDependency.CookieJar, self.jar)
        self.api.inject(APIDependency.System, None)
        self.api.listen(self.API_PORT)

        self.http = HTTPConnection('localhost', self.API_PORT)
        self.REQ_HEADER = {'Accept': 'application/json'}

        # Block until service is up (or timeout)
        start_time = finish_time = datetime.now()
        service_up = False
        while finish_time - start_time < timedelta(seconds=5):
            response = None

            try:
                # Any response at all (even an error) proves the listener
                # is accepting connections
                self.http.request('HEAD', '/')
                response = self.http.getresponse()

            except:
                sleep(0.1)

            finally:
                self.http.close()
                finish_time = datetime.now()

            if isinstance(response, HTTPResponse):
                service_up = True
                break

        if not service_up:
            self.tearDown()
            raise ConnectionError('Couldn\'t start API service in a reasonable amount of time')

    def tearDown(self):
        """ Tear down test set up """
        self.http.close()
        self.api.stop()
        self.couchdb_container.tear_down()

    def test_queue(self):
        """
        HTTP API: GET /queue
        """
        self.http.request('GET', '/queue', headers=self.REQ_HEADER)
        r = self.http.getresponse()
        self.assertEqual(r.status, 200)
        self.assertEqual(r.headers.get_content_type(), 'application/json')

        data = _decode_json_response(r)
        self.assertIn('queue_length', data)
        self.assertEqual(data['queue_length'], self.jar.queue_length())  # Should be 0

        self.http.close()

        # Add item to the queue
        self.jar.mark_for_processing('/foo')

        self.http.request('GET', '/queue', headers=self.REQ_HEADER)
        data = _decode_json_response(self.http.getresponse())
        self.assertEqual(data['queue_length'], self.jar.queue_length())  # Should be 1

    def test_reprocess(self):
        """
        HTTP API: POST /queue/reprocess
        """
        # Add mocked update notifier to Cookie Jar so we can count
        # dirty-cookie notifications
        dirty_cookie_listener = MagicMock()
        self.jar.add_listener(dirty_cookie_listener)

        cookie_identifier = '/foo'

        request = {'identifier': cookie_identifier}
        self.http.request('POST', '/queue/reprocess', body=json.dumps(request), headers=self.REQ_HEADER)
        r = self.http.getresponse()
        self.assertEqual(r.status, 200)
        self.assertEqual(r.headers.get_content_type(), 'application/json')

        data = _decode_json_response(r)
        self.assertEqual(data, request)

        self.http.close()

        # Check queue has been updated
        self.assertEqual(self.jar.queue_length(), 1)
        self.assertEqual(dirty_cookie_listener.call_count, 1)

    @staticmethod
    def _url_for_identifier(identifier:str):
        """ URL for identifier

        Path-like identifiers (leading slash) go via the query string;
        plain identifiers are embedded directly in the route.
        """
        if identifier[0] == "/":
            return '/cookiejar?identifier={}'.format(identifier)
        else:
            return '/cookiejar/{}'.format(identifier)

    def _fetch_test(self, identifier:str):
        """ Generic fetch test: enrich a cookie, GET it back and compare """
        source = 'foobar'
        timestamp = datetime.now().replace(microsecond=0, tzinfo=timezone.utc)
        metadata = Metadata({'foo': 123, 'bar': 'quux'})
        enrichment = Enrichment(source, timestamp, metadata)

        self.jar.enrich_cookie(identifier, enrichment)

        self.http.request('GET', TestElmo._url_for_identifier(identifier), headers=self.REQ_HEADER)
        r = self.http.getresponse()
        self.assertEqual(r.status, 200)
        self.assertEqual(r.headers.get_content_type(), 'application/json')

        data = _decode_json_response(r)
        fetched_identifier = data['identifier']
        # Round-trip the enrichment through JSON to compare value-for-value
        fetched_enrichment = json.loads(json.dumps(data['enrichments']), cls=EnrichmentJSONDecoder)[0]
        self.assertEqual(fetched_identifier, identifier)
        self.assertEqual(fetched_enrichment, enrichment)

    def test_fetch_by_qs(self):
        """
        HTTP API: GET /cookiejar?identifier=<identifier>
        """
        self._fetch_test('/path/to/foo')

    def test_fetch_by_route(self):
        """
        HTTP API: GET /cookiejar/<identifier>
        """
        self._fetch_test('foo_bar')

    def _delete_test(self, identifier:str):
        """ Generic delete test: create a cookie, DELETE it, verify gone """
        self.jar.mark_for_processing(identifier)
        self.jar.mark_as_complete(identifier)

        cookie = self.jar.fetch_cookie(identifier)
        self.assertIsInstance(cookie, Cookie)

        self.http.request('DELETE', TestElmo._url_for_identifier(identifier), headers=self.REQ_HEADER)
        r = self.http.getresponse()
        self.assertEqual(r.status, 200)
        self.assertEqual(r.headers.get_content_type(), 'application/json')

        data = _decode_json_response(r)
        self.assertEqual(data, {'deleted':identifier})

        deleted_cookie = self.jar.fetch_cookie(identifier)
        self.assertIsNone(deleted_cookie)

    def test_delete_by_qs(self):
        """
        HTTP API: DELETE /cookiejar?identifier=<identifier>
        """
        self._delete_test('/path/to/foo')

    def test_delete_by_route(self):
        """
        HTTP API: DELETE /cookiejar/<identifier>
        """
        self._delete_test('foo_bar')

    def test_thread_dump(self):
        """
        HTTP API: GET /debug/threads

        Note: This test only proves that the endpoint returns an OK
        response and JSON data.

        TODO At least validate the returned data's schema
        """
        self.http.request('GET', '/debug/threads', headers=self.REQ_HEADER)
        r = self.http.getresponse()
        self.assertEqual(r.status, 200)
        self.assertEqual(r.headers.get_content_type(), 'application/json')
# Allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
RaspberryPi-Samples/py-my-key | setup.py | 1 | 4660 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
import io
# Absolute path of the directory containing this setup.py
here = path.abspath(path.dirname(__file__))

NAME = 'py_my_key'

# Execute version.py to pull __version__, __url__, __author__, __email__
# and __license__ into this namespace (used by setup() below).
filename = path.join(NAME, 'version.py')
with open(filename) as f:
    exec(f.read())

filename = path.join(here, 'README.rst')
with io.open(filename, 'rt', encoding='UTF-8') as f:
    readme = f.read()

filename = path.join(here, 'HISTORY.rst')
with io.open(filename, 'rt', encoding='UTF-8') as f:
    history = f.read()

# Run-time dependencies
requirements = [
    # TODO: put package requirements here
    'tzlocal',
    'pingo',
    'sqlalchemy'
]

test_requirements = [
    # TODO: put package test requirements here
]
setup(
    name=NAME,

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
    version=__version__,

    description='Access control with RaspberryPi and NFC card reader',
    long_description=readme + '\n\n' + history,

    # The project's main homepage.
    url=__url__,

    # Author details
    author=__author__,
    author_email=__email__,

    # Choose your license
    license=__license__,
    zip_safe=False,

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Environment :: Console',
        #'Topic :: Software Development :: Build Tools',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Cython',
        'Programming Language :: Python',
        #'Programming Language :: Python :: 2',
        #'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        #'Programming Language :: Python :: 3',
        #'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        # Fixed: '3.4' was listed twice in the original classifier list
        'Programming Language :: Python :: 3.4',
        'Topic :: Database',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: BSD License',
    ],

    # What does your project relate to?
    keywords='',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    include_package_data=True,

    # List run-time dependencies here. These will be installed by pip when your
    # project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
    test_suite='tests',
    install_requires=requirements,

    # List additional groups of dependencies here (e.g. development dependencies).
    # You can install these using the following syntax, for example:
    # $ pip install -e .[dev,test]
    extras_require = {
        'dev': ['check-manifest', 'nose'],
        'test': ['coverage', 'nose'],
        'hw_nxp_rpi': ['nxppy'],
    },

    # If there are data files included in your packages that need to be
    # installed, specify them here.  If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    #package_data={
    #    'sample': ['logging.conf'],
    #},

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'py_my_key=py_my_key.cli_rpi_access:main',
            'db_admin=py_my_key.cli_db_admin:main',
        ],
    },
)
| isc |
sfletc/scram2_plot | scram_plot/profile_plot.py | 1 | 16126 | import numpy
from pylab import * # @UnusedWildImport
import matplotlib.pyplot as plt # @Reimport
import os.path
class DNA(object):
    """
    DNA class

    Wraps a nucleotide sequence (normalised to upper case) and provides
    value semantics: length, indexing, equality and hashing by sequence.
    """
    # NOTE(review): declared but never used for validation in this class
    dna_alphabet = set("AGCTN")

    def __init__(self, sequence):
        # Store upper-cased so comparisons and hashing are case-insensitive
        self.sequence = sequence.upper()

    def __len__(self):
        return len(self.sequence)

    def __getitem__(self, key):
        return self.sequence[key]

    def __hash__(self):
        return hash(self.sequence)

    def __repr__(self):
        return self.sequence

    def __eq__(self, other):
        # Comparing against a non-DNA object used to raise AttributeError;
        # returning NotImplemented lets Python fall back to its default
        # (identity-based) comparison instead.
        if not isinstance(other, DNA):
            return NotImplemented
        return self.sequence == other.sequence
def profile_plot(nt_list, search_terms, in_files, cutoff, plot_y_lim, win, pub, save_plot, bin_reads):
    """
    Profile plot function
    :param nt_list: list of read length ints to plot
    :param search_terms: header search terms list
    :param in_files: alignment files prefix
    :param cutoff: highest count of the most abundant alignment of 21,22,24 nt profiles
    :param plot_y_lim: set y limits on plot
    :param win: smoothing window size
    :param pub: remove box and axis labels
    :param save_plot: whether to save the generated plot to disk
    :param bin_reads: whether to bin aligned reads before smoothing
    """
    select_win = False  # window size has not been auto-selected yet
    alignment_file_list = _alignment_file_list(in_files, nt_list)
    # Search terms are joined into a single case-insensitive substring match
    substring = " ".join(search_terms)
    all_keys = _get_all_headers(alignment_file_list)
    for header in all_keys:
        if substring.lower() in header.lower():
            nt_pos = 0
            header_alignment_tuple = ()
            ref_len_tuple = ()
            #Get alignments for the search key (each nt length)
            for alignment_file in alignment_file_list:
                header_alignment_tuple, ref_len_tuple = _get_selected_alignments(alignment_file, header,
                                                                                 header_alignment_tuple,
                                                                                 ref_len_tuple, nt_list[nt_pos])
                nt_pos+=1
            #Check if one total alignment count for the provided lengths is above the cutoff
            above_cutoff = False
            for alignment in header_alignment_tuple:
                # alignment[2] is compared against the cutoff -- presumably
                # the total alignment count for that read length
                if alignment[2] >= cutoff:
                    above_cutoff = True
            if above_cutoff:
                #Check header length - truncate for the save file name if too long
                _above_cutoff(bin_reads, header, header_alignment_tuple, in_files, nt_list, plot_y_lim, pub,
                              ref_len_tuple, save_plot, select_win, win)
def _above_cutoff(bin_reads, header, header_alignment_tuple, in_files, nt_list, plot_y_lim, pub,
                  ref_len_tuple, save_plot, select_win, win):
    """
    Process and plot a single header whose alignment count passed the cutoff.
    :param bin_reads: bool whether to bin reads (for long references)
    :param header: reference header
    :param header_alignment_tuple: one [fwd, rvs, count] alignment per read length
    :param in_files: path/to/file/prefix
    :param nt_list: list of read lengths
    :param plot_y_lim: y axes limit (0 = automatic)
    :param pub: bool for whether to remove axes and legend
    :param ref_len_tuple: reference length per read length
    :param save_plot: bool whether to save plot
    :param select_win: bool whether to auto-select window size
    :param win: window size (0 = derive from reference length)
    """
    if header[0] == '"':
        # NOTE(review): [1:-2] drops the leading quote plus the LAST TWO
        # characters -- looks like an off-by-one for quoted headers ([1:-1]
        # would strip only the surrounding quotes); confirm against the
        # header format emitted by scram.
        plot_name = _save_file_name(in_files, header[1:-2])
    else:
        plot_name = _save_file_name(in_files, header)
    print("Plotting:\n")
    print(header)
    # Get the ref len
    max_ref_len = max(ref_len_tuple)
    # Calculate window size (binned mode uses a fixed default of 250)
    if bin_reads and win == 0:
        win = 250
    else:
        win, select_win = _select_win_size(max_ref_len, select_win, win)
    # Convert alignments to y values for plotting (i.e. fill in zeros)
    graph_processed_list = []
    nt_pos = 0
    for alignment in header_alignment_tuple:
        if not bin_reads:
            graph_processed_list.append(_list_aligned_reads(alignment, max_ref_len, int(nt_list[nt_pos])))
        else:
            graph_processed_list.append(_bin_aligned_reads(alignment, max_ref_len, int(nt_list[nt_pos])))
        nt_pos += 1
    # Smooth y-values
    plot_data = _smooth_all_plot_data(graph_processed_list, win)
    # Plot (graph_processed_list[0][0] is the shared x axis)
    _plot_profile_plot(nt_list, graph_processed_list[0][0], plot_data, header, plot_y_lim, pub, save_plot, plot_name,
                       win)
def _alignment_file_list(in_files, nt_list):
    """
    Load one scram alignment csv per read length.
    :param in_files: path/to/alignment prefix; files are expected at
        "<prefix>_<nt>.csv"
    :param nt_list: list of read lengths as strings (concatenated directly
        into the file name)
    :return: list of alignments dicts, one per read length
    """
    print("\nLoading scram alignment files:\n")
    alignment_file_list = []
    for nt in nt_list:
        fname = in_files + "_" + nt + ".csv"
        if os.path.isfile(fname):
            try:
                print("{0} \n".format(fname))
                in_file, _ = _import_scram_profile(fname)
                alignment_file_list.append(in_file)
            # NOTE(review): bare except hides the real parse error; `sys` is
            # not imported here and presumably comes in via `from pylab
            # import *` -- TODO confirm / add an explicit `import sys`.
            except:
                print("\nCannot load and process {}".format(fname))
                sys.exit()
        else:
            print("\n{} does not exist at this location".format(fname))
            sys.exit()
    return alignment_file_list
def _import_scram_profile(in_file):
    """
    Parse a SCRAM profile csv into a dictionary of alignments.

    :param in_file: path/to/profile string
    :return: (alignments dict keyed by header, sRNA length seen in the file)
    """
    alignments = {}
    srna_len = 0
    with open(in_file, 'r') as handle:
        header_skipped = False
        for row in handle:
            # the first row is the column header line
            if not header_skipped:
                header_skipped = True
                continue
            fields = row.strip().rsplit(',', 7)
            srna_len = len(fields[2])
            entry = (int(fields[1]), DNA(fields[2]), int(fields[3]), fields[4],
                     float(fields[5]), float(fields[6]))
            alignments.setdefault(fields[0], []).append(entry)
    return alignments, srna_len
def _get_all_headers(alignment_file_list):
    """
    Collect the union of reference headers across all alignment dicts.

    :param alignment_file_list: list of alignments dicts
    :return: set of headers
    """
    print("Extracting headers:\n")
    headers = set()
    for alignments in alignment_file_list:
        headers.update(alignments.keys())
    return headers
def _get_selected_alignments(alignment_file, header, header_alignment_tuple, ref_len_tuple, nt):
    """
    Extend the running result tuples with the alignment and reference length
    for *header* in one alignment file.

    :param alignment_file: alignments dict for a single read length
    :param header: reference header to extract
    :param header_alignment_tuple: accumulated alignments, one per read length
    :param ref_len_tuple: accumulated reference lengths
    :param nt: read length being processed
    :return: extended (header_alignment_tuple, ref_len_tuple)
    """
    alignment, ref_len = _extract_header_alignment(header, alignment_file, nt)
    return header_alignment_tuple + (alignment,), ref_len_tuple + (ref_len,)
def _extract_header_alignment(header, alignments, nt):
    """
    Split the alignments for one header into forward and reverse strands.

    Reverse-strand counts are negated so they plot below the axis; the total
    count sums alignment[4] over both strands.

    :param header: reference sequence header string
    :param alignments: alignments dictionary
    :param nt: read length (unused; kept for interface parity)
    :return: ([fwd_alignments, rvs_alignments, aln_count], ref_len)
    """
    fwd = []
    rvs = []
    total = 0.0
    ref_len = 0
    for aln in alignments.get(header, []):
        ref_len = aln[0]
        strand = aln[3]
        if strand == "+":
            fwd.append((aln[2], aln[4], aln[5]))
        elif strand == "-":
            rvs.append((aln[2], -aln[4], aln[5]))
        total += aln[4]
    return [fwd, rvs, total], ref_len
def _select_win_size(max_ref_len, select_win, win):
    """
    Determine the smoothing window size.

    Auto-selected windows are ~1/30th of the reference length; odd sizes are
    rounded up to even, and anything under 6 disables smoothing (win = 1).

    :param max_ref_len: length of reference
    :param select_win: True if the window size should be auto-selected
    :param win: requested window size (0 = auto-select)
    :return: (window size, whether auto-selection is active)
    """
    if select_win or win == 0:
        win = int(max_ref_len / 30)
        select_win = True
    # round odd sizes up to the next even number
    win += win % 2
    if win < 6:
        win = 1
    return win, select_win
def _list_aligned_reads(fwd_rvs_align_list, ref_len, nt):
    """
    Expand alignments into per-nucleotide coverage values over the reference.

    Each alignment contributes (mean +/- standard error) to every one of the
    *nt* positions it covers, producing upper and lower envelope arrays for
    both strands (reverse-strand means arrive pre-negated).

    :param fwd_rvs_align_list: [fwd_alignments, rvs_alignments, aln_count]
    :param ref_len: number of nucleotides in the reference sequence (int)
    :param nt: aligned read length
    :return: (x_axis, fwd_upper, fwd_lower, rvs_upper, rvs_lower)
    """
    fwd_aligns = fwd_rvs_align_list[0]
    rvs_aligns = fwd_rvs_align_list[1]
    fwd_upper = [0] * ref_len
    fwd_lower = [0] * ref_len
    rvs_upper = [0] * ref_len
    rvs_lower = [0] * ref_len
    x_axis = list(range(0, ref_len))
    for aln in fwd_aligns:
        pos, mean, se = aln[0], aln[1], aln[2]
        for offset in range(nt):
            fwd_upper[pos + offset - 1] += mean + se
            fwd_lower[pos + offset - 1] += mean - se
    for aln in rvs_aligns:
        pos, mean, se = aln[0], aln[1], aln[2]
        for offset in range(nt):
            rvs_upper[pos + offset - 1] += mean + se
            rvs_lower[pos + offset - 1] += mean - se
    return x_axis, fwd_upper, fwd_lower, rvs_upper, rvs_lower
def _bin_aligned_reads(fwd_rvs_align_list, ref_len, nt):
    """
    Bin alignments into 10000 bins across the reference.
    Use instead of _list_aligned_reads for long references (i.e. chromosomes).
    :param fwd_rvs_align_list: [fwd_alignments, rvs_alignments, aln_count]
    :param ref_len: length of reference
    :param nt: read length aligned (unused; kept for interface parity)
    :return: [x_axis, fwd_upper, fwd_lower, rvs_upper, rvs_lower] bin lists
    """
    n_bins = 10000
    # four independent accumulator lists: fwd upper/lower, rvs upper/lower
    bin_list = [n_bins * [0] for _ in range(4)]
    bin_size = ref_len / n_bins
    offset = 0
    for strand_idx in range(2):
        for alignment in fwd_rvs_align_list[strand_idx]:
            # Bug fix: a position exactly at ref_len used to produce
            # bin 10000 and raise IndexError; clamp to the last bin.
            bin_number = min(int(alignment[0] / bin_size), n_bins - 1)
            bin_list[offset][bin_number] += alignment[1] + alignment[2]
            bin_list[offset + 1][bin_number] += alignment[1] - alignment[2]
        offset = 2
    reference_x_axis = list(range(0, n_bins))
    return [reference_x_axis] + bin_list
def _smooth_all_plot_data(graph_processed_list, win):
    """
    Smooth the four y-value series of every processed graph.

    :param graph_processed_list: list of (x, fwd_up, fwd_lo, rvs_up, rvs_lo)
    :param win: smoothing window size
    :return: list of 4-tuples of smoothed arrays, one tuple per read length
    """
    smoothed_for_plot_list = []
    for graph_processed in graph_processed_list:
        # indices 1-4 are the y series; index 0 is the shared x axis
        smoothed = tuple(
            _smooth(numpy.array(graph_processed[idx]), win, window='blackman')
            for idx in (1, 2, 3, 4))
        smoothed_for_plot_list.append(smoothed)
    return smoothed_for_plot_list
def _smooth(x, window_len, window='hamming'):
    """
    Smoothing function from scipy cookbook.

    The signal is reflected at both ends before convolving so the returned
    array has the same length as the input.

    :param x: 1-D numpy array of values to smooth
    :param window_len: window length; lengths below 6 disable smoothing
    :param window: type of smoothing window ('flat' = moving average, or the
        name of a numpy window function: hanning/hamming/bartlett/blackman)
    :return: numpy array of smoothed values, same length as x
    :raises ValueError: if x is not 1-D, shorter than window_len, or the
        window name is unknown
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 6:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # reflect the signal at both ends to reduce boundary artefacts
    s = numpy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module directly; the
        # previous eval('numpy.' + window + ...) was needless dynamic code
        # execution (the name is already validated above).
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='valid')
    # trim the padding back off so len(result) == len(x)
    return y[int(window_len / 2 - 1):-int(window_len / 2)]
def _plot_profile_plot(nt_list, x_ref, smoothed_for_plot_tuple, header, plot_y_lim, pub, save_plot, plot_name, win):
    """
    Draw the profile plot: per read length, an upper/lower envelope pair for
    each strand (fwd above the axis, rvs below), shaded between the envelopes.
    :param nt_list: list of read lengths to plot
    :param x_ref: x axis reference positions
    :param smoothed_for_plot_tuple: per read length, the 4 smoothed series
        (fwd upper, fwd lower, rvs upper, rvs lower)
    :param header: reference header used as the x-axis label
    :param plot_y_lim: symmetric y limit (0 = automatic)
    :param pub: bool to remove axes and legends (publication style)
    :param save_plot: bool to save plot to file
    :param plot_name: base name for the saved .png
    :param win: smoothing window (1 = unsmoothed; shown in the y label)
    """
    fig = plt.figure(figsize=(10, 5))
    nt_pos = 0
    for smoothed_for_plot in smoothed_for_plot_tuple:
        # forward strand: upper/lower envelope plus shaded band between them
        plt.plot(x_ref, smoothed_for_plot[0], color=_nt_colour(int(nt_list[nt_pos])), label='{0} nt'.format(nt_list[
            nt_pos]),
                 lw=1, alpha=0.2)
        plt.plot(x_ref, smoothed_for_plot[1], color=_nt_colour(int(nt_list[nt_pos])), lw=1, alpha=0.2)
        plt.fill_between(x_ref, smoothed_for_plot[0], smoothed_for_plot[1], color=_nt_colour(int(nt_list[nt_pos])),
                         alpha=0.5)
        # reverse strand: same treatment (values are negative, below the axis)
        plt.plot(x_ref, smoothed_for_plot[2], color=_nt_colour(int(nt_list[nt_pos])), lw=1, alpha=0.2)
        plt.plot(x_ref, smoothed_for_plot[3], color=_nt_colour(int(nt_list[nt_pos])), lw=1, alpha=0.2)
        plt.fill_between(x_ref, smoothed_for_plot[2], smoothed_for_plot[3], color=_nt_colour(int(nt_list[nt_pos])),
                         alpha=0.5)
        nt_pos += 1
    # axhline/xlabel/ylabel/ylim come from the module's `from pylab import *`
    axhline(y=0)
    if pub:
        _pub_plot()
    else:
        xlabel(header)
        if win != 1:
            ylabel('Coverage (smoothed RPMR; win = {})'.format(win))
        else:
            ylabel('Coverage (RPMR)')
        plt.legend(loc='best', fancybox=True, framealpha=0.5)
    if plot_y_lim != 0:
        ylim(-plot_y_lim, plot_y_lim)
    if save_plot:
        plt.savefig('{0}.png'.format(plot_name), dpi=300)
    plt.show()
def _pub_plot():
    """
    Configure the current axes for publication: draw ticks on all four
    edges, hide the tick labels, and remove the surrounding frame.
    """
    plt.tick_params(
        axis='both',       # apply to both the x- and y-axis
        direction='in',    # ticks point into the plot area
        which='both',      # both major and minor ticks are affected
        bottom=True,       # draw ticks on all four edges...
        top=True,
        right=True,
        left=True,
        labelbottom=False, # ...but hide all tick labels
        labelleft=False,
        labelright=False,
        labelsize=15)
    _clear_frame()
def _save_file_name(in_files, header):
    """
    Build a filesystem-safe save-file name from the prefix and header.

    Non-alphanumeric characters become underscores and the name is capped at
    roughly 100 characters.

    :param in_files: output file prefix
    :param header: reference header
    :return: sanitised file name string
    """
    out_file_name = in_files + "_"
    for char in header:
        if len(out_file_name) > 100:
            break
        out_file_name += char if char.isalnum() and char != " " else "_"
    return out_file_name
def _clear_frame(ax=None):
    """
    Hide the frame (spines) of an axes for publication-style plots.

    :param ax: axes to modify; defaults to the current axes
    """
    target = plt.gca() if ax is None else ax
    target.xaxis.set_visible(True)
    target.yaxis.set_visible(True)
    for spine in target.spines.values():
        spine.set_visible(False)
def _nt_colour(nt):
    """
    Map an aligned read length to its plot colour.

    :param nt: aligned read length (int), or "mir" for miRNA
    :return: hex colour code (str); "black" for unknown lengths
    """
    hex_dict = {18: '#669999', 19: '#33cccc', 20: '#33cccc', 21: '#00CC00',
                22: '#FF3399', 23: '#d8d408', 24: '#3333FF', 25: '#cccc00',
                26: '#660033', 27: '#996600', 28: '#336699', 29: '#ff6600',
                30: '#ff99ff', 31: '#669900', 32: '#993333', "mir": '#ff7b00'}
    return hex_dict.get(nt, "black")
| mit |
sudheesh001/oh-mainline | vendor/packages/twisted/twisted/web/error.py | 18 | 12534 | # -*- test-case-name: twisted.web.test.test_error -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exception definitions for L{twisted.web}.
"""
import operator, warnings
from twisted.web import http
class Error(Exception):
    """
    A basic HTTP error.
    @type status: C{str}
    @ivar status: Refers to an HTTP status code, for example L{http.NOT_FOUND}.
    @type message: C{str}
    @ivar message: A short error message, for example "NOT FOUND".
    @type response: C{str}
    @ivar response: A complete HTML document for an error page.
    """
    def __init__(self, code, message=None, response=None):
        """
        Initializes a basic exception.
        @type code: C{str}
        @param code: Refers to an HTTP status code, for example
            L{http.NOT_FOUND}. If no C{message} is given, C{code} is mapped
            to a descriptive string that is used instead.
        @type message: C{str}
        @param message: A short error message, for example "NOT FOUND".
        @type response: C{str}
        @param response: A complete HTML document for an error page.
        """
        if not message:
            try:
                # Map a numeric status code to its standard reason phrase.
                message = http.responses.get(int(code))
            except ValueError:
                # If code wasn't a stringified int, can't map the
                # status code to a descriptive string so keep message
                # unchanged.
                pass
        Exception.__init__(self, code, message, response)
        self.status = code
        self.message = message
        self.response = response
    def __str__(self):
        # NOTE: indexes self.args via Exception.__getitem__, which only
        # exists on Python 2 (this module is Python 2 code); self[0] is the
        # status code and self[1] the message.
        return '%s %s' % (self[0], self[1])
class PageRedirect(Error):
    """
    A request resulted in an HTTP redirect.
    @type location: C{str}
    @ivar location: The location of the redirect which was not followed.
    """
    def __init__(self, code, message=None, response=None, location=None):
        """
        Initializes a page redirect exception.
        @type code: C{str}
        @param code: Refers to an HTTP status code, for example
            L{http.NOT_FOUND}. If no C{message} is given, C{code} is mapped
            to a descriptive string that is used instead.
        @type message: C{str}
        @param message: A short error message, for example "NOT FOUND".
        @type response: C{str}
        @param response: A complete HTML document for an error page.
        @type location: C{str}
        @param location: The location response-header field value. It is an
            absolute URI used to redirect the receiver to a location other
            than the Request-URI so the request can be completed.
        """
        # NOTE(review): this code-to-reason-phrase lookup duplicates
        # Error.__init__ (which runs again below); kept as-is to preserve
        # the exact message composed before " to <location>" is appended.
        if not message:
            try:
                message = http.responses.get(int(code))
            except ValueError:
                # If code wasn't a stringified int, can't map the
                # status code to a descriptive string so keep message
                # unchanged.
                pass
        if location and message:
            # Record the redirect target in the human-readable message.
            message = "%s to %s" % (message, location)
        Error.__init__(self, code, message, response)
        self.location = location
class InfiniteRedirection(Error):
    """
    HTTP redirection is occurring endlessly.
    @type location: C{str}
    @ivar location: The first URL in the series of redirections which was
        not followed.
    """
    def __init__(self, code, message=None, response=None, location=None):
        """
        Initializes an infinite redirection exception.
        @type code: C{str}
        @param code: Refers to an HTTP status code, for example
            L{http.NOT_FOUND}. If no C{message} is given, C{code} is mapped
            to a descriptive string that is used instead.
        @type message: C{str}
        @param message: A short error message, for example "NOT FOUND".
        @type response: C{str}
        @param response: A complete HTML document for an error page.
        @type location: C{str}
        @param location: The location response-header field value. It is an
            absolute URI used to redirect the receiver to a location other
            than the Request-URI so the request can be completed.
        """
        # NOTE(review): identical lookup/append logic to PageRedirect;
        # kept duplicated to preserve behaviour of this legacy module.
        if not message:
            try:
                message = http.responses.get(int(code))
            except ValueError:
                # If code wasn't a stringified int, can't map the
                # status code to a descriptive string so keep message
                # unchanged.
                pass
        if location and message:
            message = "%s to %s" % (message, location)
        Error.__init__(self, code, message, response)
        self.location = location
class UnsupportedMethod(Exception):
    """
    Raised by a resource when faced with a strange request method.
    RFC 2616 (HTTP 1.1) gives us two choices when faced with this situtation:
    If the type of request is known to us, but not allowed for the requested
    resource, respond with NOT_ALLOWED. Otherwise, if the request is something
    we don't know how to deal with in any case, respond with NOT_IMPLEMENTED.
    When this exception is raised by a Resource's render method, the server
    will make the appropriate response.
    This exception's first argument MUST be a sequence of the methods the
    resource *does* support.
    """
    allowedMethods = ()
    def __init__(self, allowedMethods, *args):
        Exception.__init__(self, allowedMethods, *args)
        self.allowedMethods = allowedMethods
        # Validate after assignment: reject a non-sequence first argument.
        # NOTE: operator.isSequenceType and the comma form of `raise` are
        # Python 2 only -- this module predates Python 3 support.
        if not operator.isSequenceType(allowedMethods):
            why = "but my first argument is not a sequence."
            s = ("First argument must be a sequence of"
                 " supported methods, %s" % (why,))
            raise TypeError, s
class SchemeNotSupported(Exception):
    """
    The scheme of a URI was not one of the supported values.
    Raised when code is asked to handle a URI whose scheme it does not
    know how to process.
    """
from twisted.web import resource as _resource
class ErrorPage(_resource.ErrorPage):
    """
    Deprecated alias for L{twisted.web.resource.ErrorPage}.
    Kept only for backwards compatibility; emits a L{DeprecationWarning}
    on construction and otherwise behaves like the canonical class.
    """
    def __init__(self, *args, **kwargs):
        # stacklevel=2 attributes the warning to the caller, not this shim.
        warnings.warn(
            "twisted.web.error.ErrorPage is deprecated since Twisted 9.0. "
            "See twisted.web.resource.ErrorPage.", DeprecationWarning,
            stacklevel=2)
        _resource.ErrorPage.__init__(self, *args, **kwargs)
class NoResource(_resource.NoResource):
    """
    Deprecated alias for L{twisted.web.resource.NoResource}.
    Kept only for backwards compatibility; emits a L{DeprecationWarning}
    on construction and otherwise behaves like the canonical class.
    """
    def __init__(self, *args, **kwargs):
        # stacklevel=2 attributes the warning to the caller, not this shim.
        warnings.warn(
            "twisted.web.error.NoResource is deprecated since Twisted 9.0. "
            "See twisted.web.resource.NoResource.", DeprecationWarning,
            stacklevel=2)
        _resource.NoResource.__init__(self, *args, **kwargs)
class ForbiddenResource(_resource.ForbiddenResource):
    """
    Deprecated alias for L{twisted.web.resource.ForbiddenResource}.
    Kept only for backwards compatibility; emits a L{DeprecationWarning}
    on construction and otherwise behaves like the canonical class.
    """
    def __init__(self, *args, **kwargs):
        # stacklevel=2 attributes the warning to the caller, not this shim.
        warnings.warn(
            "twisted.web.error.ForbiddenResource is deprecated since Twisted "
            "9.0. See twisted.web.resource.ForbiddenResource.",
            DeprecationWarning, stacklevel=2)
        _resource.ForbiddenResource.__init__(self, *args, **kwargs)
class RenderError(Exception):
    """
    Base exception class for all errors which can occur during template
    rendering.
    Subclasses such as L{MissingRenderMethod} and L{MissingTemplateLoader}
    describe the specific failure.
    """
class MissingRenderMethod(RenderError):
    """
    Raised when a lookup for a render method on an element fails.

    @ivar element: The element which did not have the render method.
    @ivar renderName: The name of the renderer which could not be found.
    """
    def __init__(self, element, renderName):
        self.element = element
        self.renderName = renderName
        RenderError.__init__(self, element, renderName)
    def __repr__(self):
        return '%r: %r had no render method named %r' % (
            self.__class__.__name__, self.element, self.renderName)
class MissingTemplateLoader(RenderError):
    """
    Raised when trying to render an Element that has no template loader,
    i.e. no C{loader} attribute to supply its document.

    @ivar element: The Element which did not have a document factory.
    """
    def __init__(self, element):
        self.element = element
        RenderError.__init__(self, element)
    def __repr__(self):
        return '%r: %r had no loader' % (self.__class__.__name__,
                                         self.element)
class UnexposedMethodError(Exception):
    """
    Raised on any attempt to get a method which has not been exposed.
    (Raised by the method-exposure machinery elsewhere in the package,
    not within this module.)
    """
class UnfilledSlot(Exception):
    """
    During flattening, a slot with no associated data was encountered.
    Flattening failures are ultimately reported wrapped in
    L{FlattenerError}.
    """
class UnsupportedType(Exception):
    """
    During flattening, an object of a type which cannot be flattened was
    encountered.
    Flattening failures are ultimately reported wrapped in
    L{FlattenerError}.
    """
class FlattenerError(Exception):
    """
    An error occurred while flattening an object.
    @ivar _roots: A list of the objects on the flattener's stack at the time
        the unflattenable object was encountered. The first element is least
        deeply nested object and the last element is the most deeply nested.
    """
    def __init__(self, exception, roots, traceback):
        """
        @param exception: The original exception raised during flattening.
        @param roots: The objects on the flattener's stack, outermost first.
        @param traceback: The extracted traceback entries of the original
            exception (formatted later via L{traceback.format_list}).
        """
        self._exception = exception
        self._roots = roots
        self._traceback = traceback
        Exception.__init__(self, exception, roots, traceback)
    def _formatRoot(self, obj):
        """
        Convert an object from C{self._roots} to a string suitable for
        inclusion in a render-traceback (like a normal Python traceback, but
        can include "frame" source locations which are not in Python source
        files).
        @param obj: Any object which can be a render step I{root}.
            Typically, L{Tag}s, strings, and other simple Python types.
        @return: A string representation of C{obj}.
        @rtype: L{str}
        """
        # There's a circular dependency between this class and 'Tag', although
        # only for an isinstance() check.
        from twisted.web.template import Tag
        if isinstance(obj, (str, unicode)):
            # It's somewhat unlikely that there will ever be a str in the roots
            # list. However, something like a MemoryError during a str.replace
            # call (eg, replacing " with ") could possibly cause this.
            # Likewise, UTF-8 encoding a unicode string to a byte string might
            # fail like this.
            if len(obj) > 40:
                # Truncate long strings, keeping 20 characters from each end;
                # 'prefix' skips the repr type marker (2 for u'...' on py2).
                if isinstance(obj, str):
                    prefix = 1
                else:
                    prefix = 2
                return repr(obj[:20])[:-1] + '<...>' + repr(obj[-20:])[prefix:]
            else:
                return repr(obj)
        elif isinstance(obj, Tag):
            if obj.filename is None:
                # Tag was constructed in code rather than parsed from a file,
                # so there is no source location to report.
                return 'Tag <' + obj.tagName + '>'
            else:
                return "File \"%s\", line %d, column %d, in \"%s\"" % (
                    obj.filename, obj.lineNumber,
                    obj.columnNumber, obj.tagName)
        else:
            return repr(obj)
    def __repr__(self):
        """
        Present a string representation which includes a template traceback, so
        we can tell where this error occurred in the template, as well as in
        Python.
        """
        # Avoid importing things unnecessarily until we actually need them;
        # since this is an 'error' module we should be extra paranoid about
        # that.
        from traceback import format_list
        if self._roots:
            roots = ' ' + '\n '.join([
                self._formatRoot(r) for r in self._roots]) + '\n'
        else:
            roots = ''
        if self._traceback:
            # Flatten the formatted traceback entries into single lines.
            traceback = '\n'.join([
                line
                for entry in format_list(self._traceback)
                for line in entry.splitlines()]) + '\n'
        else:
            traceback = ''
        return (
            'Exception while flattening:\n' +
            roots + traceback +
            self._exception.__class__.__name__ + ': ' +
            str(self._exception) + '\n')
    def __str__(self):
        # str() deliberately shows the full render traceback as well.
        return repr(self)
# Names exported by ``from twisted.web.error import *``.
__all__ = [
    'Error', 'PageRedirect', 'InfiniteRedirection', 'ErrorPage', 'NoResource',
    'ForbiddenResource', 'RenderError', 'MissingRenderMethod',
    'MissingTemplateLoader', 'UnexposedMethodError', 'UnfilledSlot',
    'UnsupportedType', 'FlattenerError'
    ]
| agpl-3.0 |
saukrIppl/seahub | thirdpart/openpyxl-2.3.0-py2.7.egg/openpyxl/descriptors/serialisable.py | 4 | 4329 | from __future__ import absolute_import
# copyright openpyxl 2010-2015
from keyword import kwlist
# Python keywords; XML tags colliding with one of these are prefixed with
# "_" when mapped to descriptor attributes (see Serialisable.from_tree).
KEYWORDS = frozenset(kwlist)
from . import _Serialiasable
from .sequence import Sequence, NestedSequence
from .namespace import namespaced
from openpyxl.compat import safe_string
from openpyxl.xml.functions import (
Element,
localname,
)
# Built-in container types serialised as sequences of child elements.
seq_types = (list, tuple)
class Serialisable(_Serialiasable):
    """
    Objects can serialise to XML their attributes and child objects.
    The following class attributes are created by the metaclass at runtime:
    __attrs__ = attributes
    __nested__ = single-valued child treated as an attribute
    __elements__ = child elements
    __namespaced__ = (attribute name, namespaced attribute name) pairs
    """
    __attrs__ = None
    __nested__ = None
    __elements__ = None
    __namespaced__ = None
    # starting index used when numbering sequence children in to_tree()
    idx_base = 0
    @property
    def tagname(self):
        # Subclasses must override this with their XML tag name.
        raise(NotImplementedError)
    namespace = None
    @classmethod
    def from_tree(cls, node):
        """
        Create object from XML
        """
        attrib = dict(node.attrib)
        # fold namespace-qualified attributes back onto their plain names
        for key, ns in cls.__namespaced__:
            if ns in attrib:
                attrib[key] = attrib[ns]
                del attrib[ns]
        for el in node:
            tag = localname(el)
            # tags that collide with Python keywords are stored with a
            # leading underscore on the class
            if tag in KEYWORDS:
                tag = "_" + tag
            desc = getattr(cls, tag, None)
            # skip unknown children and computed properties
            if desc is None or isinstance(desc, property):
                continue
            if hasattr(desc, 'from_tree'):
                #descriptor manages conversion
                obj = desc.from_tree(el)
            else:
                if hasattr(desc.expected_type, "from_tree"):
                    #complex type
                    obj = desc.expected_type.from_tree(el)
                else:
                    #primitive
                    obj = el.text
            if isinstance(desc, NestedSequence):
                attrib[tag] = obj
            elif isinstance(desc, Sequence):
                # repeated child elements accumulate into a list
                attrib.setdefault(tag, [])
                attrib[tag].append(obj)
            else:
                attrib[tag] = obj
        # the collected attributes double as constructor keyword arguments
        return cls(**attrib)
    def to_tree(self, tagname=None, idx=None, namespace=None):
        # NOTE: `idx` is accepted for interface compatibility but unused here.
        if tagname is None:
            tagname = self.tagname
        # keywords have to be masked
        if tagname.startswith("_"):
            tagname = tagname[1:]
        tagname = namespaced(self, tagname, namespace)
        namespace = getattr(self, "namespace", namespace)
        # __iter__ below yields the (attr, string) pairs used as XML attributes
        attrs = dict(self)
        for key, ns in self.__namespaced__:
            if key in attrs:
                attrs[ns] = attrs[key]
                del attrs[key]
        el = Element(tagname, attrs)
        for child_tag in self.__elements__:
            desc = getattr(self.__class__, child_tag, None)
            obj = getattr(self, child_tag)
            if isinstance(obj, seq_types):
                if isinstance(desc, NestedSequence):
                    # wrap sequence in container
                    if not obj:
                        continue
                    nodes = [desc.to_tree(child_tag, obj, namespace)]
                elif isinstance(desc, Sequence):
                    # sequence
                    desc.idx_base = self.idx_base
                    nodes = (desc.to_tree(child_tag, obj, namespace))
                else: # property
                    nodes = (v.to_tree(child_tag, namespace) for v in obj)
                for node in nodes:
                    el.append(node)
            else:
                if child_tag in self.__nested__:
                    # single-valued child rendered by its descriptor
                    node = desc.to_tree(child_tag, obj, namespace)
                elif obj is None:
                    continue
                else:
                    node = obj.to_tree(child_tag)
                if node is not None:
                    el.append(node)
        return el
    def __iter__(self):
        # Yield (attribute name, stringified value) for all set attributes;
        # None values are treated as "unset" and omitted.
        for attr in self.__attrs__:
            value = getattr(self, attr)
            if value is not None:
                yield attr, safe_string(value)
    def __eq__(self, other):
        # equal when both attributes and all child elements match
        if not dict(self) == dict(other):
            return False
        for el in self.__elements__:
            if getattr(self, el) != getattr(other, el):
                return False
        return True
    def __ne__(self, other):
        return not self == other
| apache-2.0 |
yingding/emotionGame | Ros/catkin_ws/build/catkin_generated/installspace/_setup_util.py | 3 | 12407 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
# Maps environment variable name -> workspace subfolder(s) prepended to it
# ('' means the workspace root itself; lists contribute multiple entries).
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'CPATH': 'include',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code that resets environment variables by unrolling
    modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.
    '''
    lines = []
    # roll back against a snapshot so earlier updates don't affect later keys
    snapshot = copy.copy(environ)
    for key in sorted(env_var_subfolders.keys()):
        raw = env_var_subfolders[key]
        subfolders = raw if isinstance(raw, list) else [raw]
        for subfolder in subfolders:
            value = _rollback_env_variable(snapshot, key, subfolder)
            if value is None:
                continue
            environ[key] = value
            lines.append(assignment(key, value))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolder):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param environ: environment dict to inspect (not modified here)
    :param name: name of the environment variable, ``str``
    :param subfolder: str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable, or ``None`` when
        nothing was removed (callers use that to skip emitting an assignment)
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    if subfolder:
        # normalize: strip one leading and one trailing (alt)separator so the
        # os.path.join below produces a canonical path to search for
        if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
            subfolder = subfolder[1:]
        if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
            subfolder = subfolder[:-1]
    for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
        path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
        path_to_remove = None
        for env_path in env_paths:
            # compare ignoring a single trailing separator on the entry
            env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
            if env_path_clean == path_to_find:
                path_to_remove = env_path
                break
        if path_to_remove:
            env_paths.remove(path_to_remove)
            value_modified = True
    new_value = os.pathsep.join(env_paths)
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Return all catkin workspaces listed in CMAKE_PREFIX_PATH.

    :param include_fuerte: also treat paths starting with '/opt/ros/fuerte'
        as workspaces, ``bool``
    :param include_non_existing: also keep paths that do not exist on disk,
        ``bool``
    '''
    value = environ.get('CMAKE_PREFIX_PATH', '')
    candidates = [path for path in value.split(os.pathsep) if path]
    workspaces = []
    for path in candidates:
        is_workspace = os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))
        is_fuerte = include_fuerte and path.startswith('/opt/ros/fuerte')
        is_missing = include_non_existing and not os.path.exists(path)
        if is_workspace or is_fuerte or is_missing:
            workspaces.append(path)
    return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code that prepends the workspace folders to the relevant
    environment variables; CMAKE_PREFIX_PATH is always emitted first.
    '''
    lines = [comment('prepend folders of workspaces to environment variables')]
    paths = [path for path in workspaces.split(os.pathsep) if path]
    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
    for key in sorted(k for k in env_var_subfolders.keys() if k != 'CMAKE_PREFIX_PATH'):
        prefix = _prefix_env_variable(environ, key, paths, env_var_subfolders[key])
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
    '''
    Return the prefix to prepend to the environment variable NAME so that
    each existing workspace path/subfolder appears exactly once, without
    duplicating entries already present in the environment.
    '''
    existing = [path for path in environ.get(name, '').split(os.pathsep) if path]
    if not isinstance(subfolders, list):
        subfolders = [subfolders]
    to_prepend = []
    for path in paths:
        for subfolder in subfolders:
            candidate = os.path.join(path, subfolder) if subfolder else path
            # skip nonexistent paths
            if not os.path.exists(candidate):
                continue
            # exclude any path already in env and any path already collected
            if candidate in existing or candidate in to_prepend:
                continue
            to_prepend.append(candidate)
    prefix_str = os.pathsep.join(to_prepend)
    if prefix_str != '' and existing:
        # a separator is needed between the new prefix and the old value
        prefix_str += os.pathsep
    return prefix_str
def assignment(key, value):
    """Return shell code assigning *value* to the variable *key*."""
    # POSIX shells get a quoted export; Windows 'set' takes the raw string.
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Return *msg* formatted as a shell comment for the target platform."""
    # batch files use REM; POSIX shells use '#'
    template = 'REM %s' if IS_WINDOWS else '# %s'
    return template % msg
def prepend(environ, key, prefix):
    """
    Return shell code prepending *prefix* to the variable *key*, or a plain
    assignment when the variable is currently unset or empty.
    """
    if not environ.get(key):
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code exporting the environment hooks found in all
    workspaces on the given CMAKE_PREFIX_PATH.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    # "generic" hooks use the platform-wide extension (sh/bat); "specific"
    # hooks use the extension of the shell currently running (CATKIN_SHELL).
    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    # track hooks by filename so a same-named hook from a higher-priority
    # workspace can replace one found earlier
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # iterate lowest-priority workspaces first so that hooks with the same
    # filename from workspaces earlier in CMAKE_PREFIX_PATH win
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # generic hooks are sourced before shell-specific ones
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    # export indexed variables for the calling setup.SHELL script to source
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # environment at generation time
        CMAKE_PREFIX_PATH = '/home/sebtut/catkin_ws/devel;/opt/ros/indigo'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        # with --extend we keep the caller's environment instead of
        # rolling back variables set by a previously sourced setup file
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        # the generated shell code is consumed by the sourcing setup script
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| mit |
jayofdoom/cloud-init-debian-pkg | cloudinit/cs_utils.py | 5 | 3321 | # vi: ts=4 expandtab
#
# Copyright (C) 2014 CloudSigma
#
# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
cepko implements easy-to-use communication with CloudSigma's VMs through
a virtual serial port without bothering with formatting the messages
properly nor parsing the output with the specific and sometimes
confusing shell tools for that purpose.
Having the server definition accessible by the VM can be useful in various
ways. For example it is possible to easily determine from within the VM,
which network interfaces are connected to public and which to private network.
Another use is to pass some data to initial VM setup scripts, like setting the
hostname to the VM name or passing ssh public keys through server meta.
For more information take a look at the Server Context section of CloudSigma
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
import json
import platform
import serial
# Virtual serial port exposing CloudSigma's server context to the guest.
SERIAL_PORT = '/dev/ttyS1'
if platform.system() == 'Windows':
    SERIAL_PORT = 'COM2'
class Cepko(object):
    """
    Minimal client for CloudSigma's server-context serial protocol.

    One instance of this object can serve one or more queries to the
    serial port.
    """
    # Requests are framed between '<' and '>' markers, one item per line.
    request_pattern = "<\n{}\n>"
    def get(self, key="", request_pattern=None):
        """Issue a request for *key*, optionally with a custom pattern."""
        pattern = self.request_pattern if request_pattern is None else request_pattern
        return CepkoResult(pattern.format(key))
    def all(self):
        """Fetch the whole server context (empty key)."""
        return self.get()
    def meta(self, key=""):
        """Fetch a key from the server's meta section."""
        return self.get(key, self.request_pattern.format("/meta/{}"))
    def global_context(self, key=""):
        """Fetch a key from the global context section."""
        return self.get(key, self.request_pattern.format("/global_context/{}"))
class CepkoResult(object):
    """
    CepkoResult executes the request to the virtual serial port as soon
    as the instance is initialized and stores the result in both raw and
    marshalled format.
    """
    def __init__(self, request):
        # the request string is sent immediately on construction
        self.request = request
        self.raw_result = self._execute()
        # marshalled form: JSON-decoded when possible, raw string otherwise
        self.result = self._marshal(self.raw_result)
    def _execute(self):
        # NOTE(review): write() with a str and strip('\x04\n') assume
        # Python 2 byte-string semantics; py3 would need bytes — confirm.
        connection = serial.Serial(SERIAL_PORT)
        connection.write(self.request)
        # strip the EOT marker (0x04) and trailing newline from the reply
        return connection.readline().strip('\x04\n')
    def _marshal(self, raw_result):
        # best effort: fall back to the raw string when it is not JSON
        try:
            return json.loads(raw_result)
        except ValueError:
            return raw_result
    # Container dunders delegate to the marshalled result so the object
    # can be used like the dict/list the server returned.
    def __len__(self):
        return self.result.__len__()
    def __getitem__(self, key):
        return self.result.__getitem__(key)
    def __contains__(self, item):
        return self.result.__contains__(item)
    def __iter__(self):
        return self.result.__iter__()
| gpl-3.0 |
Novasoft-India/OperERP-AM-Motors | openerp/addons/account/wizard/account_report_general_ledger.py | 56 | 3202 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
    # Transient wizard model collecting the options for the
    # General Ledger report before it is rendered.
    _inherit = "account.common.account.report"
    _name = "account.report.general.ledger"
    _description = "General Ledger Report"
    _columns = {
        'landscape': fields.boolean("Landscape Mode"),
        'initial_balance': fields.boolean('Include Initial Balances',
                                    help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
        'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
        'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
        'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'landscape': True,
        'amount_currency': True,
        'sortby': 'sort_date',
        'initial_balance': False,
    }
    def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
        # Clearing the fiscal year makes the initial balance meaningless,
        # so reset it; with a fiscal year selected no change is returned.
        res = {}
        if not fiscalyear:
            res['value'] = {'initial_balance': False}
        return res
    def _print_report(self, cr, uid, ids, data, context=None):
        # Merge the wizard's options into the report payload and dispatch
        # to the landscape or portrait report variant.
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['landscape', 'initial_balance', 'amount_currency', 'sortby'])[0])
        if not data['form']['fiscalyear_id']:# GTK client problem onchange does not consider in save record
            data['form'].update({'initial_balance': False})
        if data['form']['landscape']:
            return { 'type': 'ir.actions.report.xml', 'report_name': 'account.general.ledger_landscape', 'datas': data}
        return { 'type': 'ir.actions.report.xml', 'report_name': 'account.general.ledger', 'datas': data}
# legacy OpenERP pattern: instantiating the class registers the model
account_report_general_ledger()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
licongyu95/learning_python | mysite/mysite/apps/books/models.py | 1 | 1167 | # coding=utf-8
from django.db import models
# Create your models here.
class Publisher(models.Model):
    # A publishing house: name, full postal address and web presence.
    name = models.CharField(max_length=30)
    address = models.CharField(max_length=50)
    city = models.CharField(max_length=60)
    state_province = models.CharField(max_length=30)
    country = models.CharField(max_length=50)
    website = models.URLField()
    def __unicode__(self):
        # human-readable label (Python 2 / old Django admin)
        return self.name
    class Meta:
        # list publishers alphabetically by default
        ordering = ['name']
class Author(models.Model):
    # A book author; email is optional.
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=40)
    #email = models.EmailField('e-mail',blank=True)
    email = models.EmailField(verbose_name='e-mail',blank=True)
    def __unicode__(self):
        # human-readable label (Python 2 / old Django admin)
        return u'%s %s' % (self.first_name, self.last_name)
    class Meta:
        # list authors alphabetically by first name by default
        ordering = ['first_name']
class Book(models.Model):
    # A published title with its authors and single publisher.
    title = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author)
    # NOTE(review): pre-Django-2.0 ForeignKey without on_delete — modern
    # Django requires an explicit on_delete argument.
    publisher = models.ForeignKey(Publisher)
    publication_date =models.DateField()
    def __unicode__(self):
        # human-readable label (Python 2 / old Django admin)
        return self.title
    class Meta:
        # list books alphabetically by title by default
        ordering = ['title']
| unlicense |
cloudera/hue | desktop/core/ext-py/openpyxl-2.6.4/openpyxl/descriptors/serialisable.py | 2 | 7386 | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
from copy import copy
from keyword import kwlist
KEYWORDS = frozenset(kwlist)
from . import Descriptor
from . import _Serialiasable
from .sequence import (
Sequence,
NestedSequence,
MultiSequencePart,
)
from .namespace import namespaced
from openpyxl.compat import safe_string
from openpyxl.xml.functions import (
Element,
localname,
)
seq_types = (list, tuple)
class Serialisable(_Serialiasable):
    """
    Objects can serialise to XML their attributes and child objects.
    The following class attributes are created by the metaclass at runtime:
    __attrs__ = attributes
    __nested__ = single-valued child treated as an attribute
    __elements__ = child elements
    """
    __attrs__ = None
    __nested__ = None
    __elements__ = None
    __namespaced__ = None
    # starting index used when serialising indexed sequences
    idx_base = 0
    @property
    def tagname(self):
        # subclasses must provide the XML tag they serialise to
        raise(NotImplementedError)
    namespace = None
    @classmethod
    def from_tree(cls, node):
        """
        Create object from XML
        """
        # strip known namespaces from attributes
        attrib = dict(node.attrib)
        for key, ns in cls.__namespaced__:
            if ns in attrib:
                attrib[key] = attrib[ns]
                del attrib[ns]
        # strip attributes with unknown namespaces
        for key in list(attrib):
            if key.startswith('{'):
                del attrib[key]
            elif key in KEYWORDS:
                # Python keywords are stored with a leading underscore
                attrib["_" + key] = attrib[key]
                del attrib[key]
            elif "-" in key:
                # hyphenated XML names become underscored Python names
                n = key.replace("-", "_")
                attrib[n] = attrib[key]
                del attrib[key]
        if node.text and "attr_text" in cls.__attrs__:
            attrib["attr_text"] = node.text
        for el in node:
            tag = localname(el)
            if tag in KEYWORDS:
                tag = "_" + tag
            desc = getattr(cls, tag, None)
            # skip children with no matching descriptor or plain properties
            if desc is None or isinstance(desc, property):
                continue
            if hasattr(desc, 'from_tree'):
                #descriptor manages conversion
                obj = desc.from_tree(el)
            else:
                if hasattr(desc.expected_type, "from_tree"):
                    #complex type
                    obj = desc.expected_type.from_tree(el)
                else:
                    #primitive
                    obj = el.text
            # store according to the descriptor's sequence semantics
            if isinstance(desc, NestedSequence):
                attrib[tag] = obj
            elif isinstance(desc, Sequence):
                attrib.setdefault(tag, [])
                attrib[tag].append(obj)
            elif isinstance(desc, MultiSequencePart):
                attrib.setdefault(desc.store, [])
                attrib[desc.store].append(obj)
            else:
                attrib[tag] = obj
        return cls(**attrib)
    def to_tree(self, tagname=None, idx=None, namespace=None):
        """Serialise this object (and its children) to an XML Element."""
        if tagname is None:
            tagname = self.tagname
        # keywords have to be masked
        if tagname.startswith("_"):
            tagname = tagname[1:]
        tagname = namespaced(self, tagname, namespace)
        namespace = getattr(self, "namespace", namespace)
        # attributes come from __iter__ (string-converted, non-None values)
        attrs = dict(self)
        for key, ns in self.__namespaced__:
            if key in attrs:
                attrs[ns] = attrs[key]
                del attrs[key]
        el = Element(tagname, attrs)
        if "attr_text" in self.__attrs__:
            el.text = safe_string(getattr(self, "attr_text"))
        for child_tag in self.__elements__:
            desc = getattr(self.__class__, child_tag, None)
            obj = getattr(self, child_tag)
            if hasattr(desc, "namespace") and hasattr(obj, 'namespace'):
                obj.namespace = desc.namespace
            if isinstance(obj, seq_types):
                if isinstance(desc, NestedSequence):
                    # wrap sequence in container
                    if not obj:
                        continue
                    nodes = [desc.to_tree(child_tag, obj, namespace)]
                elif isinstance(desc, Sequence):
                    # sequence
                    desc.idx_base = self.idx_base
                    nodes = (desc.to_tree(child_tag, obj, namespace))
                else: # property
                    nodes = (v.to_tree(child_tag, namespace) for v in obj)
                for node in nodes:
                    el.append(node)
            else:
                if child_tag in self.__nested__:
                    node = desc.to_tree(child_tag, obj, namespace)
                elif obj is None:
                    continue
                else:
                    node = obj.to_tree(child_tag)
                if node is not None:
                    el.append(node)
        return el
    def __iter__(self):
        """Yield (name, string value) pairs for the XML attributes."""
        for attr in self.__attrs__:
            value = getattr(self, attr)
            if attr.startswith("_"):
                # unmask keyword attributes
                attr = attr[1:]
            elif attr != "attr_text" and "_" in attr:
                desc = getattr(self.__class__, attr)
                if getattr(desc, "hyphenated", False):
                    attr = attr.replace("_", "-")
            if attr != "attr_text" and value is not None:
                yield attr, safe_string(value)
    def __eq__(self, other):
        # equal iff same class, same attributes and same child elements
        if not self.__class__ == other.__class__:
            return False
        elif not dict(self) == dict(other):
            return False
        for el in self.__elements__:
            if getattr(self, el) != getattr(other, el):
                return False
        return True
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        # multi-line repr listing every attribute and element value
        s = u"<{0}.{1} object>\nParameters:".format(
            self.__module__,
            self.__class__.__name__
        )
        args = []
        for k in self.__attrs__ + self.__elements__:
            v = getattr(self, k)
            if isinstance(v, Descriptor):
                v = None
            args.append(u"{0}={1}".format(k, repr(v)))
        args = u", ".join(args)
        return u"\n".join([s, args])
    def __hash__(self):
        # lists are converted to tuples so the field vector is hashable
        fields = []
        for attr in self.__attrs__ + self.__elements__:
            val = getattr(self, attr)
            if isinstance(val, list):
                val = tuple(val)
            fields.append(val)
        return hash(tuple(fields))
    def __add__(self, other):
        """Merge two instances: self's values win, sequences concatenate."""
        if type(self) != type(other):
            raise TypeError("Cannot combine instances of different types")
        vals = {}
        for attr in self.__attrs__:
            vals[attr] = getattr(self, attr) or getattr(other, attr)
        for el in self.__elements__:
            a = getattr(self, el)
            b = getattr(other, el)
            if a and b:
                vals[el] = a + b
            else:
                vals[el] = a or b
        return self.__class__(**vals)
    def __copy__(self):
        # serialise to xml and back to avoid shallow copies
        xml = self.to_tree(tagname="dummy")
        cp = self.__class__.from_tree(xml)
        # copy any non-persisted attributes
        for k in self.__dict__:
            if k not in self.__attrs__ + self.__elements__:
                v = copy(getattr(self, k))
                setattr(cp, k, v)
        return cp
| apache-2.0 |
unreal666/outwiker | plugins/datagraph/datagraph/defines.py | 3 | 2042 | # -*- coding: UTF-8 -*-
"""Default values of the parameters"""
GRAPH_WIDTH_NAME = u'width'
GRAPH_WIDTH = u'700'
GRAPH_HEIGHT_NAME = u'height'
GRAPH_HEIGHT = u'300'
GRAPH_XAXIS_NAME = u'x'
GRAPH_YAXIS_NAME = u'y'
GRAPH_TITLE_NAME = u'title'
GRAPH_TOOLTIP_NAME = u'tooltip'
GRAPH_LEGEND_NAME = u'legend'
# Sequence of the default colors
CURVE_COLOR_NAME = u'color'
CURVE_COLORS = [u'#0051FF', u'#FF0000', u'#19D400', u'#000000',
u'#FF8214', u'#B700FF', u'#1E9E19', u'#9C571F',
u'#8C8741']
CURVE_SYMBOLS = [u'circle',
u'square',
u'diamond',
u'triangle',
u'triangle-down']
CURVE_STYLES = [u'solid', u'longdash', u'shortdash', u'shortdot',
u'shortdashdot', u'shortdashdotdot', u'dot', u'dash',
u'dashdot', u'longdashdot', u'longdashdotdot']
CURVE_STYLE_NAME = u'style'
CURVE_STYLE_AUTO = u'auto'
# Default curve thickness
CURVE_WIDTH_NAME = u'width'
CURVE_WIDTH = u'3'
# Numbers of the columns in data
CURVE_YCOL_NUMBER_NAME = u'ycol'
CURVE_YCOL_NUMBER = None
CURVE_XCOL_NUMBER_NAME = u'xcol'
CURVE_XCOL_NUMBER = None
# X coordinates are the row number
CURVE_XCOL_NUMBER_VALUE = u'number'
# Data source
CURVE_DATA_NAME = u'data'
CURVE_DATA_OBJECT_NAME = u'data'
# If CURVE_DATA is None, data reads from command content
# else CURVE_DATA is name of the Attachment
CURVE_DATA = None
CURVE_TITLE_NAME = u'title'
CURVE_HIDE_NAME = u'hide'
DATA_COLUMNS_SEPARATOR_NAME = u'colsep'
DATA_COLUMNS_SEPARATOR_DEFAULT = r'\s+'
# For selection render engine (at the time is not used)
RENDER_NAME = u'render'
RENDER_HIGHCHARTS = u'highcharts'
# Axis properties
AXIS_TITLE_NAME = u'title'
AXIS_MIN_NAME = u'min'
AXIS_MAX_NAME = u'max'
# Axis types
AXIS_TYPE_NAME = u'type'
AXIS_TYPE_DATE = u'datetime'
# Data properties
DATA_FORMAT_COL = u'formatcol'
DATA_SKIP_ROWS_NAME = u'skiprows'
AXIS_MAJOR_TICK_INTERVAL_NAME = u'tickstep'
TOOLBAR_DATAGRAPH = 'Plugin_DataGraph'
MENU_DATAGRAPH = 'Plugin_DataGraph'
| gpl-3.0 |
CredoReference/edx-platform | openedx/core/djangoapps/bookmarks/tests/test_tasks.py | 9 | 7284 | """
Tests for tasks.
"""
import ddt
from nose.plugins.attrib import attr
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import check_mongo_calls, ItemFactory
from ..models import XBlockCache
from ..tasks import _calculate_course_xblocks_data, _update_xblocks_cache
from .test_models import BookmarksTestsBase
@attr(shard=9)
@ddt.ddt
class XBlockCacheTaskTests(BookmarksTestsBase):
    """
    Test the XBlockCache model.
    """
    def setUp(self):
        super(XBlockCacheTaskTests, self).setUp()
        # Maps each block's usage key to the list of expected traversal
        # paths (each path is a root-to-parent list of usage keys).
        self.course_expected_cache_data = {
            self.course.location: [
                [],
            ], self.chapter_1.location: [
                [
                    self.course.location,
                ],
            ], self.chapter_2.location: [
                [
                    self.course.location,
                ],
            ], self.sequential_1.location: [
                [
                    self.course.location,
                    self.chapter_1.location,
                ],
            ], self.sequential_2.location: [
                [
                    self.course.location,
                    self.chapter_1.location,
                ],
            ], self.vertical_1.location: [
                [
                    self.course.location,
                    self.chapter_1.location,
                    self.sequential_1.location,
                ],
            ], self.vertical_2.location: [
                [
                    self.course.location,
                    self.chapter_1.location,
                    self.sequential_2.location,
                ],
            ], self.vertical_3.location: [
                [
                    self.course.location,
                    self.chapter_1.location,
                    self.sequential_2.location,
                ],
            ],
        }
        # Second course; other_vertical_1 has two parents, hence two paths.
        self.other_course_expected_cache_data = { # pylint: disable=invalid-name
            self.other_course.location: [
                [],
            ], self.other_chapter_1.location: [
                [
                    self.other_course.location,
                ],
            ], self.other_sequential_1.location: [
                [
                    self.other_course.location,
                    self.other_chapter_1.location,
                ],
            ], self.other_sequential_2.location: [
                [
                    self.other_course.location,
                    self.other_chapter_1.location,
                ],
            ], self.other_vertical_1.location: [
                [
                    self.other_course.location,
                    self.other_chapter_1.location,
                    self.other_sequential_1.location,
                ],
                [
                    self.other_course.location,
                    self.other_chapter_1.location,
                    self.other_sequential_2.location,
                ]
            ], self.other_vertical_2.location: [
                [
                    self.other_course.location,
                    self.other_chapter_1.location,
                    self.other_sequential_1.location,
                ],
            ],
        }
    # (store type, children per block, tree depth, expected mongo calls)
    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 2, 2, 4),
        (ModuleStoreEnum.Type.mongo, 4, 2, 4),
        (ModuleStoreEnum.Type.mongo, 2, 3, 5),
        (ModuleStoreEnum.Type.mongo, 4, 3, 5),
        (ModuleStoreEnum.Type.mongo, 2, 4, 6),
        # (ModuleStoreEnum.Type.mongo, 4, 4, 6), Too slow.
        (ModuleStoreEnum.Type.split, 2, 2, 3),
        (ModuleStoreEnum.Type.split, 4, 2, 3),
        (ModuleStoreEnum.Type.split, 2, 3, 3),
        (ModuleStoreEnum.Type.split, 2, 4, 3),
    )
    @ddt.unpack
    def test_calculate_course_xblocks_data_queries(self, store_type, children_per_block, depth, expected_mongo_calls):
        """
        Verify the mongo query count stays bounded while computing data.
        """
        course = self.create_course_with_blocks(children_per_block, depth, store_type)
        # clear cache to get consistent query counts
        self.clear_caches()
        with check_mongo_calls(expected_mongo_calls):
            blocks_data = _calculate_course_xblocks_data(course.id)
            self.assertGreater(len(blocks_data), children_per_block ** depth)
    @ddt.data(
        ('course',),
        ('other_course',)
    )
    @ddt.unpack
    def test_calculate_course_xblocks_data(self, course_attr):
        """
        Test that the xblocks data is calculated correctly.
        """
        course = getattr(self, course_attr)
        blocks_data = _calculate_course_xblocks_data(course.id)
        expected_cache_data = getattr(self, course_attr + '_expected_cache_data')
        for usage_key, __ in expected_cache_data.items():
            for path_index, path in enumerate(blocks_data[unicode(usage_key)]['paths']):
                for path_item_index, path_item in enumerate(path):
                    self.assertEqual(
                        path_item['usage_key'], expected_cache_data[usage_key][path_index][path_item_index]
                    )
    # (course attribute name, expected number of SQL queries)
    @ddt.data(
        ('course', 47),
        ('other_course', 34)
    )
    @ddt.unpack
    def test_update_xblocks_cache(self, course_attr, expected_sql_queries):
        """
        Test that the xblocks data is persisted correctly.
        """
        course = getattr(self, course_attr)
        with self.assertNumQueries(expected_sql_queries):
            _update_xblocks_cache(course.id)
        expected_cache_data = getattr(self, course_attr + '_expected_cache_data')
        for usage_key, __ in expected_cache_data.items():
            xblock_cache = XBlockCache.objects.get(usage_key=usage_key)
            for path_index, path in enumerate(xblock_cache.paths):
                for path_item_index, path_item in enumerate(path):
                    self.assertEqual(
                        path_item.usage_key, expected_cache_data[usage_key][path_index][path_item_index + 1]
                    )
        # a second run with nothing to update should be nearly query-free
        with self.assertNumQueries(3):
            _update_xblocks_cache(course.id)
    def test_update_xblocks_cache_with_display_name_none(self):
        """
        Test that the xblocks data is persisted correctly with display_name=None.
        """
        block_with_display_name_none = ItemFactory.create(
            parent_location=self.sequential_2.location,
            category='vertical', display_name=None
        )
        _update_xblocks_cache(self.course.id)
        self.course_expected_cache_data.update(
            {
                block_with_display_name_none.location: [
                    [
                        self.course.location,
                        self.chapter_1.location,
                        self.sequential_2.location,
                    ]
                ]
            }
        )
        for usage_key, __ in self.course_expected_cache_data.items():
            xblock_cache = XBlockCache.objects.get(usage_key=usage_key)
            for path_index, path in enumerate(xblock_cache.paths):
                for path_item_index, path_item in enumerate(path):
                    self.assertEqual(
                        path_item.usage_key,
                        self.course_expected_cache_data[usage_key][path_index][path_item_index + 1]
                    )
| agpl-3.0 |
rande/python-element | element/standalone/skeleton/proxy.py | 1 | 2620 | #
# Copyright 2014 Thomas Rabaix <thomas.rabaix@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script initialize a small reverse proxy with a basic support for
# rendering esi:include tag.
import sys, os, tornado, argparse, logging
from element.proxy import ProxyState, FilesWatcher, ProxyTCPServer
logging.basicConfig(format="[%(asctime)-15s] proxy.%(levelname)s: %(message)s")
gen_log = logging.getLogger("tornado.general")
gen_log.setLevel(logging.INFO)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(add_help=False)
    # type=int: without it, values given on the command line arrive as
    # strings, which breaks the '%d' formatting and server.bind() below.
    parser.add_argument('--port', '-p', help="Define the main listening port", default=5000, type=int)
    parser.add_argument('--subport', '-sp', help="Define the python element port", default=5001, type=int)
    parser.add_argument('--bind', '-b', help="Define the domain to bind to", default='localhost')
    options, argv = parser.parse_known_args(sys.argv)
    # command line used to (re)start the python element child process
    child_process = [
        '%s' % (sys.executable), 'start.py', 'tornado:start',
        # Debug parameters
        '--verbose', '-d',
        # Start only one child, otherwise the Subprocess module will not be
        # able to properly kill sub children processes.
        # There is no need to have more than one ...
        '-np', '1',
        # The subprocess listens on --subport, the proxy itself on --port
        '-p', str(options.subport),
        # The bind parameter is used to define the host used to render absolute urls
        '--bind', '%s:%d' % (options.bind, options.port)
    ]
    print("Starting HTTP proxy on port %d" % options.port)
    print(" > %s" % " ".join(child_process))
    state = ProxyState(child_process)
    server = ProxyTCPServer(options.subport)
    server.bind(options.port)
    # this proxy should not be used in production, so keep it simple with only one process
    server.start(1)
    # restart the child process whenever a watched file changes
    w = FilesWatcher([
        os.path.dirname(os.path.abspath(__file__)),
        # add more paths to watch here
    ])
    w.add_reload_hook(state.restart)
    w.start()
    state.start()
    ioloop = tornado.ioloop.IOLoop.instance()
ioloop.start() | apache-2.0 |
carloscanova/elephant | elephant/current_source_density.py | 3 | 13665 | # -*- coding: utf-8 -*-
"""'Current Source Density analysis (CSD) is a class of methods of analysis of
extracellular electric potentials recorded at multiple sites leading to
estimates of current sources generating the measured potentials. It is usually
applied to low-frequency part of the potential (called the Local Field
Potential, LFP) and to simultaneous recordings or to recordings taken with
fixed time reference to the onset of specific stimulus (Evoked Potentials)'
(Definition by Prof.Daniel K. Wójcik for Encyclopedia of Computational
Neuroscience)
CSD is also called as Source Localization or Source Imaging in the EEG circles.
Here are CSD methods for different types of electrode configurations.
1D - laminar probe like electrodes.
2D - Microelectrode Array like
3D - UtahArray or multiple laminar probes.
The following methods have been implemented so far
1D - StandardCSD, DeltaiCSD, SplineiCSD, StepiCSD, KCSD1D
2D - KCSD2D, MoIKCSD (Saline layer on top of slice)
3D - KCSD3D
Each of these methods listed have some advantages. The KCSD methods for
instance can handle broken or irregular electrode configurations electrode
Keywords: LFP; CSD; Multielectrode; Laminar electrode; Barrel cortex
Citation Policy: See ./current_source_density_src/README.md
Contributors to this current source density estimation module are:
Chaitanya Chintaluri(CC), Espen Hagen(EH) and Michał Czerwinski(MC).
EH implemented the iCSD methods and StandardCSD
CC implemented the kCSD methods, kCSD1D(MC and CC)
CC and EH developed the interface to elephant.
"""
from __future__ import division
import neo
import quantities as pq
import numpy as np
from scipy import io
from scipy.integrate import simps
from elephant.current_source_density_src import KCSD
from elephant.current_source_density_src import icsd
import elephant.current_source_density_src.utility_functions as utils
# register extra unit definitions used by the CSD estimators
utils.patch_quantities()
# estimator names available per electrode geometry (1D/2D/3D)
available_1d = ['StandardCSD', 'DeltaiCSD', 'StepiCSD', 'SplineiCSD', 'KCSD1D']
available_2d = ['KCSD2D', 'MoIKCSD']
available_3d = ['KCSD3D']
# kernel-CSD implementations dispatched through the KCSD module
kernel_methods = ['KCSD1D', 'KCSD2D', 'KCSD3D', 'MoIKCSD']
# methods provided by the icsd module; StandardCSD shares its code path
icsd_methods = ['DeltaiCSD', 'StepiCSD', 'SplineiCSD']
py_iCSD_toolbox = ['StandardCSD'] + icsd_methods
def estimate_csd(lfp, coords=None, method=None,
                 process_estimate=True, **kwargs):
    """
    Function call to compute the current source density (CSD) from
    extracellular potential recordings (local-field potentials - LFP) using
    laminar electrodes or multi-contact electrodes with 2D or 3D geometries.
    Parameters
    ----------
    lfp : neo.AnalogSignal
        positions of electrodes can be added as neo.RecordingChannel
        coordinate or sent externally as a func argument (See coords)
    coords : [Optional] corresponding spatial coordinates of the electrodes
        Defaults to None
        Otherwise looks for RecordingChannels coordinate
    method : string
        Pick a method corresponding to the setup, in this implementation
        For Laminar probe style (1D), use 'KCSD1D' or 'StandardCSD',
         or 'DeltaiCSD' or 'StepiCSD' or 'SplineiCSD'
        For MEA probe style (2D), use 'KCSD2D', or 'MoIKCSD'
        For array of laminar probes (3D), use 'KCSD3D'
        Defaults to None
    process_estimate : bool
        In the py_iCSD_toolbox this corresponds to the filter_csd -
        the parameters are passed as kwargs here ie., f_type and f_order
        In the kcsd methods this corresponds to cross_validate -
        the parameters are passed as kwargs here ie., lambdas and Rs
        Defaults to True
    kwargs : parameters to each method
        The parameters corresponding to the method chosen
        See the documentation of the individual method
        Default is {} - picks the best parameters
    Returns
    -------
    Estimated CSD
       neo.AnalogSignal Object
       annotated with the spatial coordinates
    Raises
    ------
    AttributeError
        No units specified for electrode spatial coordinates
    ValueError
        Invalid function arguments, wrong method name, or
        mismatching coordinates
    TypeError
        Invalid cv_param argument passed
    """
    if not isinstance(lfp, neo.AnalogSignal):
        # message fixed: the check requires a single neo.AnalogSignal
        raise TypeError('Parameter `lfp` must be a neo.AnalogSignal object')
    if coords is None:
        coords = lfp.channel_index.coordinates
    else:
        # rescale all given coordinates to a common unit (mm)
        scaled_coords = []
        for coord in coords:
            try:
                scaled_coords.append(coord.rescale(pq.mm))
            except AttributeError:
                raise AttributeError('No units given for electrode spatial '
                                     'coordinates')
        coords = scaled_coords
    if method is None:
        raise ValueError('Must specify a method of CSD implementation')
    if len(coords) != len(lfp):
        raise ValueError('Number of signals and coords is not same')
    for ii in coords:  # CHECK for Dimensionality of electrodes
        if len(ii) > 3:
            raise ValueError('Invalid number of coordinate positions')
    dim = len(coords[0])  # TODO : Generic co-ordinates!
    if dim == 1 and (method not in available_1d):
        raise ValueError('Invalid method, Available options are:',
                         available_1d)
    if dim == 2 and (method not in available_2d):
        raise ValueError('Invalid method, Available options are:',
                         available_2d)
    if dim == 3 and (method not in available_3d):
        raise ValueError('Invalid method, Available options are:',
                         available_3d)
    if method in kernel_methods:
        # build a (channels x samples) array in mV for the KCSD classes
        input_array = np.zeros((len(lfp), lfp[0].magnitude.shape[0]))
        for ii, jj in enumerate(lfp):
            input_array[ii, :] = jj.rescale(pq.mV).magnitude
        kernel_method = getattr(KCSD, method)  # fetch the class 'KCSD1D'
        lambdas = kwargs.pop('lambdas', None)
        Rs = kwargs.pop('Rs', None)
        k = kernel_method(np.array(coords), input_array, **kwargs)
        if process_estimate:
            # pick regularization parameters via cross-validation
            k.cross_validate(lambdas, Rs)
        estm_csd = k.values()
        # move the time axis first so the result maps onto an AnalogSignal
        estm_csd = np.rollaxis(estm_csd, -1, 0)
        output = neo.AnalogSignal(estm_csd * pq.uA / pq.mm**3,
                                  t_start=lfp.t_start,
                                  sampling_rate=lfp.sampling_rate)
        # annotate the estimation grid coordinates per dimensionality
        if dim == 1:
            output.annotate(x_coords=k.estm_x)
        elif dim == 2:
            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y)
        elif dim == 3:
            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y,
                            z_coords=k.estm_z)
    elif method in py_iCSD_toolbox:
        coords = np.array(coords) * coords[0].units
        if method in icsd_methods:
            try:
                coords = coords.rescale(kwargs['diam'].units)
            except KeyError:  # Then why specify as a default in icsd?
                # All iCSD methods explicitly assume a source
                # diameter in contrast to the stdCSD that
                # implicitly assume infinite source radius
                raise ValueError("Parameter diam must be specified for iCSD \
                    methods: {}".format(", ".join(icsd_methods)))
        if 'f_type' in kwargs:
            # use != instead of `is not`: identity comparison with a string
            # literal is wrong for non-interned strings (and a SyntaxWarning
            # in modern CPython)
            # NOTE(review): kwargs['f_order'] raises KeyError when f_type is
            # given without f_order — confirm whether that is intended
            if (kwargs['f_type'] != 'identity') and \
                    (kwargs['f_order'] is None):
                raise ValueError("The order of {} filter must be \
                    specified".format(kwargs['f_type']))
        lfp = neo.AnalogSignal(np.asarray(lfp).T, units=lfp.units,
                               sampling_rate=lfp.sampling_rate)
        csd_method = getattr(icsd, method)  # fetch class from icsd.py file
        csd_estimator = csd_method(lfp=lfp.magnitude.T * lfp.units,
                                   coord_electrode=coords.flatten(),
                                   **kwargs)
        csd_pqarr = csd_estimator.get_csd()
        if process_estimate:
            # smooth the raw estimate with the configured spatial filter
            csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)
            output = neo.AnalogSignal(csd_pqarr_filtered.T,
                                      t_start=lfp.t_start,
                                      sampling_rate=lfp.sampling_rate)
        else:
            output = neo.AnalogSignal(csd_pqarr.T, t_start=lfp.t_start,
                                      sampling_rate=lfp.sampling_rate)
        output.annotate(x_coords=coords)
    return output
def generate_lfp(csd_profile, ele_xx, ele_yy=None, ele_zz=None,
                 xlims=[0., 1.], ylims=[0., 1.], zlims=[0., 1.], res=50):
    # NOTE(review): mutable default arguments (the lims lists) are shared
    # across calls; safe only as long as callers never mutate them.
    """Forward modelling to get the potentials for testing CSD methods.

    Parameters
    ----------
    csd_profile : function that computes the true CSD profile
        Available options are (see ./csd/utility_functions.py)
        1D : gauss_1d_dipole
        2D : large_source_2D and small_source_2D
        3D : gauss_3d_dipole
    ele_xx : np.array
        Positions of the x coordinates of the electrodes
    ele_yy : np.array
        Positions of the y coordinates of the electrodes
        Defaults to None, use in 2D or 3D cases only
    ele_zz : np.array
        Positions of the z coordinates of the electrodes
        Defaults to None, use in 3D case only
    xlims : [start, end]
        The starting spatial coordinate and the ending for integration
        Defaults to [0., 1.]
    ylims : [start, end]
        The starting spatial coordinate and the ending for integration
        Defaults to [0., 1.], use only in 2D and 3D case
    zlims : [start, end]
        The starting spatial coordinate and the ending for integration
        Defaults to [0., 1.], use only in 3D case
    res : int
        The resolution of the integration
        Defaults to 50

    Returns
    -------
    LFP : neo.AnalogSignal
        The potentials created by the csd profile at the electrode positions.
        The electrode positions are attached as the channel's coordinates.
    """
    def integrate_1D(x0, csd_x, csd, h):
        # Line-source kernel; integrates the CSD contribution at electrode x0
        # over the source grid csd_x (Simpson's rule).
        m = np.sqrt((csd_x - x0)**2 + h**2) - abs(csd_x - x0)
        y = csd * m
        I = simps(y, csd_x)
        return I
    def integrate_2D(x, y, xlin, ylin, csd, h, X, Y):
        # Double integral over the 2D source grid; inner pass along y for
        # each column, outer pass along x.
        Ny = ylin.shape[0]
        m = np.sqrt((x - X)**2 + (y - Y)**2)
        # Clamp distances to avoid division blow-up at the electrode itself.
        m[m < 0.0000001] = 0.0000001
        y = np.arcsinh(2 * h / m) * csd
        I = np.zeros(Ny)
        for i in range(Ny):
            I[i] = simps(y[:, i], ylin)
        F = simps(I, xlin)
        return F
    def integrate_3D(x, y, z, xlim, ylim, zlim, csd, xlin, ylin, zlin,
                     X, Y, Z):
        # Triple integral of csd / distance over the 3D source grid.
        # NOTE(review): xlim/ylim/zlim parameters are unused here, and the
        # parameter ``z`` is rebound to the integrand below.
        Nz = zlin.shape[0]
        Ny = ylin.shape[0]
        m = np.sqrt((x - X)**2 + (y - Y)**2 + (z - Z)**2)
        m[m < 0.0000001] = 0.0000001
        z = csd / m
        Iy = np.zeros(Ny)
        for j in range(Ny):
            Iz = np.zeros(Nz)
            for i in range(Nz):
                Iz[i] = simps(z[:, j, i], zlin)
            Iy[j] = simps(Iz, ylin)
        F = simps(Iy, xlin)
        return F
    # Infer dimensionality from which electrode coordinate arrays were given.
    dim = 1
    if ele_zz is not None:
        dim = 3
    elif ele_yy is not None:
        dim = 2
    x = np.linspace(xlims[0], xlims[1], res)
    if dim >= 2:
        y = np.linspace(ylims[0], ylims[1], res)
    if dim == 3:
        z = np.linspace(zlims[0], zlims[1], res)
    sigma = 1.0  # extracellular conductivity (arbitrary units)
    h = 50.      # slice thickness / line-source parameter
    pots = np.zeros(len(ele_xx))
    if dim == 1:
        chrg_x = np.linspace(xlims[0], xlims[1], res)
        csd = csd_profile(chrg_x)
        for ii in range(len(ele_xx)):
            pots[ii] = integrate_1D(ele_xx[ii], chrg_x, csd, h)
        pots /= 2. * sigma  # eq.: 26 from Potworowski et al
        ele_pos = ele_xx
    elif dim == 2:
        chrg_x, chrg_y = np.mgrid[xlims[0]:xlims[1]:np.complex(0, res),
                                  ylims[0]:ylims[1]:np.complex(0, res)]
        csd = csd_profile(chrg_x, chrg_y)
        for ii in range(len(ele_xx)):
            pots[ii] = integrate_2D(ele_xx[ii], ele_yy[ii],
                                    x, y, csd, h, chrg_x, chrg_y)
        pots /= 2 * np.pi * sigma
        ele_pos = np.vstack((ele_xx, ele_yy)).T
    elif dim == 3:
        chrg_x, chrg_y, chrg_z = np.mgrid[xlims[0]:xlims[1]:np.complex(0, res),
                                          ylims[0]:ylims[1]:np.complex(0, res),
                                          zlims[0]:zlims[1]:np.complex(0, res)]
        csd = csd_profile(chrg_x, chrg_y, chrg_z)
        xlin = chrg_x[:, 0, 0]
        ylin = chrg_y[0, :, 0]
        zlin = chrg_z[0, 0, :]
        for ii in range(len(ele_xx)):
            pots[ii] = integrate_3D(ele_xx[ii], ele_yy[ii], ele_zz[ii],
                                    xlims, ylims, zlims, csd,
                                    xlin, ylin, zlin,
                                    chrg_x, chrg_y, chrg_z)
        pots /= 4 * np.pi * sigma
        ele_pos = np.vstack((ele_xx, ele_yy, ele_zz)).T
    # Attach physical units and package the result as one AnalogSignal with
    # the electrode coordinates recorded on a neo ChannelIndex.
    pots = np.reshape(pots, (-1, 1)) * pq.mV
    ele_pos = ele_pos * pq.mm
    lfp = []
    ch = neo.ChannelIndex(index=range(len(pots)))
    for ii in range(len(pots)):
        lfp.append(pots[ii])
    asig = neo.AnalogSignal(lfp, sampling_rate=pq.kHz, units='mV')
    ch.coordinates = ele_pos
    ch.analogsignals.append(asig)
    ch.create_relationship()
    return asig
| bsd-3-clause |
jpablio/Directrices-JPV | tools/migrate_branch.py | 2 | 14379 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script helps to create a new branch for a new Odoo version from the
another existing branch, making the needed changes on contents.
Installation
============
For using this utility, you need to install these dependencies:
* github3.py library for handling Github calls. To install it, use:
`sudo pip install github3.py`.
Configuration
=============
You must have a file called oca.cfg on the same folder of the script for
storing credentials parameters. You can generate an skeleton config running
this script for a first time.
Usage
=====
oca-migrate-branch [-h] [-p PROJECTS [PROJECTS ...]] [-e EMAIL]
[-t TARGET_ORG]
source target
positional arguments:
source Source branch (existing)
target Target branch (to create)
optional arguments:
-h, --help show this help message and exit
-p PROJECTS [PROJECTS ...], --projects PROJECTS [PROJECTS ...]
List of specific projects to migrate
-e EMAIL, --email EMAIL
Provides an email address used to commit on GitHub if
the one associated to the GitHub account is not public
-t TARGET_ORG, --target-org TARGET_ORG
By default, the GitHub organization used is OCA. This
arg lets you provide an alternative organization
This script will perform the following operations for each project:
* Create a branch starting from branch 'source' with 'target' as name. If it
already exists, then the project is skipped.
* Mark all modules as installable = False.
* Replace in README.md all references to source branch by the target branch.
* Replace in .travis.yml all references to source branch by the target branch.
* Remove __unported__ dir.
* Make target branch the default branch in the repository.
* Create a milestone (if not exist) for new version.
* Create an issue enumerating the modules to migrate, with the milestone
assigned, and with the labels "help wanted" and "work in progress" (if
exist).
Known issues / Roadmap
======================
* Modules without installable key in the manifest are filled with this key,
but the indentation for this added line is assumed to be 4 spaces, and the
closing brace indentation is 0.
* Issue enumerating the module list contains a list to a Wiki page that should
be formatted this way:
https://github.com/OCA/maintainer-tools/wiki/Migration-to-version-{branch}
* Make the created branch protected (no support yet from github3 library).
Credits
=======
Contributors
------------
* Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
Maintainer
----------
.. image:: https://odoo-community.org/logo.png
:alt: Odoo Community Association
:target: https://odoo-community.org
This module is maintained by the OCA.
OCA, or the Odoo Community Association, is a nonprofit organization whose
mission is to support the collaborative development of Odoo features and
promote its widespread use.
To contribute to this module, please visit http://odoo-community.org.
"""
import argparse
import re
from . import github_login
from . import oca_projects
from .config import read_config
# Odoo addon manifest filenames: __openerp__.py (<= 9.0), __manifest__.py (>= 10.0).
MANIFESTS = ('__openerp__.py', '__manifest__.py')
class BranchMigrator(object):
    """Create and prepare a new Odoo-version branch on a set of GitHub repos.

    All changes are made remotely through the GitHub git-data API (blobs,
    trees, commits, refs) via github3.py -- nothing is cloned locally.
    """

    def __init__(self, source, target, target_org=None, email=None):
        """Connect to GitHub and store branch/organization settings.

        :param source: name of the existing branch to start from
        :param target: name of the branch to create
        :param target_org: GitHub organization (defaults to 'OCA')
        :param email: commit e-mail, required if the GitHub profile's
            e-mail is not public
        :raises Exception: when no usable commit e-mail is available
        """
        # Read config
        config = read_config()
        self.gh_token = config.get('GitHub', 'token')
        # Connect to GitHub
        self.github = github_login.login()
        gh_user = self.github.user()
        if not gh_user.email and not email:
            raise Exception(
                'Email required to commit to github. Please provide one on '
                'the command line or make the one of your github profile '
                'public.')
        self.gh_credentials = {'name': gh_user.name or str(gh_user),
                               'email': gh_user.email or email}
        self.gh_source_branch = source
        self.gh_target_branch = target
        self.gh_org = target_org or 'OCA'

    def _replace_content(self, repo, path, replace_list, gh_file=None):
        """Apply regex substitutions to a repo file and return a tree entry.

        :param replace_list: iterable of (pattern, replacement) pairs,
            applied with re.DOTALL so patterns can span lines
        :param gh_file: optional already-fetched contents object; fetched
            from the target branch when not given
        :return: dict describing the new blob, ready for ``create_tree``
        """
        if not gh_file:
            # Re-read path for retrieving content
            gh_file = repo.contents(path, self.gh_target_branch)
        content = gh_file.decoded
        for replace in replace_list:
            content = re.sub(replace[0], replace[1], content, flags=re.DOTALL)
        new_file_blob = repo.create_blob(content, encoding='utf-8')
        return {
            'path': path,
            'mode': '100644',
            'type': 'blob',
            'sha': new_file_blob
        }

    def _create_commit(self, repo, tree_data, message, use_sha=True):
        """Create a GitHub commit on the target branch.

        :param repo: github3 repo reference
        :param tree_data: list with dictionary for the entries of the commit
        :param message: message to use in the commit
        :param use_sha: if False, the tree_data structure will be considered
            the full one, deleting the rest of the entries not listed in
            this one.
        :return: the created commit, or None when there is nothing to commit
        """
        if not tree_data:
            return
        branch = repo.branch(self.gh_target_branch)
        # Basing the tree on the branch tip keeps unlisted entries;
        # a None base replaces the whole tree.
        tree_sha = branch.commit.commit.tree.sha if use_sha else None
        tree = repo.create_tree(tree_data, tree_sha)
        commit = repo.create_commit(
            message=message, tree=tree.sha, parents=[branch.commit.sha],
            author=self.gh_credentials, committer=self.gh_credentials)
        # Move the branch ref forward to the new commit.
        repo.ref('heads/{}'.format(branch.name)).update(commit.sha)
        return commit

    def _mark_modules_uninstallable(self, repo, root_contents):
        """Make uninstallable the existing modules in the repo.

        :return: list of paths of the detected addon modules
        """
        tree_data = []
        modules = []
        for root_content in root_contents.values():
            if root_content.type != 'dir':
                continue
            module_contents = repo.contents(
                root_content.path, self.gh_target_branch)
            for manifest_file in MANIFESTS:
                manifest = module_contents.get(manifest_file)
                if manifest:
                    break
            if manifest:
                modules.append(root_content.path)
                # Re-read path for retrieving content
                gh_file = repo.contents(manifest.path, self.gh_target_branch)
                # NOTE(review): eval() of the manifest assumes repo content
                # is trusted (it is OCA's own code).
                manifest_dict = eval(gh_file.decoded)
                if manifest_dict.get('installable') is None:
                    # No installable key: insert one before the closing brace
                    # (assumes 4-space indent, see module docstring).
                    src = ",?\s*}"
                    dest = ",\n    'installable': False,\n}"
                else:
                    src = '["\']installable["\']: *True'
                    dest = "'installable': False"
                tree_data.append(self._replace_content(
                    repo, manifest.path, [(src, dest)], gh_file=gh_file))
        self._create_commit(
            repo, tree_data, "[MIG] Make modules uninstallable")
        return modules

    def _rename_manifests(self, repo, root_contents):
        """ Rename __openerp__.py to __manifest__.py as per Odoo 10.0 API """
        branch = repo.branch(self.gh_target_branch)
        tree = repo.tree(branch.commit.sha).recurse().tree
        tree_data = []
        # Rebuild the full tree, swapping manifest filenames where found.
        for entry in tree:
            if entry.type == 'tree':
                continue
            path = entry.path
            if path.endswith('__openerp__.py'):
                path = path.replace('__openerp__.py', '__manifest__.py')
            tree_data.append({
                'path': path,
                'sha': entry.sha,
                'type': entry.type,
                'mode': entry.mode,
            })
        self._create_commit(
            repo, tree_data, "[MIG] Rename manifest files", use_sha=False)

    def _delete_setup_dirs(self, repo, root_contents, modules):
        """Remove the per-module ``setup/<module>`` directories."""
        if 'setup' not in root_contents:
            return
        exclude_paths = ['setup/%s' % module for module in modules]
        branch = repo.branch(self.gh_target_branch)
        tree = repo.tree(branch.commit.sha).recurse().tree
        tree_data = []
        # Rebuild the full tree keeping only entries outside excluded paths.
        for entry in tree:
            if entry.type == 'tree':
                continue
            for path in exclude_paths:
                if entry.path == path or entry.path.startswith(path + '/'):
                    break
            else:
                # for/else: runs only when no excluded path matched.
                tree_data.append({
                    'path': entry.path,
                    'sha': entry.sha,
                    'type': entry.type,
                    'mode': entry.mode,
                })
        self._create_commit(
            repo, tree_data, "[MIG] Remove setup module directories",
            use_sha=False)

    def _delete_unported_dir(self, repo, root_contents):
        """Drop the legacy ``__unported__`` directory if present."""
        if '__unported__' not in root_contents.keys():
            return
        branch = repo.branch(self.gh_target_branch)
        tree = repo.tree(branch.commit.sha).tree
        tree_data = []
        # Reconstruct tree without __unported__ entry
        for entry in tree:
            if '__unported__' not in entry.path:
                tree_data.append({
                    'path': entry.path,
                    'sha': entry.sha,
                    'type': entry.type,
                    'mode': entry.mode,
                })
        self._create_commit(
            repo, tree_data, "[MIG] Remove __unported__ dir", use_sha=False)

    def _update_metafiles(self, repo, root_contents):
        """Update metafiles (README.md, .travis.yml...) for pointing to
        the new branch.
        """
        tree_data = []
        # Escape dots so the source version matches literally in regexes.
        source_string = self.gh_source_branch.replace('.', '\.')
        target_string = self.gh_target_branch
        # Dashed variants cover badge/URL slugs (e.g. "8-0" in runbot links).
        source_string_dash = self.gh_source_branch.replace('.', '-')
        target_string_dash = self.gh_target_branch.replace('.', '-')
        if root_contents.get('README.md'):
            tree_data.append(self._replace_content(
                repo, 'README.md',
                [(source_string, target_string),
                 (source_string_dash, target_string_dash),
                 # Empty the auto-generated addons table between the markers.
                 ("\[//]: # \(addons\).*\[//]: # \(end addons\)",
                  "[//]: # (addons)\n[//]: # (end addons)")]))
        if root_contents.get('.travis.yml'):
            tree_data.append(self._replace_content(
                repo, '.travis.yml',
                [(source_string, target_string),
                 (source_string_dash, target_string_dash)]))
        self._create_commit(
            repo, tree_data, "[MIG] Update metafiles")

    def _make_default_branch(self, repo):
        """Set the newly created branch as the repository default."""
        repo.edit(repo.name, default_branch=self.gh_target_branch)

    def _create_branch_milestone(self, repo):
        """Return the milestone named after the target branch, creating it
        if it does not exist yet."""
        for milestone in repo.iter_milestones():
            if milestone.title == self.gh_target_branch:
                return milestone
        return repo.create_milestone(self.gh_target_branch)

    def _create_migration_issue(self, repo, modules, milestone):
        """Create (or return the existing) migration tracking issue listing
        all modules to migrate as a task checklist."""
        title = "Migration to version %s" % self.gh_target_branch
        # Check first if it already exists
        for issue in repo.iter_issues(milestone=milestone.number):
            if issue.title == title:
                return issue
        body = ("# Todo\n\nhttps://github.com/OCA/maintainer-tools/wiki/"
                "Migration-to-version-%s\n\n# Modules to migrate\n\n" %
                self.gh_target_branch)
        body += "\n".join(["- [ ] %s" % x for x in modules])
        # Make sure labels exists
        labels = []
        for label in repo.iter_labels():
            if label.name in ['help wanted', 'work in progress']:
                labels.append(label.name)
        return repo.create_issue(
            title=title, body=body, milestone=milestone.number, labels=labels)

    def _migrate_project(self, project):
        """Run the full migration pipeline on a single repository.

        Skips the repo when the source branch is missing or the target
        branch already exists.
        """
        print "Migrating project %s/%s" % (self.gh_org, project)
        # Create new branch
        repo = self.github.repository(self.gh_org, project)
        source_branch = repo.branch(self.gh_source_branch)
        if not source_branch:
            print "Source branch non existing. Skipping..."
            return
        branch = repo.branch(self.gh_target_branch)
        if branch:
            print "Branch already exists. Skipping..."
            return
        repo.create_ref(
            'refs/heads/%s' % self.gh_target_branch,
            source_branch.commit.sha)
        root_contents = repo.contents('', self.gh_target_branch)
        modules = self._mark_modules_uninstallable(repo, root_contents)
        if self.gh_target_branch == '10.0':
            # 10.0 introduced the __manifest__.py naming.
            self._rename_manifests(repo, root_contents)
        self._delete_unported_dir(repo, root_contents)
        self._delete_setup_dirs(repo, root_contents, modules)
        self._update_metafiles(repo, root_contents)
        self._make_default_branch(repo)
        milestone = self._create_branch_milestone(repo)
        self._create_migration_issue(repo, sorted(modules), milestone)

    def do_migration(self, projects=None):
        """Migrate the given projects, or every OCA repository when none
        are specified."""
        if not projects:
            projects = oca_projects.get_repositories()
        for project in projects:
            self._migrate_project(project)
def get_parser():
    """Return the argparse parser used by the ``main`` entry point.

    Positional arguments are the existing source branch and the target
    branch to create; the options select specific projects, the commit
    e-mail and an alternative GitHub organization.
    """
    description = ('Migrate one OCA branch from one version to another, '
                   'applying the needed transformations')
    parser = argparse.ArgumentParser(description=description, add_help=True)
    parser.add_argument('source', help="Source branch (existing)")
    parser.add_argument('target', help="Target branch (to create)")
    parser.add_argument(
        '-p', '--projects', dest='projects', nargs='+', default=[],
        help='List of specific projects to migrate')
    parser.add_argument(
        '-e', '--email', dest='email',
        help=('Provides an email address used to commit on GitHub if the one '
              'associated to the GitHub account is not public'))
    parser.add_argument(
        '-t', '--target-org', dest='target_org',
        help=('By default, the GitHub organization used is OCA. This arg lets '
              'you provide an alternative organization'))
    return parser
def main():
    """Command-line entry point: parse the arguments and run the migration."""
    options = get_parser().parse_args()
    branch_migrator = BranchMigrator(
        source=options.source, target=options.target,
        target_org=options.target_org, email=options.email)
    branch_migrator.do_migration(projects=options.projects)


if __name__ == '__main__':
    main()
| agpl-3.0 |
patcon/open-cabinet | venv/lib/python2.7/site-packages/django/contrib/gis/utils/wkt.py | 589 | 1923 | """
Utilities for manipulating Geometry WKT.
"""
from django.utils import six
def precision_wkt(geom, prec):
    """
    Return the WKT text of ``geom`` with coordinates rendered at the
    given precision.

    ``prec`` may be an integer number of decimal places, or a valid
    Python %-format string (e.g. ``'%20.7f'``) applied to each
    coordinate. Only the first two coordinate dimensions are emitted
    (3D geometries are not yet supported). Raises TypeError for any
    other ``prec`` type or an unknown geometry type.
    """
    if isinstance(prec, int):
        num_fmt = '%.' + str(prec) + 'f'
    elif isinstance(prec, six.string_types):
        num_fmt = prec
    else:
        raise TypeError

    # TODO: Support 3D geometries.
    coord_fmt = num_fmt + ' ' + num_fmt

    def _coords(coords):
        # Format a sequence of coordinate tuples, comma separated.
        return ','.join(coord_fmt % pt[:2] for pt in coords)

    def _rings(poly):
        # Format each ring/line of a polygon-like geometry in parentheses.
        return ','.join('(%s)' % _coords(ring) for ring in poly)

    def _geom(g):
        gtype = str(g.geom_type).upper()
        if gtype == 'POINT':
            body = _coords((g.coords,))
        elif gtype in ('LINESTRING', 'LINEARRING'):
            body = _coords(g.coords)
        elif gtype in ('POLYGON', 'MULTILINESTRING'):
            body = _rings(g)
        elif gtype == 'MULTIPOINT':
            body = _coords(g.coords)
        elif gtype == 'MULTIPOLYGON':
            body = ','.join('(%s)' % _rings(p) for p in g)
        elif gtype == 'GEOMETRYCOLLECTION':
            body = ','.join(_geom(child) for child in g)
        else:
            raise TypeError
        return '%s(%s)' % (gtype, body)

    return _geom(geom)
| mit |
SamTube405/GCom | cassandra/apache-cassandra-2.0.1/pylib/cqlshlib/wcwidth.py | 113 | 16049 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# adapted from http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# -thepaul
# This is an implementation of wcwidth() and wcswidth() (defined in
# IEEE Std 1002.1-2001) for Unicode.
#
# http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
# http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
#
# In fixed-width output devices, Latin characters all occupy a single
# "cell" position of equal width, whereas ideographic CJK characters
# occupy two such cells. Interoperability between terminal-line
# applications and (teletype-style) character terminals using the
# UTF-8 encoding requires agreement on which character should advance
# the cursor by how many cell positions. No established formal
# standards exist at present on which Unicode character shall occupy
# how many cell positions on character terminals. These routines are
# a first attempt of defining such behavior based on simple rules
# applied to data provided by the Unicode Consortium.
#
# For some graphical characters, the Unicode standard explicitly
# defines a character-cell width via the definition of the East Asian
# FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
# In all these cases, there is no ambiguity about which width a
# terminal shall use. For characters in the East Asian Ambiguous (A)
# class, the width choice depends purely on a preference of backward
# compatibility with either historic CJK or Western practice.
# Choosing single-width for these characters is easy to justify as
# the appropriate long-term solution, as the CJK practice of
# displaying these characters as double-width comes from historic
# implementation simplicity (8-bit encoded characters were displayed
# single-width and 16-bit ones double-width, even for Greek,
# Cyrillic, etc.) and not any typographic considerations.
#
# Much less clear is the choice of width for the Not East Asian
# (Neutral) class. Existing practice does not dictate a width for any
# of these characters. It would nevertheless make sense
# typographically to allocate two character cells to characters such
# as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
# represented adequately with a single-width glyph. The following
# routines at present merely assign a single-cell width to all
# neutral characters, in the interest of simplicity. This is not
# entirely satisfactory and should be reconsidered before
# establishing a formal standard in this area. At the moment, the
# decision which Not East Asian (Neutral) characters should be
# represented by double-width glyphs cannot yet be answered by
# applying a simple rule from the Unicode database content. Setting
# up a proper standard for the behavior of UTF-8 character terminals
# will require a careful analysis not only of each Unicode character,
# but also of each presentation form, something the author of these
# routines has avoided to do so far.
#
# http://www.unicode.org/unicode/reports/tr11/
#
# Markus Kuhn -- 2007-05-26 (Unicode 5.0)
#
# Permission to use, copy, modify, and distribute this software
# for any purpose and without fee is hereby granted. The author
# disclaims all warranties with regard to this software.
#
# Latest C version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# auxiliary function for binary search in interval table
def bisearch(ucs, table):
    """Binary search for *ucs* in *table* of non-overlapping intervals.

    :param ucs: integer code point to look up
    :param table: sorted tuple of (first, last) inclusive ranges
    :return: 1 if ucs falls inside any interval, 0 otherwise
    """
    # Renamed locals: the original shadowed the builtins min/max.
    lo = 0
    hi = len(table) - 1
    # Fast reject for values outside the table's overall span.
    if ucs < table[0][0] or ucs > table[hi][1]:
        return 0
    while hi >= lo:
        # Floor division: `/` here would produce a float index on
        # Python 3; `//` behaves identically on Python 2.
        mid = (lo + hi) // 2
        if ucs > table[mid][1]:
            lo = mid + 1
        elif ucs < table[mid][0]:
            hi = mid - 1
        else:
            return 1
    return 0
# The following two functions define the column width of an ISO 10646
# character as follows:
#
# - The null character (U+0000) has a column width of 0.
#
# - Other C0/C1 control characters and DEL will lead to a return
# value of -1.
#
# - Non-spacing and enclosing combining characters (general
# category code Mn or Me in the Unicode database) have a
# column width of 0.
#
# - SOFT HYPHEN (U+00AD) has a column width of 1.
#
# - Other format characters (general category code Cf in the Unicode
# database) and ZERO WIDTH SPACE (U+200B) have a column width of 0.
#
# - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
# have a column width of 0.
#
# - Spacing characters in the East Asian Wide (W) or East Asian
# Full-width (F) category as defined in Unicode Technical
# Report #11 have a column width of 2.
#
# - All remaining characters (including all printable
# ISO 8859-1 and WGL4 characters, Unicode control characters,
# etc.) have a column width of 1.
#
# This implementation assumes that wchar_t characters are encoded
# in ISO 10646.
# sorted list of non-overlapping intervals of non-spacing characters
# generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
combining = (
( 0x0300, 0x036F ), ( 0x0483, 0x0486 ), ( 0x0488, 0x0489 ),
( 0x0591, 0x05BD ), ( 0x05BF, 0x05BF ), ( 0x05C1, 0x05C2 ),
( 0x05C4, 0x05C5 ), ( 0x05C7, 0x05C7 ), ( 0x0600, 0x0603 ),
( 0x0610, 0x0615 ), ( 0x064B, 0x065E ), ( 0x0670, 0x0670 ),
( 0x06D6, 0x06E4 ), ( 0x06E7, 0x06E8 ), ( 0x06EA, 0x06ED ),
( 0x070F, 0x070F ), ( 0x0711, 0x0711 ), ( 0x0730, 0x074A ),
( 0x07A6, 0x07B0 ), ( 0x07EB, 0x07F3 ), ( 0x0901, 0x0902 ),
( 0x093C, 0x093C ), ( 0x0941, 0x0948 ), ( 0x094D, 0x094D ),
( 0x0951, 0x0954 ), ( 0x0962, 0x0963 ), ( 0x0981, 0x0981 ),
( 0x09BC, 0x09BC ), ( 0x09C1, 0x09C4 ), ( 0x09CD, 0x09CD ),
( 0x09E2, 0x09E3 ), ( 0x0A01, 0x0A02 ), ( 0x0A3C, 0x0A3C ),
( 0x0A41, 0x0A42 ), ( 0x0A47, 0x0A48 ), ( 0x0A4B, 0x0A4D ),
( 0x0A70, 0x0A71 ), ( 0x0A81, 0x0A82 ), ( 0x0ABC, 0x0ABC ),
( 0x0AC1, 0x0AC5 ), ( 0x0AC7, 0x0AC8 ), ( 0x0ACD, 0x0ACD ),
( 0x0AE2, 0x0AE3 ), ( 0x0B01, 0x0B01 ), ( 0x0B3C, 0x0B3C ),
( 0x0B3F, 0x0B3F ), ( 0x0B41, 0x0B43 ), ( 0x0B4D, 0x0B4D ),
( 0x0B56, 0x0B56 ), ( 0x0B82, 0x0B82 ), ( 0x0BC0, 0x0BC0 ),
( 0x0BCD, 0x0BCD ), ( 0x0C3E, 0x0C40 ), ( 0x0C46, 0x0C48 ),
( 0x0C4A, 0x0C4D ), ( 0x0C55, 0x0C56 ), ( 0x0CBC, 0x0CBC ),
( 0x0CBF, 0x0CBF ), ( 0x0CC6, 0x0CC6 ), ( 0x0CCC, 0x0CCD ),
( 0x0CE2, 0x0CE3 ), ( 0x0D41, 0x0D43 ), ( 0x0D4D, 0x0D4D ),
( 0x0DCA, 0x0DCA ), ( 0x0DD2, 0x0DD4 ), ( 0x0DD6, 0x0DD6 ),
( 0x0E31, 0x0E31 ), ( 0x0E34, 0x0E3A ), ( 0x0E47, 0x0E4E ),
( 0x0EB1, 0x0EB1 ), ( 0x0EB4, 0x0EB9 ), ( 0x0EBB, 0x0EBC ),
( 0x0EC8, 0x0ECD ), ( 0x0F18, 0x0F19 ), ( 0x0F35, 0x0F35 ),
( 0x0F37, 0x0F37 ), ( 0x0F39, 0x0F39 ), ( 0x0F71, 0x0F7E ),
( 0x0F80, 0x0F84 ), ( 0x0F86, 0x0F87 ), ( 0x0F90, 0x0F97 ),
( 0x0F99, 0x0FBC ), ( 0x0FC6, 0x0FC6 ), ( 0x102D, 0x1030 ),
( 0x1032, 0x1032 ), ( 0x1036, 0x1037 ), ( 0x1039, 0x1039 ),
( 0x1058, 0x1059 ), ( 0x1160, 0x11FF ), ( 0x135F, 0x135F ),
( 0x1712, 0x1714 ), ( 0x1732, 0x1734 ), ( 0x1752, 0x1753 ),
( 0x1772, 0x1773 ), ( 0x17B4, 0x17B5 ), ( 0x17B7, 0x17BD ),
( 0x17C6, 0x17C6 ), ( 0x17C9, 0x17D3 ), ( 0x17DD, 0x17DD ),
( 0x180B, 0x180D ), ( 0x18A9, 0x18A9 ), ( 0x1920, 0x1922 ),
( 0x1927, 0x1928 ), ( 0x1932, 0x1932 ), ( 0x1939, 0x193B ),
( 0x1A17, 0x1A18 ), ( 0x1B00, 0x1B03 ), ( 0x1B34, 0x1B34 ),
( 0x1B36, 0x1B3A ), ( 0x1B3C, 0x1B3C ), ( 0x1B42, 0x1B42 ),
( 0x1B6B, 0x1B73 ), ( 0x1DC0, 0x1DCA ), ( 0x1DFE, 0x1DFF ),
( 0x200B, 0x200F ), ( 0x202A, 0x202E ), ( 0x2060, 0x2063 ),
( 0x206A, 0x206F ), ( 0x20D0, 0x20EF ), ( 0x302A, 0x302F ),
( 0x3099, 0x309A ), ( 0xA806, 0xA806 ), ( 0xA80B, 0xA80B ),
( 0xA825, 0xA826 ), ( 0xFB1E, 0xFB1E ), ( 0xFE00, 0xFE0F ),
( 0xFE20, 0xFE23 ), ( 0xFEFF, 0xFEFF ), ( 0xFFF9, 0xFFFB ),
( 0x10A01, 0x10A03 ), ( 0x10A05, 0x10A06 ), ( 0x10A0C, 0x10A0F ),
( 0x10A38, 0x10A3A ), ( 0x10A3F, 0x10A3F ), ( 0x1D167, 0x1D169 ),
( 0x1D173, 0x1D182 ), ( 0x1D185, 0x1D18B ), ( 0x1D1AA, 0x1D1AD ),
( 0x1D242, 0x1D244 ), ( 0xE0001, 0xE0001 ), ( 0xE0020, 0xE007F ),
( 0xE0100, 0xE01EF )
)
# sorted list of non-overlapping intervals of East Asian Ambiguous
# characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c"
ambiguous = (
( 0x00A1, 0x00A1 ), ( 0x00A4, 0x00A4 ), ( 0x00A7, 0x00A8 ),
( 0x00AA, 0x00AA ), ( 0x00AE, 0x00AE ), ( 0x00B0, 0x00B4 ),
( 0x00B6, 0x00BA ), ( 0x00BC, 0x00BF ), ( 0x00C6, 0x00C6 ),
( 0x00D0, 0x00D0 ), ( 0x00D7, 0x00D8 ), ( 0x00DE, 0x00E1 ),
( 0x00E6, 0x00E6 ), ( 0x00E8, 0x00EA ), ( 0x00EC, 0x00ED ),
( 0x00F0, 0x00F0 ), ( 0x00F2, 0x00F3 ), ( 0x00F7, 0x00FA ),
( 0x00FC, 0x00FC ), ( 0x00FE, 0x00FE ), ( 0x0101, 0x0101 ),
( 0x0111, 0x0111 ), ( 0x0113, 0x0113 ), ( 0x011B, 0x011B ),
( 0x0126, 0x0127 ), ( 0x012B, 0x012B ), ( 0x0131, 0x0133 ),
( 0x0138, 0x0138 ), ( 0x013F, 0x0142 ), ( 0x0144, 0x0144 ),
( 0x0148, 0x014B ), ( 0x014D, 0x014D ), ( 0x0152, 0x0153 ),
( 0x0166, 0x0167 ), ( 0x016B, 0x016B ), ( 0x01CE, 0x01CE ),
( 0x01D0, 0x01D0 ), ( 0x01D2, 0x01D2 ), ( 0x01D4, 0x01D4 ),
( 0x01D6, 0x01D6 ), ( 0x01D8, 0x01D8 ), ( 0x01DA, 0x01DA ),
( 0x01DC, 0x01DC ), ( 0x0251, 0x0251 ), ( 0x0261, 0x0261 ),
( 0x02C4, 0x02C4 ), ( 0x02C7, 0x02C7 ), ( 0x02C9, 0x02CB ),
( 0x02CD, 0x02CD ), ( 0x02D0, 0x02D0 ), ( 0x02D8, 0x02DB ),
( 0x02DD, 0x02DD ), ( 0x02DF, 0x02DF ), ( 0x0391, 0x03A1 ),
( 0x03A3, 0x03A9 ), ( 0x03B1, 0x03C1 ), ( 0x03C3, 0x03C9 ),
( 0x0401, 0x0401 ), ( 0x0410, 0x044F ), ( 0x0451, 0x0451 ),
( 0x2010, 0x2010 ), ( 0x2013, 0x2016 ), ( 0x2018, 0x2019 ),
( 0x201C, 0x201D ), ( 0x2020, 0x2022 ), ( 0x2024, 0x2027 ),
( 0x2030, 0x2030 ), ( 0x2032, 0x2033 ), ( 0x2035, 0x2035 ),
( 0x203B, 0x203B ), ( 0x203E, 0x203E ), ( 0x2074, 0x2074 ),
( 0x207F, 0x207F ), ( 0x2081, 0x2084 ), ( 0x20AC, 0x20AC ),
( 0x2103, 0x2103 ), ( 0x2105, 0x2105 ), ( 0x2109, 0x2109 ),
( 0x2113, 0x2113 ), ( 0x2116, 0x2116 ), ( 0x2121, 0x2122 ),
( 0x2126, 0x2126 ), ( 0x212B, 0x212B ), ( 0x2153, 0x2154 ),
( 0x215B, 0x215E ), ( 0x2160, 0x216B ), ( 0x2170, 0x2179 ),
( 0x2190, 0x2199 ), ( 0x21B8, 0x21B9 ), ( 0x21D2, 0x21D2 ),
( 0x21D4, 0x21D4 ), ( 0x21E7, 0x21E7 ), ( 0x2200, 0x2200 ),
( 0x2202, 0x2203 ), ( 0x2207, 0x2208 ), ( 0x220B, 0x220B ),
( 0x220F, 0x220F ), ( 0x2211, 0x2211 ), ( 0x2215, 0x2215 ),
( 0x221A, 0x221A ), ( 0x221D, 0x2220 ), ( 0x2223, 0x2223 ),
( 0x2225, 0x2225 ), ( 0x2227, 0x222C ), ( 0x222E, 0x222E ),
( 0x2234, 0x2237 ), ( 0x223C, 0x223D ), ( 0x2248, 0x2248 ),
( 0x224C, 0x224C ), ( 0x2252, 0x2252 ), ( 0x2260, 0x2261 ),
( 0x2264, 0x2267 ), ( 0x226A, 0x226B ), ( 0x226E, 0x226F ),
( 0x2282, 0x2283 ), ( 0x2286, 0x2287 ), ( 0x2295, 0x2295 ),
( 0x2299, 0x2299 ), ( 0x22A5, 0x22A5 ), ( 0x22BF, 0x22BF ),
( 0x2312, 0x2312 ), ( 0x2460, 0x24E9 ), ( 0x24EB, 0x254B ),
( 0x2550, 0x2573 ), ( 0x2580, 0x258F ), ( 0x2592, 0x2595 ),
( 0x25A0, 0x25A1 ), ( 0x25A3, 0x25A9 ), ( 0x25B2, 0x25B3 ),
( 0x25B6, 0x25B7 ), ( 0x25BC, 0x25BD ), ( 0x25C0, 0x25C1 ),
( 0x25C6, 0x25C8 ), ( 0x25CB, 0x25CB ), ( 0x25CE, 0x25D1 ),
( 0x25E2, 0x25E5 ), ( 0x25EF, 0x25EF ), ( 0x2605, 0x2606 ),
( 0x2609, 0x2609 ), ( 0x260E, 0x260F ), ( 0x2614, 0x2615 ),
( 0x261C, 0x261C ), ( 0x261E, 0x261E ), ( 0x2640, 0x2640 ),
( 0x2642, 0x2642 ), ( 0x2660, 0x2661 ), ( 0x2663, 0x2665 ),
( 0x2667, 0x266A ), ( 0x266C, 0x266D ), ( 0x266F, 0x266F ),
( 0x273D, 0x273D ), ( 0x2776, 0x277F ), ( 0xE000, 0xF8FF ),
( 0xFFFD, 0xFFFD ), ( 0xF0000, 0xFFFFD ), ( 0x100000, 0x10FFFD )
)
def mk_wcwidth(ucs):
    """Return the terminal column width of code point *ucs*.

    0 for NUL and combining/zero-width characters, -1 for other control
    characters, 2 for East Asian Wide/Fullwidth characters, 1 otherwise.
    """
    # NUL occupies no columns at all.
    if ucs == 0:
        return 0
    # Other C0/C1 control characters and DEL are not printable.
    if ucs < 32 or 0x7f <= ucs < 0xa0:
        return -1
    # Combining and zero-width characters take no cell.
    if bisearch(ucs, combining):
        return 0
    # U+303F (CJK half-fill space) is the single narrow exception inside
    # the wide CJK range below.
    if ucs == 0x303f:
        return 1
    # Double-width ranges (East Asian Wide / Fullwidth).
    wide_ranges = (
        (0x1100, 0x115f),    # Hangul Jamo init. consonants
        (0x2329, 0x232a),    # angle brackets
        (0x2e80, 0xa4cf),    # CJK ... Yi
        (0xac00, 0xd7a3),    # Hangul Syllables
        (0xf900, 0xfaff),    # CJK Compatibility Ideographs
        (0xfe10, 0xfe19),    # Vertical forms
        (0xfe30, 0xfe6f),    # CJK Compatibility Forms
        (0xff00, 0xff60),    # Fullwidth Forms
        (0xffe0, 0xffe6),    # Fullwidth signs
        (0x20000, 0x2fffd),  # CJK supplementary planes
        (0x30000, 0x3fffd),
    )
    for low, high in wide_ranges:
        if low <= ucs <= high:
            return 2
    return 1
def mk_wcswidth(pwcs):
    """Return the total column width of the code-point sequence *pwcs*,
    or -1 as soon as any non-printable character is encountered."""
    total = 0
    for cp in pwcs:
        cols = mk_wcwidth(cp)
        if cols < 0:
            return -1
        total += cols
    return total
# The following functions are the same as mk_wcwidth() and
# mk_wcswidth(), except that spacing characters in the East Asian
# Ambiguous (A) category as defined in Unicode Technical Report #11
# have a column width of 2. This variant might be useful for users of
# CJK legacy encodings who want to migrate to UCS without changing
# the traditional terminal character-width behaviour. It is not
# otherwise recommended for general use.
def mk_wcwidth_cjk(ucs):
    """Like mk_wcwidth(), but East Asian Ambiguous characters count as
    double width (legacy CJK terminal behaviour)."""
    # binary search in table of non-spacing characters
    return 2 if bisearch(ucs, ambiguous) else mk_wcwidth(ucs)
def mk_wcswidth_cjk(pwcs):
    """CJK-legacy variant of mk_wcswidth(): total width of *pwcs*, or -1
    on the first non-printable character."""
    total = 0
    for cp in pwcs:
        cols = mk_wcwidth_cjk(cp)
        if cols < 0:
            return -1
        total += cols
    return total
# Convenience wrappers operating on unicode strings rather than on
# integer code points.

def wcwidth(c):
    """Column width of the single character *c*."""
    return mk_wcwidth(ord(c))


def wcswidth(s):
    """Column width of the string *s*, or -1 if it contains any
    non-printable character."""
    return mk_wcswidth([ord(ch) for ch in s])


def wcwidth_cjk(c):
    """CJK-legacy column width of the single character *c*."""
    return mk_wcwidth_cjk(ord(c))


def wcswidth_cjk(s):
    """CJK-legacy column width of the string *s*."""
    return mk_wcswidth_cjk([ord(ch) for ch in s])
if __name__ == "__main__":
    # Self-test: known (character name, expected width) pairs.
    samples = (
        ('MUSIC SHARP SIGN', 1),
        ('FULLWIDTH POUND SIGN', 2),
        ('FULLWIDTH LATIN CAPITAL LETTER P', 2),
        ('CJK RADICAL BOLT OF CLOTH', 2),
        ('LATIN SMALL LETTER A', 1),
        ('LATIN SMALL LETTER AE', 1),
        ('SPACE', 1),
        ('NO-BREAK SPACE', 1),
        ('CJK COMPATIBILITY IDEOGRAPH-F920', 2),
        ('MALAYALAM VOWEL SIGN UU', 0),
        ('ZERO WIDTH SPACE', 0),
        ('ZERO WIDTH NO-BREAK SPACE', 0),
        ('COMBINING PALATALIZED HOOK BELOW', 0),
        ('COMBINING GRAVE ACCENT', 0),
    )
    # Control characters: all must report a negative (non-printable) width.
    nonprinting = u'\r\n\t\a\b\f\v\x7f'
    import unicodedata
    for name, printwidth in samples:
        uchr = unicodedata.lookup(name)
        calculatedwidth = wcwidth(uchr)
        assert calculatedwidth == printwidth, \
            'width for %r should be %d, but is %d?' % (uchr, printwidth, calculatedwidth)
    for c in nonprinting:
        calculatedwidth = wcwidth(c)
        assert calculatedwidth < 0, \
            '%r is a control character, but wcwidth gives %d' % (c, calculatedwidth)
    assert wcwidth('\0') == 0  # special case
    # depending on how python is compiled, code points above U+FFFF may not be
    # treated as single characters, so ord() won't work. test a few of these
    # manually.
    assert mk_wcwidth(0xe01ef) == 0
    assert mk_wcwidth(0x10ffff) == 1
    assert mk_wcwidth(0x3fffd) == 2
    # Mixed-width string: NUL (0) + ASCII (1 each) + wide Hangul (2) +
    # zero-width space (0).
    teststr = u'B\0ig br\u00f8wn moose\ub143\u200b'
    calculatedwidth = wcswidth(teststr)
    assert calculatedwidth == 17, 'expected 17, got %d' % calculatedwidth
    calculatedwidth = wcswidth_cjk(teststr)
    assert calculatedwidth == 18, 'expected 18, got %d' % calculatedwidth
    # BEL is non-printable, so the whole string reports -1.
    assert wcswidth(u'foobar\u200b\a') < 0
    print 'tests pass.'
| gpl-3.0 |
hchen1202/django-react | virtualenv/lib/python3.6/site-packages/pip/compat/__init__.py | 342 | 4672 | """Stuff that differs in different Python versions and platform
distributions."""
from __future__ import absolute_import, division
import os
import sys
from pip._vendor.six import text_type
try:
from logging.config import dictConfig as logging_dictConfig
except ImportError:
from pip.compat.dictconfig import dictConfig as logging_dictConfig
try:
from collections import OrderedDict
except ImportError:
from pip._vendor.ordereddict import OrderedDict
try:
import ipaddress
except ImportError:
try:
from pip._vendor import ipaddress
except ImportError:
import ipaddr as ipaddress
ipaddress.ip_address = ipaddress.IPAddress
ipaddress.ip_network = ipaddress.IPNetwork
# Locate the standard-library directories, preferring the modern `sysconfig`
# module (Python >= 2.7/3.2) and falling back to distutils on older versions.
try:
    import sysconfig

    def get_stdlib():
        """Return the set of filesystem paths holding the standard library."""
        paths = [
            sysconfig.get_path("stdlib"),
            sysconfig.get_path("platstdlib"),
        ]
        # filter(bool, ...) drops None entries for paths the platform lacks.
        return set(filter(bool, paths))
except ImportError:
    from distutils import sysconfig

    def get_stdlib():
        """Return the set of filesystem paths holding the standard library."""
        paths = [
            sysconfig.get_python_lib(standard_lib=True),
            sysconfig.get_python_lib(standard_lib=True, plat_specific=True),
        ]
        return set(filter(bool, paths))
# Public API of this compatibility module.
__all__ = [
    "logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str",
    "native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile",
    "OrderedDict",
]

# Detect PEP 3147 __pycache__ support and locate cache_from_source().
# On 3.4+ it lives in importlib.util; on older interpreters it may exist
# in the deprecated `imp` module.
if sys.version_info >= (3, 4):
    uses_pycache = True
    from importlib.util import cache_from_source
else:
    import imp
    uses_pycache = hasattr(imp, 'cache_from_source')
    if uses_pycache:
        cache_from_source = imp.cache_from_source
    else:
        cache_from_source = None
# Text/bytes helpers whose behavior differs between Python 2 and 3.
if sys.version_info >= (3,):
    def console_to_str(s):
        """Decode console (subprocess) output bytes to str.

        Tries the real stdout encoding first, then falls back to UTF-8.
        NOTE(review): assumes sys.__stdout__ is not None (it can be under
        pythonw.exe) — confirm before reusing outside pip.
        """
        try:
            return s.decode(sys.__stdout__.encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')

    def native_str(s, replace=False):
        """Coerce *s* to the native str type (unicode on Python 3)."""
        if isinstance(s, bytes):
            return s.decode('utf-8', 'replace' if replace else 'strict')
        return s

else:
    def console_to_str(s):
        # On Python 2 console output is already a byte str; pass through.
        return s

    def native_str(s, replace=False):
        # Replace is ignored -- unicode to UTF-8 can't fail
        if isinstance(s, text_type):
            return s.encode('utf-8')
        return s
def total_seconds(td):
    """Return the duration of *td* (a ``datetime.timedelta``) in seconds.

    Delegates to ``timedelta.total_seconds()`` where available (Python >= 2.7)
    and otherwise reproduces the documented formula by hand.
    """
    if not hasattr(td, "total_seconds"):
        # Pre-2.7 fallback: fold days/seconds/microseconds into microseconds,
        # then convert (true division thanks to the module's __future__ import).
        micros = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
        return micros / 10 ** 6
    return td.total_seconds()
def get_path_uid(path):
    """
    Return path's uid.
    Does not follow symlinks:
        https://github.com/pypa/pip/pull/935#discussion_r5307003
    Placed this function in compat due to differences on AIX and
    Jython, that should eventually go away.
    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        # Open with O_NOFOLLOW so a symlink raises OSError atomically,
        # then stat the open descriptor (no TOCTOU window).
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        file_uid = os.fstat(fd).st_uid
        os.close(fd)
    else:  # AIX and Jython
        # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
        if not os.path.islink(path):
            # older versions of Jython don't have `os.fstat`
            file_uid = os.stat(path).st_uid
        else:
            # raise OSError for parity with os.O_NOFOLLOW above
            raise OSError(
                "%s is a symlink; Will not return uid for symlinks" % path
            )
    return file_uid
def expanduser(path):
    """
    Expand ~ and ~user constructions.
    Includes a workaround for http://bugs.python.org/issue14768
    """
    result = os.path.expanduser(path)
    # issue14768: when HOME is "/", '~/x' expands to '//x'; strip the
    # duplicate leading slash in that specific case only.
    needs_fix = path.startswith('~/') and result.startswith('//')
    return result[1:] if needs_fix else result
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'.  this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = ('python', 'wsgiref')
if sys.version_info >= (2, 7):
    # argparse graduated into the stdlib in 2.7 but may still carry metadata.
    stdlib_pkgs += ('argparse',)

# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
           (sys.platform == 'cli' and os.name == 'nt'))
def samefile(file1, file2):
    """Provide an alternative for os.path.samefile on Windows/Python2"""
    if not hasattr(os.path, 'samefile'):
        # Windows/Python 2 fallback: compare normalized absolute paths
        # (case-insensitive on Windows via normcase).
        norm1 = os.path.normcase(os.path.abspath(file1))
        norm2 = os.path.normcase(os.path.abspath(file2))
        return norm1 == norm2
    return os.path.samefile(file1, file2)
| mit |
thfield/sf-base-election-data | venv/lib/python3.4/site-packages/django/contrib/flatpages/models.py | 136 | 1531 | from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.core.urlresolvers import get_script_prefix
from django.db import models
from django.utils.encoding import iri_to_uri, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class FlatPage(models.Model):
    """A simple "flat" HTML page served at a fixed URL, stored in the DB.

    Pages are attached to one or more Sites; rendering uses `template_name`
    when set, otherwise the flatpages default template.
    """

    # Absolute path component the page is served at (indexed for lookup).
    url = models.CharField(_('URL'), max_length=100, db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)
    enable_comments = models.BooleanField(_('enable comments'), default=False)
    template_name = models.CharField(_('template name'), max_length=70, blank=True,
        help_text=_(
            "Example: 'flatpages/contact_page.html'. If this isn't provided, "
            "the system will use 'flatpages/default.html'."
        ),
    )
    registration_required = models.BooleanField(_('registration required'),
        help_text=_("If this is checked, only logged-in users will be able to view the page."),
        default=False)
    sites = models.ManyToManyField(Site)

    class Meta:
        db_table = 'django_flatpage'
        verbose_name = _('flat page')
        verbose_name_plural = _('flat pages')
        ordering = ('url',)

    def __str__(self):
        return "%s -- %s" % (self.url, self.title)

    def get_absolute_url(self):
        # Handle script prefix manually because we bypass reverse()
        return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
| bsd-3-clause |
sometallgit/AutoUploader | Python27/Lib/distutils/version.py | 259 | 11433 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
    """Abstract base class for version numbering classes.  Just provides
    constructor (__init__) and reproducer (__repr__), because those
    seem to be the same for all version numbering classes.
    """

    def __init__ (self, vstring=None):
        # Optional string form is parsed immediately by the subclass's parse().
        if vstring:
            self.parse(vstring)

    def __repr__ (self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))

    # NOTE(review): comparisons rely on the Python 2-only __cmp__ protocol
    # implemented by the subclasses below; this code predates Python 3.

    # Interface for version-number classes -- must be implemented
    # by the following classes (the concrete ones -- Version should
    # be treated as an abstract class).
    #    __init__ (string) - create and take same action as 'parse'
    #                        (string parameter is optional)
    #    parse (string)    - convert a string representation to whatever
    #                        internal representation is appropriate for
    #                        this style of version numbering
    #    __str__ (self)    - convert back to a string; should be very similar
    #                        (if not identical to) the string supplied to parse
    #    __repr__ (self)   - generate Python code to recreate
    #                        the instance
    #    __cmp__ (self, other) - compare two version numbers ('other' may
    #                        be an unparsed version string, or another
    #                        instance of your version class)
class StrictVersion (Version):

    """Version numbering for anal retentives and software idealists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.
    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):
        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4
    The following are examples of invalid version numbers:
        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4
    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """

    # Anchored pattern: major.minor[.patch][{a|b}N].  Groups 1,2,4,5,6 are
    # consumed by parse() below.
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            re.VERBOSE)

    def parse (self, vstring):
        match = self.version_re.match(vstring)
        if not match:
            # NOTE(review): Python 2-only raise syntax; file predates Python 3.
            raise ValueError, "invalid version number '%s'" % vstring

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            # Missing patch component normalizes to 0, so "0.4" == "0.4.0".
            self.version = tuple(map(string.atoi, [major, minor]) + [0])

        if prerelease:
            # Stored as ('a'|'b', number) so tuple comparison orders a < b.
            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
        else:
            self.prerelease = None

    def __str__ (self):

        if self.version[2] == 0:
            # Drop the trailing ".0" to round-trip "0.4"-style input.
            vstring = string.join(map(str, self.version[0:2]), '.')
        else:
            vstring = string.join(map(str, self.version), '.')

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def __cmp__ (self, other):
        # Accept a raw string for 'other' and promote it first.
        if isinstance(other, StringType):
            other = StrictVersion(other)

        compare = cmp(self.version, other.version)
        if (compare == 0):              # have to compare prerelease

            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!

            if (not self.prerelease and not other.prerelease):
                return 0
            elif (self.prerelease and not other.prerelease):
                return -1
            elif (not self.prerelease and other.prerelease):
                return 1
            elif (self.prerelease and other.prerelease):
                return cmp(self.prerelease, other.prerelease)

        else:                           # numeric versions don't match --
            return compare              # prerelease stuff doesn't matter
# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):

    """Version numbering for anarchists and software realists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:
        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0
    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """

    # Splits a version string into runs of digits, runs of letters, and '.'
    # separators (the separators are discarded in parse()).
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse (self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = filter(lambda x: x and x != '.',
                            self.component_re.split(vstring))
        # Numeric components compare numerically; letter runs stay strings.
        for i in range(len(components)):
            try:
                components[i] = int(components[i])
            except ValueError:
                pass

        self.version = components

    def __str__ (self):
        return self.vstring

    def __repr__ (self):
        return "LooseVersion ('%s')" % str(self)

    def __cmp__ (self, other):
        # Promote raw strings, then rely on list/tuple comparison semantics.
        if isinstance(other, StringType):
            other = LooseVersion(other)

        return cmp(self.version, other.version)
# end class LooseVersion
| mit |
rnixx/kivy | kivy/core/window/window_pygame.py | 2 | 17115 | '''
Window Pygame: windowing provider based on Pygame
.. warning::
Pygame has been deprecated and will be removed in the release after Kivy
1.11.0.
'''
__all__ = ('WindowPygame', )
# fail early if possible
import pygame
from kivy.compat import PY2
from kivy.core.window import WindowBase
from kivy.core import CoreCriticalException
from os import environ
from os.path import exists, join
from kivy.config import Config
from kivy import kivy_data_dir
from kivy.base import ExceptionManager
from kivy.logger import Logger
from kivy.base import stopTouchApp, EventLoop
from kivy.utils import platform, deprecated
from kivy.resources import resource_find
try:
android = None
if platform == 'android':
import android
except ImportError:
pass
# late binding
glReadPixels = GL_RGBA = GL_UNSIGNED_BYTE = None
class WindowPygame(WindowBase):
    """Pygame-backed window provider.

    Deprecated: pygame support is scheduled for removal after Kivy 1.11.0
    (see the @deprecated decorator on __init__).
    """

    @deprecated(
        msg='Pygame has been deprecated and will be removed after 1.11.0')
    def __init__(self, *largs, **kwargs):
        super(WindowPygame, self).__init__(*largs, **kwargs)

    def create_window(self, *largs):
        """Create the SDL/pygame window according to the kivy Config.

        Sets SDL environment variables (display head, window position)
        BEFORE pygame.display.set_mode(), since SDL reads them at creation.
        """
        # ensure the mouse is still not up after window creation, otherwise, we
        # have some weird bugs
        self.dispatch('on_mouse_up', 0, 0, 'all', [])

        # force display to show (available only for fullscreen)
        displayidx = Config.getint('graphics', 'display')
        if 'SDL_VIDEO_FULLSCREEN_HEAD' not in environ and displayidx != -1:
            environ['SDL_VIDEO_FULLSCREEN_HEAD'] = '%d' % displayidx

        # init some opengl, same as before.
        self.flags = pygame.HWSURFACE | pygame.OPENGL | pygame.DOUBLEBUF

        # right now, activate resizable window only on linux.
        # on window / macosx, the opengl context is lost, and we need to
        # reconstruct everything. Check #168 for a state of the work.
        if platform in ('linux', 'macosx', 'win') and \
                Config.getboolean('graphics', 'resizable'):
            self.flags |= pygame.RESIZABLE

        try:
            pygame.display.init()
        except pygame.error as e:
            # NOTE(review): e.message is Python 2-only; would need str(e) on 3.
            raise CoreCriticalException(e.message)

        multisamples = Config.getint('graphics', 'multisamples')

        if multisamples > 0:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLESAMPLES,
                                            multisamples)
        pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 16)
        pygame.display.gl_set_attribute(pygame.GL_STENCIL_SIZE, 1)
        pygame.display.set_caption(self.title)

        if self.position == 'auto':
            self._pos = None
        elif self.position == 'custom':
            self._pos = self.left, self.top
        else:
            raise ValueError('position token in configuration accept only '
                             '"auto" or "custom"')

        if self._fake_fullscreen:
            if not self.borderless:
                self.fullscreen = self._fake_fullscreen = False
            elif not self.fullscreen or self.fullscreen == 'auto':
                self.borderless = self._fake_fullscreen = False

        if self.fullscreen == 'fake':
            self.borderless = self._fake_fullscreen = True
            Logger.warning("The 'fake' fullscreen option has been "
                           "deprecated, use Window.borderless or the "
                           "borderless Config option instead.")

        if self.fullscreen == 'fake' or self.borderless:
            Logger.debug('WinPygame: Set window to borderless mode.')
            self.flags |= pygame.NOFRAME
            # If no position set in borderless mode, we always need
            # to set the position. So use 0, 0.
            if self._pos is None:
                self._pos = (0, 0)
            environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % self._pos
        elif self.fullscreen in ('auto', True):
            Logger.debug('WinPygame: Set window to fullscreen mode')
            self.flags |= pygame.FULLSCREEN
        elif self._pos is not None:
            environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % self._pos

        # never stay with a None pos, application using w.center will be fired.
        self._pos = (0, 0)

        # prepare keyboard
        repeat_delay = int(Config.get('kivy', 'keyboard_repeat_delay'))
        repeat_rate = float(Config.get('kivy', 'keyboard_repeat_rate'))
        pygame.key.set_repeat(repeat_delay, int(1000. / repeat_rate))

        # set window icon before calling set_mode
        try:
            filename_icon = self.icon or Config.get('kivy', 'window_icon')
            if filename_icon == '':
                logo_size = 32
                if platform == 'macosx':
                    logo_size = 512
                elif platform == 'win':
                    logo_size = 64
                filename_icon = 'kivy-icon-{}.png'.format(logo_size)
                filename_icon = resource_find(
                    join(kivy_data_dir, 'logo', filename_icon))
            self.set_icon(filename_icon)
        # NOTE(review): bare except deliberately keeps the window usable even
        # if icon loading fails for any reason; it still hides real bugs.
        except:
            Logger.exception('Window: cannot set icon')

        # try to use mode with multisamples
        try:
            self._pygame_set_mode()
        except pygame.error as e:
            if multisamples:
                Logger.warning('WinPygame: Video: failed (multisamples=%d)' %
                               multisamples)
                Logger.warning('WinPygame: trying without antialiasing')
                pygame.display.gl_set_attribute(
                    pygame.GL_MULTISAMPLEBUFFERS, 0)
                pygame.display.gl_set_attribute(
                    pygame.GL_MULTISAMPLESAMPLES, 0)
                multisamples = 0
                try:
                    self._pygame_set_mode()
                except pygame.error as e:
                    raise CoreCriticalException(e.message)
            else:
                raise CoreCriticalException(e.message)

        if pygame.RESIZABLE & self.flags:
            self._pygame_set_mode()

        info = pygame.display.Info()
        self._size = (info.current_w, info.current_h)
        # self.dispatch('on_resize', *self._size)

        # in order to debug futur issue with pygame/display, let's show
        # more debug output.
        Logger.debug('Window: Display driver ' + pygame.display.get_driver())
        Logger.debug('Window: Actual window size: %dx%d',
                     info.current_w, info.current_h)
        if platform != 'android':
            # unsupported platform, such as android that doesn't support
            # gl_get_attribute.
            Logger.debug(
                'Window: Actual color bits r%d g%d b%d a%d',
                pygame.display.gl_get_attribute(pygame.GL_RED_SIZE),
                pygame.display.gl_get_attribute(pygame.GL_GREEN_SIZE),
                pygame.display.gl_get_attribute(pygame.GL_BLUE_SIZE),
                pygame.display.gl_get_attribute(pygame.GL_ALPHA_SIZE))
            Logger.debug(
                'Window: Actual depth bits: %d',
                pygame.display.gl_get_attribute(pygame.GL_DEPTH_SIZE))
            Logger.debug(
                'Window: Actual stencil bits: %d',
                pygame.display.gl_get_attribute(pygame.GL_STENCIL_SIZE))
            Logger.debug(
                'Window: Actual multisampling samples: %d',
                pygame.display.gl_get_attribute(pygame.GL_MULTISAMPLESAMPLES))
        super(WindowPygame, self).create_window()

        # set mouse visibility
        self._set_cursor_state(self.show_cursor)

        # if we are on android platform, automatically create hooks
        if android:
            from kivy.support import install_android
            install_android()

    def close(self):
        pygame.display.quit()
        super(WindowPygame, self).close()

    def on_title(self, instance, value):
        if self.initialized:
            pygame.display.set_caption(self.title)

    def set_icon(self, filename):
        """Set the window icon; tries the Windows-native .ico path first."""
        if not exists(filename):
            return False
        try:
            if platform == 'win':
                try:
                    if self._set_icon_win(filename):
                        return True
                except:
                    # fallback on standard loading then.
                    pass
            # for all others platform, or if the ico is not available, use the
            # default way to set it.
            self._set_icon_standard(filename)
            super(WindowPygame, self).set_icon(filename)
        except:
            Logger.exception('WinPygame: unable to set icon')

    def _set_icon_standard(self, filename):
        # Load the icon image via pygame; Python 2 may need a utf-8 encoded
        # filename for non-ASCII paths.
        if PY2:
            try:
                im = pygame.image.load(filename)
            except UnicodeEncodeError:
                im = pygame.image.load(filename.encode('utf8'))
        else:
            im = pygame.image.load(filename)
        if im is None:
            raise Exception('Unable to load window icon (not found)')
        pygame.display.set_icon(im)

    def _set_icon_win(self, filename):
        # Windows-only: set both small (16px) and big (48px) icons through
        # the Win32 API, which pygame does not expose.
        # ensure the window ico is ended by ico
        if not filename.endswith('.ico'):
            filename = '{}.ico'.format(filename.rsplit('.', 1)[0])
        if not exists(filename):
            return False

        import win32api
        import win32gui
        import win32con
        hwnd = pygame.display.get_wm_info()['window']
        icon_big = win32gui.LoadImage(
            None, filename, win32con.IMAGE_ICON,
            48, 48, win32con.LR_LOADFROMFILE)
        icon_small = win32gui.LoadImage(
            None, filename, win32con.IMAGE_ICON,
            16, 16, win32con.LR_LOADFROMFILE)
        win32api.SendMessage(
            hwnd, win32con.WM_SETICON, win32con.ICON_SMALL, icon_small)
        win32api.SendMessage(
            hwnd, win32con.WM_SETICON, win32con.ICON_BIG, icon_big)
        return True

    def _set_cursor_state(self, value):
        pygame.mouse.set_visible(value)

    def screenshot(self, *largs, **kwargs):
        """Save the current GL framebuffer to the filename chosen by the base
        class; returns the filename or None."""
        global glReadPixels, GL_RGBA, GL_UNSIGNED_BYTE
        filename = super(WindowPygame, self).screenshot(*largs, **kwargs)
        if filename is None:
            return None
        if glReadPixels is None:
            # late import: GL functions are only available once a context exists
            from kivy.graphics.opengl import (glReadPixels, GL_RGBA,
                                              GL_UNSIGNED_BYTE)
        width, height = self.system_size
        data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
        data = bytes(bytearray(data))
        # flipped=True because GL rows are bottom-up
        surface = pygame.image.fromstring(data, (width, height), 'RGBA', True)
        pygame.image.save(surface, filename)
        Logger.debug('Window: Screenshot saved at <%s>' % filename)
        return filename

    def flip(self):
        pygame.display.flip()
        super(WindowPygame, self).flip()

    @deprecated
    def toggle_fullscreen(self):
        if self.flags & pygame.FULLSCREEN:
            self.flags &= ~pygame.FULLSCREEN
        else:
            self.flags |= pygame.FULLSCREEN
        self._pygame_set_mode()

    def mainloop(self):
        """Drain the pygame event queue once, translating SDL events into
        kivy dispatches (mouse, joystick, keyboard, resize, drop-file)."""
        for event in pygame.event.get():

            # kill application (SIG_TERM)
            if event.type == pygame.QUIT:
                if self.dispatch('on_request_close'):
                    continue
                EventLoop.quit = True
                self.close()

            # mouse move
            elif event.type == pygame.MOUSEMOTION:
                x, y = event.pos
                # pygame's y axis is top-down; kivy's is bottom-up
                self.mouse_pos = x, self.system_size[1] - y
                # don't dispatch motion if no button are pressed
                if event.buttons == (0, 0, 0):
                    continue
                self._mouse_x = x
                self._mouse_y = y
                self._mouse_meta = self.modifiers
                self.dispatch('on_mouse_move', x, y, self.modifiers)

            # mouse action
            elif event.type in (pygame.MOUSEBUTTONDOWN,
                                pygame.MOUSEBUTTONUP):
                self._pygame_update_modifiers()
                x, y = event.pos
                btn = 'left'
                if event.button == 3:
                    btn = 'right'
                elif event.button == 2:
                    btn = 'middle'
                elif event.button == 4:
                    btn = 'scrolldown'
                elif event.button == 5:
                    btn = 'scrollup'
                elif event.button == 6:
                    btn = 'scrollright'
                elif event.button == 7:
                    btn = 'scrollleft'
                eventname = 'on_mouse_down'
                if event.type == pygame.MOUSEBUTTONUP:
                    eventname = 'on_mouse_up'
                self._mouse_x = x
                self._mouse_y = y
                self._mouse_meta = self.modifiers
                self._mouse_btn = btn
                self._mouse_down = eventname == 'on_mouse_down'
                self.dispatch(eventname, x, y, btn, self.modifiers)

            # joystick action
            elif event.type == pygame.JOYAXISMOTION:
                self.dispatch('on_joy_axis', event.joy, event.axis,
                              event.value)
            elif event.type == pygame.JOYHATMOTION:
                self.dispatch('on_joy_hat', event.joy, event.hat, event.value)
            elif event.type == pygame.JOYBALLMOTION:
                self.dispatch('on_joy_ball', event.joy, event.ballid,
                              event.rel[0], event.rel[1])
            elif event.type == pygame.JOYBUTTONDOWN:
                self.dispatch('on_joy_button_down', event.joy, event.button)
            elif event.type == pygame.JOYBUTTONUP:
                self.dispatch('on_joy_button_up', event.joy, event.button)

            # keyboard action
            elif event.type in (pygame.KEYDOWN, pygame.KEYUP):
                self._pygame_update_modifiers(event.mod)
                # atm, don't handle keyup
                if event.type == pygame.KEYUP:
                    self.dispatch('on_key_up', event.key,
                                  event.scancode)
                    continue

                # don't dispatch more key if down event is accepted
                if self.dispatch('on_key_down', event.key,
                                 event.scancode, event.unicode,
                                 self.modifiers):
                    continue
                self.dispatch('on_keyboard', event.key,
                              event.scancode, event.unicode,
                              self.modifiers)

            # video resize
            elif event.type == pygame.VIDEORESIZE:
                self._size = event.size
                self.update_viewport()

            elif event.type == pygame.VIDEOEXPOSE:
                self.canvas.ask_update()

            # ignored event
            elif event.type == pygame.ACTIVEEVENT:
                pass

            # drop file (pygame patch needed)
            elif event.type == pygame.USEREVENT and \
                    hasattr(pygame, 'USEREVENT_DROPFILE') and \
                    event.code == pygame.USEREVENT_DROPFILE:
                self.dispatch('on_dropfile', event.filename)

            '''
            # unhandled event !
            else:
                Logger.debug('WinPygame: Unhandled event %s' % str(event))
            '''

        # throttle when the window is unfocused/minimized
        if not pygame.display.get_active():
            pygame.time.wait(100)

    #
    # Pygame wrapper
    #
    def _pygame_set_mode(self, size=None):
        if size is None:
            size = self.size
        if self.fullscreen == 'auto':
            # (0, 0) lets SDL pick the current desktop resolution
            pygame.display.set_mode((0, 0), self.flags)
        else:
            pygame.display.set_mode(size, self.flags)

    def _pygame_update_modifiers(self, mods=None):
        # Available mod, from dir(pygame)
        # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',
        # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',
        # 'KMOD_MODE', 'KMOD_NONE'
        if mods is None:
            mods = pygame.key.get_mods()
        self._modifiers = []
        if mods & (pygame.KMOD_SHIFT | pygame.KMOD_LSHIFT):
            self._modifiers.append('shift')
        if mods & (pygame.KMOD_ALT | pygame.KMOD_LALT):
            self._modifiers.append('alt')
        if mods & (pygame.KMOD_CTRL | pygame.KMOD_LCTRL):
            self._modifiers.append('ctrl')
        if mods & (pygame.KMOD_META | pygame.KMOD_LMETA):
            self._modifiers.append('meta')

    def request_keyboard(
        self, callback, target, input_type='text', keyboard_suggestions=True
    ):
        keyboard = super(WindowPygame, self).request_keyboard(
            callback, target, input_type, keyboard_suggestions)
        # on android, also raise the native soft keyboard
        if android and not self.allow_vkeyboard:
            android.show_keyboard(target, input_type)
        return keyboard

    def release_keyboard(self, *largs):
        super(WindowPygame, self).release_keyboard(*largs)
        if android:
            android.hide_keyboard()
        return True
| mit |
vanceeasleaf/aces | aces/algorithm/cs.py | 1 | 13800 | import numpy as np
import aces.tools as tl
import h5py
from numpy.linalg import norm
import os
from ase import io
from scipy import optimize
from aces.f import read_forces, writefc2, writefc3, disp2atoms
def shrink(y, a):
    """Soft-thresholding (shrinkage) operator: sign(y) * max(|y| - a, 0)."""
    magnitude = np.maximum(np.abs(y) - a, 0.0)
    return np.sign(y) * magnitude
def dot(a, b):
    """Contract the second axis of ``a`` against the first axis of ``b``
    (matrix-product-like tensor contraction)."""
    contraction_axes = ([1], [0])
    return np.tensordot(a, b, axes=contraction_axes)
def maxeig(A):
    """Estimate the dominant eigenvalue of a non-negative square matrix.

    Builds a weight vector from the row sums of the column-normalized
    matrix, then averages the component-wise Rayleigh ratios A.W / W.
    Exact for matrices whose dominant eigenvector has equal row sums of
    the normalized matrix (e.g. symmetric doubly-stochastic-like cases).

    :param A: 2-D array-like, square, with non-zero column sums.
    :return: float estimate of the largest eigenvalue.
    """
    A = np.asarray(A, dtype=float)
    B = np.zeros_like(A)
    col_sums = A.sum(axis=0)
    # bug fix: original iterated `for j in A.shape[1]` (over an int ->
    # TypeError); iterate over the column indices instead.
    for j in range(A.shape[1]):
        B[:, j] = A[:, j] / col_sums[j]
    C = B.sum(axis=1)
    W = C / C.sum()
    # bug fix: ndarray has no `.average()` method; use `.mean()`.
    lmax = (A.dot(W) / W).mean()
    return lmax
class runner:
def __init__(self, NAH=3, split=True, mu=0.1, lam=0.9):
self.NAH = NAH
self.split = split
self.mu = mu
self.lam = lam
# self.db=h5py.File('force.hdf5')
def getForce(self, pos, files):
print("reading vasprun.xml and POSCAR")
u = []
for file in files:
dir = os.path.dirname(file)
atoms = io.read(dir + '/POSCAR')
u.append(atoms.positions - pos)
forces = []
for file in files:
forces.append(read_forces(file))
return np.array(forces), np.array(u)
def getsatoms(self):
filename = 'disp_fc3.yaml'
if (tl.exists(filename)):
return disp2atoms(filename)
filename = 'disp.yaml'
if (tl.exists(filename)):
return disp2atoms(filename)
filename = '3RD.SPOSCAR'
if (tl.exists(filename)):
from ase import io
return io.read(filename, format='vasp')
filename = 'SPOSCAR'
if (tl.exists(filename)):
from ase import io
return io.read(filename, format='vasp')
def getSupercell(self, atoms):
from pyspglib import spglib
s = spglib.get_symmetry(atoms)
symmetry = []
print("building symmetry")
for i, rot in enumerate(s['rotations'][:100]):
print("symetry :", i)
trans = s['translations'][i]
map0 = self.getMap(atoms, rot, trans)
symmetry.append([rot, map0])
return symmetry
def getMap(self, atoms, rot, trans):
v = atoms.copy()
v.positions = v.positions.dot(rot.T)
v.translate(trans.dot(v.cell))
import itertools
from scipy.spatial.distance import cdist
posi = atoms.positions
d2s = np.empty((27, len(v), len(v)))
for j, (ja, jb, jc) in enumerate(
itertools.product(range(-1, 2), range(-1, 2), range(-1, 2))):
posj = v.positions + np.dot([ja, jb, jc], v.cell)
d2s[j, :, :] = cdist(posi, posj, "sqeuclidean")
d2min = d2s.min(axis=0)
map0 = np.argmin(d2min, axis=1)
return map0
def getTrainSets(self, u):
assert len(u) > 0
self.L = len(u)
n = self.natom = len(u[0])
row = 0
rowr = [0]
for i in range(self.NAH):
row += (n * 3)**i
rowr.append(row)
self.rowr = rowr
def getMatrix(self, F, u):
print("getting compressive matrix")
rowr = self.rowr
A = np.zeros([self.L, rowr[-1]])
g = self.mulU
# shape=F.shape
n = self.natom
for j in range(self.L):
for i in range(self.NAH):
r = range(rowr[i], rowr[i + 1])
A[j, r] = -g(u[j].flatten(), i)
F = F.reshape([self.L, 3 * n])
c = 3 * n
F = F.T.flatten().T
A = np.kron(np.eye(c), A)
return F, A
def gauss(a):
m, n = a.shape
b = np.zeros(n)
for i in range(0, n - 1):
for j in range(i + 1, n):
imax = np.abs(a[i:n, i]).maxarg()
if imax != i:
a[i], a[imax] = a[imax], a[i]
if a[j, i] != 0.0 and a[i, i] != 0.0:
lam = float(a[j, i]) / a[i, i]
a[j] = a[j] - lam * a[i]
for k in range(n - 1, -1, -1):
b[k] = (b[k] - np.dot(a[k, (k + 1):], b[(k + 1):])) / a[k, k]
result = b
return result
def mulU(self, x, p):
if p > 0:
return np.kron(self.mulU(x, p - 1), x) / p
else:
return 1.0
def getCsMat(self, F, u, symmetry):
self.getTrainSets(u)
# keep to be the constrain of the newest variables
Q = []
n = u.shape[1]
v = self.rowr[-1]
p = 3 * n
step = 0
nval = p * v
# the connection between oldest variables and newest variables
E = None
for rot, map0 in symmetry:
step += 1
print("step:", step)
for i in range(n):
print("atom:", i)
for j in range(n):
ii = map0[i]
jj = map0[j]
for a in range(3):
for b in range(3):
t = np.zeros(nval)
for r in range(3):
id = (ii * 3 + a) * v + 1 + jj * 3 + r
id1 = (i * 3 + a) * v + 1 + j * 3 + b
if E is None:
t[id] += rot[r, b]
t[id1] -= rot[a, r]
else:
t[id] += E[id] * rot[r, b]
t[id1] -= E[id1] * rot[a, r]
# phi[ii,jj].dot(rot)=rot.dot(phi[i,j])
Q.append(t)
if (len(Q) == 50):
e, c = np.linalg.eig(Q)
if E is None:
E = c
else:
E = E.dot(c)
nval = E.shape[1]
print("nval:", nval)
Q = []
self.R = E
v = norm(u, axis=2)
u0 = v.flatten().max()
F, A = self.getMatrix(F, u / u0)
return F, A.dot(self.R)
    def run(self):
        """Full pipeline: gather VASP forces, fit interatomic force
        constants by compressive sensing, and write the 2nd- (and, when
        self.NAH >= 3, the 3rd-) order force constants to disk.
        """
        atoms = self.getsatoms()
        symmetry = self.getSupercell(atoms)
        # Collect the displaced-supercell VASP outputs, capped at 100 runs.
        files = tl.shell_exec(
            'find dirs/dir_* -name vasprun.xml |sort -n').split('\n')
        if len(files) > 100:
            files = files[:100]
        pos = atoms.positions
        f, u = self.getForce(pos, files)
        F, A = self.getCsMat(f, u, symmetry)
        print("start compressive sensing ")
        B = cs(mu=self.mu, split=self.split, lam=self.lam).run(F, A)
        print("rebuilding IFCs ")
        phi = self.rebuild(B)
        print("writing IFCs ")
        # Displacements were normalized by u0 in getCsMat; undo that scaling
        # (once per displacement order) when exporting.
        v = norm(u, axis=2)
        u0 = v.flatten().max()
        fc2 = np.einsum(phi[1], [1, 0, 3, 2]) / u0
        writefc2(fc2, 'csfc2')
        if self.NAH >= 3:
            # Cache the raw 3rd-order tensor in HDF5, then export via fc3().
            a = h5py.File('fc3p.hdf5')
            if 'fc3' in a:
                del a['fc3']
            a['fc3'] = phi[2] / u0 / u0
            a.close()
            self.fc3()
    def fc3(self):
        """Reorder the cached 3rd-order constants and export them as csfc3."""
        print("writing csfc3 ")
        a = h5py.File('fc3p.hdf5')
        # Swap the paired axes into the layout expected by writefc3.
        fc3 = np.einsum(a['fc3'], [0, 2, 1, 3, 5, 4])
        from ase import io
        atoms = io.read('POSCAR')
        satoms = self.getsatoms()
        writefc3(fc3, atoms, satoms, 'csfc3')
    def rebuild(self, B):
        """Expand the reduced solution B back into per-order IFC tensors.

        Undoes the symmetry reduction (self.R) applied in getCsMat, then
        slices the flat coefficient array into one tensor per expansion
        order, shaped (natom, 3, natom, 3, ...).
        """
        n = self.natom
        rowr = self.rowr
        B = self.R.dot(B).T.reshape([-1, 3 * n]).T
        phi = []
        for i in range(self.NAH):
            r = range(rowr[i], rowr[i + 1])
            x = B[r].reshape([n, 3] * (i + 1))
            # Build einsum subscripts that cycle the (atom, xyz) axis pairs;
            # assumes the layout produced by getMatrix — TODO confirm.
            idx = np.array([0, i + 1])
            rdx = []
            for j in range(i):
                rdx.extend(idx + (j + 1))
            rdx.extend(idx)
            x = np.einsum(x, rdx)
            phi.append(x)
        return phi
class cssklearn:
    """Compressive-sensing backend that delegates to scikit-learn's Lasso."""

    def __init__(self):
        pass

    def initu(self, f, A):
        """Return an all-ones starting guess shaped for the unknowns."""
        # The unknowns have one row per sensing-matrix column and otherwise
        # mirror the trailing dimensions of f.
        dims = list(f.shape)
        dims[0] = A.shape[1]
        # so dims is the shape of u
        return np.ones(dims)

    def run(self, f, A):
        """Fit f ~ A . u with an (almost) unregularized Lasso; return u."""
        # from sklearn import cross_validation
        from sklearn import linear_model
        lasso = linear_model.Lasso(
            alpha=1e-15, fit_intercept=False, max_iter=10000, tol=1e-5)
        # Sanity-check fit on a tiny hard-coded system before the real one.
        print(lasso.fit([[0, 0, 2], [1, 1, 2]], [[0, 1], [1, 1]]).coef_.T)
        print(A.shape, f.shape)
        return lasso.fit(A, f).coef_.T
        # k_fold = cross_validation.KFold(n=len(f), n_folds=10)
        # [svc.fit(X_digits[train], y_digits[train])\
        # .score(X_digits[test], y_digits[test]) for train, test in kfold]
class csfortran:
    """Compressive sensing via an external Fortran BCS solver.

    Only usable on hosts where the hard-coded bcs-master build below exists.
    """
    def __init__(self):
        pass
    def initu(self, f, A):
        # The unknowns have one row per column of A and otherwise mirror the
        # trailing dimensions of f; start from all ones.
        dim = list(f.shape)
        dim[0] = A.shape[1]
        # so dim is the shape of u
        return np.ones(dim)
    def run(self, f, A):
        """Solve f ~ A . u via the wrapped Fortran BCS routine.

        The solver fills u (and the error bars) in place; u is returned.
        """
        u = self.initu(f, A)
        import sys
        # NOTE(review): user-specific absolute path; the import below fails
        # anywhere this exact directory layout is not present.
        sys.path.append("/home/xggong/home1/zhouy/soft/bcs-master/wrap")
        import bcs as p
        ebars = np.zeros(len(A[0]))
        # Noise level heuristically set to 1% of the data spread.
        sigma2 = np.std(f) / 100.
        p.bcs.do_wrapped(A, f, sigma2, 1e-8, u, ebars)
        return u
class cs:
    """Bregman-iteration compressive-sensing solvers for f ~ A . u.

    Minimizes the l1 norm of u subject to the data term, either by split
    Bregman (the default) or by plain Bregman iteration over the FCP
    fixed-point solver.

    NOTE(review): relies on the names ``norm``, ``dot``, ``shrink`` and
    ``optimize`` being defined/imported elsewhere in this module — verify.
    """
    def __init__(self, mu=0.7, split=True, lam=0.9):
        # mu: shrinkage weight; lam: split-Bregman penalty weight;
        # split: choose split_bregman() over bregman().
        self.mu, self.lam = mu, lam
        self.split = split
    def initu(self, f, A):
        # Start from all ones, shaped (columns of A, *trailing dims of f).
        dim = list(f.shape)
        dim[0] = A.shape[1]
        # so dim is the shape of u
        return np.ones(dim)
    def testcs(self):
        # Smoke test: one equation, two unknowns.
        f = (np.ones(1) * 20.0).reshape(1, 1)
        A = np.array([7.0, 10.0]).reshape(1, 2)
        print(self.run(f, A))
    def test2(self):
        # Smoke test: inconsistent duplicated rows.
        f = np.array([7.0, 8.0])
        A = np.array([[1.0, 0], [1.0, 0]])
        print(self.run(f, A))
    def test3(self):
        # Smoke test: rank-deficient 2x3 system.
        f = np.array([7.0, 8.0])
        A = np.array([[1.0, 0, 0], [1.0, 0, 0]])
        print(self.run(f, A))
    def test4(self):
        # Smoke test: matrix-valued right-hand side.
        f = np.array([7.0, 8.0]).reshape(1, 2)
        A = np.array([[1.0, 0]])
        print(self.run(f, A))
    def run(self, f, A):
        """Normalize the sensing problem in place, then dispatch to the
        configured solver (split Bregman or plain Bregman).
        """
        # normalize
        print("normalizing sensing matrix")
        # from scipy.sparse.linalg import eigsh
        """
        aA=eigsh(A.T.dot(A),k=6)[0].max()#the largest eigenvalue
        f/=np.sqrt(aA)
        # print norm(f)
        A/=np.sqrt(aA)
        # print norm(A)
        """
        # NOTE(review): heuristic stand-in for the largest eigenvalue of
        # A.T.dot(A) (see the disabled eigsh code above) — confirm it is an
        # upper bound for the intended convergence guarantee.
        aA = np.double(A.shape[0]**A.max().max())
        # maxeig(A.T.dot(A))
        f /= np.sqrt(aA)
        A /= np.sqrt(aA)
        """
        v=np.eye(len(A.T))-A.T.dot(A)
        for i in range(20):
            v=v.dot(v)
            print norm(v)
        """
        if self.split:
            return self.split_bregman(f, A)
        else:
            return self.bregman(f, A)
    def split_bregman(self, f, A):
        """Split Bregman iteration: alternate a CG solve of the smooth
        subproblem with shrinkage on the auxiliary variable d.
        """
        def cc(u1):
            # Per-CG-iteration progress callback (diagnostics only).
            print("CG error:", norm(u1 - self.bb.flatten()) / norm(self.bb))
            tt1 = g(u1, A, f, lam, d, mu, b)
            print("CG target:", (tt1 - self.tt) / self.tt)
            self.tt = tt1
            self.bb = u1
        def g(u, *args):
            # Smooth objective: data misfit + split penalty.
            A, f, lam, d, mu, b = args
            u = u.reshape(shape)
            return 1.0 / 2 * norm(np.dot(A, u) - f)**2 + \
                lam / 2.0 * norm(d - mu * u - b)**2
        def dg(u, *args):
            # Gradient of g, flattened for fmin_cg.
            A, f, lam, d, mu, b = args
            u = u.reshape(shape)
            return (A.T.dot(A.dot(u) - f) - lam * mu *
                    (d - b - mu * u)).flatten()
        u = self.initu(f, A)
        shape = u.shape
        d = np.zeros_like(u)
        b = np.zeros_like(u)
        deta = 0.001
        erru = 1.0
        lam = self.lam
        t = 1.0
        tt = 1.0
        self.tt = tt
        mu = self.mu
        # Rescale forces so the solver works on O(1000) magnitudes; undone
        # on return.
        scale = 1.0 / np.amax(np.abs(f)) * 1000.0
        print("scale=" + str(scale))
        f0 = np.zeros_like(f)
        self.bb = np.zeros_like(u)
        # f*=scale
        print('dimmensions:', A.shape, u.shape)
        while erru > deta:
            # g=lambda u:1.0/2*norm(dot(A,u.reshape(shape))-f)**2\
            # +lam/2.0*norm(d-mu*u.reshape(shape)-b)**2
            f1 = (f * scale - dot(A, u)) + (f0 + dot(A, u)) / 2
            u1 = optimize.fmin_cg(
                g,
                u,
                args=(A, f1, lam, d, mu, b),
                disp=True,
                fprime=dg,
                callback=cc,
                gtol=deta * 10).reshape(shape)
            # Shrinkage step on d, then Bregman update of b.
            d1 = shrink(mu * u1 + b, 1.0 / lam)
            b1 = b + mu * u1 - d1
            erru = norm(u1 - u) / norm(u)
            print('split bregman iteration error:', erru)
            b = b1
            u = u1
            d = d1
            f0 = f1
            t1 = norm(d, 1) + tt
            print('change of target func:', (t1 - t) / t)
            t = t1
        return u / scale
    def bregman(self, f, A):
        """Plain Bregman iteration: repeatedly refine u with FCP while
        adding back the residual to the right-hand side.
        """
        u = self.initu(f, A)
        f0 = np.zeros_like(f)
        deta = 0.0001
        erru = 1
        scale = 1000.0
        while erru > deta:
            f1 = f * scale + f0 - dot(A, u)
            u1 = self.FCP(f1, A, u)
            erru = norm(u1 - u) / norm(u)
            print('bregman iteration error:', erru)
            u = u1
            f0 = f1
        return u / scale
    def FCP(self, f, A, u=None):
        """Fixed-point continuation: gradient step followed by shrinkage,
        iterated until the relative change in u falls below the tolerance.
        """
        if u is None:
            u = self.initu(f, A)
        m, n = A.shape
        if len(f.shape) > 1:
            n *= list(f.shape)[1]
        ta = 1.99999  # min(1.999,max(1.1,-1.665*np.float(m)/n+2.665))
        mu = self.mu
        deta = 0.01
        # errg=1
        erru = 1
        while erru > deta:  # or errg>deta:
            p = np.dot(A, u) - f
            g = np.dot(A.T, p)
            u1 = shrink(u - ta * g, mu * ta)
            # errg=1.0/mu*norm(g,np.inf)-1
            erru = norm(u1 - u) / norm(u)
            print('FCP iteration :', erru)
            u = u1
        return u
| gpl-2.0 |
dudepare/django | tests/one_to_one/tests.py | 90 | 19540 | from __future__ import unicode_literals
from django.db import IntegrityError, connection, transaction
from django.test import TestCase
from .models import (
Bar, Director, Favorites, HiddenPointer, ManualPrimaryKey, MultiModel,
Place, Pointer, RelatedModel, Restaurant, School, Target, UndergroundBar,
Waiter,
)
class OneToOneTests(TestCase):
    def setUp(self):
        # p1 carries both a Restaurant and a Bar; p2 deliberately has neither.
        self.p1 = Place.objects.create(name='Demon Dogs', address='944 W. Fullerton')
        self.p2 = Place.objects.create(name='Ace Hardware', address='1013 N. Ashland')
        self.r1 = Restaurant.objects.create(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
        self.b1 = Bar.objects.create(place=self.p1, serves_cocktails=False)
    def test_getter(self):
        # A Restaurant can access its place.
        self.assertEqual(repr(self.r1.place), '<Place: Demon Dogs the place>')
        # A Place can access its restaurant, if available.
        self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
        # p2 doesn't have an associated restaurant.
        with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
            self.p2.restaurant
        # The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of `AttributeError`
        # refs #21563
        self.assertFalse(hasattr(self.p2, 'restaurant'))
    def test_setter(self):
        # Set the place using assignment notation. Because place is the primary
        # key on Restaurant, the save will create a new restaurant
        self.r1.place = self.p2
        self.r1.save()
        self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
        self.assertEqual(repr(self.r1.place), '<Place: Ace Hardware the place>')
        self.assertEqual(self.p2.pk, self.r1.pk)
        # Set the place back again, using assignment in the reverse direction.
        self.p1.restaurant = self.r1
        self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
        r = Restaurant.objects.get(pk=self.p1.id)
        self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
    def test_manager_all(self):
        # Restaurant.objects.all() just returns the Restaurants, not the Places.
        self.assertQuerysetEqual(Restaurant.objects.all(), [
            '<Restaurant: Demon Dogs the restaurant>',
        ])
        # Place.objects.all() returns all Places, regardless of whether they
        # have Restaurants.
        self.assertQuerysetEqual(Place.objects.order_by('name'), [
            '<Place: Ace Hardware the place>',
            '<Place: Demon Dogs the place>',
        ])
    def test_manager_get(self):
        # Every spelling of the lookup (id/pk/instance, forward and reverse)
        # should resolve to the same single object.
        def assert_get_restaurant(**params):
            self.assertEqual(repr(Restaurant.objects.get(**params)),
                             '<Restaurant: Demon Dogs the restaurant>')
        assert_get_restaurant(place__id__exact=self.p1.pk)
        assert_get_restaurant(place__id=self.p1.pk)
        assert_get_restaurant(place__exact=self.p1.pk)
        assert_get_restaurant(place__exact=self.p1)
        assert_get_restaurant(place=self.p1.pk)
        assert_get_restaurant(place=self.p1)
        assert_get_restaurant(pk=self.p1.pk)
        assert_get_restaurant(place__pk__exact=self.p1.pk)
        assert_get_restaurant(place__pk=self.p1.pk)
        assert_get_restaurant(place__name__startswith="Demon")
        def assert_get_place(**params):
            self.assertEqual(repr(Place.objects.get(**params)),
                             '<Place: Demon Dogs the place>')
        assert_get_place(restaurant__place__exact=self.p1.pk)
        assert_get_place(restaurant__place__exact=self.p1)
        assert_get_place(restaurant__place__pk=self.p1.pk)
        assert_get_place(restaurant__exact=self.p1.pk)
        assert_get_place(restaurant__exact=self.r1)
        assert_get_place(restaurant__pk=self.p1.pk)
        assert_get_place(restaurant=self.p1.pk)
        assert_get_place(restaurant=self.r1)
        assert_get_place(id__exact=self.p1.pk)
        assert_get_place(pk=self.p1.pk)
    def test_foreign_key(self):
        # Add a Waiter to the Restaurant.
        w = self.r1.waiter_set.create(name='Joe')
        self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
        # Query the waiters
        def assert_filter_waiters(**params):
            self.assertQuerysetEqual(Waiter.objects.filter(**params), [
                '<Waiter: Joe the waiter at Demon Dogs the restaurant>'
            ])
        assert_filter_waiters(restaurant__place__exact=self.p1.pk)
        assert_filter_waiters(restaurant__place__exact=self.p1)
        assert_filter_waiters(restaurant__place__pk=self.p1.pk)
        assert_filter_waiters(restaurant__exact=self.r1.pk)
        assert_filter_waiters(restaurant__exact=self.r1)
        assert_filter_waiters(restaurant__pk=self.r1.pk)
        assert_filter_waiters(restaurant=self.r1.pk)
        assert_filter_waiters(restaurant=self.r1)
        assert_filter_waiters(id__exact=w.pk)
        assert_filter_waiters(pk=w.pk)
        # Delete the restaurant; the waiter should also be removed
        r = Restaurant.objects.get(pk=self.r1.pk)
        r.delete()
        self.assertEqual(Waiter.objects.count(), 0)
    def test_multiple_o2o(self):
        # One-to-one fields still work if you create your own primary key
        o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
        o1.save()
        o2 = RelatedModel(link=o1, name="secondary")
        o2.save()
        # You can have multiple one-to-one fields on a model, too.
        x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
        x1.save()
        self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
        # This will fail because each one-to-one field must be unique (and
        # link2=o1 was used for x1, above).
        mm = MultiModel(link1=self.p2, link2=o1, name="x1")
        with self.assertRaises(IntegrityError):
            with transaction.atomic():
                mm.save()
    def test_unsaved_object(self):
        """
        #10811 -- Assigning an unsaved object to a OneToOneField
        should raise an exception.
        """
        place = Place(name='User', address='London')
        msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
        with self.assertRaisesMessage(ValueError, msg):
            Restaurant.objects.create(place=place, serves_hot_dogs=True, serves_pizza=False)
    def test_reverse_relationship_cache_cascade(self):
        """
        Regression test for #9023: accessing the reverse relationship shouldn't
        result in a cascading delete().
        """
        bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
        # The bug in #9023: if you access the one-to-one relation *before*
        # setting to None and deleting, the cascade happens anyway.
        self.p1.undergroundbar
        bar.place.name = 'foo'
        bar.place = None
        bar.save()
        self.p1.delete()
        self.assertEqual(Place.objects.all().count(), 1)
        self.assertEqual(UndergroundBar.objects.all().count(), 1)
    def test_create_models_m2m(self):
        """
        Regression test for #1064 and #1506
        Check that we create models via the m2m relation if the remote model
        has a OneToOneField.
        """
        f = Favorites(name='Fred')
        f.save()
        f.restaurants = [self.r1]
        self.assertQuerysetEqual(
            f.restaurants.all(),
            ['<Restaurant: Demon Dogs the restaurant>']
        )
    def test_reverse_object_cache(self):
        """
        Regression test for #7173
        Check that the name of the cache for the reverse object is correct.
        """
        self.assertEqual(self.p1.restaurant, self.r1)
        self.assertEqual(self.p1.bar, self.b1)
    def test_related_object_cache(self):
        """ Regression test for #6886 (the related-object cache) """
        # Look up the objects again so that we get "fresh" objects
        p = Place.objects.get(name="Demon Dogs")
        r = p.restaurant
        # Accessing the related object again returns the exactly same object
        self.assertIs(p.restaurant, r)
        # But if we kill the cache, we get a new object
        del p._restaurant_cache
        self.assertIsNot(p.restaurant, r)
        # Reassigning the Restaurant object results in an immediate cache update
        # We can't use a new Restaurant because that'll violate one-to-one, but
        # with a new *instance* the is test below will fail if #6886 regresses.
        r2 = Restaurant.objects.get(pk=r.pk)
        p.restaurant = r2
        self.assertIs(p.restaurant, r2)
        # Assigning None succeeds if field is null=True.
        ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
        ug_bar.place = None
        self.assertIsNone(ug_bar.place)
        # Assigning None fails: Place.restaurant is null=False
        self.assertRaises(ValueError, setattr, p, 'restaurant', None)
        # You also can't assign an object of the wrong type here
        self.assertRaises(ValueError, setattr, p, 'restaurant', p)
        # Creation using keyword argument should cache the related object.
        p = Place.objects.get(name="Demon Dogs")
        r = Restaurant(place=p)
        self.assertIs(r.place, p)
        # Creation using keyword argument and unsaved related instance (#8070).
        p = Place()
        r = Restaurant(place=p)
        self.assertTrue(r.place is p)
        # Creation using attname keyword argument and an id will cause the related
        # object to be fetched.
        p = Place.objects.get(name="Demon Dogs")
        r = Restaurant(place_id=p.id)
        self.assertIsNot(r.place, p)
        self.assertEqual(r.place, p)
    def test_filter_one_to_one_relations(self):
        """
        Regression test for #9968
        filtering reverse one-to-one relations with primary_key=True was
        misbehaving. We test both (primary_key=True & False) cases here to
        prevent any reappearance of the problem.
        """
        Target.objects.create()
        self.assertQuerysetEqual(
            Target.objects.filter(pointer=None),
            ['<Target: Target object>']
        )
        self.assertQuerysetEqual(
            Target.objects.exclude(pointer=None),
            []
        )
        self.assertQuerysetEqual(
            Target.objects.filter(second_pointer=None),
            ['<Target: Target object>']
        )
        self.assertQuerysetEqual(
            Target.objects.exclude(second_pointer=None),
            []
        )
    def test_o2o_primary_key_delete(self):
        # Deleting through a filter on the o2o primary key reports the
        # deleted-object counts per model label.
        t = Target.objects.create(name='name')
        Pointer.objects.create(other=t)
        num_deleted, objs = Pointer.objects.filter(other__name='name').delete()
        self.assertEqual(num_deleted, 1)
        self.assertEqual(objs, {'one_to_one.Pointer': 1})
    def test_reverse_object_does_not_exist_cache(self):
        """
        Regression for #13839 and #17439.
        DoesNotExist on a reverse one-to-one relation is cached.
        """
        p = Place(name='Zombie Cats', address='Not sure')
        p.save()
        with self.assertNumQueries(1):
            with self.assertRaises(Restaurant.DoesNotExist):
                p.restaurant
        with self.assertNumQueries(0):
            with self.assertRaises(Restaurant.DoesNotExist):
                p.restaurant
    def test_reverse_object_cached_when_related_is_accessed(self):
        """
        Regression for #13839 and #17439.
        The target of a one-to-one relation is cached
        when the origin is accessed through the reverse relation.
        """
        # Use a fresh object without caches
        r = Restaurant.objects.get(pk=self.r1.pk)
        p = r.place
        with self.assertNumQueries(0):
            self.assertEqual(p.restaurant, r)
    def test_related_object_cached_when_reverse_is_accessed(self):
        """
        Regression for #13839 and #17439.
        The origin of a one-to-one relation is cached
        when the target is accessed through the reverse relation.
        """
        # Use a fresh object without caches
        p = Place.objects.get(pk=self.p1.pk)
        r = p.restaurant
        with self.assertNumQueries(0):
            self.assertEqual(r.place, p)
    def test_reverse_object_cached_when_related_is_set(self):
        """
        Regression for #13839 and #17439.
        The target of a one-to-one relation is always cached.
        """
        p = Place(name='Zombie Cats', address='Not sure')
        p.save()
        self.r1.place = p
        self.r1.save()
        with self.assertNumQueries(0):
            self.assertEqual(p.restaurant, self.r1)
    def test_reverse_object_cached_when_related_is_unset(self):
        """
        Regression for #13839 and #17439.
        The target of a one-to-one relation is always cached.
        """
        b = UndergroundBar(place=self.p1, serves_cocktails=True)
        b.save()
        with self.assertNumQueries(0):
            self.assertEqual(self.p1.undergroundbar, b)
        b.place = None
        b.save()
        with self.assertNumQueries(0):
            with self.assertRaises(UndergroundBar.DoesNotExist):
                self.p1.undergroundbar
    def test_get_reverse_on_unsaved_object(self):
        """
        Regression for #18153 and #19089.
        Accessing the reverse relation on an unsaved object
        always raises an exception.
        """
        p = Place()
        # When there's no instance of the origin of the one-to-one
        with self.assertNumQueries(0):
            with self.assertRaises(UndergroundBar.DoesNotExist):
                p.undergroundbar
        UndergroundBar.objects.create()
        # When there's one instance of the origin
        # (p.undergroundbar used to return that instance)
        with self.assertNumQueries(0):
            with self.assertRaises(UndergroundBar.DoesNotExist):
                p.undergroundbar
        # Several instances of the origin are only possible if database allows
        # inserting multiple NULL rows for a unique constraint
        if connection.features.supports_nullable_unique_constraints:
            UndergroundBar.objects.create()
            # When there are several instances of the origin
            with self.assertNumQueries(0):
                with self.assertRaises(UndergroundBar.DoesNotExist):
                    p.undergroundbar
    def test_set_reverse_on_unsaved_object(self):
        """
        Writing to the reverse relation on an unsaved object
        is impossible too.
        """
        p = Place()
        b = UndergroundBar.objects.create()
        msg = (
            'Cannot assign "<UndergroundBar: UndergroundBar object>": "Place" '
            'instance isn\'t saved in the database.'
        )
        with self.assertNumQueries(0):
            with self.assertRaisesMessage(ValueError, msg):
                p.undergroundbar = b
    def test_nullable_o2o_delete(self):
        # Unlinking a nullable o2o before deleting the target must not
        # cascade to the formerly-related row.
        u = UndergroundBar.objects.create(place=self.p1)
        u.place_id = None
        u.save()
        self.p1.delete()
        self.assertTrue(UndergroundBar.objects.filter(pk=u.pk).exists())
        self.assertIsNone(UndergroundBar.objects.get(pk=u.pk).place)
    def test_hidden_accessor(self):
        """
        When a '+' ending related name is specified no reverse accessor should
        be added to the related model.
        """
        self.assertFalse(
            hasattr(Target, HiddenPointer._meta.get_field('target').remote_field.get_accessor_name())
        )
    def test_related_object(self):
        public_school = School.objects.create(is_public=True)
        public_director = Director.objects.create(school=public_school, is_temp=False)
        private_school = School.objects.create(is_public=False)
        private_director = Director.objects.create(school=private_school, is_temp=True)
        # Only one school is available via all() due to the custom default manager.
        self.assertQuerysetEqual(
            School.objects.all(),
            ["<School: School object>"]
        )
        # Only one director is available via all() due to the custom default manager.
        self.assertQuerysetEqual(
            Director.objects.all(),
            ["<Director: Director object>"]
        )
        self.assertEqual(public_director.school, public_school)
        self.assertEqual(public_school.director, public_director)
        # Make sure the base manager is used so that the related objects
        # is still accessible even if the default manager doesn't normally
        # allow it.
        self.assertEqual(private_director.school, private_school)
        # Make sure the base manager is used so that an student can still access
        # its related school even if the default manager doesn't normally
        # allow it.
        self.assertEqual(private_school.director, private_director)
        # If the manager is marked "use_for_related_fields", it'll get used instead
        # of the "bare" queryset. Usually you'd define this as a property on the class,
        # but this approximates that in a way that's easier in tests.
        School.objects.use_for_related_fields = True
        try:
            private_director = Director._base_manager.get(pk=private_director.pk)
            self.assertRaises(School.DoesNotExist, lambda: private_director.school)
        finally:
            School.objects.use_for_related_fields = False
        Director.objects.use_for_related_fields = True
        try:
            private_school = School._base_manager.get(pk=private_school.pk)
            self.assertRaises(Director.DoesNotExist, lambda: private_school.director)
        finally:
            Director.objects.use_for_related_fields = False
    def test_hasattr_related_object(self):
        # The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of `AttributeError`
        # refs #21563
        self.assertFalse(hasattr(Director(), 'director'))
        self.assertFalse(hasattr(School(), 'school'))
    def test_update_one_to_one_pk(self):
        # Bulk update() of a FK-to-o2o-pk relation must be reflected on
        # refresh_from_db().
        p1 = Place.objects.create()
        p2 = Place.objects.create()
        r1 = Restaurant.objects.create(place=p1)
        r2 = Restaurant.objects.create(place=p2)
        w = Waiter.objects.create(restaurant=r1)
        Waiter.objects.update(restaurant=r2)
        w.refresh_from_db()
        self.assertEqual(w.restaurant, r2)
    def test_rel_pk_subquery(self):
        r = Restaurant.objects.first()
        q1 = Restaurant.objects.filter(place_id=r.pk)
        # Test that subquery using primary key and a query against the
        # same model works correctly.
        q2 = Restaurant.objects.filter(place_id__in=q1)
        self.assertQuerysetEqual(q2, [r], lambda x: x)
        # Test that subquery using 'pk__in' instead of 'place_id__in' work, too.
        q2 = Restaurant.objects.filter(
            pk__in=Restaurant.objects.filter(place__id=r.place.pk)
        )
        self.assertQuerysetEqual(q2, [r], lambda x: x)
    def test_rel_pk_exact(self):
        # A model instance can be used directly in a pk__exact lookup.
        r = Restaurant.objects.first()
        r2 = Restaurant.objects.filter(pk__exact=r).first()
        self.assertEqual(r, r2)
| bsd-3-clause |
angelapper/edx-platform | common/test/acceptance/tests/test_cohorted_courseware.py | 17 | 12136 | """
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
from bok_choy.page_object import XSS_INJECTION
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.utils import add_enrollment_course_modes, enroll_user_track
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.xblock_editor import XBlockVisibilityEditorView
from common.test.acceptance.tests.discussion.helpers import CohortTestMixin
from common.test.acceptance.tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from studio.base_studio_test import ContainerBase
# Display names of the enrollment-track course modes created in setUp and
# used when restricting problem visibility by track.
AUDIT_TRACK = "Audit"
VERIFIED_TRACK = "Verified"
@attr(shard=5)
class EndToEndCohortedCoursewareTest(ContainerBase, CohortTestMixin):
    """
    End-to-end of cohorted courseware.

    Exercises Studio (content groups, visibility editing) and the LMS
    (cohorts, enrollment tracks, courseware rendering) together.
    """
    def setUp(self, is_staff=True):
        super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
        self.staff_user = self.user
        # XSS_INJECTION is appended so rendering these names also acts as an
        # XSS regression check.
        self.content_group_a = "Content Group A" + XSS_INJECTION
        self.content_group_b = "Content Group B" + XSS_INJECTION
        # Creates the Course modes needed to test enrollment tracks
        add_enrollment_course_modes(self.browser, self.course_id, ["audit", "verified"])
        # Create a student who will be in "Cohort A"
        self.cohort_a_student_username = "cohort_a_student"
        self.cohort_a_student_email = "cohort_a_student@example.com"
        AutoAuthPage(
            self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
        ).visit()
        # Create a student who will be in "Cohort B"
        self.cohort_b_student_username = "cohort_b_student"
        self.cohort_b_student_email = "cohort_b_student@example.com"
        AutoAuthPage(
            self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
        ).visit()
        # Create a Verified Student
        self.cohort_verified_student_username = "cohort_verified_student"
        self.cohort_verified_student_email = "cohort_verified_student@example.com"
        AutoAuthPage(
            self.browser,
            username=self.cohort_verified_student_username,
            email=self.cohort_verified_student_email,
            no_login=True
        ).visit()
        # Create audit student
        self.cohort_audit_student_username = "cohort_audit_student"
        self.cohort_audit_student_email = "cohort_audit_student@example.com"
        AutoAuthPage(
            self.browser,
            username=self.cohort_audit_student_username,
            email=self.cohort_audit_student_email,
            no_login=True
        ).visit()
        # Create a student who will end up in the default cohort group
        self.cohort_default_student_username = "cohort_default_student"
        self.cohort_default_student_email = "cohort_default_student@example.com"
        AutoAuthPage(
            self.browser, username=self.cohort_default_student_username,
            email=self.cohort_default_student_email, no_login=True
        ).visit()
        # Start logged in as the staff user.
        AutoAuthPage(
            self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
        ).visit()
    def populate_course_fixture(self, course_fixture):
        """
        Populate the children of the test course fixture.

        Creates one unit holding six problems whose visibility is later
        restricted per content group / enrollment track (one stays visible
        to everyone).
        """
        self.group_a_problem = 'GROUP A CONTENT'
        self.group_b_problem = 'GROUP B CONTENT'
        self.group_verified_problem = 'GROUP VERIFIED CONTENT'
        self.group_audit_problem = 'GROUP AUDIT CONTENT'
        self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
        self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_verified_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_audit_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
                        XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
                    )
                )
            )
        )
    def create_content_groups(self):
        """
        Creates two content groups in Studio Group Configurations Settings.
        """
        group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        group_configurations_page.visit()
        group_configurations_page.create_first_content_group()
        config = group_configurations_page.content_groups[0]
        config.name = self.content_group_a
        config.save()
        group_configurations_page.add_content_group()
        config = group_configurations_page.content_groups[1]
        config.name = self.content_group_b
        config.save()
    def link_problems_to_content_groups_and_publish(self):
        """
        Updates 5 of the 6 existing problems to limit their visibility by content group.
        Publishes the modified units.
        """
        container_page = self.go_to_unit_page()
        enrollment_group = 'enrollment_track_group'
        def set_visibility(problem_index, groups, group_partition='content_group'):
            # Restrict the problem at problem_index to `groups` within the
            # chosen partition scheme (content group or enrollment track).
            problem = container_page.xblocks[problem_index]
            problem.edit_visibility()
            visibility_dialog = XBlockVisibilityEditorView(self.browser, problem.locator)
            partition_name = (visibility_dialog.ENROLLMENT_TRACK_PARTITION
                              if group_partition == enrollment_group
                              else visibility_dialog.CONTENT_GROUP_PARTITION)
            visibility_dialog.select_groups_in_partition_scheme(partition_name, groups)
        # Index 0 (the "visible to all" problem) is deliberately left alone.
        set_visibility(1, [self.content_group_a])
        set_visibility(2, [self.content_group_b])
        set_visibility(3, [VERIFIED_TRACK], enrollment_group)
        set_visibility(4, [AUDIT_TRACK], enrollment_group)
        set_visibility(5, [self.content_group_a, self.content_group_b])
        container_page.publish_action.click()
    def create_cohorts_and_assign_students(self):
        """
        Adds 2 manual cohorts, linked to content groups, to the course.
        Each cohort is assigned one student.
        """
        instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
        instructor_dashboard_page.visit()
        cohort_management_page = instructor_dashboard_page.select_cohort_management()
        def add_cohort_with_student(cohort_name, content_group, student):
            cohort_management_page.add_cohort(cohort_name, content_group=content_group)
            cohort_management_page.add_students_to_selected_cohort([student])
        add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
        add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
    def view_cohorted_content_as_different_users(self):
        """
        View content as staff, student in Cohort A, student in Cohort B, Verified Student, Audit student,
        and student in Default Cohort.
        """
        courseware_page = CoursewarePage(self.browser, self.course_id)
        def login_and_verify_visible_problems(username, email, expected_problems, track=None):
            # Log in (optionally enrolling in a track first) and assert that
            # exactly `expected_problems` render in the courseware.
            AutoAuthPage(
                self.browser, username=username, email=email, course_id=self.course_id
            ).visit()
            if track is not None:
                enroll_user_track(self.browser, self.course_id, track)
            courseware_page.visit()
            verify_expected_problem_visibility(self, courseware_page, expected_problems)
        login_and_verify_visible_problems(
            self.staff_user["username"], self.staff_user["email"],
            [self.group_a_problem,
             self.group_b_problem,
             self.group_verified_problem,
             self.group_audit_problem,
             self.group_a_and_b_problem,
             self.visible_to_all_problem
             ],
        )
        login_and_verify_visible_problems(
            self.cohort_a_student_username, self.cohort_a_student_email,
            [self.group_a_problem, self.group_audit_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )
        login_and_verify_visible_problems(
            self.cohort_b_student_username, self.cohort_b_student_email,
            [self.group_b_problem, self.group_audit_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
        )
        login_and_verify_visible_problems(
            self.cohort_verified_student_username, self.cohort_verified_student_email,
            [self.group_verified_problem, self.visible_to_all_problem],
            'verified'
        )
        login_and_verify_visible_problems(
            self.cohort_audit_student_username, self.cohort_audit_student_email,
            [self.group_audit_problem, self.visible_to_all_problem],
            'audit'
        )
        login_and_verify_visible_problems(
            self.cohort_default_student_username, self.cohort_default_student_email,
            [self.group_audit_problem, self.visible_to_all_problem],
        )
    def test_cohorted_courseware(self):
        """
        Scenario: Can create content that is only visible to students in particular cohorts
        Given that I have course with 6 problems, 1 staff member, and 6 students
        When I enable cohorts in the course
        And I add the Course Modes for Verified and Audit
        And I create two content groups, Content Group A, and Content Group B, in the course
        And I link one problem to Content Group A
        And I link one problem to Content Group B
        And I link one problem to the Verified Group
        And I link one problem to the Audit Group
        And I link one problem to both Content Group A and Content Group B
        And one problem remains unlinked to any Content Group
        And I create two manual cohorts, Cohort A and Cohort B,
        linked to Content Group A and Content Group B, respectively
        And I assign one student to each manual cohort
        And I assign one student to each enrollment track
        And one student remains in the default cohort
        Then the staff member can see all 6 problems
        And the student in Cohort A can see all the problems linked to A
        And the student in Cohort B can see all the problems linked to B
        And the student in Verified can see the problems linked to Verified and those not linked to a Group
        And the student in Audit can see the problems linked to Audit and those not linked to a Group
        And the student in the default cohort can ony see the problem that is unlinked to any Content Group
        """
        self.enable_cohorting(self.course_fixture)
        self.create_content_groups()
        self.link_problems_to_content_groups_and_publish()
        self.create_cohorts_and_assign_students()
        self.view_cohorted_content_as_different_users()
| agpl-3.0 |
oliver-sanders/cylc | cylc/flow/task_message.py | 1 | 5618 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Allow a task job to record its messages.
Send task job messages to:
- The stdout/stderr.
- The job status file, if there is one.
- The suite server program, if communication is possible.
"""
from logging import getLevelName, WARNING, ERROR, CRITICAL
import os
import sys
import cylc.flow.flags
from cylc.flow.network.client import SuiteRuntimeClient
from cylc.flow.pathutil import get_suite_run_job_dir
from cylc.flow.task_outputs import TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED
from cylc.flow.wallclock import get_current_time_string
# Keys written to the job status file.
CYLC_JOB_PID = "CYLC_JOB_PID"
CYLC_JOB_INIT_TIME = "CYLC_JOB_INIT_TIME"
CYLC_JOB_EXIT = "CYLC_JOB_EXIT"
CYLC_JOB_EXIT_TIME = "CYLC_JOB_EXIT_TIME"
CYLC_MESSAGE = "CYLC_MESSAGE"
# Message prefixes carrying the job exit reason.
ABORT_MESSAGE_PREFIX = "aborted/"
FAIL_MESSAGE_PREFIX = "failed/"
VACATION_MESSAGE_PREFIX = "vacated/"
# Severity names whose messages are echoed to stderr rather than stdout.
# BUG FIX: this was a generator expression, which is exhausted after the
# first "in" membership test, so from the second message of a call onward
# every severity tested as "not stderr".  A tuple supports repeated tests.
STDERR_LEVELS = tuple(getLevelName(level) for level in (WARNING, ERROR, CRITICAL))
def record_messages(suite, task_job, messages):
    """Record task job messages.

    Each message is echoed to the terminal (stderr for WARNING and above,
    stdout otherwise), appended to the job status file, and forwarded to
    the suite server program when a connection can be established.

    Arguments:
        suite (str): Suite name.
        task_job (str): Task job identifier "CYCLE/TASK_NAME/SUBMIT_NUM".
        messages (list): List of messages "[[severity, message], ...]".
    """
    # Capture the event time up front, in case delivery is delayed.
    event_time = get_current_time_string(
        override_use_utc=(os.getenv('CYLC_UTC') == 'True'))
    # Echo each message to the terminal.
    for severity, message in messages:
        stream = sys.stderr if severity in STDERR_LEVELS else sys.stdout
        stream.write('%s %s - %s\n' % (event_time, severity, message))
        stream.flush()
    # Append the messages to the job status file.
    _append_job_status_file(suite, task_job, event_time, messages)
    # Forward the messages to the suite server, if it is reachable.
    try:
        pclient = SuiteRuntimeClient(suite)
    except Exception:
        # Backward communication not possible; best effort only.
        if cylc.flow.flags.debug:
            import traceback
            traceback.print_exc()
    else:
        pclient(
            'put_messages',
            {'task_job': task_job, 'event_time': event_time,
             'messages': messages}
        )
def _append_job_status_file(suite, task_job, event_time, messages):
    """Write messages to job status file.

    Appends "KEY=VALUE" records to "<job log root>.status".  Failure to
    open the file is tolerated (best effort) - the function just returns.
    """
    # CYLC_TASK_LOG_ROOT is normally exported by the job script; fall back
    # to the standard suite run directory layout when it is unset/empty.
    job_log_name = os.getenv('CYLC_TASK_LOG_ROOT')
    if not job_log_name:
        job_log_name = get_suite_run_job_dir(suite, task_job, 'job')
    try:
        job_status_file = open(job_log_name + '.status', 'a')
    except IOError:
        # Status file unwritable: log in debug mode, otherwise give up quietly.
        if cylc.flow.flags.debug:
            import traceback
            traceback.print_exc()
        return
    for severity, message in messages:
        if message == TASK_OUTPUT_STARTED:
            # Job has started: record the job PID and init time.
            job_id = os.getppid()
            if job_id > 1:
                # If os.getppid() returns 1, the original job process
                # is likely killed already
                job_status_file.write('%s=%s\n' % (CYLC_JOB_PID, job_id))
            job_status_file.write('%s=%s\n' % (CYLC_JOB_INIT_TIME, event_time))
        elif message == TASK_OUTPUT_SUCCEEDED:
            # Normal completion: record exit status and exit time.
            job_status_file.write(
                ('%s=%s\n' % (CYLC_JOB_EXIT, TASK_OUTPUT_SUCCEEDED.upper())) +
                ('%s=%s\n' % (CYLC_JOB_EXIT_TIME, event_time)))
        elif message.startswith(FAIL_MESSAGE_PREFIX):
            # Failure: the text after the prefix is the exit reason.
            job_status_file.write(
                ('%s=%s\n' % (
                    CYLC_JOB_EXIT,
                    message[len(FAIL_MESSAGE_PREFIX):])) +
                ('%s=%s\n' % (CYLC_JOB_EXIT_TIME, event_time)))
        elif message.startswith(ABORT_MESSAGE_PREFIX):
            # Abort: same record shape as failure, reason after the prefix.
            job_status_file.write(
                ('%s=%s\n' % (
                    CYLC_JOB_EXIT,
                    message[len(ABORT_MESSAGE_PREFIX):])) +
                ('%s=%s\n' % (CYLC_JOB_EXIT_TIME, event_time)))
        elif message.startswith(VACATION_MESSAGE_PREFIX):
            # Job vacated, remove entries related to current job
            # (rewrite the file without CYLC_JOB_* lines, then log the message).
            job_status_file_name = job_status_file.name
            job_status_file.close()
            lines = []
            for line in open(job_status_file_name):
                if not line.startswith('CYLC_JOB_'):
                    lines.append(line)
            job_status_file = open(job_status_file_name, 'w')
            for line in lines:
                job_status_file.write(line)
            job_status_file.write('%s=%s|%s|%s\n' % (
                CYLC_MESSAGE, event_time, severity, message))
        else:
            # Ordinary message: time|severity|text.
            job_status_file.write('%s=%s|%s|%s\n' % (
                CYLC_MESSAGE, event_time, severity, message))
    try:
        job_status_file.close()
    except IOError:
        if cylc.flow.flags.debug:
            import traceback
            traceback.print_exc()
| gpl-3.0 |
gnuhub/intellij-community | python/lib/Lib/ctypes/__init__.py | 110 | 7851 | import jffi
__version__ = "0.0.1"
# Map of struct-module style single-character type codes to JFFI types.
_TypeMap = {
    'b': jffi.Type.BYTE,
    'B': jffi.Type.UBYTE,
    'h': jffi.Type.SHORT,
    'H': jffi.Type.USHORT,
    'i': jffi.Type.INT,
    'I': jffi.Type.UINT,
    'l': jffi.Type.LONG,
    'L': jffi.Type.ULONG,
    'q': jffi.Type.LONGLONG,
    'Q': jffi.Type.ULONGLONG,
    'f': jffi.Type.FLOAT,
    'd': jffi.Type.DOUBLE,
    '?': jffi.Type.BOOL,
    'z': jffi.Type.STRING,
    'P': jffi.Type.POINTER
}
class _CTypeMetaClass(type):
    """Metaclass for scalar C types.

    Implements the ``c_int * 10`` idiom by synthesising a new array
    CData class of the requested length.
    """
    def __new__(cls, name, bases, dict):
        return type.__new__(cls, name, bases, dict)
    def __mul__(self, len):
        # Build a fixed-length array type of this scalar element type.
        dict = { '_jffi_type': jffi.Type.Array(self, len) }
        # Look back up the stack frame to find out the module this new type is declared in
        import inspect
        mod = inspect.getmodule(inspect.stack()[1][0])
        if mod is None:
            name = "__main__"
        else:
            name = mod.__name__
        dict["__module__"] = name
        return type("%s_Array_%d" % (self.__name__, len), (jffi.ArrayCData, _ArrayCData, _CData), dict)
class _CData(object):
    """Common base class for all ctypes data objects in this module."""
    @classmethod
    def in_dll(self, lib, name):
        # Instance backed by the memory of the named symbol in an open library.
        return self.from_address(lib[name])
    @classmethod
    def size(self):
        # Size in bytes of this C type, as reported by the JFFI layout.
        return self._jffi_type.size()
class _ScalarCData(jffi.ScalarCData, _CData):
    # _CTypeMetaClass provides array-type creation via e.g. ``c_int * 4``.
    __metaclass__ = _CTypeMetaClass
class _ArrayCData(object):
    """Mixin giving array CData classes a len() equal to the element count."""
    def __len__(self):
        return self._jffi_type.length
class _StructLayoutBuilder(object):
def __init__(self, union = False):
self.size = 0
self.offset = 0
self.fields = []
self.union = union
def align(self, offset, align):
return align + ((offset - 1) & ~(align - 1));
def add_fields(self, fields):
for f in fields:
self.add_field(f)
return self
def add_field(self, f):
if not issubclass(f[1], _ScalarCData):
raise RuntimeError("non-scalar fields not supported")
if len(f) != 2:
raise RuntimeError("structs with bitfields not supported")
self.offset = self.align(self.offset, alignment(f[1]))
self.fields.append(jffi.StructLayout.ScalarField(f[0], f[1], self.offset))
if not self.union:
self.offset += sizeof(f[1])
self.size = max(self.offset, sizeof(f[1]))
return self
def build(self):
return jffi.StructLayout(fields = self.fields, union = self.union)
class _AggregateMetaClass(type):
    """Metaclass machinery shared by Structure and Union.

    Builds a jffi StructLayout from the class's _fields_ declaration and
    exposes each field as a class attribute so instances can use ``obj.foo``.
    """
    @staticmethod
    def __new_aggregate__(cls, name, bases, dict, union = False):
        if '_fields_' in dict:
            layout = dict['_jffi_type'] = _StructLayoutBuilder(union).add_fields(dict['_fields_']).build()
            # make all fields accessible via .foo
            for f in dict['_fields_']:
                dict[f[0]] = layout[f[0]]
            dict['__fields_'] = dict['_fields_']
        else:
            # Fields may still be assigned later via the _fields_ property.
            dict['__fields_'] = []
        if '_pack_' in dict:
            raise NotImplementedError("struct packing not implemented")
        if '_anonymous_' in dict:
            raise NotImplementedError("anonymous fields not implemented")
        return type.__new__(cls, name, bases, dict)
    def get_fields(self):
        # NOTE(review): private-name mangling makes this read
        # _AggregateMetaClass__fields_, whereas __new_aggregate__ stores the
        # literal key '__fields_'; reading _fields_ before set_fields has run
        # may therefore fail -- confirm against jffi's attribute handling.
        return self.__fields_
    def set_fields(self, fields):
        # BUG FIX: was issubclass(Union, self), which is False for every
        # user-defined Union subclass, so fields assigned after class
        # creation always produced a sequential (struct) layout.  The
        # intended test is "is this class a Union subclass".
        layout = _StructLayoutBuilder(union = issubclass(self, Union)).add_fields(fields).build()
        self.__fields_ = fields
        self._jffi_type = layout
        # make all fields accessible via .foo
        for f in fields:
            setattr(self, f[0], layout[f[0]])
    _fields_ = property(get_fields, set_fields)
    # Make _pack_ and _anonymous_ throw errors if anyone tries to use them
    _pack_ = property(None)
    _anonymous_ = property(None)
class _StructMetaClass(_AggregateMetaClass):
    """Metaclass for Structure subclasses (sequential field layout)."""
    def __new__(cls, name, bases, dict):
        return _AggregateMetaClass.__new_aggregate__(cls, name, bases, dict, union = False)
class _UnionMetaClass(_AggregateMetaClass):
    """Metaclass for Union subclasses (overlapping field layout)."""
    def __new__(cls, name, bases, dict):
        return _AggregateMetaClass.__new_aggregate__(cls, name, bases, dict, union = True)
class Structure(jffi.Structure, _CData):
    """C struct; declare members via the _fields_ class attribute."""
    __metaclass__ = _StructMetaClass
class Union(jffi.Structure, _CData):
    """C union; declare members via the _fields_ class attribute."""
    __metaclass__ = _UnionMetaClass
def sizeof(type):
    """Return the size in bytes of a ctypes type.

    Raises TypeError when the argument carries no JFFI type information.
    """
    jffi_type = getattr(type, '_jffi_type', None)
    if jffi_type is None:
        raise TypeError("this type has no size")
    return jffi_type.size()
def alignment(type):
    # Natural alignment, in bytes, of the given ctypes type.
    return type._jffi_type.alignment()
def addressof(cdata):
    # Native memory address of the cdata instance.
    return cdata.address()
def byref(cdata, offset = 0):
    # Lightweight pointer-like reference (optionally offset) for call arguments.
    return cdata.byref(offset)
def pointer(cdata):
    # New pointer instance of type POINTER(type(cdata)) pointing at cdata.
    return cdata.pointer(POINTER(cdata.__class__))
# Raw memory operations re-exported from jffi.
memmove = jffi.memmove
memset = jffi.memset
# Cache of pointer classes, keyed by the pointed-to ctype.
_pointer_type_cache = {}
def POINTER(ctype):
    """Return (creating and caching on first use) the pointer class for ctype."""
    # If a pointer class for the C type has been created, re-use it
    if ctype in _pointer_type_cache:
        return _pointer_type_cache[ctype]
    # Create a new class for this particular C type
    type_dict = { '_jffi_type': jffi.Type.Pointer(ctype) }
    # Look back up the stack frame to find out the module this new type is declared in
    import inspect
    mod = inspect.getmodule(inspect.stack()[1][0])
    type_dict["__module__"] = "__main__" if mod is None else mod.__name__
    ptype = type("LP_%s" % (ctype.__name__,), (jffi.PointerCData, _CData), type_dict)
    _pointer_type_cache[ctype] = ptype
    return ptype
class c_bool(_ScalarCData):
    # C boolean scalar; struct-module type code '?'.
    _type_ = '?'
    _jffi_type = jffi.Type.BOOL
class c_byte(_ScalarCData):
    # Signed byte scalar; struct-module type code 'b'.
    _type_ = 'b'
    _jffi_type = jffi.Type.BYTE
class c_ubyte(_ScalarCData):
    # Unsigned byte scalar; struct-module type code 'B'.
    _type_ = 'B'
    _jffi_type = jffi.Type.UBYTE
class c_short(_ScalarCData):
    # Signed short scalar; struct-module type code 'h'.
    _type_ = 'h'
    _jffi_type = jffi.Type.SHORT
class c_ushort(_ScalarCData):
    # Unsigned short scalar; struct-module type code 'H'.
    _type_ = 'H'
    _jffi_type = jffi.Type.USHORT
class c_int(_ScalarCData):
    # Signed int scalar; struct-module type code 'i'.
    _type_ = 'i'
    _jffi_type = jffi.Type.INT
class c_uint(_ScalarCData):
    # Unsigned int scalar; struct-module type code 'I'.
    _type_ = 'I'
    _jffi_type = jffi.Type.UINT
class c_longlong(_ScalarCData):
    # Signed long long scalar; struct-module type code 'q'.
    _type_ = 'q'
    _jffi_type = jffi.Type.LONGLONG
class c_ulonglong(_ScalarCData):
    # Unsigned long long scalar; struct-module type code 'Q'.
    _type_ = 'Q'
    _jffi_type = jffi.Type.ULONGLONG
class c_long(_ScalarCData):
    # Signed long scalar; struct-module type code 'l'.
    _type_ = 'l'
    _jffi_type = jffi.Type.LONG
class c_ulong(_ScalarCData):
    # Unsigned long scalar; struct-module type code 'L'.
    _type_ = 'L'
    _jffi_type = jffi.Type.ULONG
class c_float(_ScalarCData):
    # Single-precision float scalar; struct-module type code 'f'.
    _type_ = 'f'
    _jffi_type = jffi.Type.FLOAT
class c_double(_ScalarCData):
    # Double-precision float scalar; struct-module type code 'd'.
    _type_ = 'd'
    _jffi_type = jffi.Type.DOUBLE
# Fixed-width aliases for the fundamental integer types.
c_int8 = c_byte
c_uint8 = c_ubyte
c_int16 = c_short
c_uint16 = c_ushort
c_int32 = c_int
c_uint32 = c_uint
c_int64 = c_longlong
c_uint64 = c_ulonglong
# NOTE(review): assumes size_t/ssize_t match long -- TODO confirm for the
# target platform (not true on e.g. 64-bit Windows).
c_size_t = c_ulong
c_ssize_t = c_long
class c_char_p(jffi.StringCData, _CData):
    # NUL-terminated C string; struct-module type code 'z'.
    _type_ = 'z'
    _jffi_type = jffi.Type.STRING
class c_void_p(_ScalarCData):
    # Untyped pointer scalar; struct-module type code 'P'.
    _type_ = 'P'
    _jffi_type = jffi.Type.POINTER
class _Function(jffi.Function):
    # Defaults applied to functions looked up from a library:
    # result type c_int, argument types unspecified.
    _restype = c_int
    _argtypes = None
class CDLL:
    """An opened native shared library.

    Functions are looked up lazily via attribute or item access and
    wrapped in _Function.
    """
    DEFAULT_MODE = jffi.RTLD_GLOBAL | jffi.RTLD_LAZY
    def __init__(self, name, mode = DEFAULT_MODE, handle = None):
        # NOTE(review): the handle parameter is accepted but unused here.
        self._handle = jffi.dlopen(name, mode)
    def __getattr__(self, name):
        # Dunder lookups are never library symbols.
        if name.startswith('__') and name.endswith('__'):
            raise AttributeError, name
        func = self.__getitem__(name)
        # Cache so subsequent lookups bypass __getattr__.
        setattr(self, name, func)
        return func
    def __getitem__(self, name):
        return _Function(self._handle.find_symbol(name))
class LibraryLoader(object):
    """Opens shared libraries on demand via attribute or item access.

    Each attribute access constructs an instance of the configured DLL
    class and caches it on the loader under the same name.
    """
    def __init__(self, dlltype):
        self._dlltype = dlltype
    def __getattr__(self, name):
        # Underscore-prefixed lookups are never library names.
        if name[0] == '_':
            raise AttributeError(name)
        library = self._dlltype(name)
        # Cache so subsequent lookups bypass __getattr__.
        setattr(self, name, library)
        return library
    def __getitem__(self, name):
        return getattr(self, name)
    def LoadLibrary(self, name):
        # Always opens a fresh library; does not consult the cache.
        return self._dlltype(name)
# Module-level loader: ``cdll.libname`` opens "libname" as a CDLL.
cdll = LibraryLoader(CDLL)
| apache-2.0 |
sdlBasic/sdlbrt | win32/mingw/opt/lib/python2.7/lib2to3/tests/data/py3_test_grammar.py | 266 | 30362 | # Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
# Backslash does not means continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(type(000), type(0))
self.assertEquals(0xff, 255)
self.assertEquals(0o377, 255)
self.assertEquals(2147483647, 0o17777777777)
self.assertEquals(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEquals(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assert_(0o37777777777 > 0)
self.assert_(0xffffffff > 0)
self.assert_(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
self.assert_(0o1777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
self.assert_(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def testLongIntegers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
def testEllipsis(self):
x = ...
self.assert_(x is Ellipsis)
self.assertRaises(SyntaxError, eval, ".. .")
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### [decorators] 'def' NAME parameters ['->' test] ':' suite
### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
### decorators: decorator+
### parameters: '(' [typedargslist] ')'
### typedargslist: ((tfpdef ['=' test] ',')*
### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
### tfpdef: NAME [':' test]
### varargslist: ((vfpdef ['=' test] ',')*
### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
### vfpdef: NAME
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
try:
str('x', **{b'foo':1 })
except TypeError:
pass
else:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
def pos0key1(*, key): return key
pos0key1(key=100)
def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
pos2key2(1, 2, k1=100)
pos2key2(1, 2, k1=100, k2=200)
pos2key2(1, 2, k2=100, k1=200)
def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# argument annotation tests
def f(x) -> list: pass
self.assertEquals(f.__annotations__, {'return': list})
def f(x:int): pass
self.assertEquals(f.__annotations__, {'x': int})
def f(*x:str): pass
self.assertEquals(f.__annotations__, {'x': str})
def f(**x:float): pass
self.assertEquals(f.__annotations__, {'x': float})
def f(x, y:1+2): pass
self.assertEquals(f.__annotations__, {'y': 3})
def f(a, b:1, c:2, d): pass
self.assertEquals(f.__annotations__, {'b': 1, 'c': 2})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
self.assertEquals(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
**k:11) -> 12: pass
self.assertEquals(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
'k': 11, 'return': 12})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
def null(x): return x
@null
def f(x) -> list: pass
self.assertEquals(f.__annotations__, {'return': list})
# test MAKE_CLOSURE with a variety of oparg's
closure = 1
def f(): return closure
def f(x=1): return closure
def f(*, k=1): return closure
def f() -> int: return closure
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
l6 = lambda x, y, *, k=20: x+y+k
self.assertEquals(l6(1,2), 1+2+20)
self.assertEquals(l6(1,2,k=10), 1+2+10)
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testNonlocal(self):
# 'nonlocal' NAME (',' NAME)*
x = 0
y = 0
def f():
nonlocal x
nonlocal x, y
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError as e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr ['as' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError as msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError) as msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort(key=lambda x: x if isinstance(x, tuple) else ())
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x): return x
@class_decorator
class G: pass
def testDictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(next(g), [x for x in range(10)])
try:
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
    def test_with_statement(self):
        """Grammar coverage for the 'with' statement's target forms."""
        # Minimal context manager; __enter__ returns a 2-tuple so the
        # "as (x, y)" unpacking form below has something to unpack.
        class manager(object):
            def __enter__(self):
                return (1, 2)
            def __exit__(self, *args):
                pass

        # No target.
        with manager():
            pass
        # Single name target.
        with manager() as x:
            pass
        # Tuple-unpacking target.
        with manager() as (x, y):
            pass
        # Multiple managers, no targets.
        with manager(), manager():
            pass
        # Multiple managers, each with a target.
        with manager() as x, manager() as y:
            pass
        # Mixed: first has a target, second doesn't.
        with manager() as x, manager():
            pass
    def testIfElseExpr(self):
        # Test ifelse expressions in various cases
        def _checkeval(msg, ret):
            "helper to check that evaluation of expressions is done correctly"
            # NOTE(review): 'x' is not defined in this scope; this looks like it
            # was meant to be print(msg) — TODO confirm. Harmless as written,
            # because every _checkeval call below sits in a branch that the
            # conditional expression must NOT evaluate.
            print(x)
            return ret
        # the next line is not allowed anymore
        #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
        self.assertEqual((5 and 6 if 0 else 1), 1)
        self.assertEqual(((5 and 6) if 0 else 1), 1)
        self.assertEqual((5 and (6 if 1 else 1)), 6)
        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
        self.assertEqual((not 5 if 1 else 1), False)
        self.assertEqual((not 5 if 0 else 1), 1)
        self.assertEqual((6 + 1 if 1 else 2), 7)
        self.assertEqual((6 - 1 if 1 else 2), 5)
        self.assertEqual((6 * 2 if 1 else 4), 12)
        self.assertEqual((6 / 2 if 1 else 3), 3)
        self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
    """Run the token and grammar test suites of this module."""
    # `run_unittest`, `TokenTests` and `GrammarTests` are defined earlier
    # in this file (outside the visible chunk).
    run_unittest(TokenTests, GrammarTests)

if __name__ == '__main__':
    test_main()
| lgpl-2.1 |
EverythingMe/teleport | teleport/teleporter.py | 2 | 1792 | from teleport import Teleport
import logging
from contextlib import contextmanager
from subprocess import check_call
def _run_commands(commands):
for command in commands:
logging.debug("running command '%s'", " ".join(command))
check_call(command)
def allow_traffic_only_to(address, dns_servers=None):
    """Restrict eth0 egress so only *address* (``host`` or ``host:port``) is reachable.

    Established/related connections, the VPN tunnel (tun0) and loopback stay
    open; the default OUTPUT policy is then set to DROP. When *dns_servers*
    is given, each server additionally gets an explicit ACCEPT rule
    (appended after the DROP policy, exactly as before).
    """
    host = address.partition(":")[0]
    logging.info("Allowing traffic only to %s on eth0", host)

    def accept(*args):
        # Shorthand for an OUTPUT-chain append rule.
        return ["iptables", "-A", "OUTPUT"] + list(args)

    rules = [
        accept("-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT"),
        accept("-o", "tun0", "-j", "ACCEPT"),
        accept("-o", "lo", "-j", "ACCEPT"),
        accept("-o", "eth0", "-d", host, "-j", "ACCEPT"),
        ["iptables", "-P", "OUTPUT", "DROP"],
    ]
    if dns_servers is not None:
        logging.info('Allowing traffic to dns servers %s', dns_servers)
        for server in dns_servers:
            rules.append(accept("-o", "eth0", "-d", server, "-j", "ACCEPT"))
    _run_commands(rules)
def reset_firewall():
    """Undo allow_traffic_only_to(): flush all rules, restore ACCEPT policy."""
    logging.info("resetting firewall")
    _run_commands([
        ["iptables", "-F"],
        ["iptables", "-P", "OUTPUT", "ACCEPT"],
    ])
@contextmanager
def FirewallContext(address, dns_servers=None):
    """Context manager: lock egress down to *address* for the body's duration.

    The firewall is reset even if the body raises.
    """
    allow_traffic_only_to(address, dns_servers)
    try:
        yield
    finally:
        reset_firewall()
@contextmanager
def Teleporter(config, place, with_firewall=True, dns_servers=None):
    """Context manager yielding a Teleport session connected to *place*.

    When *with_firewall* is true, egress is restricted to the peer's
    address while the body runs. The session is torn down (``go_home``)
    on exit even if the body raises.
    """
    t = Teleport(config).goto(place)
    try:
        if with_firewall:
            with FirewallContext(t.get_peer_address(), dns_servers=dns_servers):
                yield t
        else:
            yield t
    finally:
        t.go_home()
| bsd-2-clause |
enclose-io/compiler | lts/deps/v8/tools/testrunner/testproc/base.py | 11 | 6347 | # Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .result import SKIPPED
"""
Pipeline
Test processors are chained together and communicate with each other by
calling previous/next processor in the chain.
----next_test()----> ----next_test()---->
Proc1 Proc2 Proc3
<---result_for()---- <---result_for()----
For every next_test there is exactly one result_for call.
If a processor ignores the test, it has to return SkippedResult.
If it created multiple subtests for one test and wants to pass all of them to
the previous processor it can enclose them in GroupedResult.
Subtests
When test processor needs to modify the test or create some variants of the
test it creates subtests and sends them to the next processor.
Each subtest has:
- procid - globally unique id that should contain id of the parent test and
some suffix given by test processor, e.g. its name + subtest type.
- processor - which created it
- origin - pointer to the parent (sub)test
"""
# Requirement levels describing what part of a result the consumer allows to
# be dropped; merged along the chain with max() in TestProc.setup().
# Presumably DROP_RESULT means the whole result may be discarded and
# DROP_PASS_STDOUT only the stdout of passing tests — TODO confirm against
# the output processor.
DROP_RESULT = 0
DROP_OUTPUT = 1
DROP_PASS_OUTPUT = 2
DROP_PASS_STDOUT = 3
class TestProc(object):
  """Base class for one stage of the test-processing pipeline.

  Processors form a doubly-linked chain: tests travel forward via
  next_test() and results travel backward via result_for().
  """

  def __init__(self):
    self._prev_proc = None
    self._next_proc = None
    self._stopped = False
    self._requirement = DROP_RESULT
    self._prev_requirement = None
    # Identity by default; may be swapped in setup() for a function that
    # drops results the previous processor does not need.
    self._reduce_result = lambda result: result

  def connect_to(self, next_proc):
    """Puts `next_proc` after itself in the chain."""
    next_proc._prev_proc = self
    self._next_proc = next_proc

  def remove_from_chain(self):
    # Unlink this processor, splicing its two neighbours together.
    if self._prev_proc:
      self._prev_proc._next_proc = self._next_proc
    if self._next_proc:
      self._next_proc._prev_proc = self._prev_proc

  def setup(self, requirement=DROP_RESULT):
    """
    Method called by previous processor or processor pipeline creator to let
    the processors know what part of the result can be ignored.
    """
    self._prev_requirement = requirement
    if self._next_proc:
      self._next_proc.setup(max(requirement, self._requirement))

    # Since we don't gain anything by dropping only part of the result, we
    # either drop the whole result or pass it through as-is. The real
    # reduction happens during result creation (in the output processor), so
    # the result is immutable.
    if (self._prev_requirement < self._requirement and
        self._prev_requirement == DROP_RESULT):
      self._reduce_result = lambda _: None

  def next_test(self, test):
    """
    Method called by previous processor whenever it produces new test.
    This method shouldn't be called by anyone except previous processor.
    Returns a boolean value to signal whether the test was loaded into the
    execution queue successfully or not.
    """
    raise NotImplementedError()

  def result_for(self, test, result):
    """
    Method called by next processor whenever it has result for some test.
    This method shouldn't be called by anyone except next processor.
    """
    raise NotImplementedError()

  def heartbeat(self):
    # Heartbeats propagate backwards, towards the start of the pipeline.
    if self._prev_proc:
      self._prev_proc.heartbeat()

  def stop(self):
    # Propagate the stop signal in both directions, exactly once.
    if not self._stopped:
      self._stopped = True
      if self._prev_proc:
        self._prev_proc.stop()
      if self._next_proc:
        self._next_proc.stop()

  @property
  def is_stopped(self):
    return self._stopped

  ### Communication

  def notify_previous(self, event):
    self._on_event(event)
    if self._prev_proc:
      self._prev_proc.notify_previous(event)

  def _on_event(self, event):
    """Called when processors to the right signal events, e.g. termination.

    Args:
      event: A text describing the signalled event.
    """
    pass

  def _send_test(self, test):
    """Helper method for sending test to the next processor."""
    return self._next_proc.next_test(test)

  def _send_result(self, test, result):
    """Helper method for sending result to the previous processor."""
    if not test.keep_output:
      result = self._reduce_result(result)
    self._prev_proc.result_for(test, result)
class TestProcObserver(TestProc):
  """Pass-through processor that lets subclasses observe pipeline traffic.

  Tests and results flow through unchanged; the _on_* hooks are invoked
  just before each item is forwarded.
  """

  def __init__(self):
    super(TestProcObserver, self).__init__()

  def next_test(self, test):
    # Observe, then forward unchanged to the next processor.
    self._on_next_test(test)
    return self._send_test(test)

  def result_for(self, test, result):
    # Observe, then forward unchanged to the previous processor.
    self._on_result_for(test, result)
    self._send_result(test, result)

  def heartbeat(self):
    self._on_heartbeat()
    super(TestProcObserver, self).heartbeat()

  def _on_next_test(self, test):
    """Hook invoked for each test received, before it is sent onward."""

  def _on_result_for(self, test, result):
    """Hook invoked for each result received, before it is sent back."""

  def _on_heartbeat(self):
    """Hook invoked on every heartbeat."""
class TestProcProducer(TestProc):
  """Processor for creating subtests."""

  def __init__(self, name):
    super(TestProcProducer, self).__init__()
    # Used to build globally unique procids for created subtests.
    self._name = name

  def next_test(self, test):
    return self._next_test(test)

  def result_for(self, subtest, result):
    # Route the result back together with the parent test the subtest was
    # derived from (`subtest.origin`).
    self._result_for(subtest.origin, subtest, result)

  ### Implementation
  def _next_test(self, test):
    raise NotImplementedError()

  def _result_for(self, test, subtest, result):
    """
    result_for method extended with `subtest` parameter.

    Args
      test: test used by current processor to create the subtest.
      subtest: test for which the `result` is.
      result: subtest execution result created by the output processor.
    """
    raise NotImplementedError()

  ### Managing subtests
  def _create_subtest(self, test, subtest_id, **kwargs):
    """Creates subtest with subtest id <processor name>-`subtest_id`."""
    return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
                               **kwargs)
class TestProcFilter(TestProc):
  """Drops tests for which _filter() is true; results pass through untouched."""

  def next_test(self, test):
    # A filtered-out test is reported as "not loaded" (False).
    return False if self._filter(test) else self._send_test(test)

  def result_for(self, test, result):
    self._send_result(test, result)

  def _filter(self, test):
    """Returns whether test should be filtered out."""
    raise NotImplementedError()
| mit |
asimshankar/tensorflow | tensorflow/python/data/kernel_tests/filter_test.py | 5 | 4904 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.filter()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FilterTest(test_base.DatasetTestBase):
def testFilterDataset(self):
components = (
np.arange(7, dtype=np.int64),
np.array([[1, 2, 3]], dtype=np.int64) * np.arange(
7, dtype=np.int64)[:, np.newaxis],
np.array(37.0, dtype=np.float64) * np.arange(7)
)
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def do_test(count, modulus):
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count).filter(
lambda x, _y, _z: math_ops.equal(math_ops.mod(x, modulus), 0))
self.assertEqual([c.shape[1:] for c in components],
[shape for shape in dataset.output_shapes])
get_next = self.getNext(dataset)
for _ in range(count):
for i in [x for x in range(7) if x**2 % modulus == 0]:
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
do_test(14, 2)
do_test(4, 18)
# Test an empty dataset.
do_test(0, 1)
def testFilterRange(self):
dataset = dataset_ops.Dataset.range(4).filter(
lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
self.assertDatasetProduces(dataset, expected_output=[0, 1, 3])
def testFilterDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x ** 2}).filter(
lambda d: math_ops.equal(d["bar"] % 2, 0)).map(
lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset,
expected_output=[(i * 2 + i**2) for i in range(10) if not (i**2) % 2])
def testUseStepContainerInFilter(self):
input_data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
# Define a predicate that returns true for the first element of
# the sequence and not the second, and uses `tf.map_fn()`.
def _predicate(xs):
squared_xs = functional_ops.map_fn(lambda x: x * x, xs)
summed = math_ops.reduce_sum(squared_xs)
return math_ops.equal(summed, 1 + 4 + 9)
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6]]).filter(_predicate)
self.assertDatasetProduces(dataset, expected_output=[input_data[0]])
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1])), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
dataset = dataset_ops.Dataset.range(10).map(_map_fn).filter(_filter_fn).map(
lambda x, i: x)
self.assertDatasetProduces(
dataset, expected_output=[_map_fn(i * 2)[0] for i in range(5)])
def testShortCircuit(self):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10),
dataset_ops.Dataset.from_tensors(True).repeat(None)
)).filter(lambda x, y: y)
self.assertDatasetProduces(
dataset, expected_output=[(i, True) for i in range(10)])
def testParallelFilters(self):
dataset = dataset_ops.Dataset.range(10).filter(
lambda x: math_ops.equal(x % 2, 0))
next_elements = [self.getNext(dataset) for _ in range(10)]
self.assertEqual([0 for _ in range(10)],
self.evaluate(
[next_element() for next_element in next_elements]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
deployed/django | django/test/client.py | 10 | 24083 | from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote, urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
# Fixed multipart boundary used by the test client's form encoding.
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
# Extracts the charset parameter from a Content-Type header value.
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        # Once reading starts, further writes are rejected (mirrors a
        # one-shot network stream).
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        # Number of bytes still available to read.
        return self.__len

    def read(self, num_bytes=None):
        if not self.read_started:
            # Rewind once so data written during setup can be consumed.
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            # Fix: error message previously read "after he's been read".
            raise ValueError("Unable to write a payload after it's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
    """Yield from *iterable*, guaranteeing *close* runs when iteration ends.

    The close_old_connections receiver is detached before calling close()
    (which itself fires request_finished) and reconnected afterwards, so
    test-client requests don't tear down reusable DB connections.
    """
    try:
        for item in iterable:
            yield item
    finally:
        request_finished.disconnect(close_old_connections)
        close()                                 # will fire request_finished
        request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
    """
    An HTTP Handler that can be used for testing purposes. Uses the WSGI
    interface to compose requests, but returns the raw HttpResponse object with
    the originating WSGIRequest attached to its ``wsgi_request`` attribute.
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        # Keep DB connections alive across the simulated request; the
        # receiver is reconnected right after the signal fires.
        request_started.disconnect(close_old_connections)
        request_started.send(sender=self.__class__)
        request_started.connect(close_old_connections)
        request = WSGIRequest(environ)
        # sneaky little hack so that we can easily get round
        # CsrfViewMiddleware. This makes life easier, and is probably
        # required for backwards compatibility with external tests against
        # admin views.
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks

        # Request goes through middleware.
        response = self.get_response(request)
        # Attach the originating request to the response so that it could be
        # later retrieved.
        response.wsgi_request = request

        # We're emulating a WSGI server; we must call the close method
        # on completion.
        if response.streaming:
            response.streaming_content = closing_iterator_wrapper(
                response.streaming_content, response.close)
        else:
            request_finished.disconnect(close_old_connections)
            response.close()                    # will fire request_finished
            request_finished.connect(close_old_connections)

        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Signal receiver that records every rendered template and its context
    into *store*.

    The context is copied so the stored value reflects its state at render
    time rather than after later mutation.
    """
    templates = store.setdefault('templates', [])
    templates.append(template)
    contexts = store.setdefault('context', ContextList())
    contexts.append(copy(context))
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.
    """
    lines = []
    to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)

    # Not by any means perfect, but good enough for our purposes.
    is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, six.string_types) and is_iterable(value):
            # A non-string iterable: emit one part per item (strings are
            # iterable too, hence the explicit exclusion above).
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend([to_bytes(val) for val in [
                        '--%s' % boundary,
                        'Content-Disposition: form-data; name="%s"' % key,
                        '',
                        item
                    ]])
        else:
            lines.extend([to_bytes(val) for val in [
                '--%s' % boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                value
            ]])

    # Closing boundary followed by the final CRLF.
    lines.extend([
        to_bytes('--%s--' % boundary),
        b'',
    ])
    return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
    """
    Encodes one uploaded file as a list of multipart/form-data lines (bytes).

    The content type comes from the file object's own ``content_type``
    attribute when present; otherwise it is guessed from the file name,
    falling back to application/octet-stream.
    """
    to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    else:
        content_type = mimetypes.guess_type(file.name)[0]
    if content_type is None:
        content_type = 'application/octet-stream'
    filename = os.path.basename(file.name)
    disposition = ('Content-Disposition: form-data; name="%s"; filename="%s"'
                   % (key, filename))
    return [
        to_bytes('--%s' % boundary),
        to_bytes(disposition),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        file.read(),
    ]
class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        # WSGI environ overrides applied to every request from this factory.
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = BytesIO()

    def _base_environ(self, **request):
        """
        The base environment for a request.
        """
        # This is a minimal valid WSGI environ dictionary, plus:
        # - HTTP_COOKIE: for cookie support,
        # - REMOTE_ADDR: often useful, see #8551.
        # See http://www.python.org/dev/peps/pep-3333/#environ-variables
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': str('/'),
            'REMOTE_ADDR': str('127.0.0.1'),
            'REQUEST_METHOD': str('GET'),
            'SCRIPT_NAME': str(''),
            'SERVER_NAME': str('testserver'),
            'SERVER_PORT': str('80'),
            'SERVER_PROTOCOL': str('HTTP/1.1'),
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': str('http'),
            'wsgi.input': FakePayload(b''),
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        environ.update(self.defaults)
        environ.update(request)
        return environ

    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))

    def _encode_data(self, data, content_type):
        # Identity (`is`) check on purpose: only the module's own
        # MULTIPART_CONTENT constant triggers multipart encoding.
        if content_type is MULTIPART_CONTENT:
            return encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            return force_bytes(data, encoding=charset)

    def _get_path(self, parsed):
        path = force_str(parsed[2])
        # If there are parameters, add them
        if parsed[3]:
            path += str(";") + force_str(parsed[3])
        path = unquote(path)
        # WSGI requires latin-1 encoded strings. See get_path_info().
        if six.PY3:
            path = path.encode('utf-8').decode('iso-8859-1')
        return path

    def get(self, path, data=None, secure=False, **extra):
        "Construct a GET request."
        r = {
            'QUERY_STRING': urlencode(data or {}, doseq=True),
        }
        r.update(extra)
        return self.generic('GET', path, secure=secure, **r)

    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             secure=False, **extra):
        "Construct a POST request."
        post_data = self._encode_data(data or {}, content_type)
        return self.generic('POST', path, post_data, content_type,
                            secure=secure, **extra)

    def head(self, path, data=None, secure=False, **extra):
        "Construct a HEAD request."
        r = {
            'QUERY_STRING': urlencode(data or {}, doseq=True),
        }
        r.update(extra)
        return self.generic('HEAD', path, secure=secure, **r)

    def options(self, path, data='', content_type='application/octet-stream',
                secure=False, **extra):
        "Construct an OPTIONS request."
        return self.generic('OPTIONS', path, data, content_type,
                            secure=secure, **extra)

    def put(self, path, data='', content_type='application/octet-stream',
            secure=False, **extra):
        "Construct a PUT request."
        return self.generic('PUT', path, data, content_type,
                            secure=secure, **extra)

    def patch(self, path, data='', content_type='application/octet-stream',
              secure=False, **extra):
        "Construct a PATCH request."
        return self.generic('PATCH', path, data, content_type,
                            secure=secure, **extra)

    def delete(self, path, data='', content_type='application/octet-stream',
               secure=False, **extra):
        "Construct a DELETE request."
        return self.generic('DELETE', path, data, content_type,
                            secure=secure, **extra)

    def generic(self, method, path, data='',
                content_type='application/octet-stream', secure=False,
                **extra):
        """Constructs an arbitrary HTTP request."""
        parsed = urlparse(path)
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'REQUEST_METHOD': str(method),
            'SERVER_PORT': str('443') if secure else str('80'),
            'wsgi.url_scheme': str('https') if secure else str('http'),
        }
        if data:
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': str(content_type),
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the URL.
        if not r.get('QUERY_STRING'):
            query_string = force_bytes(parsed[4])
            # WSGI requires latin-1 encoded strings. See get_path_info().
            if six.PY3:
                query_string = query_string.decode('iso-8859-1')
            r['QUERY_STRING'] = query_string
        return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if apps.is_installed('django.contrib.sessions'):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
from django.contrib.auth import authenticate, login
user = authenticate(**credentials)
if (user and user.is_active and
apps.is_installed('django.contrib.sessions')):
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request that goes through request middleware
request = self.request().wsgi_request
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user_model, logout
# Create a fake request that goes through request middleware
request = self.request().wsgi_request
engine = import_module(settings.SESSION_ENGINE)
UserModel = get_user_model()
if self.session:
request.session = self.session
uid = self.session.get("_auth_user_id")
if uid:
request.user = UserModel._default_manager.get(pk=uid)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
    def _handle_redirects(self, response, **extra):
        "Follows any redirects by requesting responses from the server using GET."
        response.redirect_chain = []
        # 301/302/303/307 are the redirect statuses the test client follows.
        while response.status_code in (301, 302, 303, 307):
            url = response.url
            redirect_chain = response.redirect_chain
            redirect_chain.append((url, response.status_code))
            url = urlsplit(url)
            # Carry the scheme/host/port of an absolute Location header over
            # to the next simulated request's WSGI environ.
            if url.scheme:
                extra['wsgi.url_scheme'] = url.scheme
            if url.hostname:
                extra['SERVER_NAME'] = url.hostname
            if url.port:
                extra['SERVER_PORT'] = str(url.port)
            response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
            response.redirect_chain = redirect_chain
            # Prevent loops
            if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
                break
        return response
| bsd-3-clause |
lail3344/apprtc_selenium_webrtc | src/third_party/apiclient/errors.py | 108 | 3516 | #!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from oauth2client import util
from oauth2client.anyjson import simplejson
class Error(Exception):
  """Base error for this module."""
  pass
class HttpError(Error):
  """HTTP data was invalid or unexpected."""
  @util.positional(3)
  def __init__(self, resp, content, uri=None):
    # resp: httplib2.Response-like object carrying .status and .reason.
    # content: raw response body, expected to be a JSON error document.
    # uri: the request URI, if known, for a more informative repr.
    self.resp = resp
    self.content = content
    self.uri = uri
  def _get_reason(self):
    """Calculate the reason for the error from the response content."""
    reason = self.resp.reason
    try:
      data = simplejson.loads(self.content)
      reason = data['error']['message']
    except (ValueError, KeyError):
      # Body was not JSON or lacked the standard error envelope; fall back
      # to the transport-level reason above.
      pass
    if reason is None:
      reason = ''
    return reason
  def __repr__(self):
    if self.uri:
      return '<HttpError %s when requesting %s returned "%s">' % (
          self.resp.status, self.uri, self._get_reason().strip())
    else:
      return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
  __str__ = __repr__
# Thin marker subclasses: each names one failure mode so callers can catch
# precisely the condition they care about.
class InvalidJsonError(Error):
  """The JSON returned could not be parsed."""
  pass
class UnknownFileType(Error):
  """File type unknown or unexpected."""
  pass
class UnknownLinkType(Error):
  """Link type unknown or unexpected."""
  pass
class UnknownApiNameOrVersion(Error):
  """No API with that name and version exists."""
  pass
class UnacceptableMimeTypeError(Error):
  """That is an unacceptable mimetype for this operation."""
  pass
class MediaUploadSizeError(Error):
  """Media is larger than the method can accept."""
  pass
class ResumableUploadError(HttpError):
  """Error occurred during resumable upload."""
  pass
class InvalidChunkSizeError(Error):
  """The given chunksize is not valid."""
  pass
class InvalidNotificationError(Error):
  """The channel Notification is invalid."""
  pass
class BatchError(HttpError):
  """Error occurred during batch operations."""
  @util.positional(2)
  def __init__(self, reason, resp=None, content=None):
    """Constructor for a BatchError.

    Args:
      reason: string, human-readable explanation of the failure.
      resp: httplib2.Response, optional HTTP response headers.
      content: string, optional HTTP response body.
    """
    self.resp = resp
    self.content = content
    self.reason = reason
  def __repr__(self):
    # resp defaults to None; guard against an AttributeError when no HTTP
    # response accompanies the error (matches the upstream fix in
    # google-api-python-client).
    if getattr(self.resp, 'status', None) is None:
      return '<BatchError "%s">' % (self.reason)
    else:
      return '<BatchError %s "%s">' % (self.resp.status, self.reason)
  __str__ = __repr__
class UnexpectedMethodError(Error):
  """Exception raised by RequestMockBuilder on unexpected calls."""
  @util.positional(1)
  def __init__(self, methodId=None):
    """Constructor for an UnexpectedMethodError."""
    super(UnexpectedMethodError, self).__init__(
        'Received unexpected call %s' % methodId)
class UnexpectedBodyError(Error):
  """Exception raised by RequestMockBuilder on unexpected bodies."""
  def __init__(self, expected, provided):
    """Constructor for an UnexpectedBodyError."""
    super(UnexpectedBodyError, self).__init__(
        'Expected: [%s] - Provided: [%s]' % (expected, provided))
| bsd-3-clause |
aequitas/home-assistant | homeassistant/components/websocket_api/const.py | 1 | 1082 | """Websocket constants."""
import asyncio
from concurrent import futures
from functools import partial
import json
from homeassistant.helpers.json import JSONEncoder
DOMAIN = 'websocket_api'
URL = '/api/websocket'
# Hard cap on queued outbound messages per connection.
MAX_PENDING_MSG = 512
# Error codes reported back to clients in 'result' messages.
ERR_ID_REUSE = 'id_reuse'
ERR_INVALID_FORMAT = 'invalid_format'
ERR_NOT_FOUND = 'not_found'
ERR_HOME_ASSISTANT_ERROR = 'home_assistant_error'
ERR_UNKNOWN_COMMAND = 'unknown_command'
ERR_UNKNOWN_ERROR = 'unknown_error'
ERR_UNAUTHORIZED = 'unauthorized'
TYPE_RESULT = 'result'
# Define the possible errors that occur when connections are cancelled.
# Originally, this was just asyncio.CancelledError, but issue #9546 showed
# that futures.CancelledErrors can also occur in some situations.
CANCELLATION_ERRORS = (asyncio.CancelledError, futures.CancelledError)
# Event types
SIGNAL_WEBSOCKET_CONNECTED = 'websocket_connected'
SIGNAL_WEBSOCKET_DISCONNECTED = 'websocket_disconnected'
# Data used to store the current connection list
DATA_CONNECTIONS = DOMAIN + '.connections'
# allow_nan=False keeps payloads strict JSON (NaN/Infinity are rejected).
JSON_DUMP = partial(json.dumps, cls=JSONEncoder, allow_nan=False)
| apache-2.0 |
scalingdata/Impala | thirdparty/thrift-0.9.0/contrib/fb303/py/fb303/FacebookBase.py | 173 | 1917 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import FacebookService
import thrift.reflection.limited
from ttypes import fb_status
class FacebookBase(FacebookService.Iface):
  """Minimal in-memory implementation of the fb303 FacebookService interface.

  Tracks the service name, its start time and a simple map of named
  integer counters.
  """
  def __init__(self, name):
    self.name = name
    # Seconds since the epoch at which this service instance came up.
    self.alive = int(time.time())
    # Counter name -> integer value.
    self.counters = {}
  def getName(self, ):
    return self.name
  def getVersion(self, ):
    return ''
  def getStatus(self, ):
    return fb_status.ALIVE
  def getCounters(self):
    return self.counters
  def resetCounter(self, key):
    self.counters[key] = 0
  def getCounter(self, key):
    # 'in' is portable across Python 2 and 3 (dict.has_key was removed in 3).
    if key in self.counters:
      return self.counters[key]
    return 0
  def incrementCounter(self, key):
    self.counters[key] = self.getCounter(key) + 1
  def setOption(self, key, value):
    pass
  def getOption(self, key):
    return ""
  def getOptions(self):
    # Fixed: this method was accidentally defined twice with identical
    # bodies; the duplicate has been removed.
    return {}
  def aliveSince(self):
    return self.alive
  def getCpuProfile(self, duration):
    return ""
  def getLimitedReflection(self):
    return thrift.reflection.limited.Service()
  def reinitialize(self):
    pass
  def shutdown(self):
    pass
| apache-2.0 |
michaelaye/scikit-image | skimage/color/tests/test_delta_e.py | 40 | 4822 | """Test for correctness of color distance functions"""
from os.path import abspath, dirname, join as pjoin
import numpy as np
from numpy.testing import assert_allclose
from skimage.color import (deltaE_cie76,
deltaE_ciede94,
deltaE_ciede2000,
deltaE_cmc)
def test_ciede2000_dE():
    """CIEDE2000 distances must match the published reference table."""
    data = load_ciede2000_data()
    lab1 = np.column_stack((data['L1'], data['a1'], data['b1']))
    lab2 = np.column_stack((data['L2'], data['a2'], data['b2']))
    dE2 = deltaE_ciede2000(lab1, lab2)
    assert_allclose(dE2, data['dE'], rtol=1.e-4)
def load_ciede2000_data():
    """Load the CIEDE2000 reference table shipped next to this test module.

    Returns a structured array with one row per color pair.  The tests use
    only the Lab coordinates (L*/a*/b* of both colors) and the published dE;
    the remaining fields are intermediate quantities from the reference
    implementation.
    """
    dtype = [('pair', int),
             ('1', int),
             ('L1', float),
             ('a1', float),
             ('b1', float),
             ('a1_prime', float),
             ('C1_prime', float),
             ('h1_prime', float),
             ('hbar_prime', float),
             ('G', float),
             ('T', float),
             ('SL', float),
             ('SC', float),
             ('SH', float),
             ('RT', float),
             ('dE', float),
             ('2', int),
             ('L2', float),
             ('a2', float),
             ('b2', float),
             ('a2_prime', float),
             ('C2_prime', float),
             ('h2_prime', float),
             ]
    # note: ciede_test_data.txt contains several intermediate quantities
    path = pjoin(dirname(abspath(__file__)), 'ciede2000_test_data.txt')
    return np.loadtxt(path, dtype=dtype)
def test_cie76():
    """CIE76 (plain Euclidean Lab distance) against precomputed values."""
    data = load_ciede2000_data()
    lab1 = np.column_stack((data['L1'], data['a1'], data['b1']))
    lab2 = np.column_stack((data['L2'], data['a2'], data['b2']))
    dE2 = deltaE_cie76(lab1, lab2)
    # Regression oracle computed with a verified implementation.
    oracle = np.array([
        4.00106328, 6.31415011, 9.1776999, 2.06270077, 2.36957073,
        2.91529271, 2.23606798, 2.23606798, 4.98000036, 4.9800004,
        4.98000044, 4.98000049, 4.98000036, 4.9800004, 4.98000044,
        3.53553391, 36.86800781, 31.91002977, 30.25309901, 27.40894015,
        0.89242934, 0.7972, 0.8583065, 0.82982507, 3.1819238,
        2.21334297, 1.53890382, 4.60630929, 6.58467989, 3.88641412,
        1.50514845, 2.3237848, 0.94413208, 1.31910843
    ])
    assert_allclose(dE2, oracle, rtol=1.e-8)
def test_ciede94():
    """CIEDE94 distances against precomputed values."""
    data = load_ciede2000_data()
    lab1 = np.column_stack((data['L1'], data['a1'], data['b1']))
    lab2 = np.column_stack((data['L2'], data['a2'], data['b2']))
    dE2 = deltaE_ciede94(lab1, lab2)
    # Regression oracle computed with a verified implementation.
    oracle = np.array([
        1.39503887, 1.93410055, 2.45433566, 0.68449187, 0.6695627,
        0.69194527, 2.23606798, 2.03163832, 4.80069441, 4.80069445,
        4.80069449, 4.80069453, 4.80069441, 4.80069445, 4.80069449,
        3.40774352, 34.6891632, 29.44137328, 27.91408781, 24.93766082,
        0.82213163, 0.71658427, 0.8048753, 0.75284394, 1.39099471,
        1.24808929, 1.29795787, 1.82045088, 2.55613309, 1.42491303,
        1.41945261, 2.3225685, 0.93853308, 1.30654464
    ])
    assert_allclose(dE2, oracle, rtol=1.e-8)
def test_cmc():
    """CMC l:c distances against precomputed values."""
    data = load_ciede2000_data()
    lab1 = np.column_stack((data['L1'], data['a1'], data['b1']))
    lab2 = np.column_stack((data['L2'], data['a2'], data['b2']))
    dE2 = deltaE_cmc(lab1, lab2)
    # Regression oracle computed with a verified implementation.
    oracle = np.array([
        1.73873611, 2.49660844, 3.30494501, 0.85735576, 0.88332927,
        0.97822692, 3.50480874, 2.87930032, 6.5783807, 6.57838075,
        6.5783808, 6.57838086, 6.67492321, 6.67492326, 6.67492331,
        4.66852997, 42.10875485, 39.45889064, 38.36005919, 33.93663807,
        1.14400168, 1.00600419, 1.11302547, 1.05335328, 1.42822951,
        1.2548143, 1.76838061, 2.02583367, 3.08695508, 1.74893533,
        1.90095165, 1.70258148, 1.80317207, 2.44934417
    ])
    assert_allclose(dE2, oracle, rtol=1.e-8)
# Smoke tests: every distance function must also accept a single color pair
# (plain tuples), not just arrays of colors.
def test_single_color_cie76():
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    deltaE_cie76(lab1, lab2)
def test_single_color_ciede94():
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    deltaE_ciede94(lab1, lab2)
def test_single_color_ciede2000():
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    deltaE_ciede2000(lab1, lab2)
def test_single_color_cmc():
    lab1 = (0.5, 0.5, 0.5)
    lab2 = (0.4, 0.4, 0.4)
    deltaE_cmc(lab1, lab2)
# Allow running this module directly as a script.
if __name__ == "__main__":
    from numpy.testing import run_module_suite
    run_module_suite()
| bsd-3-clause |
agartland/cycluster | clustering.py | 1 | 12300 |
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
from bootstrap_cluster import bootstrapFeatures, bootstrapObservations
import numpy as np
import pandas as pd
from functools import partial
from .comparison import _alignClusterMats, alignClusters
from .preprocessing import partialCorrNormalize
from copy import deepcopy
from corrplots import partialcorr
import statsmodels.api as sm
__all__ = ['hierClusterFunc',
'corrDmatFunc',
'makeModuleVariables',
'formReliableClusters',
'labels2modules',
'cyclusterClass',
'meanCorr',
'silhouette']
def corrDmatFunc(cyDf, metric='pearson-signed', dfunc=None, minN=None):
    """Compute a pairwise distance matrix between the columns of cyDf.

    Parameters
    ----------
    cyDf : pd.DataFrame
        Observations in rows, analytes/cytokines in columns.
    metric : str
        'spearman' or 'pearson' (anti-correlation counts as similarity) or
        'spearman-signed' / 'pearson-signed' (anti-correlation is distant).
        Ignored when dfunc is provided.
    dfunc : callable or None
        Optional distance function d = dfunc(colA, colB) applied to every
        column pair (distance assumed symmetric).
    minN : int or None
        Minimum number of paired observations required for a valid distance.
        With dfunc, pairs below the minimum get NaN; None means no minimum.

    Returns
    -------
    pd.DataFrame
        Square symmetric distance matrix indexed by cyDf.columns."""
    if metric is None:
        metric = 'pearson-signed'
    if dfunc is None:
        if metric in ['spearman', 'pearson']:
            """Anti-correlations are also considered as high similarity and will cluster together"""
            dmat = (1 - np.abs(cyDf.corr(method=metric, min_periods=minN).values))
            # Pairs with too few observations produce NaN; treat as maximally distant.
            dmat[np.isnan(dmat)] = 1
        elif metric in ['spearman-signed', 'pearson-signed']:
            """Anti-correlations are considered as dissimilar and will NOT cluster together"""
            dmat = ((1 - cyDf.corr(method=metric.replace('-signed', ''), min_periods=minN).values) / 2)
            dmat[np.isnan(dmat)] = 1
        else:
            raise NameError('metric name not recognized')
    else:
        if minN is None:
            # Fix: minN=None used to crash this branch on the
            # "tmpdf.shape[0] >= minN" comparison; treat it as "no minimum".
            minN = 0
        ncols = cyDf.shape[1]
        dmat = np.zeros((ncols, ncols))
        for i in range(ncols):
            for j in range(ncols):
                """Assume distance is symmetric"""
                if i <= j:
                    tmpdf = cyDf.iloc[:, [i, j]]
                    tmpdf = tmpdf.dropna()
                    if tmpdf.shape[0] >= minN:
                        d = dfunc(cyDf.iloc[:, i], cyDf.iloc[:, j])
                    else:
                        d = np.nan
                    dmat[i, j] = d
                    dmat[j, i] = d
    return pd.DataFrame(dmat, columns=cyDf.columns, index=cyDf.columns)
def hierClusterFunc(dmatDf, K=6, method='complete', returnLinkageMat=False, old=False):
    """Agglomeratively cluster the columns of a distance matrix.

    Parameters
    ----------
    dmatDf : pd.DataFrame
        Square symmetric distance matrix (or an already-condensed vector
        when old=True).
    K : int
        Maximum number of flat clusters to form.
    method : str
        Linkage method passed to scipy.cluster.hierarchy.linkage.
    returnLinkageMat : bool
        When True, also return the linkage matrix.

    Returns
    -------
    labels : pd.Series
        Cluster label for each column of dmatDf.
    hclusters : ndarray, optional
        The linkage matrix (only when returnLinkageMat is True)."""
    if old:
        condensed = dmatDf.values
    else:
        if dmatDf.shape[0] != dmatDf.shape[1]:
            raise
        condensed = distance.squareform(dmatDf.values)
    hclusters = sch.linkage(condensed, method=method)
    memberships = sch.fcluster(hclusters, K, criterion='maxclust')
    labels = pd.Series(memberships, index=dmatDf.columns)
    return (labels, hclusters) if returnLinkageMat else labels
def formReliableClusters(cyDf, dmatFunc, clusterFunc, bootstraps=500, threshold=0.5):
    """Use bootstrap_clustering to determine the reliable clusters"""
    clusters = {}
    dmatDf = dmatFunc(cyDf)
    #pwrel, labels = bootstrapFeatures(dmat, clusterFunc, bootstraps = bootstraps)
    pwrelDf, labels = bootstrapObservations(cyDf, dmatFunc, clusterFunc, bootstraps = bootstraps)
    # No analyte is dropped initially.
    dropped = pd.Series(np.zeros(cyDf.shape[1]).astype(bool), index = cyDf.columns)
    for currLab in labels.unique():
        cyMembers = labels.index[labels == currLab].tolist()
        """Step-down: start with all members and discard fringe"""
        for cy in cyMembers:
            # Mean co-clustering rate of cy with the rest of its module over
            # the bootstrap replicates (pwrelDf stores 1 - reliability).
            meanReliability = (1 - pwrelDf[cy].loc[cyMembers].drop(cy).mean())
            if meanReliability < threshold:
                dropped[cy] = True
                # NOTE(review): relies on .sampleStr/.normed attributes being
                # attached to cyDf (done by cyclusterClass) — a plain
                # DataFrame will raise AttributeError here.
                strTuple = (cy, cyDf.sampleStr, 'N' if cyDf.normed else '', currLab, 100 * meanReliability)
                print('Excluded %s from cluster %s %sM%s: mean reliability was %1.1f%%' % strTuple)
    """Consider step-up strategy: start with best and add those that fit"""
    return pwrelDf, labels, dropped
def labels2modules(labels, dropped=None):
    """Convert a cluster-label Series into {label: [member names]}.

    Parameters
    ----------
    labels : pd.Series
        Cluster label per analyte; the index holds the analyte names.
    dropped : pd.Series of bool or None
        Analytes flagged True are excluded from the result; any label left
        with no members is removed entirely.

    Returns
    -------
    dict"""
    modules = {}
    for lab in np.unique(labels):
        modules[lab] = labels.index[labels == lab].tolist()
    if dropped is not None:
        excluded = set(dropped.index[dropped])
        for lab in list(modules.keys()):
            kept = [name for name in modules[lab] if name not in excluded]
            if kept:
                modules[lab] = kept
            else:
                del modules[lab]
    return modules
def makeModuleVariables(cyDf, labels, sampleStr='M', dropped=None):
    """Define variable for each module by standardizing all the cytokines in the
    module and taking the mean. Can be applied to a stacked df with multiple timepoints.
    Standardization will be performed across all data.
    Each module is also standardized.

    Parameters
    ----------
    cyDf : pd.DataFrame [n x cytokines]
        Contains columns for making the module.
        May include additional columns than included in labels or dropped.
    labels : pd.Series [index: cytokines]
        Series indicating cluster labels with index containing cytokine vars in cyDf
    dropped : pd.Series [index: cytokines]
        Series indicating if a cytokine (index) should be dropped when making the module

    Returns
    -------
    out : pd.DataFrame [n x modules]
        Modules as columns, one row for every row in cyDf"""
    if dropped is None:
        dropped = pd.Series(np.zeros((labels.shape[0]), dtype = bool), index = labels.index)
    # z-score using NaN-aware mean and population (ddof=0) std.
    standardizeFunc = lambda col: (col - np.nanmean(col))/np.nanstd(col)
    out = None
    uLabels = np.unique(labels)
    for lab in uLabels:
        members = labels.index[(labels == lab) & (~dropped)]
        # Module value = mean of the standardized member columns, per row,
        # skipping missing values.
        tmpS = cyDf.loc[:, members].apply(standardizeFunc, raw = True).mean(axis = 1, skipna=True)
        tmpS.name = '%s%s' % (sampleStr, lab)
        if out is None:
            out = pd.DataFrame(tmpS)
        else:
            out = out.join(tmpS)
    # Standardize each module variable as well.
    out = out.apply(standardizeFunc)
    return out
def meanCorr(cyDf, meanVar, cyList=None, method='pearson'):
    """Each cytokine's correlation with the mean.

    Returns a DataFrame sorted by rho (ascending) with columns
    rho/pvalue/qvalue, where qvalue is the Benjamini-Hochberg
    FDR-adjusted pvalue."""
    if cyList is None:
        # Default to every column except the mean variable itself.
        cyList = np.array([c for c in cyDf.columns if not c == meanVar])
    cyList = np.asarray(cyList)
    tmpCorr = np.zeros((len(cyList), 3))
    for i, s in enumerate(cyList):
        # partialcorr returns (rho, pvalue) for the pair of columns.
        tmpCorr[i, :2] = partialcorr(cyDf[s], cyDf[meanVar], method=method)
    sorti = np.argsort(tmpCorr[:, 0])
    tmpCorr = tmpCorr[sorti,:]
    _, tmpCorr[:, 2], _, _ = sm.stats.multipletests(tmpCorr[:, 1], alpha=0.2, method='fdr_bh')
    return pd.DataFrame(tmpCorr, index=cyList[sorti], columns=['rho', 'pvalue', 'qvalue'])
def silhouette(dmatDf, labels):
    """Compute the silhouette of every analyte."""
    def oneSilhouette(cy):
        # Members of cy's own cluster (cy itself included).
        modInd = labels == labels[cy]
        # NOTE(review): uses *summed* distances (including the zero
        # self-distance) rather than the usual mean intra-/inter-cluster
        # distances, so values are only comparable across analytes when
        # cluster sizes are similar — confirm this is intended.
        a = dmatDf.loc[cy, modInd].sum()
        b = None
        for lab in labels.unique():
            if not lab == labels[cy]:
                # Track the nearest "other" cluster by total distance.
                tmp = dmatDf.loc[cy, labels==lab].sum()
                if b is None or tmp < b:
                    b = tmp
        s = (b - a)/max(b, a)
        return s
    return labels.index.map(oneSilhouette)
class cyclusterClass(object):
    """Bundle of raw and mean-normalized cytokine data for one study/sample
    type, with methods for clustering analytes into modules and deriving
    per-subject module variables."""
    def __init__(self, studyStr, sampleStr, normed, rCyDf, compCommVars=None):
        """
        Parameters
        ----------
        studyStr : str
            Study identifier, used in self.name.
        sampleStr : str
            Sample-type identifier; prefixes module names (e.g. 'PBMC1').
        normed : bool
            If True, analyses use the mean-normalized data (nCyDf),
            otherwise the raw data (rCyDf).
        rCyDf : pd.DataFrame [samples x cytokines]
            Raw analyte data.
        compCommVars : list or None
            Variables used to compute the mean; passed to partialCorrNormalize.
        """
        self.studyStr = studyStr
        self.sampleStr = sampleStr
        self.normed = normed
        self.cyVars = rCyDf.columns.tolist()
        self.rCyDf = rCyDf.copy()
        # Normalize out the per-subject mean level; keep the mean separately.
        self.nCyDf, self.normModels = partialCorrNormalize(rCyDf, compCommVars=compCommVars, meanVar='Mean')
        self.meanS = self.nCyDf['Mean']
        self.nCyDf = self.nCyDf[self.cyVars]
        if normed:
            self.cyDf = self.nCyDf
        else:
            self.cyDf = self.rCyDf
        # Metadata consumed by formReliableClusters' log messages.
        self.cyDf.sampleStr = sampleStr
        self.cyDf.normed = normed
    def applyModules(self, target):
        """Use modules from target for computing module values.

        Parameters
        ----------
        target : cyclusterClass"""
        self.pwrel = target.pwrel
        self.Z = target.Z
        self.dmatDf = target.dmatDf
        self.labels = target.labels
        self.dropped = target.dropped
        self.sampleStr = target.sampleStr
        self.modS = labels2modules(self.labels, dropped=self.dropped)
        self.modDf = makeModuleVariables(self.cyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
        if self.normed:
            self.rModDf = makeModuleVariables(self.rCyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
        else:
            self.rModDf = self.modDf
    def clusterCytokines(self, K=6, alignLabels=None, labelMap=None, metric=None, minN=None):
        """Cluster the analytes into at most K modules and compute module
        variables.  labelMap/alignLabels optionally relabel clusters for
        comparability with another clustering."""
        corrFunc = partial(corrDmatFunc, metric=metric, minN=minN)
        self.pwrel, self.labels, self.dropped = formReliableClusters(self.cyDf, corrFunc, partial(hierClusterFunc, K=K), threshold=0)
        if not labelMap is None:
            self.labels = self.labels.map(labelMap)
        if not alignLabels is None:
            self.labels = alignClusters(alignLabels, self.labels)
        self.modS = labels2modules(self.labels, dropped=self.dropped)
        self.modDf = makeModuleVariables(self.cyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
        if self.normed:
            self.rModDf = makeModuleVariables(self.rCyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
        else:
            self.rModDf = self.modDf
        _, self.Z = hierClusterFunc(self.pwrel, returnLinkageMat=True)
        self.dmatDf = corrDmatFunc(self.cyDf)
    def printModules(self, modules=None):
        """Print each module and its members; dropped members are marked '*'."""
        tmp = labels2modules(self.labels, dropped=None)
        for m in list(tmp.keys()):
            mStr = '%s%d' % (self.sampleStr, m)
            if modules is None or mStr == modules or mStr in modules:
                print(mStr)
                for c in sorted(tmp[m]):
                    if self.dropped[c]:
                        print('*', end=' ')
                    print(c)
                print()
    def modMembers(self, modStr):
        """Members of the module named e.g. 'PBMC3' (single-digit label)."""
        return self.modS[int(modStr[-1])]
    def meanICD(self, dmat='dmat', dropped=None):
        """Compute mean intra-cluster distance using either dmatDf or pwrel.

        Parameters
        ----------
        dmat : str
            'dmat' for the correlation distance matrix, 'pwrel' for the
            bootstrap co-clustering distances.
        dropped : any or None
            If not None, analytes flagged in self.dropped are excluded."""
        def _micd(df, labels):
            """Cluster-size weighted mean over all within-cluster pairs."""
            count = 0
            tot = 0
            for lab in np.unique(labels):
                members = labels.index[labels == lab]
                tmp = df[members].loc[members].values.flatten()
                count += len(tmp)
                tot += tmp.sum()
            return tot/count
        # Fixed: this method previously read the undefined global `labels`
        # and the nonexistent attribute `self.tmpLabels`, so it always raised.
        if dropped is None:
            tmpLabels = self.labels
        else:
            tmpLabels = self.labels.loc[~self.dropped]
        if dmat == 'dmat':
            return _micd(self.dmatDf, tmpLabels)
        elif dmat == 'pwrel':
            return _micd(self.pwrel, tmpLabels)
        else:
            raise IndexError('Value for dmat not understood (%s)' % dmat)
    def pwrelStats(self):
        """Return the mean and standard deviation of values from self.pwrel
        for all non-identical cytokines. This is representative of
        how reliable the clusters are overall. Returns mean of (1 - pwrel)"""
        vec = 1 - self.pwrel.values[np.triu_indices_from(self.pwrel, k=1)].ravel()
        return vec.mean(), vec.std()
    def randCycluster(self):
        """Return a copy of self with shuffled rows, destroying covariation
        among cytokines. Requires that each column be shuffled, independently."""
        out = deepcopy(self)
        N = out.rCyDf.shape[0]
        for cy in out.cyVars:
            vals = out.rCyDf[cy].values
            nonnanInd = ~np.isnan(vals)
            nonnan = vals[nonnanInd]
            # One permutation per column, reused for raw and normalized data
            # so the two stay aligned with each other.
            rind = np.random.permutation(nonnan.shape[0])
            nonnan = nonnan[rind]
            vals[nonnanInd] = nonnan
            out.rCyDf.loc[:, cy] = vals
            vals = out.nCyDf[cy].values
            nonnan = vals[nonnanInd]
            nonnan = nonnan[rind]
            vals[nonnanInd] = nonnan
            out.nCyDf.loc[:, cy] = vals
        return out
    @property
    def name(self):
        """'study_sample_normed/raw_' prefix for output files."""
        return '%s_%s_%s_' % (self.studyStr, self.sampleStr, 'normed' if self.normed else 'raw')
    @property
    def withMean(self):
        """cyDf with the subject-level mean appended as a column."""
        return self.cyDf.join(self.meanS)
    @property
    def modWithMean(self):
        """modDf with the subject-level mean appended as a column."""
        return self.modDf.join(self.meanS)
| mit |
kenwang815/KodiPlugins | script.module.oceanktv/lib/youtube_dl/extractor/xboxclips.py | 16 | 1950 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_filesize,
unified_strdate,
)
class XboxClipsIE(InfoExtractor):
    """Extractor for xboxclips.com clip pages."""
    # Matches both /video.php?...vid=<36-char id> and /<gamertag>/<36-char id>.
    _VALID_URL = r'https?://(?:www\.)?xboxclips\.com/(?:video\.php\?.*vid=|[^/]+/)(?P<id>[\w-]{36})'
    _TEST = {
        'url': 'http://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325',
        'md5': 'fbe1ec805e920aeb8eced3c3e657df5d',
        'info_dict': {
            'id': '074a69a9-5faf-46aa-b93b-9909c1720325',
            'ext': 'mp4',
            'title': 'Iabdulelah playing Titanfall',
            'filesize_approx': 26800000,
            'upload_date': '20140807',
            'duration': 56,
        }
    }
    def _real_extract(self, url):
        # Scrape metadata directly from the clip page markup; everything
        # except the video URL and title is optional (fatal=False).
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_url = self._html_search_regex(
            r'>(?:Link|Download): <a[^>]+href="([^"]+)"', webpage, 'video URL')
        title = self._html_search_regex(
            r'<title>XboxClips \| ([^<]+)</title>', webpage, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'>Recorded: ([^<]+)<', webpage, 'upload date', fatal=False))
        filesize = parse_filesize(self._html_search_regex(
            r'>Size: ([^<]+)<', webpage, 'file size', fatal=False))
        duration = int_or_none(self._html_search_regex(
            r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False))
        view_count = int_or_none(self._html_search_regex(
            r'>Views: (\d+)<', webpage, 'view count', fatal=False))
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'upload_date': upload_date,
            'filesize_approx': filesize,
            'duration': duration,
            'view_count': view_count,
        }
| gpl-2.0 |
absoludity/servo | tests/wpt/css-tests/tools/py/testing/process/test_forkedfunc.py | 162 | 4839 | import pytest
import py, sys, os
pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
def test_waitfinish_removes_tempdir():
    ff = py.process.ForkedFunc(boxf1)
    assert ff.tempdir.check()
    ff.waitfinish()
    assert not ff.tempdir.check()
def test_tempdir_gets_gc_collected(monkeypatch):
    # Pretend fork() happened in this process so __del__ takes the parent path.
    monkeypatch.setattr(os, 'fork', lambda: os.getpid())
    ff = py.process.ForkedFunc(boxf1)
    assert ff.tempdir.check()
    ff.__del__()
    assert not ff.tempdir.check()
def test_basic_forkedfunc():
    result = py.process.ForkedFunc(boxf1).waitfinish()
    assert result.out == "some out\n"
    assert result.err == "some err\n"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 1
def test_exitstatus():
    def func():
        os._exit(4)
    result = py.process.ForkedFunc(func).waitfinish()
    assert result.exitstatus == 4
    assert result.signal == 0
    # os._exit skips the result-writing machinery: no output, no retval.
    assert not result.out
    assert not result.err
def test_execption_in_func():
    def fun():
        raise ValueError(42)
    ff = py.process.ForkedFunc(fun)
    result = ff.waitfinish()
    assert result.exitstatus == ff.EXITSTATUS_EXCEPTION
    assert result.err.find("ValueError: 42") != -1
    assert result.signal == 0
    assert not result.retval
def test_forkedfunc_on_fds():
    # boxf2 writes to raw file descriptors, bypassing sys.stdout/sys.stderr.
    result = py.process.ForkedFunc(boxf2).waitfinish()
    assert result.out == "someout"
    assert result.err == "someerr"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 2
def test_forkedfunc_on_fds_output():
    result = py.process.ForkedFunc(boxf3).waitfinish()
    assert result.signal == 11
    assert result.out == "s"
def test_forkedfunc_on_stdout():
    # Output written to buffered stdout must survive a signal death.
    def boxf3():
        import sys
        sys.stdout.write("hello\n")
        os.kill(os.getpid(), 11)
    result = py.process.ForkedFunc(boxf3).waitfinish()
    assert result.signal == 11
    assert result.out == "hello\n"
def test_forkedfunc_signal():
    result = py.process.ForkedFunc(boxseg).waitfinish()
    assert result.retval is None
    if sys.version_info < (2,4):
        py.test.skip("signal detection does not work with python prior 2.4")
    assert result.signal == 11
def test_forkedfunc_huge_data():
    result = py.process.ForkedFunc(boxhuge).waitfinish()
    assert result.out
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 3
def test_box_seq():
    # we run many boxes with huge data, just one after another
    for i in range(50):
        result = py.process.ForkedFunc(boxhuge).waitfinish()
        assert result.out
        assert result.exitstatus == 0
        assert result.signal == 0
        assert result.retval == 3
def test_box_in_a_box():
    # Nested fork: run boxf2 inside an already-forked function and relay
    # its output/return value to the outer box.
    def boxfun():
        result = py.process.ForkedFunc(boxf2).waitfinish()
        print (result.out)
        sys.stderr.write(result.err + "\n")
        return result.retval
    result = py.process.ForkedFunc(boxfun).waitfinish()
    assert result.out == "someout\n"
    assert result.err == "someerr\n"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 2
def test_kill_func_forked():
    class A:
        pass
    info = A()
    import time
    def box_fun():
        time.sleep(10) # we don't want to last forever here
    ff = py.process.ForkedFunc(box_fun)
    # SIGTERM (15) the child and make sure the signal is reported.
    os.kill(ff.pid, 15)
    result = ff.waitfinish()
    if py.std.sys.version_info < (2,4):
        py.test.skip("signal detection does not work with python prior 2.4")
    assert result.signal == 15
def test_hooks(monkeypatch):
    # child_on_start/child_on_exit run inside the child, before and after
    # the boxed function, so their output is captured like the function's.
    def _boxed():
        return 1
    def _on_start():
        sys.stdout.write("some out\n")
        sys.stdout.flush()
    def _on_exit():
        sys.stderr.write("some err\n")
        sys.stderr.flush()
    result = py.process.ForkedFunc(_boxed, child_on_start=_on_start,
                                   child_on_exit=_on_exit).waitfinish()
    assert result.out == "some out\n"
    assert result.err == "some err\n"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 1
# ======================================================================
# examples
# ======================================================================
#
def boxf1():
    # Writes via the buffered sys streams and returns normally.
    sys.stdout.write("some out\n")
    sys.stderr.write("some err\n")
    return 1
def boxf2():
    # Writes via the raw file descriptors, bypassing sys.stdout/sys.stderr.
    os.write(1, "someout".encode('ascii'))
    os.write(2, "someerr".encode('ascii'))
    return 2
def boxf3():
    # Writes one byte, then dies from signal 11 (SIGSEGV).
    os.write(1, "s".encode('ascii'))
    os.kill(os.getpid(), 11)
def boxseg():
    # Dies from signal 11 without producing any output.
    os.kill(os.getpid(), 11)
def boxhuge():
    # Produces well over the pipe-buffer size on both fds.
    s = " ".encode('ascii')
    os.write(1, s * 10000)
    os.write(2, s * 10000)
    os.write(1, s * 10000)
    os.write(1, s * 10000)
    os.write(2, s * 10000)
    os.write(2, s * 10000)
    os.write(1, s * 10000)
    return 3
| mpl-2.0 |
Vegaviet-Dev/android_kernel_pantech_ef63-common | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
	"""Print msg and abort the generator with exit status 1."""
	print msg
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	"""Create the new fabric module directory; return 1 if it already exists."""
	if os.path.isdir(fabric_mod_dir_var) == True:
		return 1
	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	ret = os.mkdir(fabric_mod_dir_var)
	# NOTE(review): os.mkdir() returns None and raises OSError on failure,
	# so this error branch can never trigger — confirm before relying on it.
	if ret:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Write <fabric_mod_name>_base.h declaring the FC (Fibre Channel)
	nacl/tpg/lport structures, and record the FC port naming globals."""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += "	u64 nport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* SCSI protocol the lport is providing */\n"
	buf += "	u8 lport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	# FC uses lport (target) / nport (initiator) naming in later templates.
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Write <fabric_mod_name>_base.h declaring the SAS nacl/tpg/tport
	structures, and record the SAS port naming globals."""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += "	u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	# SAS uses tport (target) / iport (initiator) naming in later templates.
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for an iSCSI fabric module.

	Same as the SAS variant but names are ASCII IQNs rather than binary
	WWPNs.  Also records the iSCSI naming convention in the
	module-level fabric_mod_port / fabric_mod_init_port globals.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	# print() call form works identically on Python 2 and 3 for one arg.
	print("Writing file: " + f)
	# open() raises IOError/OSError on failure; no falsy-return check needed.
	p = open(f, 'w')
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* ASCII formatted InitiatorName */\n"
	buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* iSCSI target portal group tag for TCM */\n"
	buf += " u16 tport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += " struct " + fabric_mod_name + "_tport *tport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += " /* SCSI protocol the tport is providing */\n"
	buf += " u8 tport_proto_id;\n"
	buf += " /* ASCII formatted TargetName for IQN */\n"
	buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += " struct se_wwn tport_wwn;\n"
	buf += "};\n"
	# file.write() return value is not an error indicator (None on Py2,
	# char count on Py3); write errors raise instead.
	p.write(buf)
	p.close()
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Dispatch to the protocol-specific _base.h generator.

	proto_ident must be "FC", "SAS" or "iSCSI"; any other value prints
	a diagnostic and exits the process with status 1.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		# print() call form works identically on Python 2 and 3.
		print("Unsupported proto_ident: " + proto_ident)
		sys.exit(1)
	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Collect target_core_fabric_ops callback declarations.

	Reads include/target/target_core_fabric.h under tcm_dir and appends
	every line containing a function-pointer declaration ('(*') to the
	module-level fabric_ops list, which tcm_mod_dump_fabric_ops() later
	uses to decide which stub functions to emit.

	NOTE(review): the process_fo state machine below skips the
	'struct target_core_fabric_ops {' line only while process_fo == 0,
	and the branch that flips process_fo to 1 consumes one line without
	testing it for '(*' -- presumably intentional for this header's
	layout; confirm before restructuring.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	# 0 until scanning has started, 1 afterwards.
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		# Skip the struct header line itself (only reachable before
		# process_fo is set).
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		if process_fo == 0:
			# First non-header line: enter scanning state, then apply
			# the same match-and-append logic as the branch below.
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_fabric.c and _fabric.h stubs.

	Args:
	    proto_ident: "FC", "SAS" or "iSCSI"; selects which protocol
	        helper the transport-ID stubs delegate to.
	    fabric_mod_dir_var: output directory for the generated files.
	    fabric_mod_name: fabric module name, e.g. "tcm_foo".

	For each callback declaration previously collected into the
	module-level fabric_ops list by tcm_mod_scan_fabric_ops(), emits a
	stub definition into _fabric.c (buf) and a matching prototype into
	_fabric.h (bufi).  Also reads the fabric_mod_port global.
	"""
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	# print() call form works identically on Python 2 and 3 for one arg.
	print("Writing file: " + f)
	# open() raises IOError/OSError on failure; no falsy-return check needed.
	p = open(f, 'w')
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print("Writing file: " + fi)
	pi = open(fi, 'w')
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	# Iterate the collected declarations directly instead of the old
	# manual while/index loop -- same order, same behavior.
	for fo in fabric_ops:
		# print("fabric_ops: " + fo)
		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += " return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " u8 proto_id;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += " break;\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += " break;\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += " break;\n"
			buf += " }\n\n"
			buf += " return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " struct se_node_acl *se_nacl,\n"
			buf += " struct t10_pr_registration *pr_reg,\n"
			buf += " int *format_code,\n"
			buf += " unsigned char *buf)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " int ret = 0;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code, buf);\n"
				buf += " break;\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code, buf);\n"
				buf += " break;\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code, buf);\n"
				buf += " break;\n"
			buf += " }\n\n"
			buf += " return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += " int *, unsigned char *);\n"
		if re.search('get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " struct se_node_acl *se_nacl,\n"
			buf += " struct t10_pr_registration *pr_reg,\n"
			buf += " int *format_code)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " int ret = 0;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code);\n"
				buf += " break;\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code);\n"
				buf += " break;\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code);\n"
				buf += " break;\n"
			buf += " }\n\n"
			buf += " return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += " int *);\n"
		if re.search('parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " const char *buf,\n"
			buf += " u32 *out_tid_len,\n"
			buf += " char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " char *tid = NULL;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += " port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += " port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += " port_nexus_ptr);\n"
			buf += " }\n\n"
			buf += " return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += " const char *, u32 *, char **);\n"
		if re.search('alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += " if (!nacl) {\n"
			buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += " return NULL;\n"
			buf += " }\n\n"
			buf += " return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
		if re.search('release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += " kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += " struct se_node_acl *);\n"
		if re.search('tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search('\*release_cmd\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search('shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
		if re.search('close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
		if re.search('stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
		if re.search('fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
		if re.search('sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
		if re.search('sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search('write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search('write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
		if re.search('set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search('get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
		if re.search('get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search('queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search('queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search('queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search('get_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
		if re.search('set_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
		if re.search('is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
	# file.write() return value is not an error indicator (None on Py2,
	# char count on Py3); write errors raise instead.
	p.write(buf)
	p.close()
	pi.write(bufi)
	pi.close()
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
	"""Generate the new fabric module's kbuild Makefile fragment."""
	buf = ""
	f = fabric_mod_dir_var + "/Makefile"
	# print() call form works identically on Python 2 and 3 for one arg.
	print("Writing file: " + f)
	# open() raises IOError/OSError on failure; no falsy-return check needed.
	p = open(f, 'w')
	buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
	buf += "    " + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
	# file.write() return value is not an error indicator (None on Py2,
	# char count on Py3); write errors raise instead.
	p.write(buf)
	p.close()
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the module's obj-$(CONFIG_...) line to drivers/target/Makefile.

    tcm_dir is the kernel tree root; the Makefile must already exist or it
    is created by the append-mode open.
    """
    buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"
    # 'with' closes the handle even if the write raises (the original
    # leaked the handle on error).
    with open(kbuild, 'a') as f:
        f.write(buf)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a 'source' line for the module to drivers/target/Kconfig."""
    buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"
    # 'with' closes the handle even if the write raises (the original
    # leaked the handle on error).
    with open(kconfig, 'a') as f:
        f.write(buf)
    return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line entry point: both -m (module name) and -p (protocol ident)
# are mandatory.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# optparse has no notion of required options; enforce them by hand.
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)  # NOTE(review): sys.exit(1) would be the conventional spelling

if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
| gpl-2.0 |
bryceweiner/mojocoin | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.

    Return a list of (msgid, msgstr) tuples, where each element is the
    list of quoted string fragments making up that message part.
    """
    entries = []
    cur_id = []
    cur_str = []
    reading_id = False
    reading_str = False

    for raw in text.split('\n'):
        raw = raw.rstrip('\r')
        if raw.startswith('msgid '):
            # A new message begins; flush the previous one, if any.
            if reading_str:
                entries.append((cur_id, cur_str))
                reading_str = False
            reading_id = True
            cur_id = [raw[6:]]
        elif raw.startswith('msgstr '):
            reading_id = False
            reading_str = True
            cur_str = [raw[7:]]
        elif raw.startswith('"'):
            # Continuation fragment for whichever part is being read.
            if reading_id:
                cur_id.append(raw)
            if reading_str:
                cur_str.append(raw)

    # Flush the trailing message.
    if reading_str:
        entries.append((cur_id, cur_str))
    return entries
# Collect all C++ sources and headers, then run xgettext over them to pull
# out the _("...") translatable strings.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')

# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# NOTE(review): under Python 3 `out` would be bytes while parse_po()
# expects str; this script targets Python 2 (see the shebang).

messages = parse_po(out)

# Emit the strings as a C array of QT_TRANSLATE_NOOP entries so Qt
# linguist can pick them up.
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
40223245/123 | static/Brython3.1.1-20150328-091302/Lib/_collections.py | 603 | 19111 | # "High performance data structures
# "
# copied from pypy repo
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# edited for Brython line 558 : catch ImportError instead of AttributeError
import operator
#try:
# from thread import get_ident as _thread_ident
#except ImportError:
def _thread_ident():
return -1
n = 30          # usable payload slots per deque block
LFTLNK = n      # index of the left-neighbour link inside a block
RGTLNK = n+1    # index of the right-neighbour link inside a block
BLOCKSIZ = n+2  # total slots per block: payload plus the two links
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
#class deque(object):
class deque:
def __new__(cls, iterable=(), *args, **kw):
#fixme
#self = super(deque, cls).__new__(cls, *args, **kw)
self=object.__new__(cls, *args, **kw)
self.clear()
return self
def __init__(self, iterable=(), maxlen=None):
object.__init__(self)
self.clear()
if maxlen is not None:
if maxlen < 0:
raise ValueError("maxlen must be non-negative")
self._maxlen = maxlen
add = self.append
for elem in iterable:
add(elem)
@property
def maxlen(self):
return self._maxlen
def clear(self):
self.right = self.left = [None] * BLOCKSIZ
self.rightndx = n//2 # points to last written element
self.leftndx = n//2+1
self.length = 0
self.state = 0
def append(self, x):
self.state += 1
self.rightndx += 1
if self.rightndx == n:
newblock = [None] * BLOCKSIZ
self.right[RGTLNK] = newblock
newblock[LFTLNK] = self.right
self.right = newblock
self.rightndx = 0
self.length += 1
self.right[self.rightndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.popleft()
def appendleft(self, x):
self.state += 1
self.leftndx -= 1
if self.leftndx == -1:
newblock = [None] * BLOCKSIZ
self.left[LFTLNK] = newblock
newblock[RGTLNK] = self.left
self.left = newblock
self.leftndx = n-1
self.length += 1
self.left[self.leftndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.pop()
def extend(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.appendleft(elem)
def pop(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque" # does not work in brython
raise IndexError("pop from an empty deque")
x = self.right[self.rightndx]
self.right[self.rightndx] = None
self.length -= 1
self.rightndx -= 1
self.state += 1
if self.rightndx == -1:
prevblock = self.right[LFTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[RGTLNK] = None
self.right[LFTLNK] = None
self.right = prevblock
self.rightndx = n-1
return x
def popleft(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque"
raise IndexError("pop from an empty deque")
x = self.left[self.leftndx]
self.left[self.leftndx] = None
self.length -= 1
self.leftndx += 1
self.state += 1
if self.leftndx == n:
prevblock = self.left[RGTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[LFTLNK] = None
self.left[RGTLNK] = None
self.left = prevblock
self.leftndx = 0
return x
def count(self, value):
c = 0
for item in self:
if item == value:
c += 1
return c
def remove(self, value):
# Need to be defensive for mutating comparisons
for i in range(len(self)):
if self[i] == value:
del self[i]
return
raise ValueError("deque.remove(x): x not in deque")
def rotate(self, n=1):
length = len(self)
if length == 0:
return
halflen = (length+1) >> 1
if n > halflen or n < -halflen:
n %= length
if n > halflen:
n -= length
elif n < -halflen:
n += length
while n > 0:
self.appendleft(self.pop())
n -= 1
while n < 0:
self.append(self.popleft())
n += 1
def reverse(self):
"reverse *IN PLACE*"
leftblock = self.left
rightblock = self.right
leftindex = self.leftndx
rightindex = self.rightndx
for i in range(self.length // 2):
# Validate that pointers haven't met in the middle
assert leftblock != rightblock or leftindex < rightindex
# Swap
(rightblock[rightindex], leftblock[leftindex]) = (
leftblock[leftindex], rightblock[rightindex])
# Advance left block/index pair
leftindex += 1
if leftindex == n:
leftblock = leftblock[RGTLNK]
assert leftblock is not None
leftindex = 0
# Step backwards with the right block/index pair
rightindex -= 1
if rightindex == -1:
rightblock = rightblock[LFTLNK]
assert rightblock is not None
rightindex = n - 1
def __repr__(self):
threadlocalattr = '__repr' + str(_thread_ident())
if threadlocalattr in self.__dict__:
return 'deque([...])'
else:
self.__dict__[threadlocalattr] = True
try:
if self.maxlen is not None:
return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
else:
return 'deque(%r)' % (list(self),)
finally:
del self.__dict__[threadlocalattr]
def __iter__(self):
return deque_iterator(self, self._iter_impl)
def _iter_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in block[l:r]:
yield elem
if self.state != original_state:
giveup()
block = block[RGTLNK]
def __reversed__(self):
return deque_iterator(self, self._reversed_impl)
def _reversed_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in reversed(block[l:r]):
yield elem
if self.state != original_state:
giveup()
block = block[LFTLNK]
def __len__(self):
#sum = 0
#block = self.left
#while block:
# sum += n
# block = block[RGTLNK]
#return sum + self.rightndx - self.leftndx + 1 - n
return self.length
def __getref(self, index):
if index >= 0:
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
span = r-l
if index < span:
return block, l+index
index -= span
block = block[RGTLNK]
else:
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
negative_span = l-r
if index >= negative_span:
return block, r+index
index -= negative_span
block = block[LFTLNK]
raise IndexError("deque index out of range")
def __getitem__(self, index):
block, index = self.__getref(index)
return block[index]
def __setitem__(self, index, value):
block, index = self.__getref(index)
block[index] = value
def __delitem__(self, index):
length = len(self)
if index >= 0:
if index >= length:
raise IndexError("deque index out of range")
self.rotate(-index)
self.popleft()
self.rotate(index)
else:
#index = ~index #todo until bit wise operators are in bython
index= index^(2**31)
if index >= length:
raise IndexError("deque index out of range")
self.rotate(index)
self.pop()
self.rotate(-index)
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
def __hash__(self):
#raise TypeError, "deque objects are unhashable"
raise TypeError("deque objects are unhashable")
def __copy__(self):
return self.__class__(self, self.maxlen)
# XXX make comparison more efficient
def __eq__(self, other):
if isinstance(other, deque):
return list(self) == list(other)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, deque):
return list(self) != list(other)
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, deque):
return list(self) < list(other)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, deque):
return list(self) <= list(other)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, deque):
return list(self) > list(other)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, deque):
return list(self) >= list(other)
else:
return NotImplemented
def __iadd__(self, other):
self.extend(other)
return self
class deque_iterator(object):
    """Iterator over a deque; aborts with RuntimeError on concurrent mutation.

    itergen is one of deque._iter_impl / deque._reversed_impl; it is
    handed the deque's state counter and a giveup callback to invoke
    when the counter changes mid-iteration.
    """

    def __init__(self, deq, itergen):
        self.counter = len(deq)
        def giveup():
            self.counter = 0
            raise RuntimeError("deque mutated during iteration")
        self._gen = itergen(deq.state, giveup)

    def __next__(self):
        # Python 3 iterator protocol.  The original defined only a
        # py2-style next() that called the generator's .next() method,
        # which no longer exists in Python 3, so iterating a deque
        # raised TypeError/AttributeError.
        res = next(self._gen)
        self.counter -= 1
        return res

    # Keep the old py2-style spelling for any caller that used it directly.
    next = __next__

    def __iter__(self):
        return self
class defaultdict(dict):
    """Pure-Python fallback for collections.defaultdict (Brython port).

    defaultdict(default_factory[, ...]) behaves like dict, except that a
    missing key is populated by calling default_factory() instead of
    raising KeyError (unless default_factory is None).
    """

    def __init__(self, *args, **kwds):
        if len(args) > 0:
            default_factory = args[0]
            args = args[1:]
            if not callable(default_factory) and default_factory is not None:
                raise TypeError("first argument must be callable")
        else:
            default_factory = None
        # The original called dict.__init__(self, args, kwds) and then
        # self.update(args, kwds), passing the argument tuple and the
        # kwargs dict as two positional arguments -- dict rejects both
        # calls with TypeError.  Initialise empty, then apply the
        # remaining arguments the way dict() itself would.
        dict.__init__(self)
        self.default_factory = default_factory
        if args or kwds:
            self.update(*args, **kwds)

    # Explicit override so lookups route through __missing__; needed for
    # the Brython runtime this was written against.
    def __getitem__(self, key):
        if self.__contains__(key):
            return dict.__getitem__(self, key)
        return self.__missing__(key)

    def __missing__(self, key):
        """Insert and return default_factory() for key, per defaultdict docs."""
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value

    def __repr__(self, recurse=set()):
        # The shared mutable default is deliberate: it tracks in-progress
        # reprs so self-referential defaultdicts do not recurse forever.
        if id(self) in recurse:
            return "defaultdict(...)"
        try:
            recurse.add(id(self))
            return "defaultdict(%s, %s)" % (repr(self.default_factory), super(defaultdict, self).__repr__())
        finally:
            recurse.remove(id(self))

    def copy(self):
        return type(self)(self.default_factory, self)

    def __copy__(self):
        return self.copy()

    def __reduce__(self):
        #
        # __reduce__ must return a 5-tuple as follows:
        #
        #   - factory function
        #   - tuple of args for the factory function
        #   - additional state (here None)
        #   - sequence iterator (here None)
        #   - dictionary iterator (yielding successive (key, value) pairs
        #     This API is used by pickle.py and copy.py.
        #
        # py3 fix: dict has no iteritems(); use an iterator over items().
        return (type(self), (self.default_factory,), None, None, iter(self.items()))
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    if rename:
        # Replace invalid or duplicate names with positional '_%d' names.
        names = list(field_names)
        seen = set()
        for i, name in enumerate(names):
            if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
                or not name or name[0].isdigit() or name.startswith('_')
                or name in seen):
                names[i] = '_%d' % i
            seen.add(name)
        field_names = tuple(names)
    for name in (typename,) + field_names:
        if not min(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    template = '''class %(typename)s(tuple):
    '%(typename)s(%(argtxt)s)' \n
    __slots__ = () \n
    _fields = %(field_names)r \n
    def __new__(_cls, %(argtxt)s):
        return tuple.__new__(_cls, (%(argtxt)s)) \n
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new %(typename)s object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != %(numfields)d:
            raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
        return result \n
    def __repr__(self):
        return '%(typename)s(%(reprtxt)s)' %% self \n
    def _asdict(self):
        'Return a new dict which maps field names to their values'
        return dict(zip(self._fields, self)) \n
    def _replace(_self, **kwds):
        'Return a new %(typename)s object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, %(field_names)r, _self))
        if kwds:
            raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
        return result \n
    def __getnewargs__(self):
        return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        # Indent must match the template's class-body indent above.
        template += '    %s = _property(_itemgetter(%d))\n' % (name, i)
    if verbose:
        print(template)

    # Execute the template string in a temporary namespace
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template,namespace)
    except SyntaxError as e:
        # py3 fix: SyntaxError has no .message attribute; the original
        # 'e.message' raised AttributeError and hid the real error.
        raise SyntaxError(str(e) + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass

    return result
if __name__ == '__main__':
    # Manual smoke test of the namedtuple implementation above.
    Point = namedtuple('Point', ['x', 'y'])
    p = Point(11, y=22)
    print(p[0]+p[1])
    x,y=p
    print(x,y)
    print(p.x+p.y)
    print(p)
| gpl-3.0 |
Orav/kbengine | kbe/src/lib/python/Lib/encodings/cp875.py | 37 | 13161 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp875 codec: each direction is a single charmap table
    # lookup against the module-level tables (gencodec.py boilerplate).

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding carries no state, so `final` needs no handling.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Single-byte charset: every input byte decodes independently.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Codec supplies encode(); codecs.StreamWriter supplies the stream I/O.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Codec supplies decode(); codecs.StreamReader supplies the stream I/O.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo used to register the cp875 codec."""
    # Codec is stateless, so one shared instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp875',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'|' # 0x6A -> VERTICAL LINE
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xa8' # 0x70 -> DIAERESIS
'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\xa0' # 0x74 -> NO-BREAK SPACE
'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
'\xb4' # 0xA0 -> ACUTE ACCENT
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
'\u03be' # 0xAB -> GREEK SMALL LETTER XI
'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
'\xa3' # 0xB0 -> POUND SIGN
'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
'\u2015' # 0xCF -> HORIZONTAL BAR
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb1' # 0xDA -> PLUS-MINUS SIGN
'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
'\x1a' # 0xDC -> SUBSTITUTE
'\u0387' # 0xDD -> GREEK ANO TELEIA
'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
'\xa6' # 0xDF -> BROKEN BAR
'\\' # 0xE0 -> REVERSE SOLIDUS
'\x1a' # 0xE1 -> SUBSTITUTE
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xa7' # 0xEB -> SECTION SIGN
'\x1a' # 0xEC -> SUBSTITUTE
'\x1a' # 0xED -> SUBSTITUTE
'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xEF -> NOT SIGN
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xa9' # 0xFB -> COPYRIGHT SIGN
'\x1a' # 0xFC -> SUBSTITUTE
'\x1a' # 0xFD -> SUBSTITUTE
'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Built as the inverse of decoding_table: maps each character back to its
# cp875 byte value.
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
ftomassetti/intellij-community | python/helpers/pydev/_pydev_imps/_pydev_BaseHTTPServer.py | 54 | 22554 | """HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
from _pydev_imps import _pydev_time as time
from _pydev_imps import _pydev_socket as socket
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
from _pydev_imps import _pydev_SocketServer as SocketServer
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html"
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(SocketServer.TCPServer):
    """TCP server that records its own hostname and port when bound."""

    # Allow immediate rebinding of the port; convenient when the server
    # is restarted frequently in a test environment.
    allow_reuse_address = 1

    def server_bind(self):
        """Bind the socket, then cache the server FQDN and port."""
        SocketServer.TCPServer.server_bind(self)
        sockname = self.socket.getsockname()
        self.server_name = socket.getfqdn(sockname[0])
        self.server_port = sockname[1]
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
    def parse_request(self):
        """Parse a request (internal).

        The request should be stored in self.raw_requestline; the results
        are in self.command, self.path, self.request_version and
        self.headers.

        Return True for success, False for failure; on failure, an
        error is sent back.
        """
        self.command = None  # set in case of error on the first line
        self.request_version = version = self.default_request_version
        # Pessimistically assume the connection closes after this request;
        # cleared below only for HTTP/1.1 without an explicit 'close'.
        self.close_connection = 1
        requestline = self.raw_requestline
        requestline = requestline.rstrip('\r\n')
        self.requestline = requestline
        words = requestline.split()
        if len(words) == 3:
            # Full request line: <command> <path> <version>
            command, path, version = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            try:
                base_version_number = version.split('/', 1)[1]
                version_number = base_version_number.split(".")
                # RFC 2145 section 3.1 says there can be only one "." and
                #   - major and minor numbers MUST be treated as
                #      separate integers;
                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
                #      turn is lower than HTTP/12.3;
                #   - Leading zeros MUST be ignored by recipients.
                if len(version_number) != 2:
                    raise ValueError
                version_number = int(version_number[0]), int(version_number[1])
            except (ValueError, IndexError):
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
                # Both sides speak HTTP/1.1: default to keep-alive.
                self.close_connection = 0
            if version_number >= (2, 0):
                self.send_error(505,
                                "Invalid HTTP Version (%s)" % base_version_number)
                return False
        elif len(words) == 2:
            # HTTP/0.9 request line: <command> <path>; only GET exists,
            # there are no headers and the connection never persists.
            command, path = words
            self.close_connection = 1
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%r)" % command)
                return False
        elif not words:
            # Empty request line: report failure without sending an error.
            return False
        else:
            self.send_error(400, "Bad request syntax (%r)" % requestline)
            return False
        self.command, self.path, self.request_version = command, path, version
        # Examine the headers and look for a Connection directive
        self.headers = self.MessageClass(self.rfile, 0)
        conntype = self.headers.get('Connection', "")
        if conntype.lower() == 'close':
            self.close_connection = 1
        elif (conntype.lower() == 'keep-alive' and
              self.protocol_version >= "HTTP/1.1"):
            self.close_connection = 0
        return True
    def handle_one_request(self):
        """Handle a single HTTP request.

        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.

        Reads one request line, parses it, then dispatches to the
        matching do_<COMMAND> method (e.g. do_GET).
        """
        try:
            # Read at most 65537 bytes: one more than the longest
            # acceptable request line, so an over-long line is detected
            # without consuming unbounded input.
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)  # Request-URI Too Long
                return
            if not self.raw_requestline:
                # EOF: the client closed the connection.
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            mname = 'do_' + self.command
            if not hasattr(self, mname):
                self.send_error(501, "Unsupported method (%r)" % self.command)
                return
            method = getattr(self, mname)
            method()
            self.wfile.flush() #actually send the response if not already done.
        except socket.timeout:
            #a read or a write timed out.  Discard this connection
            self.log_error("Request timed out: %r", sys.exc_info()[1])
            self.close_connection = 1
            return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
    def send_error(self, code, message=None):
        """Send and log an error reply.

        Arguments are the error code, and a detailed message.
        The detailed message defaults to the short entry matching the
        response code.

        This sends an error response (so it must be called before any
        output has been generated), logs the error, and finally sends
        a piece of HTML explaining the error to the user.
        """
        try:
            short, long = self.responses[code]
        except KeyError:
            # Unknown status code: fall back to placeholder texts.
            short, long = '???', '???'
        if message is None:
            message = short
        explain = long
        self.log_error("code %d, message %s", code, message)
        # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
        content = (self.error_message_format %
                   {'code': code, 'message': _quote_html(message), 'explain': explain})
        self.send_response(code, message)
        self.send_header("Content-Type", self.error_content_type)
        self.send_header('Connection', 'close')
        self.end_headers()
        # Per RFC 2616, HEAD responses and 1xx/204/304 statuses must not
        # carry a message body.
        if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
            self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
    def send_response(self, code, message=None):
        """Send the response header and log the response code.

        Also send two standard headers with the server software
        version and the current date.
        """
        self.log_request(code)
        if message is None:
            # Default to the short message registered for this code.
            if code in self.responses:
                message = self.responses[code][0]
            else:
                message = ''
        if self.request_version != 'HTTP/0.9':
            # HTTP/0.9 replies consist of the body only -- no status line.
            self.wfile.write("%s %d %s\r\n" %
                             (self.protocol_version, code, message))
            # print (self.protocol_version, code, message)
        self.send_header('Server', self.version_string())
        self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
    def log_error(self, format, *args):
        """Log an error.

        This is called when a request cannot be fulfilled.  By
        default it passes the message on to log_message().

        Arguments are the same as for log_message().

        XXX This should go to the separate error log.
        """
        self.log_message(format, *args)
    def log_message(self, format, *args):
        """Log an arbitrary message.

        This is used by all other logging functions.  Override
        it if you have specific logging wishes.

        The first argument, FORMAT, is a format string for the
        message to be logged.  If the format string contains
        any % escapes requiring parameters, they should be
        specified as subsequent arguments (it's just like
        printf!).

        The client host and current date/time are prefixed to
        every message.  Output goes to sys.stderr.
        """
        sys.stderr.write("%s - - [%s] %s\n" %
                         (self.address_string(),
                          self.log_date_time_string(),
                          format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
    argument).  Blocks forever in serve_forever().
    """
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)
    sa = httpd.socket.getsockname()
    # Use sys.stdout.write so the banner renders identically on Python 2
    # and 3; the previous parenthesized multi-argument print statement
    # emitted a tuple repr under Python 2 (this module is Python 2 --
    # it imports mimetools and checks sys.py3kwarning).
    sys.stdout.write("Serving HTTP on %s port %s ...\n" % (sa[0], sa[1]))
    httpd.serve_forever()
if __name__ == '__main__':
test()
| apache-2.0 |
vfalico/hydra | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
    """Instantiate the event class matching the raw buffer layout.

    The concrete type is inferred from the raw buffer size: 144 bytes is
    a plain PEBS record, 176 bytes a PEBS record with the load-latency
    extension, and any other size a generic perf sample.
    """
    size = len(raw_buf)
    if size == 144:
        return PebsEvent(name, comm, dso, symbol, raw_buf)
    if size == 176:
        return PebsNHM(name, comm, dso, symbol, raw_buf)
    return PerfEvent(name, comm, dso, symbol, raw_buf)
class PerfEvent(object):
    """Base class for all perf event samples.

    Holds the common identification fields of a sample and counts
    created instances (subclasses included) in ``event_num``.
    """
    # Running total of PerfEvent instances (including subclasses).
    event_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name        # event name
        self.comm = comm        # task command name
        self.dso = dso          # shared object the sample hit
        self.symbol = symbol    # resolved symbol name
        self.raw_buf = raw_buf  # raw sample payload
        self.ev_type = ev_type  # one of the EVTYPE_* constants
        PerfEvent.event_num += 1

    def show(self):
        # Print a one-line human-readable summary of the sample.
        print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    """Basic Intel PEBS (Precise Event-based Sampling) event.

    The first 80 bytes of the raw buffer hold the interrupted context:
    EFLAGS, the linear IP and the eight general purpose registers, each
    stored as a native 64-bit unsigned value.
    """
    # Running total of PebsEvent instances (including subclasses).
    pebs_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        tmp_buf=raw_buf[0:80]
        # Ten consecutive unsigned 64-bit fields: flags, ip, then GPRs.
        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
        self.flags = flags
        self.ip = ip
        self.ax = ax
        self.bx = bx
        self.cx = cx
        self.dx = dx
        self.si = si
        self.di = di
        self.bp = bp
        self.sp = sp
        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
        del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
    """PEBS event with the Nehalem/Westmere load-latency extension.

    Bytes 144:176 of the raw buffer carry four extra 64-bit words
    appended after the basic PEBS record: status, DLA, DSE and LAT
    (see the module-level comment above for their meaning).
    """
    # Running total of PebsNHM instances.
    pebs_nhm_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        tmp_buf=raw_buf[144:176]
        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
        self.status = status  # IA32_PERF_GLOBAL_STATUS register value
        self.dla = dla        # Data Linear Address
        self.dse = dse        # Data Source Encoding (where the latency happened)
        self.lat = lat        # actual latency in cycles
        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
        del tmp_buf
| gpl-2.0 |
jswope00/griffinx | common/djangoapps/terrain/steps.py | 14 | 7208 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=wildcard-import
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=unused-wildcard-import
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=unused-argument
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
    """Assert the browser URL matches *path* (after course-id expansion)."""
    # Scenario paths may contain a {} placeholder for the course id.
    if 'COURSE' in world.scenario_dict:
        path = path.format(world.scenario_dict['COURSE'].id)
    assert world.url_equals(path), (
        "path should be {!r} but is {!r}".format(path, world.browser.url)
    )
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
world.register_by_course_key(course_key, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
    """Assert that *text* is (or is not) visible anywhere on the page."""
    # SauceLabs sessions are slower, so double the text-wait timeout there.
    multiplier = 2 if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs' else 1
    wait = 5 * multiplier
    if doesnt_appear:
        assert world.browser.is_text_not_present(text, wait_time=wait)
    else:
        assert world.browser.is_text_present(text, wait_time=wait)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
if 'COURSE' in world.scenario_dict:
url = url.format(world.scenario_dict['COURSE'].id)
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert

    Window variables are page local and thus all changes are removed upon navigating to a new page

    In addition, this method changes the functionality of ONLY future alerts
    """
    # Bug fix: the original applied '% prompt' to the *return value* of
    # execute_script (None), raising a TypeError at runtime and never
    # injecting the answer.  Interpolate into the script string itself,
    # quoting the value so it forms a valid JavaScript string literal.
    world.browser.execute_script(
        'window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
| agpl-3.0 |
robwarm/gpaw-symm | gpaw/utilities/vector.py | 3 | 3718 | from math import acos, cos, sin, sqrt
import numpy as np
from ase.atoms import string2vector
class Vector3d(list):
    """A 3d vector stored as a plain list of three floats.

    Supports elementwise addition/subtraction, scalar multiplication
    and division, dot products (vector * vector), cross products,
    rotations and a few geometric helpers.  The euclidean length is
    cached in ``self.l`` (False means "not computed yet") and is
    invalidated whenever the vector is rescaled.
    """

    def __init__(self, vector=None):
        # Accept None/[] (zero vector), sequences, or the string forms
        # understood by ase's string2vector (e.g. 'x', '-y').
        if vector is None or vector == []:
            vector = [0, 0, 0]
        vector = string2vector(vector)
        list.__init__(self)
        for c in range(3):
            self.append(float(vector[c]))
        self.l = False  # cached length; recomputed lazily by length()

    def __add__(self, other):
        result = self.copy()
        for c in range(3):
            result[c] += other[c]
        return result

    def __div__(self, other):
        """Elementwise division by a scalar (or broadcastable array)."""
        return Vector3d(np.array(self) / other)

    # Fix: Python 3 (and Python 2 with true division) dispatches '/' to
    # __truediv__; without this alias division simply fell back to the
    # unsupported list behaviour.
    __truediv__ = __div__

    def __mul__(self, x):
        if type(x) == type(self):
            # vector * vector -> dot product (a float)
            return np.dot(self, x)
        else:
            # vector * scalar -> scaled vector
            return Vector3d(x * np.array(self))

    def __rmul__(self, x):
        return self.__mul__(x)

    def __lmul__(self, x):
        # Not a real special method; kept for backward compatibility.
        return self.__mul__(x)

    def __neg__(self):
        return -1 * self

    def __str__(self):
        return "(%g,%g,%g)" % tuple(self)

    def __sub__(self, other):
        result = self.copy()
        for c in range(3):
            result[c] -= other[c]
        return result

    def angle(self, other):
        """Return the angle between the directions of yourself and the
        other vector in radians, or None if either vector has zero
        length."""
        other = Vector3d(other)
        ll = self.length() * other.length()
        if not ll > 0:
            return None
        return acos((self * other) / ll)

    def copy(self):
        """Return an independent copy of this vector."""
        return Vector3d(self)

    def distance(self, vector):
        """Return the euclidean distance to the other vector."""
        if type(vector) is not type(self):
            vector = Vector3d(vector)
        # Fix: the original also bound the difference to an unused local
        # ('dv') before recomputing it.
        return (self - vector).length()

    def length(self, value=None):
        """Return the euclidean length.

        If *value* is given and non-zero, first rescale the vector to
        that length.  (A value of 0 is treated like "no rescaling",
        preserving the original behaviour.)
        """
        if value:
            fac = value / self.length()
            for c in range(3):
                self[c] *= fac
            self.l = False  # invalidate the cached length after rescaling
        if not self.l:
            self.l = sqrt(self.norm())
        return self.l

    def norm(self):
        """Return the squared length (dot product with itself)."""
        return self * self

    def rotation_axis(self, other):
        """Return the rotation axis to rotate yourself to the direction
        of the other vector.  The length of the axis gives the rotation
        angle (in radians).  Returns None for zero-length vectors."""
        other = Vector3d(other)
        angle = self.angle(other)
        if angle is None:
            return None
        axis = self.vprod(other)
        axis.length(angle)
        return axis

    def rotate(self, axis, angle=None):
        """Rotate the vector in place about the given axis by the given
        angle (radians).

        If *angle* is omitted, the length of *axis* is used as the
        angle.  Note that the right hand rule applies: if your right
        thumb points into the direction of the axis, the other fingers
        show the rotation direction."""
        axis = Vector3d(axis)
        if angle is None:
            angle = axis.length()
        axis.length(1.)
        # Rodrigues' rotation formula (with this class's sign convention
        # for the cross product term).
        res = (cos(angle) * self - sin(angle) * self.vprod(axis) +
               ((self * axis) * (1. - cos(angle))) * axis)
        for c in range(3):
            self[c] = res[c]

    def vprod(self, a, b=None):
        """Vector (cross) product.

        With one argument, return [self x a]; with two, return [a x b].
        """
        if b is None:
            # [self x a]
            return Vector3d([self[1]*a[2]-self[2]*a[1],
                             self[2]*a[0]-self[0]*a[2],
                             self[0]*a[1]-self[1]*a[0]])
        else:
            # [a x b]
            return Vector3d([a[1]*b[2]-a[2]*b[1],
                             a[2]*b[0]-a[0]*b[2],
                             a[0]*b[1]-a[1]*b[0]])

    def x(self):
        return self[0]

    def y(self):
        return self[1]

    def z(self):
        return self[2]
| gpl-3.0 |
viktorTarasov/PyKMIP | kmip/demos/units/register.py | 1 | 2876 | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core.enums import KeyFormatType
from kmip.core.enums import ObjectType
from kmip.core.enums import Operation
from kmip.core.enums import ResultStatus
from kmip.core.objects import TemplateAttribute
from kmip.demos import utils
from kmip.services.kmip_client import KMIPProxy
import logging
import sys
if __name__ == '__main__':
    logger = utils.build_console_logger(logging.INFO)

    # Build and parse the command line arguments for the register demo.
    parser = utils.build_cli_parser(Operation.REGISTER)
    opts, args = parser.parse_args(sys.argv[1:])

    username = opts.username
    password = opts.password
    config = opts.config
    object_type = opts.type
    format_type = opts.format

    # Exit early if the arguments are not specified
    object_type = getattr(ObjectType, object_type, None)
    if object_type is None:
        logger.error("Invalid object type specified; exiting early from demo")
        sys.exit()

    key_format_type = getattr(KeyFormatType, format_type, None)
    if key_format_type is None:
        logger.error(
            "Invalid key format type specified; exiting early from demo")
        # Bug fix: the original logged this error but fell through and
        # built the secret with key_format_type=None; exit here like the
        # object-type branch above does.
        sys.exit()

    # Create the template attribute for the secret and then build the secret
    usage_mask = utils.build_cryptographic_usage_mask(logger, object_type)
    attributes = [usage_mask]
    template_attribute = TemplateAttribute(attributes=attributes)

    secret = utils.build_object(logger, object_type, key_format_type)

    # Build the client, connect to the server, register the secret, and
    # disconnect from the server
    client = KMIPProxy(config=config)
    client.open()
    result = client.register(object_type, template_attribute, secret)
    client.close()

    # Display operation results
    logger.info('register() result status: {0}'.format(
        result.result_status.value))

    if result.result_status.value == ResultStatus.SUCCESS:
        logger.info('registered UUID: {0}'.format(result.uuid.value))
        logger.info('registered template attribute: {0}'.
                    format(result.template_attribute))
    else:
        logger.info('register() result reason: {0}'.format(
            result.result_reason.value))
        logger.info('register() result message: {0}'.format(
            result.result_message.value))
1013553207/django | tests/template_backends/test_django.py | 199 | 4793 | from template_tests.test_response import test_processor_name
from django.template import RequestContext
from django.template.backends.django import DjangoTemplates
from django.template.library import InvalidTemplateLibrary
from django.test import RequestFactory, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
    """Run the shared template-backend suite against the DjangoTemplates
    backend, plus backend-specific checks: context-processor precedence,
    RequestContext/request conflict handling, and templatetag library and
    builtins discovery."""
    engine_class = DjangoTemplates
    backend_name = 'django'
    def test_context_has_priority_over_template_context_processors(self):
        """A variable passed in the context dict shadows the value a context
        processor provides for the same name."""
        # See ticket #23789.
        engine = DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': {
                'context_processors': [test_processor_name],
            },
        })
        template = engine.from_string('{{ processors }}')
        request = RequestFactory().get('/')
        # Check that context processors run
        content = template.render({}, request)
        self.assertEqual(content, 'yes')
        # Check that context overrides context processors
        content = template.render({'processors': 'no'}, request)
        self.assertEqual(content, 'no')
    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_request_context_conflicts_with_request(self):
        """Passing both a RequestContext and a *different* request raises
        ValueError; the same request is tolerated (deprecated code path)."""
        template = self.engine.from_string('hello')
        request = RequestFactory().get('/')
        request_context = RequestContext(request)
        # This doesn't raise an exception.
        template.render(request_context, request)
        other_request = RequestFactory().get('/')
        msg = ("render() was called with a RequestContext and a request "
               "argument which refer to different requests. Make sure "
               "that the context argument is a dict or at least that "
               "the two arguments refer to the same request.")
        with self.assertRaisesMessage(ValueError, msg):
            template.render(request_context, other_request)
    @override_settings(INSTALLED_APPS=['template_backends.apps.good'])
    def test_templatetag_discovery(self):
        """Tag libraries are collected from installed apps and
        django.templatetags; OPTIONS['libraries'] entries are registered and
        override discovered libraries of the same name."""
        engine = DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': {
                'libraries': {
                    'alternate': 'template_backends.apps.good.templatetags.good_tags',
                    'override': 'template_backends.apps.good.templatetags.good_tags',
                },
            },
        })
        # libraries are discovered from installed applications
        self.assertEqual(
            engine.engine.libraries['good_tags'],
            'template_backends.apps.good.templatetags.good_tags',
        )
        self.assertEqual(
            engine.engine.libraries['subpackage.tags'],
            'template_backends.apps.good.templatetags.subpackage.tags',
        )
        # libraries are discovered from django.templatetags
        self.assertEqual(
            engine.engine.libraries['static'],
            'django.templatetags.static',
        )
        # libraries passed in OPTIONS are registered
        self.assertEqual(
            engine.engine.libraries['alternate'],
            'template_backends.apps.good.templatetags.good_tags',
        )
        # libraries passed in OPTIONS take precedence over discovered ones
        self.assertEqual(
            engine.engine.libraries['override'],
            'template_backends.apps.good.templatetags.good_tags',
        )
    @override_settings(INSTALLED_APPS=['template_backends.apps.importerror'])
    def test_templatetag_discovery_import_error(self):
        """
        Import errors in tag modules should be reraised with a helpful message.
        """
        with self.assertRaisesMessage(
            InvalidTemplateLibrary,
            "ImportError raised when trying to load "
            "'template_backends.apps.importerror.templatetags.broken_tags'"
        ):
            DjangoTemplates({
                'DIRS': [],
                'APP_DIRS': False,
                'NAME': 'django',
                'OPTIONS': {},
            })
    def test_builtins_discovery(self):
        """OPTIONS['builtins'] entries are appended after Django's default
        builtin tag/filter modules, preserving order."""
        engine = DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': {
                'builtins': ['template_backends.apps.good.templatetags.good_tags'],
            },
        })
        self.assertEqual(
            engine.engine.builtins, [
                'django.template.defaulttags',
                'django.template.defaultfilters',
                'django.template.loader_tags',
                'template_backends.apps.good.templatetags.good_tags',
            ]
        )
| bsd-3-clause |
jvkops/django | tests/utils_tests/test_datastructures.py | 262 | 4154 | """
Tests for stuff in django.utils.datastructures.
"""
import copy
from django.test import SimpleTestCase
from django.utils import six
from django.utils.datastructures import (
DictWrapper, ImmutableList, MultiValueDict, MultiValueDictKeyError,
OrderedSet,
)
class OrderedSetTests(SimpleTestCase):
    def test_bool(self):
        """An empty OrderedSet is falsy; a populated one is truthy (#23664)."""
        ordered = OrderedSet()
        self.assertFalse(ordered)
        ordered.add(1)
        self.assertTrue(ordered)
    def test_len(self):
        """len() counts distinct members; re-adding an element is a no-op."""
        ordered = OrderedSet()
        self.assertEqual(len(ordered), 0)
        for value in (1, 2, 2):
            ordered.add(value)
        self.assertEqual(len(ordered), 2)
class MultiValueDictTests(SimpleTestCase):
    def test_multivaluedict(self):
        """Core MultiValueDict contract: __getitem__/get return the *last*
        value for a key, getlist returns all values, and a missing key raises
        MultiValueDictKeyError (naming the key) from __getitem__."""
        d = MultiValueDict({'name': ['Adrian', 'Simon'],
                            'position': ['Developer']})
        self.assertEqual(d['name'], 'Simon')
        self.assertEqual(d.get('name'), 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
        self.assertEqual(
            sorted(six.iteritems(d)),
            [('name', 'Simon'), ('position', 'Developer')]
        )
        self.assertEqual(
            sorted(six.iterlists(d)),
            [('name', ['Adrian', 'Simon']), ('position', ['Developer'])]
        )
        six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
            d.__getitem__, 'lastname')
        self.assertEqual(d.get('lastname'), None)
        self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(d.getlist('lastname'), [])
        self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
                         ['Adrian', 'Simon'])
        d.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
        self.assertEqual(sorted(six.itervalues(d)),
                         ['Developer', 'Simon', 'Willison'])
    def test_appendlist(self):
        """appendlist() accumulates values for a key in insertion order."""
        d = MultiValueDict()
        d.appendlist('name', 'Adrian')
        d.appendlist('name', 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
    def test_copy(self):
        """Both copy.copy and MultiValueDict.copy are shallow: top-level
        updates are independent, but contained mutable values are shared."""
        for copy_func in [copy.copy, lambda d: d.copy()]:
            d1 = MultiValueDict({
                "developers": ["Carl", "Fred"]
            })
            self.assertEqual(d1["developers"], "Fred")
            d2 = copy_func(d1)
            d2.update({"developers": "Groucho"})
            self.assertEqual(d2["developers"], "Groucho")
            self.assertEqual(d1["developers"], "Fred")
            d1 = MultiValueDict({
                "key": [[]]
            })
            self.assertEqual(d1["key"], [])
            d2 = copy_func(d1)
            d2["key"].append("Penguin")
            # Shallow copy: the inner list is shared between d1 and d2.
            self.assertEqual(d1["key"], ["Penguin"])
            self.assertEqual(d2["key"], ["Penguin"])
    def test_dict_translation(self):
        """dict() flattens the MultiValueDict, keeping the last value per key."""
        mvd = MultiValueDict({
            'devs': ['Bob', 'Joe'],
            'pm': ['Rory'],
        })
        d = mvd.dict()
        self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
        for key in six.iterkeys(mvd):
            self.assertEqual(d[key], mvd[key])
        self.assertEqual({}, MultiValueDict().dict())
class ImmutableListTests(SimpleTestCase):
    def test_sort(self):
        """Mutating methods raise AttributeError with the default message;
        repr shows the tuple-like contents."""
        immutable = ImmutableList(range(10))
        # AttributeError: ImmutableList object is immutable.
        self.assertRaisesMessage(
            AttributeError, 'ImmutableList object is immutable.', immutable.sort)
        self.assertEqual(repr(immutable), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
    def test_custom_warning(self):
        """A custom warning string replaces the default AttributeError
        message; read access still works."""
        immutable = ImmutableList(range(10), warning="Object is immutable!")
        self.assertEqual(immutable[1], 1)
        # AttributeError: Object is immutable!
        self.assertRaisesMessage(
            AttributeError, 'Object is immutable!', immutable.__setitem__, 1, 'test')
class DictWrapperTests(SimpleTestCase):
    def test_dictwrapper(self):
        """DictWrapper applies the given function to values looked up through
        keys carrying the prefix ('xx_'), leaving unprefixed keys untouched."""
        # PEP 8 (E731): use a named function rather than assigning a lambda.
        def modify(value):
            return "*%s" % value
        d = DictWrapper({'a': 'a'}, modify, 'xx_')
        self.assertEqual(
            "Normal: %(a)s. Modified: %(xx_a)s" % d,
            'Normal: a. Modified: *a'
        )
| bsd-3-clause |
alabeduarte/pixelated-user-agent | service/test/unit/bitmask_libraries/test_certs.py | 8 | 1376 | import unittest
from pixelated.bitmask_libraries.certs import LeapCertificate
from mock import MagicMock
class CertsTest(unittest.TestCase):
    def setUp(self):
        # Fake provider exposing only the attributes LeapCertificate reads.
        config = MagicMock(leap_home='/some/leap/home')
        self.provider = MagicMock(server_name=u'test.leap.net', config=config)
    def test_set_cert_and_fingerprint_sets_cert(self):
        """Passing only a cert stores it and leaves the fingerprint unset."""
        LeapCertificate.set_cert_and_fingerprint('some cert', None)
        certs = LeapCertificate(self.provider)
        self.assertIsNone(certs.LEAP_FINGERPRINT)
        self.assertEqual('some cert', certs.provider_web_cert)
    def test_set_cert_and_fingerprint_sets_fingerprint(self):
        """Passing only a fingerprint stores it (as class-level state) and
        leaves the web cert falsy."""
        LeapCertificate.set_cert_and_fingerprint(None, 'fingerprint')
        certs = LeapCertificate(self.provider)
        self.assertEqual('fingerprint', LeapCertificate.LEAP_FINGERPRINT)
        self.assertFalse(certs.provider_web_cert)
    def test_set_cert_and_fingerprint_when_none_are_passed(self):
        """With neither value, the web cert defaults to True (verify against
        system CA bundle) and no fingerprint is pinned."""
        LeapCertificate.set_cert_and_fingerprint(None, None)
        certs = LeapCertificate(self.provider)
        self.assertIsNone(certs.LEAP_FINGERPRINT)
        self.assertEqual(True, certs.provider_web_cert)
    def test_provider_api_cert(self):
        """The API cert path is derived from leap_home and the provider's
        server name."""
        certs = LeapCertificate(self.provider).provider_api_cert
        self.assertEqual('/some/leap/home/providers/test.leap.net/keys/client/api.pem', certs)
| agpl-3.0 |
reminisce/mxnet | example/image-classification/symbols/mlp.py | 58 | 1434 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
a simple multilayer perceptron
"""
import mxnet as mx
def get_symbol(num_classes=10, **kwargs):
    """Build a three-layer MLP symbol: 128 -> 64 -> num_classes, with ReLU
    activations between the fully-connected layers and a softmax output.

    Extra keyword arguments are accepted (and ignored) for interface
    compatibility with the other symbol factories in this directory.
    """
    net = mx.symbol.Variable('data')
    net = mx.sym.Flatten(data=net)
    net = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=128)
    net = mx.symbol.Activation(data=net, name='relu1', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='fc2', num_hidden=64)
    net = mx.symbol.Activation(data=net, name='relu2', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='fc3', num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, name='softmax')
| apache-2.0 |
TeamExodus/external_chromium_org | tools/deep_memory_profiler/visualizer/services.py | 99 | 2971 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is expected to be used under another directory to use,
# so we disable checking import path of GAE tools from this directory.
# pylint: disable=F0401,E0611,W0232
import json
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
class Profiler(ndb.Model):
    """Profiler entity to store json data. Use run_id as its key.
    Json data will be stored at blobstore, but can be referred by BlobKey."""
    # Pointer to the uploaded JSON blob in blobstore; the entity itself only
    # stores this reference, never the payload.
    blob_key = ndb.BlobKeyProperty()
class Template(ndb.Model):
    """Template to breakdown profiler with multiple tags.
    Use content as its key."""
    # Template body as structured JSON; its json.dumps() form doubles as the
    # entity key (see CreateTemplate/CreateTemplates) for content-uniqueness.
    content = ndb.JsonProperty()
def CreateProfiler(blob_info):
    """Create Profiler entity in database of uploaded file. Return run_id."""
    uploaded = json.loads(blob_info.open().read())
    run_id = uploaded['run_id']
    # Only store a new entity when this run_id has not been seen before.
    if not ndb.Key('Profiler', run_id).get():
        Profiler(id=run_id, blob_key=blob_info.key()).put()
    return run_id
def GetProfiler(run_id):
    """Get Profiler entity from database of given run_id."""
    # Get entity key.
    profiler = ndb.Key('Profiler', run_id).get()
    # Read the raw JSON payload back out of blobstore via the stored BlobKey.
    return blobstore.BlobReader(profiler.blob_key).read()
def CreateTemplates(blob_info):
    """Create Template entities for all templates of uploaded file. Return ndb.Key
    of default template or None if not indicated or found in templates."""
    json_str = blob_info.open().read()
    json_obj = json.loads(json_str)
    # Return None when no default template indicated.
    if 'default_template' not in json_obj:
        return None
    # Return None when no default template found in templates.
    if json_obj['default_template'] not in json_obj['templates']:
        return None
    # Check the uniqueness of template content and store new one.
    # The guards above guarantee the default tag is present in 'templates',
    # so default_key is always bound before the return below.
    for tag, content in json_obj['templates'].iteritems():
        content_str = json.dumps(content)
        tmpl_key = ndb.Key('Template', content_str)
        if tag == json_obj['default_template']:
            default_key = tmpl_key
        if not tmpl_key.get():
            # Template of the same content does not exist.
            template = Template(id=content_str, content=content)
            template.put()
    return default_key
def CreateTemplate(content):
    """Create Template entity for user to share."""
    serialized = json.dumps(content)
    key = ndb.Key('Template', serialized)
    # Persist only when no template with identical content exists yet.
    if not key.get():
        Template(id=serialized, content=content).put()
    return key
def GetTemplate(tmpl_id):
    """Get Template entity of given tmpl_id generated by ndb.Key."""
    # Get entity key.
    # tmpl_id is the urlsafe form produced by ndb.Key.urlsafe().
    template = ndb.Key(urlsafe=tmpl_id).get()
    return json.dumps(template.content)
| bsd-3-clause |
mushorg/tanner | tanner/tests/test_cmd_exec_emulation.py | 1 | 2054 | import unittest
from unittest import mock
import asyncio
from tanner.emulators import cmd_exec
class TestCmdExecEmulator(unittest.TestCase):
    def setUp(self):
        # Fresh event loop per test; the emulator executes commands inside a
        # busybox Docker container, so these tests require a Docker daemon.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.handler = cmd_exec.CmdExecEmulator()
        self.handler.helper.host_image = "busybox:latest"
        self.sess = mock.Mock()
        self.sess.sess_uuid.hex = "e86d20b858224e239d3991c1a2650bc7"
    def test_scan(self):
        """scan() detects a shell-command payload and reports its type/order."""
        attack = "id; uname"
        assert_result = dict(name="cmd_exec", order=3)
        result = self.handler.scan(attack)
        self.assertEqual(result, assert_result)
    def test_scan_negative(self):
        """A payload containing an unrecognized token ('curl') yields None."""
        attack = "id; curl"
        assert_result = None
        result = self.handler.scan(attack)
        self.assertEqual(result, assert_result)
    def test_handle_simple_command(self):
        """handle() runs a single command in the container as root."""
        attack_params = [dict(id="foo", value="id")]
        result = self.loop.run_until_complete(self.handler.handle(attack_params, self.sess))
        assert_result = "uid=0(root) gid=0(root)"
        self.assertIn(assert_result, result["value"])
    def test_handle_nested_commands(self):
        """Both ';' and '&&' separated command chains execute fully."""
        attack_params = [[dict(id="foo1", value="id; uname")], [dict(id="foo2", value="id && uname")]]
        assert_result = {"id": "uid=0(root) gid=0(root)", "uname": "Linux"}
        for attack_param in attack_params:
            result = self.loop.run_until_complete(self.handler.handle(attack_param, self.sess))
            self.assertIn(assert_result["id"], result["value"])
            self.assertIn(assert_result["uname"], result["value"])
    def test_handle_invalid_commands(self):
        """Unknown commands surface the shell's own error output."""
        attack_params = [dict(id="foo", value="foo")]
        result = self.loop.run_until_complete(self.handler.handle(attack_params, self.sess))
        assert_result = "sh: foo: not found"
        self.assertIn(assert_result, result["value"])
    def tearDown(self):
        # Close the Docker client session before tearing down the loop.
        self.loop.run_until_complete(self.handler.helper.docker_client.close())
        self.loop.close()
| gpl-3.0 |
zsiciarz/django | django/contrib/gis/shortcuts.py | 24 | 1221 | import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
def compress_kml(kml):
    "Return compressed KMZ from the given KML string."
    buf = BytesIO()
    with zipfile.ZipFile(buf, 'a', zipfile.ZIP_DEFLATED) as archive:
        # A KMZ is a zip archive whose main document is named 'doc.kml'.
        archive.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
    return buf.getvalue()
def render_to_kml(*args, **kwargs):
    "Render the response as KML (using the correct MIME type)."
    # Arguments are forwarded verbatim to loader.render_to_string
    # (template name, context, request, ...).
    return HttpResponse(
        loader.render_to_string(*args, **kwargs),
        content_type='application/vnd.google-earth.kml+xml',
    )
def render_to_kmz(*args, **kwargs):
    """
    Compress the KML content and return as KMZ (using the correct
    MIME type).
    """
    # Render the KML template first, then zip it via compress_kml.
    return HttpResponse(
        compress_kml(loader.render_to_string(*args, **kwargs)),
        content_type='application/vnd.google-earth.kmz',
    )
def render_to_text(*args, **kwargs):
    "Render the response using the MIME type for plain text."
    body = loader.render_to_string(*args, **kwargs)
    return HttpResponse(body, content_type='text/plain')
| bsd-3-clause |
onitake/ansible | lib/ansible/modules/cloud/google/gce_net.py | 32 | 18212 | #!/usr/bin/python
# Copyright 2013 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://cloud.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25))
this parameter is mandatory when creating or updating a firewall rule
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
this parameter is not mandatory when you specified existing network in name parameter,
but when you create new network, this parameter is mandatory
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
aliases: ['fwrule']
name:
description:
- name of the network
src_range:
description:
- the source IPv4 address range in CIDR notation
default: []
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
default: []
target_tags:
version_added: "1.9"
description:
- the target instance tags for creating a firewall rule
default: []
state:
description:
- desired state of the network or firewall
default: "present"
choices: ["active", "present", "absent", "deleted"]
service_account_email:
version_added: "1.6"
description:
- service account email
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use C(credentials_file).
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
project_id:
version_added: "1.6"
description:
- your GCE project ID
mode:
version_added: "2.2"
description:
- network mode for Google Cloud
C(legacy) indicates a network with an IP address range;
C(auto) automatically generates subnetworks in different regions;
C(custom) uses networks to group subnets of user specified IP address ranges
https://cloud.google.com/compute/docs/networking#network_types
default: "legacy"
choices: ["legacy", "auto", "custom"]
subnet_name:
version_added: "2.2"
description:
- name of subnet to create
subnet_region:
version_added: "2.2"
description:
- region of subnet to create
subnet_desc:
version_added: "2.2"
description:
- description of subnet to create
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
'''
EXAMPLES = '''
# Create a 'legacy' Network
- name: Create Legacy Network
gce_net:
name: legacynet
ipv4_range: '10.24.17.0/24'
mode: legacy
state: present
# Create an 'auto' Network
- name: Create Auto Network
gce_net:
name: autonet
mode: auto
state: present
# Create a 'custom' Network
- name: Create Custom Network
gce_net:
name: customnet
mode: custom
subnet_name: "customsubnet"
subnet_region: us-east1
ipv4_range: '10.240.16.0/24'
state: "present"
# Create Firewall Rule with Source Tags
- name: Create Firewall Rule w/Source Tags
gce_net:
name: default
fwname: "my-firewall-rule"
allowed: tcp:80
state: "present"
src_tags: "foo,bar"
# Create Firewall Rule with Source Range
- name: Create Firewall Rule w/Source Range
gce_net:
name: default
fwname: "my-firewall-rule"
allowed: tcp:80
state: "present"
src_range: ['10.1.1.1/32']
# Create Custom Subnetwork
- name: Create Custom Subnetwork
gce_net:
name: privatenet
mode: custom
subnet_name: subnet_example
subnet_region: us-central1
ipv4_range: '10.0.0.0/16'
'''
RETURN = '''
allowed:
description: Rules (ports and protocols) specified by this firewall rule.
returned: When specified
type: string
sample: "tcp:80;icmp"
fwname:
description: Name of the firewall rule.
returned: When specified
type: string
sample: "my-fwname"
ipv4_range:
description: IPv4 range of the specified network or subnetwork.
returned: when specified or when a subnetwork is created
type: string
sample: "10.0.0.0/16"
name:
description: Name of the network.
returned: always
type: string
sample: "my-network"
src_range:
description: IP address blocks a firewall rule applies to.
returned: when specified
type: list
sample: [ '10.1.1.12/8' ]
src_tags:
description: Instance Tags firewall rule applies to.
returned: when specified while creating a firewall rule
type: list
sample: [ 'foo', 'bar' ]
state:
description: State of the item operated on.
returned: always
type: string
sample: "present"
subnet_name:
description: Name of the subnetwork.
returned: when specified or when a subnetwork is created
type: string
sample: "my-subnetwork"
subnet_region:
description: Region of the specified subnet.
returned: when specified or when a subnetwork is created
type: string
sample: "us-east1"
target_tags:
description: Instance Tags with these tags receive traffic allowed by firewall rule.
returned: when specified while creating a firewall rule
type: list
sample: [ 'foo', 'bar' ]
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
def format_allowed_section(allowed):
    """Normalize one "proto[:ports]" section into GCE's 'allowed' dict form.

    Returns {"IPProtocol": proto} with an optional "ports" list, or an empty
    list when the section is malformed (more than one ':').
    """
    if allowed.count(":") > 1:
        # Malformed section such as "tcp:80:90"; callers treat [] as a no-op.
        return []
    protocol, _, port_spec = allowed.partition(":")
    if "," in port_spec:
        ports = port_spec.split(",")
    elif port_spec:
        ports = [port_spec]
    else:
        ports = []
    section = {"IPProtocol": protocol}
    if ports:
        section["ports"] = ports
    return section
def format_allowed(allowed):
    """Format the 'allowed' value so that it is GCE compatible.

    Splits the spec on ';' and normalizes every section through
    format_allowed_section, always returning a list of sections.
    """
    if ";" not in allowed:
        return [format_allowed_section(allowed)]
    return [format_allowed_section(section) for section in allowed.split(";")]
def sorted_allowed_list(allowed_list):
    """Sort allowed_list (output of format_allowed) deterministically.

    Final order is by sorted "ports" list; entries with equal port lists keep
    their protocol ordering thanks to sort stability.
    """
    by_protocol = sorted(allowed_list, key=lambda entry: entry['IPProtocol'])
    # Second (stable) pass orders by the sorted ports list.
    return sorted(by_protocol, key=lambda entry: sorted(entry.get('ports', [])))
def main():
    """Module entry point: create or delete GCE networks, subnetworks and
    firewall rules according to the module parameters, then exit via
    module.exit_json()/fail_json()."""
    module = AnsibleModule(
        argument_spec=dict(
            allowed=dict(),
            ipv4_range=dict(),
            fwname=dict(),
            name=dict(),
            src_range=dict(default=[], type='list'),
            src_tags=dict(default=[], type='list'),
            target_tags=dict(default=[], type='list'),
            state=dict(default='present'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
            mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
            subnet_name=dict(),
            subnet_region=dict(),
            subnet_desc=dict(),
        )
    )
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
    gce = gce_connect(module)
    allowed = module.params.get('allowed')
    ipv4_range = module.params.get('ipv4_range')
    fwname = module.params.get('fwname')
    name = module.params.get('name')
    src_range = module.params.get('src_range')
    src_tags = module.params.get('src_tags')
    target_tags = module.params.get('target_tags')
    state = module.params.get('state')
    mode = module.params.get('mode')
    subnet_name = module.params.get('subnet_name')
    subnet_region = module.params.get('subnet_region')
    subnet_desc = module.params.get('subnet_desc')
    changed = False
    json_output = {'state': state}
    if state in ['active', 'present']:
        network = None
        subnet = None
        # Look up the existing network (and, in custom mode, its subnetwork).
        try:
            network = gce.ex_get_network(name)
            json_output['name'] = name
            if mode == 'legacy':
                json_output['ipv4_range'] = network.cidr
            if network and mode == 'custom' and subnet_name:
                if not hasattr(gce, 'ex_get_subnetwork'):
                    module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
                subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
                json_output['subnet_name'] = subnet_name
                json_output['ipv4_range'] = subnet.cidr
        except ResourceNotFoundError:
            # Not found is fine: the resource may be created below.
            pass
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # user wants to create a new network that doesn't yet exist
        if name and not network:
            if not ipv4_range and mode != 'auto':
                module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
                                 changed=False)
            args = [ipv4_range if mode == 'legacy' else None]
            kwargs = {}
            if mode != 'legacy':
                kwargs['mode'] = mode
            try:
                network = gce.ex_create_network(name, *args, **kwargs)
                json_output['name'] = name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except TypeError:
                # Older libcloud's ex_create_network() lacks the 'mode' kwarg.
                module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
        if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
            if not hasattr(gce, 'ex_create_subnetwork'):
                module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
            if not subnet_name or not ipv4_range or not subnet_region:
                module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)
            try:
                subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
                json_output['subnet_name'] = subnet_name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=changed)
        if fwname:
            # user creating a firewall rule
            if not allowed and not src_range and not src_tags:
                if changed and network:
                    module.fail_json(
                        msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True)
                module.fail_json(
                    msg="Missing required firewall rule parameter(s)",
                    changed=False)
            allowed_list = format_allowed(allowed)
            # Fetch existing rule and if it exists, compare attributes
            # update if attributes changed. Create if doesn't exist.
            try:
                fw_changed = False
                fw = gce.ex_get_firewall(fwname)
                # If old and new attributes are different, we update the firewall rule.
                # This implicitly lets us clear out attributes as well.
                # allowed_list is required and must not be None for firewall rules.
                if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
                    fw.allowed = allowed_list
                    fw_changed = True
                # source_ranges might not be set in the project; cast it to an empty list
                fw.source_ranges = fw.source_ranges or []
                # If these attributes are lists, we sort them first, then compare.
                # Otherwise, we update if they differ.
                if fw.source_ranges != src_range:
                    if isinstance(src_range, list):
                        if sorted(fw.source_ranges) != sorted(src_range):
                            fw.source_ranges = src_range
                            fw_changed = True
                    else:
                        fw.source_ranges = src_range
                        fw_changed = True
                # source_tags might not be set in the project; cast it to an empty list
                fw.source_tags = fw.source_tags or []
                if fw.source_tags != src_tags:
                    if isinstance(src_tags, list):
                        if sorted(fw.source_tags) != sorted(src_tags):
                            fw.source_tags = src_tags
                            fw_changed = True
                    else:
                        fw.source_tags = src_tags
                        fw_changed = True
                # target_tags might not be set in the project; cast it to an empty list
                fw.target_tags = fw.target_tags or []
                if fw.target_tags != target_tags:
                    if isinstance(target_tags, list):
                        if sorted(fw.target_tags) != sorted(target_tags):
                            fw.target_tags = target_tags
                            fw_changed = True
                    else:
                        fw.target_tags = target_tags
                        fw_changed = True
                if fw_changed is True:
                    try:
                        gce.ex_update_firewall(fw)
                        changed = True
                    except Exception as e:
                        module.fail_json(msg=unexpected_error_msg(e), changed=False)
            # Firewall rule not found so we try to create it.
            except ResourceNotFoundError:
                try:
                    gce.ex_create_firewall(fwname, allowed_list, network=name,
                                           source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
                    changed = True
                except Exception as e:
                    module.fail_json(msg=unexpected_error_msg(e), changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['fwname'] = fwname
            json_output['allowed'] = allowed
            json_output['src_range'] = src_range
            json_output['src_tags'] = src_tags
            json_output['target_tags'] = target_tags
    if state in ['absent', 'deleted']:
        # Deletion precedence: firewall rule, then subnetwork, then network.
        if fwname:
            json_output['fwname'] = fwname
            fw = None
            try:
                fw = gce.ex_get_firewall(fwname)
            except ResourceNotFoundError:
                # Already absent: nothing to delete, report unchanged.
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if fw:
                gce.ex_destroy_firewall(fw)
                changed = True
        elif subnet_name:
            if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
                module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
            json_output['name'] = subnet_name
            subnet = None
            try:
                subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
            except ResourceNotFoundError:
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if subnet:
                gce.ex_destroy_subnetwork(subnet)
                changed = True
        elif name:
            json_output['name'] = name
            network = None
            try:
                network = gce.ex_get_network(name)
            except ResourceNotFoundError:
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if network:
                try:
                    gce.ex_destroy_network(network)
                except Exception as e:
                    module.fail_json(msg=unexpected_error_msg(e), changed=False)
                changed = True
    json_output['changed'] = changed
    module.exit_json(**json_output)
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
| gpl-3.0 |
nhenezi/kuma | vendor/packages/nose/nose/plugins/base.py | 83 | 26056 | import os
import textwrap
from optparse import OptionConflictError
from warnings import warn
from nose.util import tolist
class Plugin(object):
"""Base class for nose plugins. It's recommended but not *necessary* to
subclass this class to create a plugin, but all plugins *must* implement
`options(self, parser, env)` and `configure(self, options, conf)`, and
must have the attributes `enabled`, `name` and `score`. The `name`
attribute may contain hyphens ('-').
Plugins should not be enabled by default.
Subclassing Plugin (and calling the superclass methods in
__init__, configure, and options, if you override them) will give
your plugin some friendly default behavior:
* A --with-$name option will be added to the command line interface
to enable the plugin, and a corresponding environment variable
will be used as the default value. The plugin class's docstring
will be used as the help for this option.
* The plugin will not be enabled unless this option is selected by
the user.
"""
can_configure = False
enabled = False
enableOpt = None
name = None
score = 100
def __init__(self):
if self.name is None:
self.name = self.__class__.__name__.lower()
if self.enableOpt is None:
self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
def addOptions(self, parser, env=None):
"""Add command-line options for this plugin.
The base plugin class adds --with-$name by default, used to enable the
plugin.
.. warning :: Don't implement addOptions unless you want to override
all default option handling behavior, including
warnings for conflicting options. Implement
:meth:`options
<nose.plugins.base.IPluginInterface.options>`
instead.
"""
self.add_options(parser, env)
def add_options(self, parser, env=None):
"""Non-camel-case version of func name for backwards compatibility.
.. warning ::
DEPRECATED: Do not use this method,
use :meth:`options <nose.plugins.base.IPluginInterface.options>`
instead.
"""
# FIXME raise deprecation warning if wasn't called by wrapper
if env is None:
env = os.environ
try:
self.options(parser, env)
self.can_configure = True
except OptionConflictError, e:
warn("Plugin %s has conflicting option string: %s and will "
"be disabled" % (self, e), RuntimeWarning)
self.enabled = False
self.can_configure = False
def options(self, parser, env):
"""Register commandline options.
Implement this method for normal options behavior with protection from
OptionConflictErrors. If you override this method and want the default
--with-$name option to be registered, be sure to call super().
"""
env_opt = 'NOSE_WITH_%s' % self.name.upper()
env_opt = env_opt.replace('-', '_')
parser.add_option("--with-%s" % self.name,
action="store_true",
dest=self.enableOpt,
default=env.get(env_opt),
help="Enable plugin %s: %s [%s]" %
(self.__class__.__name__, self.help(), env_opt))
def configure(self, options, conf):
"""Configure the plugin and system, based on selected options.
The base plugin class sets the plugin to enabled if the enable option
for the plugin (self.enableOpt) is true.
"""
if not self.can_configure:
return
self.conf = conf
if hasattr(options, self.enableOpt):
self.enabled = getattr(options, self.enableOpt)
def help(self):
"""Return help for this plugin. This will be output as the help
section of the --with-$name option that enables the plugin.
"""
if self.__class__.__doc__:
# doc sections are often indented; compress the spaces
return textwrap.dedent(self.__class__.__doc__)
return "(no help available)"
    # Compatibility shim: kept only so old plugins that call Plugin.tolist
    # keep working.
    def tolist(self, val):
        """Deprecated; delegates to the module-level tolist helper."""
        warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
             DeprecationWarning)
        # Inside this method, the bare name resolves to the module-level
        # tolist (presumably imported from nose.util -- import not visible
        # in this chunk; confirm), not to this method.
        return tolist(val)
class IPluginInterface(object):
    """
    IPluginInterface describes the plugin API. Do not subclass or use this
    class directly.
    """
    # Marker attributes attached to the hook methods below:
    #   deprecated  -- hook is obsolete, kept for backward compatibility
    #   _new        -- hook did not exist in the older (0.9) plugin API
    #   changed     -- hook's signature changed from the older API
    #   chainable   -- one plugin's return value is fed to the next plugin
    #   generative  -- hook may return/yield an iterable of results
    #   static_args -- per-argument flags for args not replaced when chaining
    # NOTE(review): semantics inferred from the docstrings and usage below;
    # confirm against the plugin manager/proxy that consumes these flags.
    def __new__(cls, *arg, **kw):
        raise TypeError("IPluginInterface class is for documentation only")
    def addOptions(self, parser, env):
        """Called to allow plugin to register command-line options with the
        parser. DO NOT return a value from this method unless you want to stop
        all other plugins from setting their options.
        .. warning ::
            DEPRECATED -- implement
            :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
        """
        pass
    add_options = addOptions
    add_options.deprecated = True
    def addDeprecated(self, test):
        """Called when a deprecated test is seen. DO NOT return a value
        unless you want to stop other plugins from seeing the deprecated
        test.
        .. warning :: DEPRECATED -- check error class in addError instead
        """
        pass
    addDeprecated.deprecated = True
    def addError(self, test, err):
        """Called when a test raises an uncaught exception. DO NOT return a
        value unless you want to stop other plugins from seeing that the
        test has raised an error.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
        """
        pass
    addError.changed = True
    def addFailure(self, test, err):
        """Called when a test fails. DO NOT return a value unless you
        want to stop other plugins from seeing that the test has failed.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: 3-tuple
        :type err: sys.exc_info() tuple
        """
        pass
    addFailure.changed = True
    def addSkip(self, test):
        """Called when a test is skipped. DO NOT return a value unless
        you want to stop other plugins from seeing the skipped test.
        .. warning:: DEPRECATED -- check error class in addError instead
        """
        pass
    addSkip.deprecated = True
    def addSuccess(self, test):
        """Called when a test passes. DO NOT return a value unless you
        want to stop other plugins from seeing the passing test.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    addSuccess.changed = True
    def afterContext(self):
        """Called after a context (generally a module) has been
        lazy-loaded, imported, setup, had its tests loaded and
        executed, and torn down.
        """
        pass
    afterContext._new = True
    def afterDirectory(self, path):
        """Called after all tests have been loaded from directory at path
        and run.
        :param path: the directory that has finished processing
        :type path: string
        """
        pass
    afterDirectory._new = True
    def afterImport(self, filename, module):
        """Called after module is imported from filename. afterImport
        is called even if the import failed.
        :param filename: The file that was loaded
        :type filename: string
        :param module: The name of the module
        :type module: string
        """
        pass
    afterImport._new = True
    def afterTest(self, test):
        """Called after the test has been run and the result recorded
        (after stopTest).
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    afterTest._new = True
    def beforeContext(self):
        """Called before a context (generally a module) is
        examined. Because the context is not yet loaded, plugins don't
        get to know what the context is; so any context operations
        should use a stack that is pushed in `beforeContext` and popped
        in `afterContext` to ensure they operate symmetrically.
        `beforeContext` and `afterContext` are mainly useful for tracking
        and restoring global state around possible changes from within a
        context, whatever the context may be. If you need to operate on
        contexts themselves, see `startContext` and `stopContext`, which
        are passed the context in question, but are called after
        it has been loaded (imported in the module case).
        """
        pass
    beforeContext._new = True
    def beforeDirectory(self, path):
        """Called before tests are loaded from directory at path.
        :param path: the directory that is about to be processed
        """
        pass
    beforeDirectory._new = True
    def beforeImport(self, filename, module):
        """Called before module is imported from filename.
        :param filename: The file that will be loaded
        :param module: The name of the module found in file
        :type module: string
        """
    beforeImport._new = True
    def beforeTest(self, test):
        """Called before the test is run (before startTest).
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    beforeTest._new = True
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        pass
    def configure(self, options, conf):
        """Called after the command line has been parsed, with the
        parsed options and the config container. Here, implement any
        config storage or changes to state or operation that are set
        by command line options.
        DO NOT return a value from this method unless you want to
        stop all other plugins from being configured.
        """
        pass
    def finalize(self, result):
        """Called after all report output, including output from all
        plugins, has been sent to the stream. Use this to print final
        test results or perform final cleanup. Return None to allow
        other plugins to continue printing, or any other value to stop
        them.
        :param result: test result object
        .. Note:: When tests are run under a test runner other than
           :class:`nose.core.TextTestRunner`, such as
           via ``python setup.py test``, this method may be called
           **before** the default report output is sent.
        """
        pass
    def describeTest(self, test):
        """Return a test description.
        Called by :meth:`nose.case.Test.shortDescription`.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    describeTest._new = True
    def formatError(self, test, err):
        """Called in result.addError, before plugin.addError. If you
        want to replace or modify the error tuple, return a new error
        tuple, otherwise return err, the original error tuple.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
        """
        pass
    formatError._new = True
    formatError.chainable = True
    # test arg is not chainable
    formatError.static_args = (True, False)
    def formatFailure(self, test, err):
        """Called in result.addFailure, before plugin.addFailure. If you
        want to replace or modify the error tuple, return a new error
        tuple, otherwise return err, the original error tuple.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
        """
        pass
    formatFailure._new = True
    formatFailure.chainable = True
    # test arg is not chainable
    formatFailure.static_args = (True, False)
    def handleError(self, test, err):
        """Called on addError. To handle the error yourself and prevent normal
        error processing, return a true value.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
        """
        pass
    handleError._new = True
    def handleFailure(self, test, err):
        """Called on addFailure. To handle the failure yourself and
        prevent normal failure processing, return a true value.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
        """
        pass
    handleFailure._new = True
    def loadTestsFromDir(self, path):
        """Return iterable of tests from a directory. May be a
        generator.  Each item returned must be a runnable
        unittest.TestCase (or subclass) instance or suite instance.
        Return None if your plugin cannot collect any tests from
        directory.
        :param path: The path to the directory.
        """
        pass
    loadTestsFromDir.generative = True
    loadTestsFromDir._new = True
    def loadTestsFromModule(self, module, path=None):
        """Return iterable of tests in a module. May be a
        generator. Each item returned must be a runnable
        unittest.TestCase (or subclass) instance.
        Return None if your plugin cannot
        collect any tests from module.
        :param module: The module object
        :type module: python module
        :param path: the path of the module to search, to distinguish from
            namespace package modules
        .. note::
           NEW. The ``path`` parameter will only be passed by nose 0.11
           or above.
        """
        pass
    loadTestsFromModule.generative = True
    def loadTestsFromName(self, name, module=None, importPath=None):
        """Return tests in this file or module. Return None if you are not able
        to load any tests, or an iterable if you are. May be a
        generator.
        :param name: The test name. May be a file or module name plus a test
            callable. Use split_test_name to split into parts. Or it might
            be some crazy name of your own devising, in which case, do
            whatever you want.
        :param module: Module from which the name is to be loaded
        :param importPath: Path from which file (must be a python module) was
            found
        .. warning:: DEPRECATED: this argument will NOT be passed.
        """
        pass
    loadTestsFromName.generative = True
    def loadTestsFromNames(self, names, module=None):
        """Return a tuple of (tests loaded, remaining names). Return
        None if you are not able to load any tests. Multiple plugins
        may implement loadTestsFromNames; the remaining name list from
        each will be passed to the next as input.
        :param names: List of test names.
        :type names: iterable
        :param module: Module from which the names are to be loaded
        """
        pass
    loadTestsFromNames._new = True
    loadTestsFromNames.chainable = True
    def loadTestsFromFile(self, filename):
        """Return tests in this file. Return None if you are not
        interested in loading any tests, or an iterable if you are and
        can load some. May be a generator. *If you are interested in
        loading tests from the file and encounter no errors, but find
        no tests, yield False or return [False].*
        .. Note:: This method replaces loadTestsFromPath from the 0.9
                  API.
        :param filename: The full path to the file or directory.
        """
        pass
    loadTestsFromFile.generative = True
    loadTestsFromFile._new = True
    def loadTestsFromPath(self, path):
        """
        .. warning:: DEPRECATED -- use loadTestsFromFile instead
        """
        pass
    loadTestsFromPath.deprecated = True
    def loadTestsFromTestCase(self, cls):
        """Return tests in this test case class. Return None if you are
        not able to load any tests, or an iterable if you are. May be a
        generator.
        :param cls: The test case class. Must be subclass of
           :class:`unittest.TestCase`.
        """
        pass
    loadTestsFromTestCase.generative = True
    def loadTestsFromTestClass(self, cls):
        """Return tests in this test class. Class will *not* be a
        unittest.TestCase subclass. Return None if you are not able to
        load any tests, an iterable if you are. May be a generator.
        :param cls: The test case class. Must be **not** be subclass of
           :class:`unittest.TestCase`.
        """
        pass
    loadTestsFromTestClass._new = True
    loadTestsFromTestClass.generative = True
    def makeTest(self, obj, parent):
        """Given an object and its parent, return or yield one or more
        test cases. Each test must be a unittest.TestCase (or subclass)
        instance. This is called before default test loading to allow
        plugins to load an alternate test case or cases for an
        object. May be a generator.
        :param obj: The object to be made into a test
        :param parent: The parent of obj (eg, for a method, the class)
        """
        pass
    makeTest._new = True
    makeTest.generative = True
    def options(self, parser, env):
        """Called to allow plugin to register command line
        options with the parser.
        DO NOT return a value from this method unless you want to stop
        all other plugins from setting their options.
        :param parser: options parser instance
        :type parser: :class:`ConfigParser.ConfigParser`
        :param env: environment, default is os.environ
        """
        pass
    options._new = True
    def prepareTest(self, test):
        """Called before the test is run by the test runner. Please
        note the article *the* in the previous sentence: prepareTest
        is called *only once*, and is passed the test case or test
        suite that the test runner will execute. It is *not* called
        for each individual test case. If you return a non-None value,
        that return value will be run as the test. Use this hook to
        wrap or decorate the test with another function. If you need
        to modify or wrap individual test cases, use `prepareTestCase`
        instead.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    def prepareTestCase(self, test):
        """Prepare or wrap an individual test case. Called before
        execution of the test. The test passed here is a
        nose.case.Test instance; the case to be executed is in the
        test attribute of the passed case. To modify the test to be
        run, you should return a callable that takes one argument (the
        test result object) -- it is recommended that you *do not*
        side-effect the nose.case.Test instance you have been passed.
        Keep in mind that when you replace the test callable you are
        replacing the run() method of the test case -- including the
        exception handling and result calls, etc.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    prepareTestCase._new = True
    def prepareTestLoader(self, loader):
        """Called before tests are loaded. To replace the test loader,
        return a test loader. To allow other plugins to process the
        test loader, return None. Only one plugin may replace the test
        loader. Only valid when using nose.TestProgram.
        :param loader: :class:`nose.loader.TestLoader`
             (or other loader) instance
        """
        pass
    prepareTestLoader._new = True
    def prepareTestResult(self, result):
        """Called before the first test is run. To use a different
        test result handler for all tests than the given result,
        return a test result handler. NOTE however that this handler
        will only be seen by tests, that is, inside of the result
        proxy system. The TestRunner and TestProgram -- whether nose's
        or other -- will continue to see the original result
        handler. For this reason, it is usually better to monkeypatch
        the result (for instance, if you want to handle some
        exceptions in a unique way). Only one plugin may replace the
        result, but many may monkeypatch it. If you want to
        monkeypatch and stop other plugins from doing so, monkeypatch
        and return the patched result.
        :param result: :class:`nose.result.TextTestResult`
             (or other result) instance
        """
        pass
    prepareTestResult._new = True
    def prepareTestRunner(self, runner):
        """Called before tests are run. To replace the test runner,
        return a test runner. To allow other plugins to process the
        test runner, return None. Only valid when using nose.TestProgram.
        :param runner: :class:`nose.core.TextTestRunner`
             (or other runner) instance
        """
        pass
    prepareTestRunner._new = True
    def report(self, stream):
        """Called after all error output has been printed. Print your
        plugin's report to the provided stream. Return None to allow
        other plugins to print reports, any other value to stop them.
        :param stream: stream object; send your output here
        :type stream: file-like object
        """
        pass
    def setOutputStream(self, stream):
        """Called before test output begins. To direct test output to a
        new stream, return a stream object, which must implement a
        `write(msg)` method. If you only want to note the stream, not
        capture or redirect it, then return None.
        :param stream: stream object; send your output here
        :type stream: file-like object
        """
    def startContext(self, context):
        """Called before context setup and the running of tests in the
        context. Note that tests have already been *loaded* from the
        context before this call.
        :param context: the context about to be setup. May be a module or
             class, or any other object that contains tests.
        """
        pass
    startContext._new = True
    def startTest(self, test):
        """Called before each test is run. DO NOT return a value unless
        you want to stop other plugins from seeing the test start.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    def stopContext(self, context):
        """Called after the tests in a context have run and the
        context has been torn down.
        :param context: the context that has been torn down. May be a module or
             class, or any other object that contains tests.
        """
        pass
    stopContext._new = True
    def stopTest(self, test):
        """Called after each test is run. DO NOT return a value unless
        you want to stop other plugins from seeing that the test has stopped.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    def testName(self, test):
        """Return a short test name. Called by `nose.case.Test.__str__`.
        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        pass
    testName._new = True
    def wantClass(self, cls):
        """Return true if you want the main test selector to collect
        tests from this class, false if you don't, and None if you don't
        care.
        :param cls: The class being examined by the selector
        """
        pass
    def wantDirectory(self, dirname):
        """Return true if you want test collection to descend into this
        directory, false if you do not, and None if you don't care.
        :param dirname: Full path to directory being examined by the selector
        """
        pass
    def wantFile(self, file):
        """Return true if you want to collect tests from this file,
        false if you do not and None if you don't care.
        Change from 0.9: The optional package parameter is no longer passed.
        :param file: Full path to file being examined by the selector
        """
        pass
    def wantFunction(self, function):
        """Return true to collect this function as a test, false to
        prevent it from being collected, and None if you don't care.
        :param function: The function object being examined by the selector
        """
        pass
    def wantMethod(self, method):
        """Return true to collect this method as a test, false to
        prevent it from being collected, and None if you don't care.
        :param method: The method object being examined by the selector
        :type method: unbound method
        """
        pass
    def wantModule(self, module):
        """Return true if you want to collection to descend into this
        module, false to prevent the collector from descending into the
        module, and None if you don't care.
        :param module: The module object being examined by the selector
        :type module: python module
        """
        pass
    def wantModuleTests(self, module):
        """
        .. warning:: DEPRECATED -- this method will not be called, it has
                     been folded into wantModule.
        """
        pass
    wantModuleTests.deprecated = True
| mpl-2.0 |
sebastic/QGIS | python/plugins/processing/algs/taudem/dinfdistup.py | 12 | 5169 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dinfdistup.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from TauDEMUtils import TauDEMUtils
class DinfDistUp(GeoAlgorithm):
    """Processing wrapper around the TauDEM ``dinfdistup`` tool, which
    computes the D-Infinity distance up from each grid cell, run through
    mpiexec.
    """
    # Parameter/output keys used with get{Parameter,Output}Value().
    DINF_FLOW_DIR_GRID = 'DINF_FLOW_DIR_GRID'
    PIT_FILLED_GRID = 'PIT_FILLED_GRID'
    SLOPE_GRID = 'SLOPE_GRID'
    THRESHOLD = 'THRESHOLD'
    STAT_METHOD = 'STAT_METHOD'
    DIST_METHOD = 'DIST_METHOD'
    EDGE_CONTAM = 'EDGE_CONTAM'
    DIST_UP_GRID = 'DIST_UP_GRID'
    # Selection labels and their TauDEM command-line codes; the selection
    # index is the key into the *_DICT mappings below.
    STATISTICS = ['Minimum', 'Maximum', 'Average']
    STAT_DICT = {0: 'min', 1: 'max', 2: 'ave'}
    DISTANCE = ['Pythagoras', 'Horizontal', 'Vertical', 'Surface']
    DIST_DICT = {
        0: 'p',
        1: 'h',
        2: 'v',
        3: 's',
    }
    def getIcon(self):
        """Return the shared TauDEM provider icon."""
        return QIcon(os.path.dirname(__file__) + '/../../images/taudem.png')
    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and outputs."""
        self.name, self.i18n_name = self.trAlgorithm('D-Infinity Distance Up')
        self.cmdName = 'dinfdistup'
        self.group, self.i18n_group = self.trAlgorithm('Specialized Grid Analysis tools')
        self.addParameter(ParameterRaster(self.DINF_FLOW_DIR_GRID,
                                          self.tr('D-Infinity Flow Direction Grid'), False))
        self.addParameter(ParameterRaster(self.PIT_FILLED_GRID,
                                          self.tr('Pit Filled Elevation Grid'), False))
        self.addParameter(ParameterRaster(self.SLOPE_GRID,
                                          self.tr('Slope Grid'), False))
        self.addParameter(ParameterSelection(self.STAT_METHOD,
                                             self.tr('Statistical Method'), self.STATISTICS, 2))
        self.addParameter(ParameterSelection(self.DIST_METHOD,
                                             self.tr('Distance Method'), self.DISTANCE, 1))
        self.addParameter(ParameterNumber(self.THRESHOLD,
                                          self.tr('Proportion Threshold'), 0, None, 0.5))
        self.addParameter(ParameterBoolean(self.EDGE_CONTAM,
                                           self.tr('Check for edge contamination'), True))
        self.addOutput(OutputRaster(self.DIST_UP_GRID,
                                    self.tr('D-Infinity Distance Up')))
    def processAlgorithm(self, progress):
        """Assemble and run the mpiexec + dinfdistup command line.
        :raises GeoAlgorithmExecutionException: when the configured MPI
            process count is not a positive number.
        """
        # NOTE(review): SLOPE_GRID is declared above but never appended to
        # the command here -- confirm whether dinfdistup needs an -slp
        # argument (e.g. for the Vertical/Surface distance methods).
        commands = []
        commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
        processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
        if processNum <= 0:
            raise GeoAlgorithmExecutionException(
                self.tr('Wrong number of MPI processes used. Please set '
                        'correct number before running TauDEM algorithms.'))
        commands.append('-n')
        commands.append(unicode(processNum))
        commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
        commands.append('-ang')
        commands.append(self.getParameterValue(self.DINF_FLOW_DIR_GRID))
        commands.append('-fel')
        commands.append(self.getParameterValue(self.PIT_FILLED_GRID))
        # -m takes two codes: the statistic (min/max/ave) then the distance
        # method (p/h/v/s), both mapped from the selection indexes.
        commands.append('-m')
        commands.append(unicode(self.STAT_DICT[self.getParameterValue(
            self.STAT_METHOD)]))
        commands.append(unicode(self.DIST_DICT[self.getParameterValue(
            self.DIST_METHOD)]))
        commands.append('-thresh')
        commands.append(unicode(self.getParameterValue(self.THRESHOLD)))
        # -nc disables the edge-contamination check, so it is added only
        # when the checkbox is off.
        if not self.getParameterValue(self.EDGE_CONTAM):
            commands.append('-nc')
        commands.append('-du')
        commands.append(self.getOutputValue(self.DIST_UP_GRID))
        TauDEMUtils.executeTauDEM(commands, progress)
| gpl-2.0 |
azverkan/scons | src/engine/SCons/Tool/cvf.py | 5 | 2419 | """engine.SCons.Tool.cvf
Tool-specific initialization for the Compaq Visual Fortran compiler.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import fortran
compilers = ['f90']
def generate(env):
    """Add Builders and construction variables for compaq visual fortran to an Environment."""
    # Start from the generic Fortran tool setup, then overlay the
    # CVF-specific command lines and module-handling settings.
    fortran.generate(env)
    cvf_settings = [
        ('FORTRAN', 'f90'),
        ('FORTRANCOM', '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'),
        ('FORTRANPPCOM', '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'),
        ('SHFORTRANCOM', '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'),
        ('SHFORTRANPPCOM', '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'),
        ('OBJSUFFIX', '.obj'),
        ('FORTRANMODDIR', '${TARGET.dir}'),
        ('FORTRANMODDIRPREFIX', '/module:'),
        ('FORTRANMODDIRSUFFIX', ''),
    ]
    for key, value in cvf_settings:
        env[key] = value
def exists(env):
    """Return a true value if one of the supported compilers ('f90') is detected."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
maxwen/android_kernel_oppo_msm8916_orig | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
# Optional filter taken from argv[1]: an integer is treated as a pid,
# anything else as a process comm name.
for_comm = None
for_pid = None
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        # NOTE(review): bare except -- any non-integer argument falls
        # through to the comm filter.
        for_comm = sys.argv[1]
# syscalls[comm][pid][syscall_id] -> count; autodict creates nested levels
# on first access.
syscalls = autodict()
def trace_begin():
    # Invoked by perf once before event processing starts (Python 2 script).
    print "Press control+C to stop and show the summary"
def trace_end():
    # Invoked by perf after the last event; emit the accumulated summary.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Per-event handler for raw_syscalls:sys_enter. Skip events that do not
	# match the optional comm/pid filter set at script startup.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		# autodict returns a fresh empty dict for an unseen key; += on it
		# raises TypeError, which marks the first occurrence of this id.
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
	# Print a per-comm/per-pid table of syscall counts. Python 2 print
	# statements with trailing commas (newline suppressed) throughout.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events by comm/pid:\n\n",
	print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
		"----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			# Sort by count descending; the "lambda(k, v)" tuple-parameter
			# form is Python-2-only syntax.
			for id, val in sorted(syscalls[comm][pid].iteritems(), \
				key = lambda(k, v): (v, k), reverse = True):
				print " %-38s %10d\n" % (syscall_name(id), val),
sunzuolei/youtube-dl | youtube_dl/extractor/vrt.py | 139 | 3478 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import float_or_none
class VRTIE(InfoExtractor):
    """Extractor for video pages on the VRT sites deredactie.be, sporza.be
    and cobra.be.
    """
    _VALID_URL = r'https?://(?:deredactie|sporza|cobra)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
    _TESTS = [
        # deredactie.be
        {
            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/programmas/journaal/EP_141025_JOL',
            'md5': '4cebde1eb60a53782d4f3992cbd46ec8',
            'info_dict': {
                'id': '2129880',
                'ext': 'flv',
                'title': 'Het journaal L - 25/10/14',
                'description': None,
                'timestamp': 1414271750.949,
                'upload_date': '20141025',
                'duration': 929,
            }
        },
        # sporza.be
        {
            'url': 'http://sporza.be/cm/sporza/videozone/programmas/extratime/EP_141020_Extra_time',
            'md5': '11f53088da9bf8e7cfc42456697953ff',
            'info_dict': {
                'id': '2124639',
                'ext': 'flv',
                'title': 'Bekijk Extra Time van 20 oktober',
                'description': 'md5:83ac5415a4f1816c6a93f8138aef2426',
                'timestamp': 1413835980.560,
                'upload_date': '20141020',
                'duration': 3238,
            }
        },
        # cobra.be
        {
            'url': 'http://cobra.be/cm/cobra/videozone/rubriek/film-videozone/141022-mv-ellis-cafecorsari',
            'md5': '78a2b060a5083c4f055449a72477409d',
            'info_dict': {
                'id': '2126050',
                'ext': 'flv',
                'title': 'Bret Easton Ellis in Café Corsari',
                'description': 'md5:f699986e823f32fd6036c1855a724ee9',
                'timestamp': 1413967500.494,
                'upload_date': '20141022',
                'duration': 661,
            }
        },
    ]
    def _real_extract(self, url):
        # The URL slug is only used to fetch the page; the real numeric id
        # comes from the page markup below.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # NOTE(review): fatal=False means video_id becomes None when the
        # attribute is missing -- confirm downstream handling is acceptable.
        video_id = self._search_regex(
            r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
        formats = []
        # HLS variant: iphone server + path combine into an m3u8 URL.
        mobj = re.search(
            r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
            webpage)
        if mobj:
            formats.extend(self._extract_m3u8_formats(
                '%s/%s' % (mobj.group('server'), mobj.group('path')),
                video_id, 'mp4'))
        # HDS variant: derive the f4m manifest from the plain video source.
        mobj = re.search(r'data-video-src="(?P<src>[^"]+)"', webpage)
        if mobj:
            formats.extend(self._extract_f4m_formats(
                '%s/manifest.f4m' % mobj.group('src'), video_id))
        self._sort_formats(formats)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage, default=None)
        thumbnail = self._og_search_thumbnail(webpage)
        # Site attributes carry milliseconds; float_or_none(..., 1000)
        # scales them down to seconds.
        timestamp = float_or_none(self._search_regex(
            r'data-video-sitestat-pubdate="(\d+)"', webpage, 'timestamp', fatal=False), 1000)
        duration = float_or_none(self._search_regex(
            r'data-video-duration="(\d+)"', webpage, 'duration', fatal=False), 1000)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
| unlicense |
imarin/Odoo-Mexico-localization | l10n_mx_facturae_cer/__init__.py | 1 | 1389 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: moylop260 (moylop260@vauxoo.com)
# Launchpad Project Manager for Publication: Nhomar Hernandez - nhomar@vauxoo.com
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import invoice
import wizard
| agpl-3.0 |
wangmiao1981/spark | examples/src/main/python/ml/onehot_encoder_example.py | 27 | 1599 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.feature import OneHotEncoder
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Start a local Spark session for this standalone example.
    session = (SparkSession
               .builder
               .appName("OneHotEncoderExample")
               .getOrCreate())

    # Note: categorical features are usually first encoded with StringIndexer
    # $example on$
    frame = session.createDataFrame(
        [(0.0, 1.0),
         (1.0, 0.0),
         (2.0, 1.0),
         (0.0, 2.0),
         (0.0, 1.0),
         (2.0, 0.0)],
        ["categoryIndex1", "categoryIndex2"])

    one_hot = OneHotEncoder(inputCols=["categoryIndex1", "categoryIndex2"],
                            outputCols=["categoryVec1", "categoryVec2"])
    fitted = one_hot.fit(frame)
    fitted.transform(frame).show()
    # $example off$

    session.stop()
| apache-2.0 |
Fusion-Rom/android_external_chromium_org | ppapi/PRESUBMIT.py | 86 | 11943 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import subprocess
def RunCmdAndCheck(cmd, err_string, output_api, cwd=None, warning=False):
  """Run |cmd| and turn a non-zero exit into a single presubmit result.

  Returns an empty list on success; otherwise a one-element list holding a
  PresubmitPromptWarning (when |warning| is set) or a PresubmitError, with
  the child's stderr attached.
  """
  proc = subprocess.Popen(cmd, cwd=cwd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  # stdout is drained (to avoid pipe deadlock) but only stderr is reported.
  _, proc_stderr = proc.communicate()
  if not proc.returncode:
    return []
  if warning:
    return [output_api.PresubmitPromptWarning(
        '%s\n\n%s' % (err_string, proc_stderr))]
  return [output_api.PresubmitError(err_string, long_text=proc_stderr)]
def RunUnittests(input_api, output_api):
  """Run the IDL generator unittests when any generator source changed."""
  generator_files = [
      filename for filename in input_api.LocalPaths()
      if filename.split(os.sep)[0:2] == ['ppapi', 'generators']
  ]
  if not generator_files:
    return []
  # At least one ppapi/generators file was touched; run the test suite
  # from inside the generators directory.
  ppapi_dir = input_api.PresubmitLocalPath()
  return RunCmdAndCheck([sys.executable, 'idl_tests.py'],
                        'PPAPI IDL unittests failed.',
                        output_api,
                        os.path.join(ppapi_dir, 'generators'))
# Verify that the files do not contain a 'TODO' in them.
RE_TODO = re.compile(r'\WTODO\W', flags=re.I)


def CheckTODO(input_api, output_api):
  """Presubmit check: stable public PPAPI files must not contain TODOs.

  Scans the affected .h/.idl files under the public, stable ppapi
  directories and returns a PresubmitError listing every file that still
  contains a TODO marker (empty list otherwise).
  """
  live_files = input_api.AffectedFiles(include_deletes=False)
  files = [f.LocalPath() for f in live_files]
  todo = []

  for filename in files:
    name, ext = os.path.splitext(filename)
    name_parts = name.split(os.sep)

    # Only check normal build sources.
    if ext not in ['.h', '.idl']:
      continue

    # Only examine the ppapi directory.
    if name_parts[0] != 'ppapi':
      continue

    # Only examine public plugin facing directories.
    if name_parts[1] not in ['api', 'c', 'cpp', 'utility']:
      continue

    # Only examine public stable interfaces.
    if name_parts[2] in ['dev', 'private', 'trusted']:
      continue

    filepath = os.path.join('..', filename)
    # Use a context manager so the file handle is closed deterministically
    # instead of relying on garbage collection.
    with open(filepath, 'rb') as source:
      contents = source.read()
    if RE_TODO.search(contents):
      todo.append(filename)

  if todo:
    return [output_api.PresubmitError(
        'TODOs found in stable public PPAPI files:',
        long_text='\n'.join(todo))]
  return []
# Verify that no CPP wrappers use un-versioned PPB interface name macros.
RE_UNVERSIONED_PPB = re.compile(r'\bPPB_\w+_INTERFACE\b')


def CheckUnversionedPPB(input_api, output_api):
  """Presubmit check: C++ wrappers must use versioned PPB interface macros.

  Scans affected .cc files under ppapi/cpp (excluding dev/private) and
  returns a PresubmitError listing every file that references an
  un-versioned PPB_*_INTERFACE macro (empty list otherwise).
  """
  live_files = input_api.AffectedFiles(include_deletes=False)
  files = [f.LocalPath() for f in live_files]
  todo = []

  for filename in files:
    name, ext = os.path.splitext(filename)
    name_parts = name.split(os.sep)

    # Only check C++ sources.
    if ext not in ['.cc']:
      continue

    # Only examine the public plugin facing ppapi/cpp directory.
    if name_parts[0:2] != ['ppapi', 'cpp']:
      continue

    # Only examine public stable and trusted interfaces.
    if name_parts[2] in ['dev', 'private']:
      continue

    filepath = os.path.join('..', filename)
    # Close the file deterministically instead of leaking the handle until
    # garbage collection.
    with open(filepath, 'rb') as source:
      contents = source.read()
    if RE_UNVERSIONED_PPB.search(contents):
      todo.append(filename)

  if todo:
    return [output_api.PresubmitError(
        'Unversioned PPB interface references found in PPAPI C++ wrappers:',
        long_text='\n'.join(todo))]
  return []
# Verify that changes to ppapi headers/sources are also made to NaCl SDK.
def CheckUpdatedNaClSDK(input_api, output_api):
  """Warn when PPAPI interface files change without a NaCl SDK update."""
  # Collect the PPAPI files the Native Client SDK cares about: public C
  # headers and C++/utility sources, excluding documentation and trusted.
  nacl_sdk_files = []
  for filename in input_api.LocalPaths():
    name, ext = os.path.splitext(filename)
    parts = name.split(os.sep)
    if len(parts) <= 2 or parts[0] != 'ppapi':
      continue
    is_c_header = parts[1] == 'c' and ext == '.h'
    is_cpp_source = parts[1] in ('cpp', 'utility') and ext in ('.h', '.cc')
    if not (is_c_header or is_cpp_source):
      continue
    if parts[2] in ('documentation', 'trusted'):
      continue
    nacl_sdk_files.append(filename)

  if not nacl_sdk_files:
    return []

  # Delegate the actual verification to the SDK's own checker script.
  verify_ppapi_py = os.path.join(input_api.change.RepositoryRoot(),
                                 'native_client_sdk', 'src', 'build_tools',
                                 'verify_ppapi.py')
  cmd = [sys.executable, verify_ppapi_py] + nacl_sdk_files
  return RunCmdAndCheck(cmd,
                        'PPAPI Interface modified without updating NaCl SDK.\n'
                        '(note that some dev interfaces should not be added '
                        'the NaCl SDK; when in doubt, ask a ppapi OWNER.\n'
                        'To ignore a file, add it to IGNORED_FILES in '
                        'native_client_sdk/src/build_tools/verify_ppapi.py)',
                        output_api,
                        warning=True)
# Verify that changes to ppapi/thunk/interfaces_* files have a corresponding
# change to tools/metrics/histograms/histograms.xml for UMA tracking.
def CheckHistogramXml(input_api, output_api):
  """Notify when thunk interface lists change without a histograms.xml edit."""
  # We can't use input_api.LocalPaths() here because we need to know about
  # changes outside of ppapi/. See tools/depot_tools/presubmit_support.py for
  # details on input_api.
  INTERFACE_FILES = ('ppapi/thunk/interfaces_legacy.h',
                     'ppapi/thunk/interfaces_ppb_private_flash.h',
                     'ppapi/thunk/interfaces_ppb_private.h',
                     'ppapi/thunk/interfaces_ppb_private_no_permissions.h',
                     'ppapi/thunk/interfaces_ppb_public_dev_channel.h',
                     'ppapi/thunk/interfaces_ppb_public_dev.h',
                     'ppapi/thunk/interfaces_ppb_public_stable.h')
  HISTOGRAM_XML_FILE = 'tools/metrics/histograms/histograms.xml'

  touched = [f.LocalPath() for f in input_api.change.AffectedFiles()]
  interface_changes = [path for path in touched if path in INTERFACE_FILES]
  has_histogram_xml_change = HISTOGRAM_XML_FILE in touched

  if interface_changes and not has_histogram_xml_change:
    return [output_api.PresubmitNotifyResult(
        'Missing change to tools/metrics/histograms/histograms.xml.\n' +
        'Run pepper_hash_for_uma to make get values for new interfaces.\n' +
        'Interface changes:\n' + '\n'.join(interface_changes))]
  return []
def CheckChange(input_api, output_api):
  """Main presubmit check: runs all sub-checks, then verifies .h/.idl parity.

  After the individual checks, enforces that every modified IDL file has a
  matching generated C header (and vice versa), and finally runs the IDL
  generator in diff mode to confirm the headers are up to date.
  """
  results = []
  results.extend(RunUnittests(input_api, output_api))
  results.extend(CheckTODO(input_api, output_api))
  results.extend(CheckUnversionedPPB(input_api, output_api))
  results.extend(CheckUpdatedNaClSDK(input_api, output_api))
  results.extend(CheckHistogramXml(input_api, output_api))

  # Verify all modified *.idl have a matching *.h
  files = input_api.LocalPaths()
  h_files = []
  idl_files = []
  generators_changed = False

  # These are autogenerated by the command buffer generator, they don't go
  # through idl.
  whitelist = ['ppb_opengles2', 'ppb_opengles2ext_dev']
  # The PDF interface is hand-written.
  whitelist += ['ppb_pdf', 'ppp_pdf']

  # Find all relevant .h and .idl files.
  for filename in files:
    name, ext = os.path.splitext(filename)
    name_parts = name.split(os.sep)
    if name_parts[-1] in whitelist:
      continue
    # Record the path relative to ppapi/c or ppapi/api so the two lists
    # can be compared directly.
    if name_parts[0:2] == ['ppapi', 'c'] and ext == '.h':
      h_files.append('/'.join(name_parts[2:]))
    elif name_parts[0:2] == ['ppapi', 'api'] and ext == '.idl':
      idl_files.append('/'.join(name_parts[2:]))
    elif name_parts[0:2] == ['ppapi', 'generators']:
      generators_changed = True

  # Generate a list of all appropriate *.h and *.idl changes in this CL.
  both = h_files + idl_files
  # If there aren't any, we are done checking.
  if not both: return results

  # IDL files changed without a matching header change.
  missing = []
  for filename in idl_files:
    if filename not in set(h_files):
      missing.append('ppapi/api/%s.idl' % filename)

  # An IDL change that includes [generate_thunk] doesn't need to have
  # an update to the corresponding .h file.
  new_thunk_files = []
  for filename in missing:
    lines = input_api.RightHandSideLines(lambda f: f.LocalPath() == filename)
    for line in lines:
      if line[2].strip() == '[generate_thunk]':
        new_thunk_files.append(filename)
  for filename in new_thunk_files:
    missing.remove(filename)

  if missing:
    results.append(
        output_api.PresubmitPromptWarning(
            'Missing PPAPI header, no change or skipped generation?',
            long_text='\n '.join(missing)))

  # Headers changed without a matching IDL change, bucketed by stability.
  missing_dev = []
  missing_stable = []
  missing_priv = []
  for filename in h_files:
    if filename not in set(idl_files):
      name_parts = filename.split(os.sep)

      if name_parts[-1] == 'pp_macros':
        # The C header generator adds a PPAPI_RELEASE macro based on all the
        # IDL files, so pp_macros.h may change while its IDL does not.
        lines = input_api.RightHandSideLines(
            lambda f: f.LocalPath() == 'ppapi/c/%s.h' % filename)
        releaseChanged = False
        for line in lines:
          if line[2].split()[:2] == ['#define', 'PPAPI_RELEASE']:
            results.append(
                output_api.PresubmitPromptOrNotify(
                    'PPAPI_RELEASE has changed', long_text=line[2]))
            releaseChanged = True
            break
        if releaseChanged:
          continue

      if 'trusted' in name_parts:
        missing_priv.append(' ppapi/c/%s.h' % filename)
        continue

      if 'private' in name_parts:
        missing_priv.append(' ppapi/c/%s.h' % filename)
        continue

      if 'dev' in name_parts:
        missing_dev.append(' ppapi/c/%s.h' % filename)
        continue

      missing_stable.append(' ppapi/c/%s.h' % filename)

  if missing_priv:
    results.append(
        output_api.PresubmitPromptWarning(
            'Missing PPAPI IDL for private interface, please generate IDL:',
            long_text='\n'.join(missing_priv)))

  if missing_dev:
    results.append(
        output_api.PresubmitPromptWarning(
            'Missing PPAPI IDL for DEV, required before moving to stable:',
            long_text='\n'.join(missing_dev)))

  if missing_stable:
    # It might be okay that the header changed without a corresponding IDL
    # change. E.g., comment indenting may have been changed. Treat this as a
    # warning.
    if generators_changed:
      results.append(
          output_api.PresubmitPromptWarning(
              'Missing PPAPI IDL for stable interface (due to change in ' +
              'generators?):',
              long_text='\n'.join(missing_stable)))
    else:
      results.append(
          output_api.PresubmitError(
              'Missing PPAPI IDL for stable interface:',
              long_text='\n'.join(missing_stable)))

  # Verify all *.h files match *.idl definitions, use:
  #   --test to prevent output to disk
  #   --diff to generate a unified diff
  #   --out to pick which files to examine (only the ones in the CL)
  ppapi_dir = input_api.PresubmitLocalPath()
  cmd = [sys.executable, 'generator.py',
         '--wnone', '--diff', '--test','--cgen', '--range=start,end']

  # Only generate output for IDL files references (as *.h or *.idl) in this CL
  cmd.append('--out=' + ','.join([name + '.idl' for name in both]))

  cmd_results = RunCmdAndCheck(cmd,
                               'PPAPI IDL Diff detected: Run the generator.',
                               output_api,
                               os.path.join(ppapi_dir, 'generators'))
  if cmd_results:
    results.extend(cmd_results)

  return results
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point invoked by depot_tools at upload time.
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point invoked by depot_tools at commit time.
  return CheckChange(input_api, output_api)
| bsd-3-clause |
Kazade/NeHe-Website | google_appengine/google/appengine/datastore/sortable_pb_encoder.py | 8 | 8361 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An Encoder class for Protocol Buffers that preserves sorting characteristics.
This is used by datastore_sqlite_stub and datastore_types to match the ordering
semantics of the production datastore. Broadly, there are four
changes from regular PB encoding:
- Strings are escaped and null terminated instead of length-prefixed. The
escaping replaces \x00 with \x01\x01 and \x01 with \x01\x02, thus preserving
the ordering of the original string.
- Variable length integers are encoded using a variable length encoding that
preserves order. The first byte stores the absolute value if it's between
-119 to 119, otherwise it stores the number of bytes that follow.
- Numbers are stored big endian instead of little endian.
- Negative doubles are entirely negated, while positive doubles have their sign
bit flipped.
Warning:
Due to the way nested Protocol Buffers are encoded, this encoder will NOT
preserve sorting characteristics for embedded protocol buffers!
"""
import array
import struct
from google.net.proto import ProtocolBuffer
_MAX_UNSIGNED_BYTE = 255
_MAX_LONG_BYTES = 8

# Small integers are stored inline in a single byte; the remaining byte
# values act as sign/length prefixes for larger magnitudes (see
# Encoder._PutVarInt / Decoder.getVarInt64 below).
# NOTE(review): this module targets Python 2, where `/` on ints is floor
# division (_MAX_INLINE == 119); under Python 3 this would yield a float.
_MAX_INLINE = (_MAX_UNSIGNED_BYTE - (2 * _MAX_LONG_BYTES)) / 2
_MIN_INLINE = -_MAX_INLINE
# Prefix byte 0 encodes None; the next _MAX_LONG_BYTES values encode
# negative length prefixes, then the inline range, then positive prefixes.
_OFFSET = 1 + 8
_POS_OFFSET = _OFFSET + _MAX_INLINE * 2
class Encoder(ProtocolBuffer.Encoder):
  """Encodes Protocol Buffers in a form that sorts nicely.

  Fixed-width integers are written big-endian (most significant byte first)
  so that byte-wise comparison of two encodings orders them like the
  original numeric values.
  """

  def put16(self, value):
    # Unsigned 16-bit, big-endian.
    if value < 0 or value >= (1<<16):
      raise ProtocolBuffer.ProtocolBufferEncodeError, 'u16 too big'
    self.buf.append((value >> 8) & 0xff)
    self.buf.append((value >> 0) & 0xff)
    return

  def put32(self, value):
    # Unsigned 32-bit, big-endian.
    if value < 0 or value >= (1L<<32):
      raise ProtocolBuffer.ProtocolBufferEncodeError, 'u32 too big'
    self.buf.append((value >> 24) & 0xff)
    self.buf.append((value >> 16) & 0xff)
    self.buf.append((value >> 8) & 0xff)
    self.buf.append((value >> 0) & 0xff)
    return

  def put64(self, value):
    # Unsigned 64-bit, big-endian.
    if value < 0 or value >= (1L<<64):
      raise ProtocolBuffer.ProtocolBufferEncodeError, 'u64 too big'
    self.buf.append((value >> 56) & 0xff)
    self.buf.append((value >> 48) & 0xff)
    self.buf.append((value >> 40) & 0xff)
    self.buf.append((value >> 32) & 0xff)
    self.buf.append((value >> 24) & 0xff)
    self.buf.append((value >> 16) & 0xff)
    self.buf.append((value >> 8) & 0xff)
    self.buf.append((value >> 0) & 0xff)
    return

  def _PutVarInt(self, value):
    # Order-preserving variable-length integer:
    #   0x00                        -> None
    #   one byte in the inline band -> value in [_MIN_INLINE, _MAX_INLINE]
    #   otherwise a head byte encoding sign and byte count, followed by the
    #   magnitude big-endian; negative magnitudes are complemented so that
    #   more negative values compare smaller byte-wise.
    if value is None:
      self.buf.append(0)
      return

    if value >= _MIN_INLINE and value <= _MAX_INLINE:
      # Single-byte inline encoding, biased into [_OFFSET, _POS_OFFSET].
      value = _OFFSET + (value - _MIN_INLINE)
      self.buf.append(value & 0xff)
      return

    negative = False

    if value < 0:
      value = _MIN_INLINE - value
      negative = True
    else:
      value = value - _MAX_INLINE

    # Count how many bytes the (biased) magnitude needs.
    len = 0
    w = value
    while w > 0:
      w >>= 8
      len += 1

    # Head byte: below _OFFSET for negatives, above _POS_OFFSET otherwise,
    # with the distance from the band encoding the byte count.
    if negative:
      head = _OFFSET - len
    else:
      head = _POS_OFFSET + len
    self.buf.append(head & 0xff)

    for i in range(len - 1, -1, -1):
      b = value >> (i * 8)
      if negative:
        # Complement negative magnitudes to preserve ordering.
        b = _MAX_UNSIGNED_BYTE - (b & 0xff)
      self.buf.append(b & 0xff)

  def putVarInt32(self, value):
    # Same wire form as putVarInt64, range-checked to signed 32 bits.
    if value >= 0x80000000 or value < -0x80000000:
      raise ProtocolBuffer.ProtocolBufferEncodeError, 'int32 too big'
    self._PutVarInt(value)

  def putVarInt64(self, value):
    if value >= 0x8000000000000000 or value < -0x8000000000000000:
      raise ProtocolBuffer.ProtocolBufferEncodeError, 'int64 too big'
    self._PutVarInt(value)

  def putVarUint64(self, value):
    if value < 0 or value >= 0x10000000000000000:
      raise ProtocolBuffer.ProtocolBufferEncodeError, 'uint64 too big'
    self._PutVarInt(value)

  def _isFloatNegative(self, value, encoded):
    # For zero, the sign is only recoverable from the encoded bytes:
    # IEEE 754 -0.0 == 0.0, but its leading (sign) byte is 0x80.
    if value == 0:
      return encoded[0] == 128
    return value < 0

  def putFloat(self, value):
    # Big-endian IEEE 754 single, transformed so byte order matches numeric
    # order: negatives have all bytes complemented (reversing their order),
    # non-negatives just get the sign bit flipped (placing them above all
    # negatives).
    encoded = array.array('B')
    encoded.fromstring(struct.pack('>f', value))
    if self._isFloatNegative(value, encoded):
      encoded[0] ^= 0xFF
      encoded[1] ^= 0xFF
      encoded[2] ^= 0xFF
      encoded[3] ^= 0xFF
    else:
      encoded[0] ^= 0x80
    self.buf.extend(encoded)

  def putDouble(self, value):
    # Same sign transformation as putFloat, over 8 bytes.
    encoded = array.array('B')
    encoded.fromstring(struct.pack('>d', value))
    if self._isFloatNegative(value, encoded):
      encoded[0] ^= 0xFF
      encoded[1] ^= 0xFF
      encoded[2] ^= 0xFF
      encoded[3] ^= 0xFF
      encoded[4] ^= 0xFF
      encoded[5] ^= 0xFF
      encoded[6] ^= 0xFF
      encoded[7] ^= 0xFF
    else:
      encoded[0] ^= 0x80
    self.buf.extend(encoded)

  def putPrefixedString(self, value):
    # Escape \x00 -> \x01\x01 and \x01 -> \x01\x02, then NUL-terminate.
    # This keeps lexicographic order without a length prefix (see module
    # docstring).
    self.buf.fromstring(
        value.replace('\x01', '\x01\x02').replace('\x00', '\x01\x01') + '\x00')
class Decoder(ProtocolBuffer.Decoder):
  """Decodes buffers produced by the order-preserving Encoder above."""

  def __init__(self, buf, idx=0, limit=None):
    # Default the read limit to the end of the buffer.
    if not limit:
      limit = len(buf)
    ProtocolBuffer.Decoder.__init__(self, buf, idx, limit)

  def get16(self):
    # Unsigned 16-bit, big-endian.
    if self.idx + 2 > self.limit:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'truncated'
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    self.idx += 2
    return (c << 8) | d

  def get32(self):
    # Unsigned 32-bit, big-endian.
    if self.idx + 4 > self.limit:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'truncated'
    c = long(self.buf[self.idx])
    d = self.buf[self.idx + 1]
    e = self.buf[self.idx + 2]
    f = self.buf[self.idx + 3]
    self.idx += 4
    return (c << 24) | (d << 16) | (e << 8) | f

  def get64(self):
    # Unsigned 64-bit, big-endian.
    if self.idx + 8 > self.limit:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'truncated'
    c = long(self.buf[self.idx])
    d = long(self.buf[self.idx + 1])
    e = long(self.buf[self.idx + 2])
    f = long(self.buf[self.idx + 3])
    g = long(self.buf[self.idx + 4])
    h = self.buf[self.idx + 5]
    i = self.buf[self.idx + 6]
    j = self.buf[self.idx + 7]
    self.idx += 8
    return ((c << 56) | (d << 48) | (e << 40) | (f << 32) | (g << 24)
            | (h << 16) | (i << 8) | j)

  def getVarInt64(self):
    # Inverse of Encoder._PutVarInt: classify the head byte, then read the
    # (possibly complemented) big-endian magnitude.
    b = self.get8()
    if b >= _OFFSET and b <= _POS_OFFSET:
      # Inline single-byte value.
      return b - _OFFSET + _MIN_INLINE
    if b == 0:
      # 0x00 encodes None.
      return None

    if b < _OFFSET:
      negative = True
      bytes = _OFFSET - b
    else:
      negative = False
      bytes = b - _POS_OFFSET

    ret = 0
    for _ in range(bytes):
      b = self.get8()
      if negative:
        # Undo the complement applied to negative magnitudes.
        b = _MAX_UNSIGNED_BYTE - b
      ret = ret << 8 | b

    # Undo the bias applied on encode.
    if negative:
      return _MIN_INLINE - ret
    else:
      return ret + _MAX_INLINE

  def getVarInt32(self):
    result = self.getVarInt64()
    if result >= 0x80000000L or result < -0x80000000L:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'corrupted'
    return result

  def getVarUint64(self):
    result = self.getVarInt64()
    if result < 0:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'corrupted'
    return result

  def getFloat(self):
    # Undo the putFloat transformation: a set sign bit means a non-negative
    # value (just clear it); otherwise complement every byte.
    if self.idx + 4 > self.limit:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'truncated'
    a = self.buf[self.idx:self.idx+4]
    self.idx += 4
    if a[0] & 0x80:
      a[0] ^= 0x80
    else:
      a = [x ^ 0xFF for x in a]
    return struct.unpack('>f', array.array('B', a).tostring())[0]

  def getDouble(self):
    # Same as getFloat, over 8 bytes.
    if self.idx + 8 > self.limit:
      raise ProtocolBuffer.ProtocolBufferDecodeError, 'truncated'
    a = self.buf[self.idx:self.idx+8]
    self.idx += 8
    if a[0] & 0x80:
      a[0] ^= 0x80
    else:
      a = [x ^ 0xFF for x in a]
    return struct.unpack('>d', array.array('B', a).tostring())[0]

  def getPrefixedString(self):
    # Scan to the NUL terminator, then undo the \x00/\x01 escaping applied
    # by Encoder.putPrefixedString.
    end_idx = self.idx
    while self.buf[end_idx] != 0:
      end_idx += 1

    data = array.array('B', self.buf[self.idx:end_idx]).tostring()
    self.idx = end_idx + 1
    return data.replace('\x01\x01', '\x00').replace('\x01\x02', '\x01')
| bsd-3-clause |
adamgf/zxing_iphone_pdf_417 | cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/sgicc.py | 34 | 1870 | """SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgicc.py 5023 2010/06/14 22:05:46 scons"
import cc
def generate(env):
    """Add Builders and construction variables for SGI MIPSPro cc to an
    Environment.

    Delegates to the generic cc tool setup, then applies the IRIX-specific
    overrides: the MIPSPro C++ driver is ``CC``, shared objects use the
    plain ``.o`` suffix, and static/shared objects are interchangeable.
    """
    cc.generate(env)

    env['CXX'] = 'CC'
    env['SHOBJSUFFIX'] = '.o'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
    """Return a truthy value when the 'cc' compiler can be detected."""
    return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
lextoumbourou/cyclone | appskel/signup/modname/txdbapi.py | 3 | 10025 | # coding: utf-8
# http://en.wikipedia.org/wiki/Active_record_pattern
# http://en.wikipedia.org/wiki/Create,_read,_update_and_delete
#
$license
import sqlite3
import sys
import types
from twisted.enterprise import adbapi
from twisted.internet import defer
class InlineSQLite:
    """Synchronous sqlite3 wrapper mimicking the adbapi.ConnectionPool API.

    Exposes runQuery/runOperation/runOperationMany/runInteraction plus
    explicit commit/rollback/close, all executing directly on one
    connection.  Callers use "%s" placeholders (the adbapi convention);
    they are rewritten to sqlite's "?" qmark style before execution.
    """

    def __init__(self, dbname, autocommit=True, cursorclass=None):
        self.autocommit = autocommit
        self.conn = sqlite3.connect(dbname)
        if cursorclass:
            # Optional row factory (e.g. sqlite3.Row) for dict-like rows.
            self.conn.row_factory = cursorclass
        self.curs = self.conn.cursor()

    @staticmethod
    def _qmark(sql):
        # Translate adbapi-style "%s" placeholders to sqlite's "?".
        return sql.replace("%s", "?")

    def runQuery(self, query, *args, **kwargs):
        """Execute a SELECT-style statement and return all rows."""
        self.curs.execute(self._qmark(query), *args, **kwargs)
        return self.curs.fetchall()

    def runOperation(self, command, *args, **kwargs):
        """Execute a statement with no result set; commit if enabled."""
        self.curs.execute(self._qmark(command), *args, **kwargs)
        if self.autocommit is True:
            self.conn.commit()

    def runOperationMany(self, command, *args, **kwargs):
        """Execute a statement once per parameter set (executemany)."""
        self.curs.executemany(self._qmark(command), *args, **kwargs)
        if self.autocommit is True:
            self.conn.commit()

    def runInteraction(self, interaction, *args, **kwargs):
        """Invoke interaction(cursor, ...) and return its result."""
        return interaction(self.curs, *args, **kwargs)

    def commit(self):
        self.conn.commit()

    def rollback(self):
        self.conn.rollback()

    def close(self):
        self.conn.close()
def ConnectionPool(dbapiName, *args, **kwargs):
    """Create a database access object for the given DB-API module name.

    sqlite3 is served in-process through InlineSQLite; MySQLdb and
    psycopg2 get a twisted adbapi.ConnectionPool configured to return
    dict-style rows.  Raises ValueError for unsupported backends.
    """
    if dbapiName == "sqlite3":
        if sys.version_info >= (2, 6):
            kwargs["cursorclass"] = sqlite3.Row
        else:
            # py2.5 lacks a usable sqlite3.Row mapping; build plain dicts.
            def __row(cursor, row):
                return dict((col[0], row[idx])
                            for idx, col in enumerate(cursor.description))
            kwargs["cursorclass"] = __row
        return InlineSQLite(*args, **kwargs)
    if dbapiName == "MySQLdb":
        import MySQLdb.cursors
        kwargs["cursorclass"] = MySQLdb.cursors.DictCursor
        return adbapi.ConnectionPool(dbapiName, *args, **kwargs)
    if dbapiName == "psycopg2":
        import psycopg2
        import psycopg2.extras
        # Make every connection hand back RealDict rows.
        psycopg2.connect = psycopg2.extras.RealDictConnection
        return adbapi.ConnectionPool(dbapiName, *args, **kwargs)
    raise ValueError("Database %s is not yet supported." % dbapiName)
class DatabaseObject(object):
    """One database row exposed through attribute access (active record).

    Column values live in ``self._data``; attribute names beginning with
    an underscore are ordinary instance attributes.  Writes to existing
    columns are recorded in ``self._changes`` so ``save()`` can issue a
    minimal UPDATE.  Columns with an entry in ``model.codecs`` are passed
    through the ``(encode, decode)`` pair on write/read.
    """

    def __init__(self, model, row):
        self._model = model
        self._changes = set()
        self._data = {}
        for k, v in dict(row).items():
            self.__setattr__(k, v)

    def __setattr__(self, k, v):
        if k[0] == "_":
            object.__setattr__(self, k, v)
        else:
            if k in self._data:
                # Overwriting an existing column: remember it for save().
                self._changes.add(k)
            if k in self._model.codecs and \
                    not isinstance(v, types.StringTypes):
                # Encode non-string values before storing.
                self._data[k] = self._model.codecs[k][0](v)
            else:
                self._data[k] = v

    def __getattr__(self, k):
        # Bug fix: this used to test `[0] == "_"` (a list literal, always
        # False) and then call the non-existent object.__getattr__.  Use the
        # default lookup so internal/underscore names raise AttributeError
        # instead of leaking into the column dict.
        if k[0] == "_":
            return object.__getattribute__(self, k)
        return self._model.codecs[k][1](self._data[k]) \
            if k in self._model.codecs else self._data[k]

    def __setitem__(self, k, v):
        # Dict-style assignment mirrors attribute assignment.
        self.__setattr__(k, v)

    def __getitem__(self, k):
        return self.__getattr__(k)

    def get(self, k, default=None):
        # Raw (undecoded) column access with a default.
        return self._data.get(k, default)

    @property
    def has_changes(self):
        # True when at least one existing column has been overwritten.
        return bool(self._changes)

    @defer.inlineCallbacks
    def save(self, force=False):
        """Persist the row: UPDATE when it already has an id, else INSERT.

        With force=True all columns are written even when unchanged.
        """
        if "id" in self._data:
            if self._changes and not force:
                # Minimal UPDATE: only the columns recorded as changed.
                kv = dict(map(lambda k: (k, self._data[k]), self._changes))
                kv["where"] = ("id=%s", self._data["id"])
                yield self._model.update(**kv)
            elif force:
                # NOTE(review): `k, v = self._data.items()` only unpacks when
                # the row has exactly two columns, and update() has no `set`
                # parameter — this branch looks broken; confirm intent.
                k, v = self._data.items()
                yield self._model.update(set=(k, v),
                                         where=("id=%s", self._data["id"]))
            self._changes.clear()
            defer.returnValue(self)
        else:
            rs = yield self._model.insert(**self._data)
            self["id"] = rs["id"]
            defer.returnValue(self)

    @defer.inlineCallbacks
    def delete(self):
        """Delete the row by id; the id column is dropped afterwards."""
        if "id" in self._data:
            yield self._model.delete(where=("id=%s", self._data["id"]))
            self._data.pop("id")
        defer.returnValue(self)

    def __repr__(self):
        return repr(self._data)
class DatabaseCRUD(object):
    """Class-level CRUD helpers over a table named after the class.

    Subclasses are expected to provide ``db`` (an InlineSQLite or twisted
    adbapi pool from ConnectionPool) as a class attribute, and may set
    ``table_name``, ``allow``/``deny`` column filters and ``codecs``.
    """
    #db = None
    allow = []          # if non-empty, only these columns are accepted
    deny = []           # columns always stripped from kwargs
    codecs = {}         # column -> (encode, decode) pair

    @classmethod
    def __table__(cls):
        # Table name defaults to the class name unless table_name is set.
        return getattr(cls, "table_name", cls.__name__)

    @classmethod
    def kwargs_cleanup(cls, kwargs):
        # Strip denied columns (and, when an allow-list exists, anything
        # not on it) from kwargs in place.
        if cls.allow:
            deny = cls.deny + [k for k in kwargs if k not in cls.allow]
        else:
            deny = cls.deny
        if deny:
            # NOTE(review): map() used for side effects — works on Python 2,
            # but would be a lazy no-op on Python 3; confirm target version.
            map(lambda k: kwargs.pop(k, None), deny)
        return kwargs

    @classmethod
    @defer.inlineCallbacks
    def insert(cls, **kwargs):
        """INSERT a row; returns a DatabaseObject including the new id."""
        kwargs = cls.kwargs_cleanup(kwargs)
        keys = kwargs.keys()
        q = "insert into %s (%s) values " % (cls.__table__(),
                                             ",".join(keys)) + "(%s)"
        vs = []
        vd = []
        for v in kwargs.itervalues():
            vs.append("%s")
            # Foreign-key convenience: a DatabaseObject collapses to its id.
            vd.append(v["id"] if isinstance(v, DatabaseObject) else v)

        if isinstance(cls.db, InlineSQLite):
            # sqlite uses "?" placeholders.
            vs = [s.replace("%s", "?") for s in vs]

        q = q % ",".join(vs)
        if "id" in kwargs:
            yield cls.db.runOperation(q, vd)
        else:
            # No explicit id: run insert + last-id query in one interaction
            # so the returned id belongs to this insert.
            def _insert_transaction(trans, *args, **kwargs):
                trans.execute(*args, **kwargs)
                if isinstance(cls.db, InlineSQLite):
                    trans.execute("select last_insert_rowid() as id")
                elif cls.db.dbapiName == "MySQLdb":
                    trans.execute("select last_insert_id() as id")
                elif cls.db.dbapiName == "psycopg2":
                    trans.execute("select currval('%s_id_seq') as id" %
                                  cls.__table__())
                return trans.fetchall()

            r = yield cls.db.runInteraction(_insert_transaction, q, vd)
            kwargs["id"] = r[0]["id"]
        defer.returnValue(DatabaseObject(cls, kwargs))

    @classmethod
    def update(cls, **kwargs):
        """UPDATE columns given as kwargs; optional where=(clause, args...)."""
        where = kwargs.pop("where", None)
        kwargs = cls.kwargs_cleanup(kwargs)
        keys = kwargs.keys()
        vals = [kwargs[k] for k in keys]
        keys = ",".join(["%s=%s" % (k, "%s") for k in keys])
        if where:
            where, args = where[0], list(where[1:])
            for arg in args:
                # DatabaseObject arguments collapse to their id.
                if isinstance(arg, DatabaseObject):
                    vals.append(arg["id"])
                else:
                    vals.append(arg)
            return cls.db.runOperation("update %s set %s where %s" %
                                       (cls.__table__(), keys, where), vals)
        else:
            return cls.db.runOperation("update %s set %s" %
                                       (cls.__table__(), keys), vals)

    @classmethod
    @defer.inlineCallbacks
    def select(cls, **kwargs):
        """SELECT rows; supports where/groupby/orderby/asc/desc/limit/offset.

        Returns a list of DatabaseObject instances.
        """
        extra = []
        # InlineSQLite needs the rowid aliased explicitly as id.
        star = "id,*" if isinstance(cls.db, InlineSQLite) else "*"
        if "groupby" in kwargs:
            extra.append("group by %s" % kwargs["groupby"])
        if "orderby" in kwargs:
            extra.append("order by %s" % kwargs["orderby"])
        if "asc" in kwargs and kwargs["asc"] is True:
            extra.append("asc")
        if "desc" in kwargs and kwargs["desc"] is True:
            extra.append("desc")
        if "limit" in kwargs:
            extra.append("limit %s" % kwargs["limit"])
        if "offset" in kwargs:
            extra.append("offset %s" % kwargs["offset"])
        extra = " ".join(extra)
        if "where" in kwargs:
            where, args = kwargs["where"][0], list(kwargs["where"][1:])
            for n, arg in enumerate(args):
                if isinstance(arg, DatabaseObject):
                    args[n] = arg["id"]
            rs = yield cls.db.runQuery("select %s from %s where %s %s" %
                                       (star, cls.__table__(), where, extra),
                                       args)
        else:
            rs = yield cls.db.runQuery("select %s from %s %s" %
                                       (star, cls.__table__(), extra))
        result = map(lambda d: DatabaseObject(cls, d), rs)
        defer.returnValue(result)

    @classmethod
    def delete(cls, **kwargs):
        """DELETE rows matching where=(clause, args...); all rows if absent."""
        if "where" in kwargs:
            where, args = kwargs["where"][0], kwargs["where"][1:]
            return cls.db.runOperation("delete from %s where %s" %
                                       (cls.__table__(), where), args)
        else:
            return cls.db.runOperation("delete from %s" % cls.__table__())

    def __str__(self):
        # NOTE(review): self.data is never defined on this class — likely
        # meant self._data (which lives on DatabaseObject); confirm.
        return str(self.data)
class DatabaseModel(DatabaseCRUD):
    """User-facing model base class: CRUD plus query convenience helpers."""

    @classmethod
    @defer.inlineCallbacks
    def count(cls, **kwargs):
        """Return the number of rows, optionally filtered by where=(...)."""
        if "where" in kwargs:
            where, args = kwargs["where"][0], kwargs["where"][1:]
            rs = yield cls.db.runQuery("select count(*) as count from %s "
                                       "where %s" %
                                       (cls.__table__(), where), args)
        else:
            rs = yield cls.db.runQuery("select count(*) as count from %s" %
                                       cls.__table__())
        defer.returnValue(rs[0]["count"])

    @classmethod
    def all(cls):
        # Every row in the table.
        return cls.select()

    @classmethod
    def find(cls, **kwargs):
        # Alias for select() with the same keyword arguments.
        return cls.select(**kwargs)

    @classmethod
    @defer.inlineCallbacks
    def find_first(cls, **kwargs):
        """Return the first matching row, or None when nothing matches."""
        kwargs["limit"] = 1
        rs = yield cls.select(**kwargs)
        defer.returnValue(rs[0] if rs else None)

    @classmethod
    def new(cls, **kwargs):
        # Build an unsaved row object; call .save() to persist it.
        return DatabaseObject(cls, kwargs)
| apache-2.0 |
Codepoints/unidump | setup.py | 1 | 1410 | from setuptools import setup, find_packages
from codecs import open
from os import path
from unidump import VERSION
from unidump.cli import DESCRIPTION, EPILOG
# The PyPI long description reuses the CLI help text: short summary plus
# the usage epilog.
long_description = DESCRIPTION + '\n\n' + EPILOG

setup(
    name='unidump',
    version=VERSION,
    description='hexdump for your Unicode data',
    long_description=long_description,
    url='https://github.com/Codepoints/unidump',
    author='Manuel Strehl',
    author_email='boldewyn@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Debuggers',
        'Topic :: Software Development :: Internationalization',
        'Topic :: Software Development :: Testing',
        'Topic :: Text Processing :: General',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='unicode hexdump debugging codepoint utility',
    packages=['unidump'],
    # Ship the compiled German gettext catalog alongside the code.
    package_data={
        'unidump': [
            'locale/de/LC_MESSAGES/unidump.mo',
        ],
    },
    # Expose the CLI as the `unidump` console command.
    entry_points={
        'console_scripts': [
            'unidump=unidump.cli:main',
        ],
    }
)
| mit |
great-expectations/great_expectations | great_expectations/dataset/dataset.py | 1 | 198226 | import inspect
import logging
from datetime import datetime
from functools import lru_cache, wraps
from itertools import zip_longest
from numbers import Number
from typing import Any, List, Optional, Set, Union
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from great_expectations.data_asset.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
build_categorical_partition_object,
build_continuous_partition_object,
is_valid_categorical_partition_object,
is_valid_partition_object,
)
# Module-level logger for this dataset module.
logger = logging.getLogger(__name__)

# sqlalchemy is an optional dependency; degrade gracefully when it is not
# installed so the non-SQL backends keep working (quoted_name becomes None
# and callers must check for that).
# NOTE(review): the bare `except:` also swallows failures other than
# ImportError raised while importing sqlalchemy — confirm this is intended.
try:
    from sqlalchemy.sql import quoted_name
except:
    logger.debug(
        "Unable to load quoted name from SqlAlchemy; install optional sqlalchemy dependency for support"
    )
    quoted_name = None
class MetaDataset(DataAsset):
"""
Holds expectation decorators.
"""
    @classmethod
    def column_map_expectation(cls, func):
        """Constructs an expectation using column-map semantics.

        The column_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
        truthiness of some condition on a per-row basis.

        Args:
            func (function): \
                The function implementing a row-wise expectation. The function should take a column of data and \
                return an equally-long column of boolean values corresponding to the truthiness of the \
                underlying expectation.

        Notes:
            column_map_expectation intercepts and takes action based on the following parameters:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.

            column_map_expectation *excludes null values* from being passed to the function

            Depending on the `result_format` selected, column_map_expectation can add additional data to a return object, \
                including `element_count`, `nonnull_values`, `nonnull_count`, `success_count`, `unexpected_list`, and \
                `unexpected_index_list`. \
                See :func:`_format_map_output <great_expectations.data_asset.dataset.Dataset._format_map_output>`

        See also:
            :func:`expect_column_values_to_be_in_set \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>` \
            for an example of a column_map_expectation
        """
        # Abstract hook: each Dataset backend subclass supplies its own
        # implementation of this decorator.
        raise NotImplementedError
@classmethod
def column_aggregate_expectation(cls, func):
"""Constructs an expectation using column-aggregate semantics.
The column_aggregate_expectation decorator handles boilerplate issues surrounding the common pattern of \
evaluating truthiness of some condition on an aggregated-column basis.
Args:
func (function): \
The function implementing an expectation using an aggregate property of a column. \
The function should take a column of data and return the aggregate value it computes.
Notes:
column_aggregate_expectation *excludes null values* from being passed to the function
See also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>` \
for an example of a column_aggregate_expectation
"""
argspec = inspect.getfullargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(
self,
column=None,
result_format=None,
row_condition=None,
condition_parser=None,
*args,
**kwargs
):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
if row_condition and self._supports_row_condition:
self = self.query(row_condition, parser=condition_parser).reset_index(
drop=True
)
element_count = self.get_row_count()
if kwargs.get("column"):
column = kwargs.get("column")
if column is not None:
# We test whether the dataset is a sqlalchemy_dataset by seeing if it has an engine. We don't test
# whether it is actually an instance to avoid circular dependency issues.
if (
hasattr(self, "engine")
and self.batch_kwargs.get("use_quoted_name")
and quoted_name
):
column = quoted_name(column, quote=True)
nonnull_count = self.get_column_nonnull_count(
kwargs.get("column", column)
)
# column is treated specially as a positional argument in most expectations
args = tuple((column, *args))
elif kwargs.get("column_A") and kwargs.get("column_B"):
try:
nonnull_count = (
self[kwargs.get("column_A")].notnull()
& self[kwargs.get("column_B")].notnull()
).sum()
except TypeError:
nonnull_count = None
else:
raise ValueError(
"The column_aggregate_expectation wrapper requires either column or "
"both column_A and column_B as input."
)
if nonnull_count:
null_count = element_count - nonnull_count
else:
null_count = None
evaluation_result = func(self, *args, **kwargs)
if "success" not in evaluation_result:
raise ValueError(
"Column aggregate expectation failed to return required information: success"
)
if ("result" not in evaluation_result) or (
"observed_value" not in evaluation_result["result"]
):
raise ValueError(
"Column aggregate expectation failed to return required information: observed_value"
)
return_obj = {"success": bool(evaluation_result["success"])}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
return_obj["result"] = {
"observed_value": evaluation_result["result"]["observed_value"],
"element_count": element_count,
}
if null_count:
return_obj["result"]["missing_count"] = null_count
if element_count > 0:
return_obj["result"]["missing_percent"] = (
null_count * 100.0 / element_count
)
else:
return_obj["result"]["missing_percent"] = None
else:
return_obj["result"]["missing_count"] = None
return_obj["result"]["missing_percent"] = None
if result_format["result_format"] == "BASIC":
return return_obj
if "details" in evaluation_result["result"]:
return_obj["result"]["details"] = evaluation_result["result"]["details"]
if result_format["result_format"] in ["SUMMARY", "COMPLETE"]:
return return_obj
raise ValueError(
"Unknown result_format %s." % result_format["result_format"]
)
return inner_wrapper
# noinspection PyIncorrectDocstring
class Dataset(MetaDataset):
    """Backend-agnostic dataset interface: declares metric getters (which raise
    NotImplementedError here) and implements the expectations built on them."""
    # This should in general only be changed when a subclass *adds expectations* or *changes expectation semantics*
    # That way, multiple backends can implement the same data_asset_type
    _data_asset_type = "Dataset"
    # Whether row_condition filtering is supported; subclasses override as needed.
    _supports_row_condition = False
    # getter functions with hashable arguments - can be cached
    hashable_getters = [
        "get_column_min",
        "get_column_max",
        "get_column_mean",
        "get_column_modes",
        "get_column_median",
        "get_column_quantiles",
        "get_column_nonnull_count",
        "get_column_stdev",
        "get_column_sum",
        "get_column_unique_count",
        "get_column_value_counts",
        "get_row_count",
        "get_column_count",
        "get_table_columns",
        "get_column_count_in_range",
    ]
def __init__(self, *args, **kwargs):
# NOTE: using caching makes the strong assumption that the user will not modify the core data store
# (e.g. self.spark_df) over the lifetime of the dataset instance
self.caching = kwargs.pop("caching", True)
super().__init__(*args, **kwargs)
if self.caching:
for func in self.hashable_getters:
caching_func = lru_cache(maxsize=None)(getattr(self, func))
setattr(self, func, caching_func)
@classmethod
def from_dataset(cls, dataset=None):
"""This base implementation naively passes arguments on to the real constructor, which
is suitable really when a constructor knows to take its own type. In general, this should be overridden"""
return cls(dataset)
    # --- Abstract metric getters: concrete backends override these. ---
    def get_row_count(self):
        """Return the number of rows in the dataset.

        Returns: int, table row count
        """
        raise NotImplementedError
    def get_column_count(self):
        """Return the number of columns in the dataset.

        Returns: int, table column count
        """
        raise NotImplementedError
    def get_table_columns(self) -> List[str]:
        """Return the dataset's column names, in order.

        Returns: List[str], list of column names
        """
        raise NotImplementedError
    def get_column_nonnull_count(self, column):
        """Return the number of non-null values in `column`.

        Returns: int
        """
        raise NotImplementedError
    def get_column_mean(self, column):
        """Return the mean of the values in `column`.

        Returns: float
        """
        raise NotImplementedError
    def get_column_value_counts(self, column, sort="value", collate=None):
        """Get a series containing the frequency counts of unique values from the named column.
        Args:
            column: the column for which to obtain value_counts
            sort (string): must be one of "value", "count", or "none".
                - if "value" then values in the resulting partition object will be sorted lexigraphically
                - if "count" then values will be sorted according to descending count (frequency)
                - if "none" then values will not be sorted
            collate (string): the collate (sort) method to be used on supported backends (SqlAlchemy only)
        Returns:
            pd.Series of value counts for a column, sorted according to the value requested in sort
        """
        raise NotImplementedError
    def get_column_sum(self, column):
        """Return the sum of the values in `column`.

        Returns: float
        """
        raise NotImplementedError
    def get_column_max(self, column, parse_strings_as_datetimes=False):
        """Return the maximum value in `column`, optionally parsing strings as datetimes first.

        Returns: Any
        """
        raise NotImplementedError
    def get_column_min(self, column, parse_strings_as_datetimes=False):
        """Return the minimum value in `column`, optionally parsing strings as datetimes first.

        Returns: Any
        """
        raise NotImplementedError
    def get_column_unique_count(self, column):
        """Return the number of distinct values in `column`.

        Returns: int
        """
        raise NotImplementedError
    def get_column_modes(self, column):
        """Return the most frequent value(s) in `column`.

        Returns: List[Any], list of modes (ties OK)
        """
        raise NotImplementedError
    def get_column_median(self, column):
        """Return the median value of `column`.

        Returns: Any
        """
        raise NotImplementedError
    def get_column_quantiles(
        self, column, quantiles, allow_relative_error=False
    ) -> List[Any]:
        """Get the values in column closest to the requested quantiles
        Args:
            column (string): name of column
            quantiles (tuple of float): the quantiles to return. quantiles \
            *must* be a tuple to ensure caching is possible
        Returns:
            List[Any]: the nearest values in the dataset to those quantiles
        """
        raise NotImplementedError
    def get_column_stdev(self, column):
        """Return the standard deviation of the values in `column`.

        Returns: float
        """
        raise NotImplementedError
def get_column_partition(
self, column, bins="uniform", n_bins=10, allow_relative_error=False
):
"""Get a partition of the range of values in the specified column.
Args:
column: the name of the column
bins: 'uniform' for evenly spaced bins or 'quantile' for bins spaced according to quantiles
n_bins: the number of bins to produce
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A list of bins
"""
if bins == "uniform":
# TODO: in the event that we shift the compute model for
# min and max to have a single pass, use that instead of
# quantiles for clarity
# min_ = self.get_column_min(column)
# max_ = self.get_column_max(column)
min_, max_ = self.get_column_quantiles(
column, (0.0, 1.0), allow_relative_error=allow_relative_error
)
# PRECISION NOTE: some implementations of quantiles could produce
# varying levels of precision (e.g. a NUMERIC column producing
# Decimal from a SQLAlchemy source, so we cast to float for numpy)
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1)
elif bins in ["ntile", "quantile", "percentile"]:
bins = self.get_column_quantiles(
column,
tuple(np.linspace(start=0, stop=1, num=n_bins + 1)),
allow_relative_error=allow_relative_error,
)
elif bins == "auto":
# Use the method from numpy histogram_bin_edges
nonnull_count = self.get_column_nonnull_count(column)
sturges = np.log2(nonnull_count + 1)
min_, _25, _75, max_ = self.get_column_quantiles(
column,
(0.0, 0.25, 0.75, 1.0),
allow_relative_error=allow_relative_error,
)
iqr = _75 - _25
if iqr < 1e-10: # Consider IQR 0 and do not use variance-based estimator
n_bins = sturges
else:
fd = (2 * float(iqr)) / (nonnull_count ** (1 / 3))
n_bins = max(
int(np.ceil(sturges)), int(np.ceil(float(max_ - min_) / fd))
)
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1)
else:
raise ValueError("Invalid parameter for bins argument")
return bins
    def get_column_hist(self, column, bins):
        """Get a histogram of column values
        Args:
            column: the column for which to generate the histogram
            bins (tuple): the bins to slice the histogram. bins *must* be a tuple to ensure caching is possible

        Returns: List[int], a list of counts corresponding to bins"""
        raise NotImplementedError
    def get_column_count_in_range(
        self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
    ):
        """Count the values in `column` between min_val and max_val.

        By default the lower bound is inclusive (strict_min=False) and the
        upper bound is exclusive (strict_max=True).

        Returns: int
        """
        raise NotImplementedError
    def get_crosstab(
        self,
        column_A,
        column_B,
        bins_A=None,
        bins_B=None,
        n_bins_A=None,
        n_bins_B=None,
    ):
        """Get crosstab of column_A and column_B, binning values if necessary"""
        raise NotImplementedError
def test_column_map_expectation_function(self, function, *args, **kwargs):
"""Test a column map expectation function
Args:
function (func): The function to be tested. (Must be a valid column_map_expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
new_function = self.column_map_expectation(function)
return new_function(self, *args, **kwargs)
def test_column_aggregate_expectation_function(self, function, *args, **kwargs):
"""Test a column aggregate expectation function
Args:
function (func): The function to be tested. (Must be a valid column_aggregate_expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
new_function = self.column_aggregate_expectation(function)
return new_function(self, *args, **kwargs)
#####
#
# Table shape expectations
#
#####
@DocInherit
@DataAsset.expectation(["column"])
def expect_column_to_exist(
self,
column,
column_index=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the specified column to exist.
expect_column_to_exist is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column (str): \
The column name.
Other Parameters:
column_index (int or None): \
If not None, checks the order of the columns. The expectation will fail if the \
column is not in location column_index (zero-indexed).
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
columns = self.get_table_columns()
if column in columns:
return {
# FIXME: list.index does not check for duplicate values.
"success": (column_index is None)
or (columns.index(column) == column_index)
}
else:
return {"success": False}
@DocInherit
@DataAsset.expectation(["column_list"])
def expect_table_columns_to_match_ordered_list(
self,
column_list,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the columns to exactly match a specified list.
expect_table_columns_to_match_ordered_list is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_list (list of str): \
The column names, in the correct order.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
columns = self.get_table_columns()
if column_list is None or list(columns) == list(column_list):
return {"success": True, "result": {"observed_value": list(columns)}}
else:
# In the case of differing column lengths between the defined expectation and the observed column set, the
# max is determined to generate the column_index.
number_of_columns = max(len(column_list), len(columns))
column_index = range(number_of_columns)
# Create a list of the mismatched details
compared_lists = list(
zip_longest(column_index, list(column_list), list(columns))
)
mismatched = [
{"Expected Column Position": i, "Expected": k, "Found": v}
for i, k, v in compared_lists
if k != v
]
return {
"success": False,
"result": {
"observed_value": list(columns),
"details": {"mismatched": mismatched},
},
}
@DocInherit
@DataAsset.expectation(["column_set", "exact_match"])
def expect_table_columns_to_match_set(
self,
column_set: Optional[Union[Set[str], List[str]]],
exact_match: Optional[bool] = True,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the columns to match a specified set.
expect_table_columns_to_match_set is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_set (set of str or list of str): \
The column names you wish to check. If given a list, it will be converted to \
a set before processing. Column names are case sensitive.
exact_match (bool): \
Whether to make sure there are no extra columns in either the dataset or in \
the column_set.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
column_set = set(column_set) if column_set is not None else set()
dataset_columns_list = self.get_table_columns()
dataset_columns_set = set(dataset_columns_list)
if (
(column_set is None) and (exact_match is not True)
) or dataset_columns_set == column_set:
return {"success": True, "result": {"observed_value": dataset_columns_list}}
else:
# Convert to lists and sort to lock order for testing and output rendering
# unexpected_list contains items from the dataset columns that are not in column_set
unexpected_list = sorted(list(dataset_columns_set - column_set))
# missing_list contains items from column_set that are not in the dataset columns
missing_list = sorted(list(column_set - dataset_columns_set))
# observed_value contains items that are in the dataset columns
observed_value = sorted(dataset_columns_list)
mismatched = {}
if len(unexpected_list) > 0:
mismatched["unexpected"] = unexpected_list
if len(missing_list) > 0:
mismatched["missing"] = missing_list
result = {
"observed_value": observed_value,
"details": {"mismatched": mismatched},
}
return_success = {
"success": True,
"result": result,
}
return_failed = {
"success": False,
"result": result,
}
if exact_match:
return return_failed
else:
# Failed if there are items in the missing list (but OK to have unexpected_list)
if len(missing_list) > 0:
return return_failed
# Passed if there are no items in the missing list
else:
return return_success
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["min_value", "max_value"])
def expect_table_column_count_to_be_between(
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of columns to be between two values.
expect_table_column_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of columns, inclusive.
max_value (int or None): \
The maximum number of columns, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable columns \
has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable columns \
has no maximum.
See Also:
expect_table_column_count_to_equal
"""
try:
if min_value is not None:
if not float(min_value).is_integer():
raise ValueError("min_value must be integer")
if max_value is not None:
if not float(max_value).is_integer():
raise ValueError("max_value must be integer")
except ValueError:
raise ValueError("min_value and max_value must be integers")
# check that min_value or max_value is set
# if min_value is None and max_value is None:
# raise Exception('Must specify either or both of min_value and max_value')
column_count = self.get_column_count()
if min_value is not None:
above_min = column_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = column_count <= max_value
else:
below_max = True
outcome = above_min and below_max
return {"success": outcome, "result": {"observed_value": column_count}}
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["value"])
def expect_table_column_count_to_equal(
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of columns to equal a value.
expect_table_column_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of columns.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_column_count_to_be_between
"""
try:
if not float(value).is_integer():
raise ValueError("value must be an integer")
except ValueError:
raise ValueError("value must be an integer")
column_count = self.get_column_count()
return {
"success": column_count == value,
"result": {"observed_value": column_count},
}
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["min_value", "max_value"])
def expect_table_row_count_to_be_between(
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows to be between two values.
expect_table_row_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of rows, inclusive.
max_value (int or None): \
The maximum number of rows, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
no maximum.
See Also:
expect_table_row_count_to_equal
"""
try:
if min_value is not None:
if not float(min_value).is_integer():
raise ValueError("min_value must be integer")
if max_value is not None:
if not float(max_value).is_integer():
raise ValueError("max_value must be integer")
except ValueError:
raise ValueError("min_value and max_value must be integers")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
# check that min_value or max_value is set
# if min_value is None and max_value is None:
# raise Exception('Must specify either or both of min_value and max_value')
row_count = self.get_row_count()
if min_value is not None:
above_min = row_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = row_count <= max_value
else:
below_max = True
outcome = above_min and below_max
return {"success": outcome, "result": {"observed_value": row_count}}
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["value"])
def expect_table_row_count_to_equal(
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
try:
if not float(value).is_integer():
raise ValueError("value must be an integer")
except ValueError:
raise ValueError("value must be an integer")
row_count = self.get_row_count()
return {"success": row_count == value, "result": {"observed_value": row_count}}
###
#
# Missing values, unique values, and types
#
###
    def expect_column_values_to_be_unique(
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect each column value to be unique.
        This expectation detects duplicates. All duplicated values are counted as exceptions.
        For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with \
        `unexpected_percent = 60.0`.
        expect_column_values_to_be_unique is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
        Args:
            column (str): \
                The column name.
        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.
        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.
        Returns:
            An ExpectationSuiteValidationResult
            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        """
        # Abstract: concrete backends override this expectation.
        raise NotImplementedError
    def expect_column_values_to_not_be_null(
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect column values to not be null.
        To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an
        np.NaN in pandas. Empty strings don't count as null unless they have been coerced to a null type.
        expect_column_values_to_not_be_null is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
        Args:
            column (str): \
                The column name.
        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.
        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.
        Returns:
            An ExpectationSuiteValidationResult
            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        See Also:
            :func:`expect_column_values_to_be_null \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_null>`
        """
        # Abstract: concrete backends override this expectation.
        raise NotImplementedError
    def expect_column_values_to_be_null(
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect column values to be null.
        expect_column_values_to_be_null is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
        Args:
            column (str): \
                The column name.
        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.
        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.
        Returns:
            An ExpectationSuiteValidationResult
            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        See Also:
            :func:`expect_column_values_to_not_be_null \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_null>`
        """
        # Abstract: concrete backends override this expectation.
        raise NotImplementedError
def expect_column_values_to_be_of_type(
    self,
    column,
    type_,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect a column to contain values of a specified data type.

    expect_column_values_to_be_of_type is a
    :func:`column_aggregate_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`
    for typed-column backends, and also for PandasDataset where the column
    dtype and provided type_ are unambiguous constraints (any dtype except
    'object' or dtype of 'object' with type_ specified as 'object').

    For PandasDataset columns with dtype of 'object' it is instead a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`
    and will independently check each row's type.

    Args:
        column (str): The column name.
        type\\_ (str): A string representing the data type that each column
            should have as entries. Valid types are defined by the current
            backend implementation and are dynamically loaded. For example,
            valid types for PandasDataset include any numpy dtype values
            (such as 'int64') or native python types (such as 'int'),
            whereas valid types for a SqlAlchemyDataset include types named
            by the current driver such as 'INTEGER' in most SQL dialects
            and 'TEXT' in dialects such as postgresql. Valid types for
            SparkDFDataset include 'StringType', 'BooleanType' and other
            pyspark-defined type names.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See also:
        :func:`expect_column_values_to_be_in_type_list
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_type_list>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_be_in_type_list(
    self,
    column,
    type_list,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect a column to contain values from a specified type list.

    expect_column_values_to_be_in_type_list is a
    :func:`column_aggregate_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`
    for typed-column backends, and also for PandasDataset where the column
    dtype provides an unambiguous constraint (any dtype except 'object').

    For PandasDataset columns with dtype of 'object'
    expect_column_values_to_be_in_type_list is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`
    and will independently check each row's type.

    Args:
        column (str): The column name.
        type_list (str): A list of strings representing the data type that
            each column should have as entries. Valid types are defined by
            the current backend implementation and are dynamically loaded.
            For example, valid types for PandasDataset include any numpy
            dtype values (such as 'int64') or native python types (such as
            'int'), whereas valid types for a SqlAlchemyDataset include
            types named by the current driver such as 'INTEGER' in most SQL
            dialects and 'TEXT' in dialects such as postgresql. Valid types
            for SparkDFDataset include 'StringType', 'BooleanType' and
            other pyspark-defined type names.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See also:
        :func:`expect_column_values_to_be_of_type
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
###
#
# Sets and ranges
#
###
def expect_column_values_to_be_in_set(
    self,
    column,
    value_set,
    mostly=None,
    parse_strings_as_datetimes=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # noinspection PyUnresolvedReferences
    """Expect each column value to be in a given set.

    For example:
    ::

        # my_df.my_col = [1,2,2,3,3,3]
        >>> my_df.expect_column_values_to_be_in_set(
            "my_col",
            [2,3]
        )
        {
          "success": false
          "result": {
            "unexpected_count": 1
            "unexpected_percent": 16.66666666666666666,
            "unexpected_percent_nonmissing": 16.66666666666666666,
            "partial_unexpected_list": [
              1
            ],
          },
        }

    expect_column_values_to_be_in_set is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        value_set (set-like): A set of objects used for comparison.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.
        parse_strings_as_datetimes (boolean or None): If True, values
            provided in value_set will be parsed as datetimes before
            making comparisons.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_not_be_in_set
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_in_set>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_not_be_in_set(
    self,
    column,
    value_set,
    mostly=None,
    parse_strings_as_datetimes=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # noinspection PyUnresolvedReferences
    """Expect column entries to not be in the set.

    For example:
    ::

        # my_df.my_col = [1,2,2,3,3,3]
        >>> my_df.expect_column_values_to_not_be_in_set(
            "my_col",
            [1,2]
        )
        {
          "success": false
          "result": {
            "unexpected_count": 3
            "unexpected_percent": 50.0,
            "unexpected_percent_nonmissing": 50.0,
            "partial_unexpected_list": [
              1, 2, 2
            ],
          },
        }

    expect_column_values_to_not_be_in_set is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        value_set (set-like): A set of objects used for comparison.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.
        parse_strings_as_datetimes (boolean or None): If True, values
            provided in value_set will be parsed as datetimes before
            making comparisons.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_be_in_set
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    strict_min=False,
    strict_max=False,
    # tolerance=1e-9,
    allow_cross_type_comparisons=None,
    parse_strings_as_datetimes=False,
    output_strftime_format=None,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be between a minimum value and a maximum
    value (inclusive).

    expect_column_values_to_be_between is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        min_value (comparable type or None): The minimum value for a
            column entry.
        max_value (comparable type or None): The maximum value for a
            column entry.

    Keyword Args:
        strict_min (boolean): If True, values must be strictly larger
            than min_value, default=False
        strict_max (boolean): If True, values must be strictly smaller
            than max_value, default=False
        allow_cross_type_comparisons (boolean or None): If True, allow
            comparisons between types (e.g. integer and string).
            Otherwise, attempting such comparisons will raise an
            exception.
        parse_strings_as_datetimes (boolean or None): If True, parse
            min_value, max_value, and all non-null column values to
            datetimes before making comparisons.
        output_strftime_format (str or None): A valid strfime format for
            datetime output. Only used if parse_strings_as_datetimes=True.
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        * min_value and max_value are both inclusive unless strict_min or
          strict_max are set to True.
        * If min_value is None, then max_value is treated as an upper
          bound, and there is no minimum value checked.
        * If max_value is None, then min_value is treated as a lower
          bound, and there is no maximum value checked.

    See Also:
        :func:`expect_column_value_lengths_to_be_between
        <great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_be_increasing(
    self,
    column,
    strictly=None,
    parse_strings_as_datetimes=False,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column values to be increasing.

    By default, this expectation only works for numeric or datetime data.
    When `parse_strings_as_datetimes=True`, it can also parse strings to
    datetimes.

    If `strictly=True`, then this expectation is only satisfied if each
    consecutive value is strictly increasing--equal values are treated as
    failures.

    expect_column_values_to_be_increasing is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        strictly (Boolean or None): If True, values must be strictly
            greater than previous values
        parse_strings_as_datetimes (boolean or None): If True, all
            non-null column values to datetimes before making comparisons
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_be_decreasing
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_decreasing>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_be_decreasing(
    self,
    column,
    strictly=None,
    parse_strings_as_datetimes=False,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column values to be decreasing.

    By default, this expectation only works for numeric or datetime data.
    When `parse_strings_as_datetimes=True`, it can also parse strings to
    datetimes.

    If `strictly=True`, then this expectation is only satisfied if each
    consecutive value is strictly decreasing--equal values are treated as
    failures.

    expect_column_values_to_be_decreasing is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        strictly (Boolean or None): If True, values must be strictly
            less than previous values
        parse_strings_as_datetimes (boolean or None): If True, all
            non-null column values to datetimes before making comparisons
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_be_increasing
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_increasing>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
###
#
# String matching
#
###
def expect_column_value_lengths_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be strings with length between a minimum
    value and a maximum value (inclusive).

    This expectation only works for string-type values. Invoking it on
    ints or floats will raise a TypeError.

    expect_column_value_lengths_to_be_between is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        min_value (int or None): The minimum value for a column entry
            length.
        max_value (int or None): The maximum value for a column entry
            length.
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        * min_value and max_value are both inclusive.
        * If min_value is None, then max_value is treated as an upper
          bound, and the number of acceptable rows has no minimum.
        * If max_value is None, then min_value is treated as a lower
          bound, and the number of acceptable rows has no maximum.

    See Also:
        :func:`expect_column_value_lengths_to_equal
        <great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_equal>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_value_lengths_to_equal(
    self,
    column,
    value,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be strings with length equal to the
    provided value.

    This expectation only works for string-type values. Invoking it on
    ints or floats will raise a TypeError.

    expect_column_value_lengths_to_equal is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        value (int or None): The expected value for a column entry length.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_value_lengths_to_be_between
        <great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_match_regex(
    self,
    column,
    regex,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be strings that match a given regular
    expression.

    Valid matches can be found anywhere in the string, for example "[at]+"
    will identify the following strings as expected: "cat", "hat", "aa",
    "a", and "t", and the following strings as unexpected: "fish", "dog".

    expect_column_values_to_match_regex is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex (str): The regular expression the column entries should
            match.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_not_match_regex
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`

        :func:`expect_column_values_to_match_regex_list
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_not_match_regex(
    self,
    column,
    regex,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be strings that do NOT match a given
    regular expression.

    The regex must not match any portion of the provided string. For
    example, "[at]+" would identify the following strings as expected:
    "fish", "dog", and the following as unexpected: "cat", "hat".

    expect_column_values_to_not_match_regex is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex (str): The regular expression the column entries should NOT
            match.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_match_regex
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`

        :func:`expect_column_values_to_match_regex_list
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_match_regex_list(
    self,
    column,
    regex_list,
    match_on="any",
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column entries to be strings that can be matched to
    either any of or all of a list of regular expressions.

    Matches can be anywhere in the string.

    expect_column_values_to_match_regex_list is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex_list (list): The list of regular expressions which the
            column entries should match

    Keyword Args:
        match_on (string): "any" or "all". Use "any" if the value should
            match at least one regular expression in the list. Use "all"
            if it should match each regular expression in the list.
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_match_regex
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`

        :func:`expect_column_values_to_not_match_regex
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_not_match_regex_list(
    self,
    column,
    regex_list,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column entries to be strings that do not match any of a
    list of regular expressions.

    Matches can be anywhere in the string.

    expect_column_values_to_not_match_regex_list is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex_list (list): The list of regular expressions which the
            column entries should not match

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_match_regex_list
        <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
###
#
# Datetime and JSON parsing
#
###
def expect_column_values_to_match_strftime_format(
    self,
    column,
    strftime_format,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be strings representing a date or time
    with a given format.

    expect_column_values_to_match_strftime_format is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        strftime_format (str): A strftime format string to use for
            matching

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
def expect_column_values_to_be_dateutil_parseable(
    self,
    column,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column entries to be parsable using dateutil.

    expect_column_values_to_be_dateutil_parseable is a
    :func:`column_map_expectation
    <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return
            `"success": True` if at least mostly fraction of values match
            the expectation. For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation
            config as part of the result object. For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch
            exceptions and include them as part of the result object.
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting
            allowed) that will be included in the output without
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract: concrete backends (Pandas, SqlAlchemy, Spark) implement this.
    raise NotImplementedError
    def expect_column_values_to_be_json_parseable(
        self,
        column,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect column entries to be data written in JavaScript Object Notation.
        expect_column_values_to_be_json_parseable is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
        Args:
            column (str): \
                The column name.
        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.
        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            row_condition (str or None): \
                An expression selecting the subset of rows on which the expectation is evaluated; \
                rows not matching the condition are ignored.
            condition_parser (str or None): \
                The parser used to interpret ``row_condition``; required when ``row_condition`` is supplied.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.
        Returns:
            An ExpectationSuiteValidationResult
            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        See Also:
            :func:`expect_column_values_to_match_json_schema \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_json_schema>`
        """
        raise NotImplementedError
    def expect_column_values_to_match_json_schema(
        self,
        column,
        json_schema,
        mostly=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect column entries to be JSON objects matching a given JSON schema.
        expect_column_values_to_match_json_schema is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
        Args:
            column (str): \
                The column name.
            json_schema (dict): \
                The JSON schema that each column entry is validated against.
        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.
        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            row_condition (str or None): \
                An expression selecting the subset of rows on which the expectation is evaluated; \
                rows not matching the condition are ignored.
            condition_parser (str or None): \
                The parser used to interpret ``row_condition``; required when ``row_condition`` is supplied.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.
        Returns:
            An ExpectationSuiteValidationResult
            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        See Also:
            :func:`expect_column_values_to_be_json_parseable \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_json_parseable>`
            The `JSON-schema docs <http://json-schema.org/>`_.
        """
        raise NotImplementedError
    ###
    #
    # Aggregate functions
    #
    ###
    def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(
        self,
        column,
        distribution,
        p_value=0.05,
        params=None,
        result_format=None,
        row_condition=None,
        condition_parser=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """
        Expect the column values to be distributed similarly to a scipy distribution. \
        This expectation compares the provided column to the specified continuous distribution with a parametric \
        Kolmogorov-Smirnov test. The K-S test compares the provided column to the cumulative density function (CDF) of \
        the specified scipy distribution. If you don't know the desired distribution shape parameters, use the \
        `ge.dataset.util.infer_distribution_parameters()` utility function to estimate them.
        It returns 'success'=True if the p-value from the K-S test is greater than or equal to the provided p-value.
        ``expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than`` is a \
        :func:`column_aggregate_expectation \
        <great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
        Args:
            column (str): \
                The column name.
            distribution (str): \
                The scipy distribution name. See: `<https://docs.scipy.org/doc/scipy/reference/stats.html>`_ Currently
                supported distributions are listed in the Notes section below.
            p_value (float): \
                The threshold p-value for a passing test. Default is 0.05.
            params (dict or list) : \
                A dictionary or positional list of shape parameters that describe the distribution you want to test the\
                data against. Include key values specific to the distribution from the appropriate scipy \
                distribution CDF function. 'loc' and 'scale' are used as translational parameters.\
                See `<https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions>`_
        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            row_condition (str or None): \
                An expression selecting the subset of rows on which the expectation is evaluated; \
                rows not matching the condition are ignored.
            condition_parser (str or None): \
                The parser used to interpret ``row_condition``; required when ``row_condition`` is supplied.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.
        Returns:
            An ExpectationSuiteValidationResult
            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        Notes:
            These fields in the result object are customized for this expectation:
            ::
                {
                    "details":
                        "expected_params" (dict): The specified or inferred parameters of the distribution to test \
                        against
                        "ks_results" (dict): The raw result of stats.kstest()
                }
            * The Kolmogorov-Smirnov test's null hypothesis is that the column is similar to the provided distribution.
            * Supported scipy distributions:
              * norm
              * beta
              * gamma
              * uniform
              * chi2
              * expon
        """
        raise NotImplementedError
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_distinct_values_to_be_in_set(
self,
column,
value_set,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect the set of distinct column values to be contained by a given set.
The success value for this expectation will match that of expect_column_values_to_be_in_set. However,
expect_column_distinct_values_to_be_in_set is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_distinct_values_to_be_in_set(
"my_col",
[2, 3, 4]
)
{
"success": false
"result": {
"observed_value": [1,2,3],
"details": {
"value_counts": [
{
"value": 1,
"count": 1
},
{
"value": 2,
"count": 1
},
{
"value": 3,
"count": 1
}
]
}
}
}
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
parse_strings_as_datetimes (boolean or None) : If True values provided in value_set will be parsed \
as datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_distinct_values_to_contain_set \
<great_expectations.dataset.dataset.Dataset.expect_column_distinct_values_to_contain_set>`
"""
observed_value_counts = self.get_column_value_counts(column)
if value_set is None:
# Vacuously true
success = True
parsed_observed_value_set = set(observed_value_counts.index)
else:
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
parsed_observed_value_set = set(
self._parse_value_set(observed_value_counts.index)
)
else:
parsed_value_set = value_set
parsed_observed_value_set = set(observed_value_counts.index)
expected_value_set = set(parsed_value_set)
success = parsed_observed_value_set.issubset(expected_value_set)
return {
"success": success,
"result": {
"observed_value": sorted(list(parsed_observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_distinct_values_to_equal_set(
self,
column,
value_set,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect the set of distinct column values to equal a given set.
In contrast to expect_column_distinct_values_to_contain_set() this ensures not only that a certain set of \
values are present in the column but that these *and only these* values are present.
expect_column_distinct_values_to_equal_set is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_distinct_values_to_equal_set(
"my_col",
[2,3]
)
{
"success": false
"result": {
"observed_value": [1,2,3]
},
}
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
parse_strings_as_datetimes (boolean or None) : If True values provided in value_set will be parsed as \
datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_distinct_values_to_contain_set \
<great_expectations.dataset.dataset.Dataset.expect_column_distinct_values_to_contain_set>`
"""
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
observed_value_counts = self.get_column_value_counts(column)
expected_value_set = set(parsed_value_set)
observed_value_set = set(observed_value_counts.index)
return {
"success": observed_value_set == expected_value_set,
"result": {
"observed_value": sorted(list(observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_distinct_values_to_contain_set(
self,
column,
value_set,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect the set of distinct column values to contain a given set.
In contrast to expect_column_values_to_be_in_set() this ensures not that all column values are members of the
given set but that values from the set *must* be present in the column.
expect_column_distinct_values_to_contain_set is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_distinct_values_to_contain_set(
"my_col",
[2,3]
)
{
"success": true
"result": {
"observed_value": [1,2,3]
},
}
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
parse_strings_as_datetimes (boolean or None) : If True values provided in value_set will be parsed as \
datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_distinct_values_to_equal_set \
<great_expectations.dataset.dataset.Dataset.expect_column_distinct_values_to_equal_set>`
"""
observed_value_counts = self.get_column_value_counts(column)
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
observed_value_counts.index = pd.to_datetime(observed_value_counts.index)
else:
parsed_value_set = value_set
expected_value_set = set(parsed_value_set)
observed_value_set = set(observed_value_counts.index)
return {
"success": observed_value_set.issuperset(expected_value_set),
"result": {
"observed_value": sorted(list(observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_mean_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column mean to be between a minimum value and a maximum value (inclusive).
expect_column_mean_to_be_between is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum value for the column mean.
max_value (float or None): \
The maximum value for the column mean.
strict_min (boolean):
If True, the column mean must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column mean must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true mean for the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound.
* If max_value is None, then min_value is treated as a lower bound.
See Also:
:func:`expect_column_median_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_median_to_be_between>`
:func:`expect_column_stdev_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_stdev_to_be_between>`
"""
if min_value is not None and not isinstance(min_value, Number):
raise ValueError("min_value must be a number")
if max_value is not None and not isinstance(max_value, Number):
raise ValueError("max_value must be a number")
column_mean = self.get_column_mean(column)
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
# Handle possible missing values
if column_mean is None:
return {"success": False, "result": {"observed_value": column_mean}}
if min_value is not None:
if strict_min:
above_min = column_mean > min_value
else:
above_min = column_mean >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_mean < max_value
else:
below_max = column_mean <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_mean}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_median_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column median to be between a minimum value and a maximum value.
expect_column_median_to_be_between is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (int or None): \
The minimum value for the column median.
max_value (int or None): \
The maximum value for the column median.
strict_min (boolean):
If True, the column median must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column median must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true median for the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>`
:func:`expect_column_stdev_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_stdev_to_be_between>`
"""
column_median = self.get_column_median(column)
if column_median is None:
return {"success": False, "result": {"observed_value": None}}
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = column_median > min_value
else:
above_min = column_median >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_median < max_value
else:
below_max = column_median <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_median}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_quantile_values_to_be_between(
self,
column,
quantile_ranges,
allow_relative_error=False,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect specific provided column quantiles to be between provided minimum and maximum values.
``quantile_ranges`` must be a dictionary with two keys:
* ``quantiles``: (list of float) increasing ordered list of desired quantile values
* ``value_ranges``: (list of lists): Each element in this list consists of a list with two values, a lower \
and upper bound (inclusive) for the corresponding quantile.
For each provided range:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound only
* If max_value is None, then min_value is treated as a lower bound only
The length of the quantiles list and quantile_values list must be equal.
For example:
::
# my_df.my_col = [1,2,2,3,3,3,4]
>>> my_df.expect_column_quantile_values_to_be_between(
"my_col",
{
"quantiles": [0., 0.333, 0.6667, 1.],
"value_ranges": [[0,1], [2,3], [3,4], [4,5]]
}
)
{
"success": True,
"result": {
"observed_value": {
"quantiles: [0., 0.333, 0.6667, 1.],
"values": [1, 2, 3, 4],
}
"element_count": 7,
"missing_count": 0,
"missing_percent": 0.0,
"details": {
"success_details": [true, true, true, true]
}
}
}
}
`expect_column_quantile_values_to_be_between` can be computationally intensive for large datasets.
expect_column_quantile_values_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
quantile_ranges (dictionary): \
Quantiles and associated value ranges for the column. See above for details.
allow_relative_error (boolean): \
Whether to allow relative error in quantile communications on backends that support or require it.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
details.success_details
See Also:
:func:`expect_column_min_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_min_to_be_between>`
:func:`expect_column_max_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_max_to_be_between>`
:func:`expect_column_median_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_median_to_be_between>`
"""
quantiles = quantile_ranges["quantiles"]
quantile_value_ranges = quantile_ranges["value_ranges"]
if len(quantiles) != len(quantile_value_ranges):
raise ValueError(
"quntile_values and quantiles must have the same number of elements"
)
quantile_vals = self.get_column_quantiles(
column, tuple(quantiles), allow_relative_error=allow_relative_error
)
# We explicitly allow "None" to be interpreted as +/- infinity
comparison_quantile_ranges = [
[
-np.inf if lower_bound is None else lower_bound,
np.inf if upper_bound is None else upper_bound,
]
for (lower_bound, upper_bound) in quantile_value_ranges
]
success_details = [
range_[0] <= quantile_vals[idx] <= range_[1]
for idx, range_ in enumerate(comparison_quantile_ranges)
]
return {
"success": np.all(success_details),
"result": {
"observed_value": {"quantiles": quantiles, "values": quantile_vals},
"details": {"success_details": success_details},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_stdev_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column standard deviation to be between a minimum value and a maximum value.
Uses sample standard deviation (normalized by N-1).
expect_column_stdev_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum value for the column standard deviation.
max_value (float or None): \
The maximum value for the column standard deviation.
strict_min (boolean):
If True, the column standard deviation must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column standard deviation must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true standard deviation for the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>`
:func:`expect_column_median_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_median_to_be_between>`
"""
column_stdev = self.get_column_stdev(column)
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = column_stdev > min_value
else:
above_min = column_stdev >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_stdev < max_value
else:
below_max = column_stdev <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_stdev}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_unique_value_count_to_be_between(
self,
column,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of unique values to be between a minimum value and a maximum value.
expect_column_unique_value_count_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (int or None): \
The minimum number of unique values allowed.
max_value (int or None): \
The maximum number of unique values allowed.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (int) The number of unique values in the column
}
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_proportion_of_unique_values_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_proportion_of_unique_values_to_be_between>`
"""
unique_value_count = self.get_column_unique_count(column)
if unique_value_count is None:
return {"success": False, "result": {"observed_value": unique_value_count}}
if min_value is not None:
above_min = unique_value_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = unique_value_count <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": unique_value_count}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_proportion_of_unique_values_to_be_between(
self,
column,
min_value=0,
max_value=1,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the proportion of unique values to be between a minimum value and a maximum value.
For example, in a column containing [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], there are 4 unique values and 10 total \
values for a proportion of 0.4.
expect_column_proportion_of_unique_values_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum proportion of unique values. (Proportions are on the range 0 to 1)
max_value (float or None): \
The maximum proportion of unique values. (Proportions are on the range 0 to 1)
strict_min (boolean):
If True, the minimum proportion of unique values must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the maximum proportion of unique values must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The proportion of unique values in the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_unique_value_count_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_unique_value_count_to_be_between>`
"""
# Tolerance docstring for later use:
# tolerance (float):
# tolerance for strict_min, strict_max, default=1e-9
unique_value_count = self.get_column_unique_count(column)
total_value_count = self.get_column_nonnull_count(column)
if total_value_count > 0:
proportion_unique = float(unique_value_count) / total_value_count
else:
proportion_unique = None
# if strict_min:
# if min_value:
# min_value += tolerance
#
# if strict_max:
# if max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = proportion_unique > min_value
else:
above_min = proportion_unique >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = proportion_unique < max_value
else:
below_max = proportion_unique <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": proportion_unique}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_most_common_value_to_be_in_set(
self,
column,
value_set,
ties_okay=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the most common value to be within the designated value set
expect_column_most_common_value_to_be_in_set is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name
value_set (set-like): \
A list of potential values to match
Keyword Args:
ties_okay (boolean or None): \
If True, then the expectation will still succeed if values outside the designated set are as common \
(but not more common) than designated values
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The most common values in the column
}
`observed_value` contains a list of the most common values.
Often, this will just be a single element. But if there's a tie for most common among multiple values,
`observed_value` will contain a single copy of each most common value.
"""
mode_list = self.get_column_modes(column)
intersection_count = len(set(value_set).intersection(mode_list))
if ties_okay:
success = intersection_count > 0
else:
success = len(mode_list) == 1 and intersection_count == 1
return {"success": success, "result": {"observed_value": mode_list}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_sum_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column to sum to be between an min and max value
expect_column_sum_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name
min_value (comparable type or None): \
The minimal sum allowed.
max_value (comparable type or None): \
The maximal sum allowed.
strict_min (boolean):
If True, the minimal sum must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the maximal sum must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column sum
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
column_sum = self.get_column_sum(column)
# Handle possible missing values
if column_sum is None:
return {"success": False, "result": {"observed_value": column_sum}}
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = column_sum > min_value
else:
above_min = column_sum >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_sum < max_value
else:
below_max = column_sum <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_sum}}
    # noinspection PyUnusedLocal
    @DocInherit
    @MetaDataset.column_aggregate_expectation
    def expect_column_min_to_be_between(
        self,
        column,
        min_value=None,
        max_value=None,
        strict_min=False,
        strict_max=False,  # tolerance=1e-9,
        parse_strings_as_datetimes=False,
        output_strftime_format=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect the column minimum to be between a min and max value.

        expect_column_min_to_be_between is a \
        :func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.

        Args:
            column (str): \
                The column name
            min_value (comparable type or None): \
                The minimal column minimum allowed.
            max_value (comparable type or None): \
                The maximal column minimum allowed.
            strict_min (boolean):
                If True, the minimal column minimum must be strictly larger than min_value, default=False
            strict_max (boolean):
                If True, the maximal column minimum must be strictly smaller than max_value, default=False

        Keyword Args:
            parse_strings_as_datetimes (Boolean or None): \
                If True, parse min_value, max_values, and all non-null column values to datetimes before making \
                comparisons.
            output_strftime_format (str or None): \
                A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            An ExpectationSuiteValidationResult

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        Notes:
            These fields in the result object are customized for this expectation:
            ::

                {
                    "observed_value": (comparable type) The actual column min \
                        (rendered as a string when parse_strings_as_datetimes is True)
                }

            * min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
            * If min_value is None, then max_value is treated as an upper bound
            * If max_value is None, then min_value is treated as a lower bound
        """
        # Tolerance docstring for later implementation:
        # tolerance(float):
        # tolerance for strict_min, strict_max, default=1e-9. If parse_strings_as_datetimes is True, this
        # tolerance is measured in number of days
        if parse_strings_as_datetimes:
            # tolerance = timedelta(days=tolerance)
            # Truthiness check: an empty-string or None bound is left untouched.
            if min_value:
                min_value = parse(min_value)
            if max_value:
                max_value = parse(max_value)
        column_min = self.get_column_min(column, parse_strings_as_datetimes)
        # if strict_min and min_value:
        #     min_value += tolerance
        #
        # if strict_max and max_value:
        #     max_value -= tolerance
        if column_min is None:
            # No observed minimum (e.g. all-null column) can satisfy the bounds.
            success = False
        else:
            if min_value is not None:
                if isinstance(column_min, datetime):
                    # Best-effort coercion of the bound to a datetime so the
                    # comparison below is like-typed; on failure, fall through
                    # and compare against the bound as given.
                    try:
                        min_value = parse(min_value)
                    except (ValueError, TypeError) as e:
                        pass
                if strict_min:
                    above_min = column_min > min_value
                else:
                    above_min = column_min >= min_value
            else:
                above_min = True
            if max_value is not None:
                if isinstance(column_min, datetime):
                    # Same best-effort coercion for the upper bound.
                    try:
                        max_value = parse(max_value)
                    except (ValueError, TypeError) as e:
                        pass
                if strict_max:
                    below_max = column_min < max_value
                else:
                    below_max = column_min <= max_value
            else:
                below_max = True
            success = above_min and below_max
        if parse_strings_as_datetimes:
            # Render the observed datetime as a string for JSON-friendly output.
            if output_strftime_format:
                column_min = datetime.strftime(column_min, output_strftime_format)
            else:
                column_min = str(column_min)
        return {"success": success, "result": {"observed_value": column_min}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_max_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False,
# tolerance=1e-9,
parse_strings_as_datetimes=False,
output_strftime_format=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column max to be between an min and max value
expect_column_max_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name
min_value (comparable type or None): \
The minimum number of unique values allowed.
max_value (comparable type or None): \
The maximum number of unique values allowed.
Keyword Args:
parse_strings_as_datetimes (Boolean or None): \
If True, parse min_value, max_values, and all non-null column values to datetimes before making \
comparisons.
output_strftime_format (str or None): \
A valid strfime format for datetime output. Only used if parse_strings_as_datetimes=True.
strict_min (boolean):
If True, the minimal column minimum must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the maximal column minimum must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column max
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
if parse_strings_as_datetimes:
# tolerance = timedelta(days=tolerance)
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
# else:
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
column_max = self.get_column_max(column, parse_strings_as_datetimes)
if column_max is None:
success = False
else:
if min_value is not None:
if isinstance(column_max, datetime):
try:
min_value = parse(min_value)
except (ValueError, TypeError) as e:
pass
if strict_min:
above_min = column_max > min_value
else:
above_min = column_max >= min_value
else:
above_min = True
if max_value is not None:
if isinstance(column_max, datetime):
try:
max_value = parse(max_value)
except (ValueError, TypeError) as e:
pass
if strict_max:
below_max = column_max < max_value
else:
below_max = column_max <= max_value
else:
below_max = True
success = above_min and below_max
if parse_strings_as_datetimes:
if output_strftime_format:
column_max = datetime.strftime(column_max, output_strftime_format)
else:
column_max = str(column_max)
return {"success": success, "result": {"observed_value": column_max}}
###
#
# Distributional expectations
#
###
    # noinspection PyUnusedLocal
    @DocInherit
    @MetaDataset.column_aggregate_expectation
    def expect_column_chisquare_test_p_value_to_be_greater_than(
        self,
        column,
        partition_object=None,
        p=0.05,
        tail_weight_holdout=0,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect column values to be distributed similarly to the provided categorical partition. \
        This expectation compares categorical distributions using a Chi-squared test. \
        It returns `success=True` if values in the column match the distribution of the provided partition.

        expect_column_chisquare_test_p_value_to_be_greater_than is a \
        :func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.

        Args:
            column (str): \
                The column name.
            partition_object (dict): \
                The expected partition object (see :ref:`partition_object`).
            p (float): \
                The p-value threshold for rejecting the null hypothesis of the Chi-Squared test.\
                For values below the specified threshold, the expectation will return `success=False`,\
                rejecting the null hypothesis that the distributions are the same.\
                Defaults to 0.05.

        Keyword Args:
            tail_weight_holdout (float between 0 and 1 or None): \
                The amount of weight to split uniformly between values observed in the data but not present in the \
                provided partition. tail_weight_holdout provides a mechanism to make the test less strict by \
                assigning positive weights to unknown values observed in the data that are not present in the \
                partition.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            An ExpectationSuiteValidationResult

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        Notes:
            These fields in the result object are customized for this expectation:
            ::

                {
                    "observed_value": (float) The true p-value of the Chi-squared test
                    "details": {
                        "observed_partition" (dict):
                            The partition observed in the data.
                        "expected_partition" (dict):
                            The partition expected from the data, after including tail_weight_holdout
                    }
                }

        Raises:
            ValueError: if partition_object is not a valid categorical partition object.
        """
        if not is_valid_categorical_partition_object(partition_object):
            raise ValueError("Invalid partition object.")
        element_count = self.get_column_nonnull_count(column)
        observed_frequencies = self.get_column_value_counts(column)
        # Convert to Series object to allow joining on index values: expected
        # counts = partition weights scaled up to the observed element count.
        expected_column = (
            pd.Series(
                partition_object["weights"],
                index=partition_object["values"],
                name="expected",
            )
            * element_count
        )
        # Join along the indices to allow proper comparison of both types of possible missing values
        # Sort parameter not available before pandas 0.23.0
        # test_df = pd.concat([expected_column, observed_frequencies], axis=1, sort=True)
        test_df = pd.concat([expected_column, observed_frequencies], axis=1)

        na_counts = test_df.isnull().sum()

        # Handle NaN: if we expected something that's not there, it's just not there.
        test_df["count"] = test_df["count"].fillna(0)
        # Handle NaN: if something's there that was not expected, substitute the relevant value for tail_weight_holdout
        if na_counts["expected"] > 0:
            # Scale existing expected values
            test_df["expected"] *= 1 - tail_weight_holdout
            # Fill NAs with holdout, split evenly among the unexpected values.
            test_df["expected"] = test_df["expected"].fillna(
                element_count * (tail_weight_holdout / na_counts["expected"])
            )

        # stats.chisquare returns (statistic, pvalue); we only need the p-value.
        test_result = stats.chisquare(test_df["count"], test_df["expected"])[1]

        # Normalize the outputs so they can be used as partitions into other expectations
        # GH653
        expected_weights = (test_df["expected"] / test_df["expected"].sum()).tolist()
        observed_weights = (test_df["count"] / test_df["count"].sum()).tolist()

        return {
            "success": test_result > p,
            "result": {
                "observed_value": test_result,
                "details": {
                    "observed_partition": {
                        "values": test_df.index.tolist(),
                        "weights": observed_weights,
                    },
                    "expected_partition": {
                        "values": test_df.index.tolist(),
                        "weights": expected_weights,
                    },
                },
            },
        }
def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(
self,
column,
partition_object=None,
p=0.05,
bootstrap_samples=None,
bootstrap_sample_size=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be distributed similarly to the provided continuous partition. This expectation \
compares continuous distributions using a bootstrapped Kolmogorov-Smirnov test. It returns `success=True` if \
values in the column match the distribution of the provided partition.
The expected cumulative density function (CDF) is constructed as a linear interpolation between the bins, \
using the provided weights. Consequently the test expects a piecewise uniform distribution using the bins from \
the provided partition object.
``expect_column_bootstrapped_ks_test_p_value_to_be_greater_than`` is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
partition_object (dict): \
The expected partition object (see :ref:`partition_object`).
p (float): \
The p-value threshold for the Kolmogorov-Smirnov test.
For values below the specified threshold the expectation will return `success=False`, rejecting the \
null hypothesis that the distributions are the same. \
Defaults to 0.05.
Keyword Args:
bootstrap_samples (int): \
The number bootstrap rounds. Defaults to 1000.
bootstrap_sample_size (int): \
The number of samples to take from the column for each bootstrap. A larger sample will increase the \
specificity of the test. Defaults to 2 * len(partition_object['weights'])
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true p-value of the KS test
"details": {
"bootstrap_samples": The number of bootstrap rounds used
"bootstrap_sample_size": The number of samples taken from
the column in each bootstrap round
"observed_cdf": The cumulative density function observed
in the data, a dict containing 'x' values and cdf_values
(suitable for plotting)
"expected_cdf" (dict):
The cumulative density function expected based on the
partition object, a dict containing 'x' values and
cdf_values (suitable for plotting)
"observed_partition" (dict):
The partition observed on the data, using the provided
bins but also expanding from min(column) to max(column)
"expected_partition" (dict):
The partition expected from the data. For KS test,
this will always be the partition_object parameter
}
}
"""
raise NotImplementedError
    # noinspection PyUnusedLocal
    @DocInherit
    @MetaDataset.column_aggregate_expectation
    def expect_column_kl_divergence_to_be_less_than(
        self,
        column,
        partition_object=None,
        threshold=None,
        tail_weight_holdout=0,
        internal_weight_holdout=0,
        bucketize_data=True,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect the Kulback-Leibler (KL) divergence (relative entropy) of the specified column with respect to the \
        partition object to be lower than the provided threshold.

        KL divergence compares two distributions. The higher the divergence value (relative entropy), the larger the \
        difference between the two distributions. A relative entropy of zero indicates that the data are \
        distributed identically, `when binned according to the provided partition`.

        In many practical contexts, choosing a value between 0.5 and 1 will provide a useful test.

        This expectation works on both categorical and continuous partitions. See notes below for details.

        ``expect_column_kl_divergence_to_be_less_than`` is a \
        :func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.

        Args:
            column (str): \
                The column name.
            partition_object (dict): \
                The expected partition object (see :ref:`partition_object`).
            threshold (float): \
                The maximum KL divergence to for which to return `success=True`. If KL divergence is larger than the\
                provided threshold, the test will return `success=False`.

        Keyword Args:
            internal_weight_holdout (float between 0 and 1 or None): \
                The amount of weight to split uniformly among zero-weighted partition bins. internal_weight_holdout \
                provides a mechanisms to make the test less strict by assigning positive weights to values observed in \
                the data for which the partition explicitly expected zero weight. With no internal_weight_holdout, \
                any value observed in such a region will cause KL divergence to rise to +Infinity.\
                Defaults to 0.
            tail_weight_holdout (float between 0 and 1 or None): \
                The amount of weight to add to the tails of the histogram. Tail weight holdout is split evenly between\
                (-Infinity, min(partition_object['bins'])) and (max(partition_object['bins']), +Infinity). \
                tail_weight_holdout provides a mechanism to make the test less strict by assigning positive weights to \
                values observed in the data that are not present in the partition. With no tail_weight_holdout, \
                any value observed outside the provided partition_object will cause KL divergence to rise to +Infinity.\
                Defaults to 0.
            bucketize_data (boolean): If True, then continuous data will be bucketized before evaluation. Setting
                this parameter to false allows evaluation of KL divergence with a None partition object for profiling
                against discrete data.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            An ExpectationSuiteValidationResult

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        Notes:
            These fields in the result object are customized for this expectation:
            ::

                {
                  "observed_value": (float) The true KL divergence (relative entropy) or None if the value is \
                  calculated as infinity, -infinity, or NaN
                  "details": {
                    "observed_partition": (dict) The partition observed in the data
                    "expected_partition": (dict) The partition against which the data were compared,
                                          after applying specified weight holdouts.
                  }
                }

            If the partition_object is categorical, this expectation will expect the values in column to also be \
            categorical.

                * If the column includes values that are not present in the partition, the tail_weight_holdout will be \
                equally split among those values, providing a mechanism to weaken the strictness of the expectation \
                (otherwise, relative entropy would immediately go to infinity).
                * If the partition includes values that are not present in the column, the test will simply include \
                zero weight for that value.

            If the partition_object is continuous, this expectation will discretize the values in the column according \
            to the bins specified in the partition_object, and apply the test to the resulting distribution.

                * The internal_weight_holdout and tail_weight_holdout parameters provide a mechanism to weaken the \
                expectation, since an expected weight of zero would drive relative entropy to be infinite if any data \
                are observed in that interval.
                * If internal_weight_holdout is specified, that value will be distributed equally among any intervals \
                with weight zero in the partition_object.
                * If tail_weight_holdout is specified, that value will be appended to the tails of the bins \
                ((-Infinity, min(bins)) and (max(bins), Infinity).

            If relative entropy/kl divergence goes to infinity for any of the reasons mentioned above, the observed value\
            will be set to None. This is because inf, -inf, Nan, are not json serializable and cause some json parsers to\
            crash when encountered. The python None token will be serialized to null in json.

        See also:
            :func:`expect_column_chisquare_test_p_value_to_be_greater_than \
            <great_expectations.dataset.dataset.Dataset.expect_column_unique_value_count_to_be_between>`

            :func:`expect_column_bootstrapped_ks_test_p_value_to_be_greater_than \
            <great_expectations.dataset.dataset.Dataset.expect_column_unique_value_count_to_be_between>`

        """
        # When no partition is supplied, profile one directly from the data.
        # bucketize_data decides between a continuous (binned) and a
        # categorical (value-based) partition.
        if partition_object is None:
            if bucketize_data:
                partition_object = build_continuous_partition_object(
                    dataset=self, column=column
                )
            else:
                partition_object = build_categorical_partition_object(
                    dataset=self, column=column
                )

        # --- Argument validation -------------------------------------------
        if not is_valid_partition_object(partition_object):
            raise ValueError("Invalid partition object.")

        if threshold is not None and (
            (not isinstance(threshold, (int, float))) or (threshold < 0)
        ):
            raise ValueError(
                "Threshold must be specified, greater than or equal to zero."
            )

        if (
            (not isinstance(tail_weight_holdout, (int, float)))
            or (tail_weight_holdout < 0)
            or (tail_weight_holdout > 1)
        ):
            raise ValueError("tail_weight_holdout must be between zero and one.")

        if (
            (not isinstance(internal_weight_holdout, (int, float)))
            or (internal_weight_holdout < 0)
            or (internal_weight_holdout > 1)
        ):
            raise ValueError("internal_weight_holdout must be between zero and one.")

        # Explicit tail_weights in the partition and a tail_weight_holdout are
        # mutually exclusive ways of assigning tail mass.
        if tail_weight_holdout != 0 and "tail_weights" in partition_object:
            raise ValueError(
                "tail_weight_holdout must be 0 when using tail_weights in partition object"
            )

        # TODO: add checks for duplicate values in is_valid_categorical_partition_object
        if is_valid_categorical_partition_object(partition_object):
            if internal_weight_holdout > 0:
                raise ValueError(
                    "Internal weight holdout cannot be used for discrete data."
                )

            # Data are expected to be discrete, use value_counts
            observed_weights = self.get_column_value_counts(
                column
            ) / self.get_column_nonnull_count(column)
            expected_weights = pd.Series(
                partition_object["weights"],
                index=partition_object["values"],
                name="expected",
            )
            # Sort not available before pandas 0.23.0
            # test_df = pd.concat([expected_weights, observed_weights], axis=1, sort=True)
            # Outer-join observed and expected weights on the category values;
            # categories present in only one side produce NaN in the other.
            test_df = pd.concat([expected_weights, observed_weights], axis=1)

            na_counts = test_df.isnull().sum()

            # Handle NaN: if we expected something that's not there, it's just not there.
            pk = test_df["count"].fillna(0)
            # Handle NaN: if something's there that was not expected,
            # substitute the relevant value for tail_weight_holdout
            if na_counts["expected"] > 0:
                # Scale existing expected values
                test_df["expected"] *= 1 - tail_weight_holdout
                # Fill NAs with holdout.
                qk = test_df["expected"].fillna(
                    tail_weight_holdout / na_counts["expected"]
                )
            else:
                qk = test_df["expected"]

            # scipy normalizes pk/qk internally; result is +inf when qk has a
            # zero where pk is nonzero.
            kl_divergence = stats.entropy(pk, qk)

            # inf/-inf/NaN are not JSON-serializable, so report None instead.
            if np.isinf(kl_divergence) or np.isnan(kl_divergence):
                observed_value = None
            else:
                observed_value = kl_divergence

            if threshold is None:
                success = True
            else:
                success = kl_divergence <= threshold

            return_obj = {
                "success": success,
                "result": {
                    "observed_value": observed_value,
                    "details": {
                        "observed_partition": {
                            "values": test_df.index.tolist(),
                            "weights": pk.tolist(),
                        },
                        "expected_partition": {
                            "values": test_df.index.tolist(),
                            "weights": qk.tolist(),
                        },
                    },
                },
            }

        else:
            # Data are expected to be continuous; discretize first
            if bucketize_data is False:
                raise ValueError(
                    "KL Divergence cannot be computed with a continuous partition object and the bucketize_data "
                    "parameter set to false."
                )
            # Build the histogram first using expected bins so that the largest bin is >=
            hist = np.array(
                self.get_column_hist(column, tuple(partition_object["bins"]))
            )
            # np.histogram(column, partition_object['bins'], density=False)
            # NOTE(review): bin_edges is assigned but never read below.
            bin_edges = partition_object["bins"]
            # Add in the frequencies observed above or below the provided partition
            # below_partition = len(np.where(column < partition_object['bins'][0])[0])
            # above_partition = len(np.where(column > partition_object['bins'][-1])[0])
            below_partition = self.get_column_count_in_range(
                column, max_val=partition_object["bins"][0]
            )
            above_partition = self.get_column_count_in_range(
                column, min_val=partition_object["bins"][-1], strict_min=True
            )

            # Observed Weights is just the histogram values divided by the total number of observations
            observed_weights = np.array(hist) / self.get_column_nonnull_count(column)

            # Adjust expected_weights to account for tail_weight and internal_weight
            if "tail_weights" in partition_object:
                partition_tail_weight_holdout = np.sum(partition_object["tail_weights"])
            else:
                partition_tail_weight_holdout = 0

            # Scale down the in-partition weights so total mass stays 1 after
            # the holdouts are redistributed.
            expected_weights = np.array(partition_object["weights"]) * (
                1 - tail_weight_holdout - internal_weight_holdout
            )

            # Assign internal weight holdout values if applicable
            if internal_weight_holdout > 0:
                zero_count = len(expected_weights) - np.count_nonzero(expected_weights)
                if zero_count > 0:
                    # Split the internal holdout evenly across zero-weight bins.
                    for index, value in enumerate(expected_weights):
                        if value == 0:
                            expected_weights[index] = (
                                internal_weight_holdout / zero_count
                            )

            # Assign tail weight holdout if applicable
            # We need to check cases to only add tail weight holdout if it makes sense based on the provided partition.
            # Four cases follow, depending on which partition endpoints (if
            # any) are infinite; each builds comb_* arrays (used for the KL
            # computation) and *_tail_weights (reported separately).
            if (partition_object["bins"][0] == -np.inf) and (
                partition_object["bins"][-1]
            ) == np.inf:
                # Case 1: both endpoints infinite — the first/last bins already
                # cover the tails, so no extra holdout is meaningful.
                if tail_weight_holdout > 0:
                    raise ValueError(
                        "tail_weight_holdout cannot be used for partitions with infinite endpoints."
                    )

                if "tail_weights" in partition_object:
                    raise ValueError(
                        "There can be no tail weights for partitions with one or both endpoints at infinity"
                    )

                # Remove -inf and inf
                expected_bins = partition_object["bins"][1:-1]

                comb_expected_weights = expected_weights
                # Set aside tail weights
                expected_tail_weights = np.concatenate(
                    ([expected_weights[0]], [expected_weights[-1]])
                )
                # Remove tail weights
                expected_weights = expected_weights[1:-1]

                comb_observed_weights = observed_weights
                # Set aside tail weights
                observed_tail_weights = np.concatenate(
                    ([observed_weights[0]], [observed_weights[-1]])
                )
                # Remove tail weights
                observed_weights = observed_weights[1:-1]

            elif partition_object["bins"][0] == -np.inf:
                # Case 2: only the left endpoint is infinite; the holdout goes
                # entirely to the right tail.
                if "tail_weights" in partition_object:
                    raise ValueError(
                        "There can be no tail weights for partitions with one or both endpoints at infinity"
                    )

                # Remove -inf
                expected_bins = partition_object["bins"][1:]

                comb_expected_weights = np.concatenate(
                    (expected_weights, [tail_weight_holdout])
                )
                # Set aside left tail weight and holdout
                expected_tail_weights = np.concatenate(
                    ([expected_weights[0]], [tail_weight_holdout])
                )
                # Remove left tail weight from main expected_weights
                expected_weights = expected_weights[1:]

                comb_observed_weights = np.concatenate(
                    (
                        observed_weights,
                        [above_partition / self.get_column_nonnull_count(column)],
                    )
                )
                # Set aside left tail weight and above partition weight
                observed_tail_weights = np.concatenate(
                    (
                        [observed_weights[0]],
                        [above_partition / self.get_column_nonnull_count(column)],
                    )
                )
                # Remove left tail weight from main observed_weights
                observed_weights = observed_weights[1:]

            elif partition_object["bins"][-1] == np.inf:
                # Case 3: only the right endpoint is infinite; mirror of case 2.
                if "tail_weights" in partition_object:
                    raise ValueError(
                        "There can be no tail weights for partitions with one or both endpoints at infinity"
                    )

                # Remove inf
                expected_bins = partition_object["bins"][:-1]

                comb_expected_weights = np.concatenate(
                    ([tail_weight_holdout], expected_weights)
                )
                # Set aside right tail weight and holdout
                expected_tail_weights = np.concatenate(
                    ([tail_weight_holdout], [expected_weights[-1]])
                )
                # Remove right tail weight from main expected_weights
                expected_weights = expected_weights[:-1]

                comb_observed_weights = np.concatenate(
                    (
                        [below_partition / self.get_column_nonnull_count(column)],
                        observed_weights,
                    )
                )
                # Set aside right tail weight and below partition weight
                observed_tail_weights = np.concatenate(
                    (
                        [below_partition / self.get_column_nonnull_count(column)],
                        [observed_weights[-1]],
                    )
                )
                # Remove right tail weight from main observed_weights
                observed_weights = observed_weights[:-1]
            else:
                # Case 4: both endpoints finite — append tail bins from either
                # explicit tail_weights or the evenly-split holdout.
                # No need to remove -inf or inf
                expected_bins = partition_object["bins"]

                if "tail_weights" in partition_object:
                    tail_weights = partition_object["tail_weights"]
                    # Tack on tail weights
                    comb_expected_weights = np.concatenate(
                        ([tail_weights[0]], expected_weights, [tail_weights[1]])
                    )
                    # Tail weights are just tail_weights
                    expected_tail_weights = np.array(tail_weights)
                else:
                    comb_expected_weights = np.concatenate(
                        (
                            [tail_weight_holdout / 2],
                            expected_weights,
                            [tail_weight_holdout / 2],
                        )
                    )
                    # Tail weights are just tail_weight holdout divided equally to both tails
                    expected_tail_weights = np.concatenate(
                        ([tail_weight_holdout / 2], [tail_weight_holdout / 2])
                    )

                comb_observed_weights = np.concatenate(
                    (
                        [below_partition / self.get_column_nonnull_count(column)],
                        observed_weights,
                        [above_partition / self.get_column_nonnull_count(column)],
                    )
                )
                # Tail weights are just the counts on either side of the partition
                observed_tail_weights = np.concatenate(
                    ([below_partition], [above_partition])
                ) / self.get_column_nonnull_count(column)

                # Main expected_weights and main observed weights had no tail_weights, so nothing needs to be removed.

            # TODO: VERIFY THAT THIS STILL WORKS BASED ON CHANGE TO HIST
            # comb_expected_weights = np.array(comb_expected_weights).astype(float)
            # comb_observed_weights = np.array(comb_observed_weights).astype(float)

            kl_divergence = stats.entropy(comb_observed_weights, comb_expected_weights)

            # inf/-inf/NaN are not JSON-serializable, so report None instead.
            if np.isinf(kl_divergence) or np.isnan(kl_divergence):
                observed_value = None
            else:
                observed_value = kl_divergence

            if threshold is None:
                success = True
            else:
                success = kl_divergence <= threshold

            return_obj = {
                "success": success,
                "result": {
                    "observed_value": observed_value,
                    "details": {
                        "observed_partition": {
                            # return expected_bins, since we used those bins to compute the observed_weights
                            "bins": expected_bins,
                            "weights": observed_weights.tolist(),
                            "tail_weights": observed_tail_weights.tolist(),
                        },
                        "expected_partition": {
                            "bins": expected_bins,
                            "weights": expected_weights.tolist(),
                            "tail_weights": expected_tail_weights.tolist(),
                        },
                    },
                },
            }

        return return_obj
    @MetaDataset.column_aggregate_expectation
    def expect_column_pair_cramers_phi_value_to_be_less_than(
        self,
        column_A,
        column_B,
        bins_A=None,
        bins_B=None,
        n_bins_A=None,
        n_bins_B=None,
        threshold=0.1,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """
        Expect the values in column_A to be independent of those in column_B.

        Independence is measured with Cramer's V (derived from a chi-square
        test on the two columns' contingency table); the expectation succeeds
        when the statistic is at most `threshold`.

        Args:
            column_A (str): The first column name
            column_B (str): The second column name
            threshold (float): Maximum allowed value of cramers V for expectation to pass.

        Keyword Args:
            bins_A (list of float): Bins for column_A.
            bins_B (list of float): Bins for column_B.
            n_bins_A (int): Number of bins for column_A. Ignored if bins_A is not None.
            n_bins_B (int): Number of bins for column_B. Ignored if bins_B is not None.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            A JSON-serializable expectation result object.

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
        """
        # Contingency table of the two columns; continuous columns are binned
        # via bins_*/n_bins_* before tabulation.
        crosstab = self.get_crosstab(
            column_A, column_B, bins_A, bins_B, n_bins_A, n_bins_B
        )
        # chi2_result[0] is the chi-square test statistic.
        chi2_result = stats.chi2_contingency(crosstab)
        # See e.g. https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
        # V = sqrt(chi2 / (n * (min(r, k) - 1))); clamp into [0, 1] to guard
        # against floating-point drift outside the theoretical range.
        cramers_V = max(
            min(
                np.sqrt(
                    chi2_result[0] / self.get_row_count() / (min(crosstab.shape) - 1)
                ),
                1,
            ),
            0,
        )
        return_obj = {
            "success": cramers_V <= threshold,
            "result": {
                "observed_value": cramers_V,
                # NOTE(review): the full crosstab is returned both as
                # "unexpected_list" and under "details" — presumably for
                # rendering; confirm downstream consumers before changing.
                "unexpected_list": crosstab,
                "details": {"crosstab": crosstab},
            },
        }
        return return_obj
###
#
# Column pairs
#
###
def expect_column_pair_values_to_be_equal(
self,
column_A,
column_B,
ignore_row_if="both_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the values in column A to be the same as column B.
Args:
column_A (str): The first column name
column_B (str): The second column name
Keyword Args:
ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "neither"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_column_pair_values_A_to_be_greater_than_B(
self,
column_A,
column_B,
or_equal=None,
parse_strings_as_datetimes=False,
allow_cross_type_comparisons=None,
ignore_row_if="both_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect values in column A to be greater than column B.
Args:
column_A (str): The first column name
column_B (str): The second column name
or_equal (boolean or None): If True, then values can be equal, not strictly greater
Keyword Args:
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
Keyword Args:
ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "neither
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_column_pair_values_to_be_in_set(
self,
column_A,
column_B,
value_pairs_set,
ignore_row_if="both_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect paired values from columns A and B to belong to a set of valid pairs.
Args:
column_A (str): The first column name
column_B (str): The second column name
value_pairs_set (list of tuples): All the valid pairs to be matched
Keyword Args:
ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
###
#
# Multicolumn
#
###
def expect_multicolumn_values_to_be_unique(
self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
NOTE: This method is deprecated. Please use expect_select_column_values_to_be_unique_within_record instead
Expect the values for each record to be unique across the columns listed.
Note that records can be duplicated.
For example::
A B C
1 1 2 Fail
1 2 3 Pass
8 2 7 Pass
1 2 3 Pass
4 4 4 Fail
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_select_column_values_to_be_unique_within_record(
self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the values for each record to be unique across the columns listed.
Note that records can be duplicated.
For example::
A B C
1 1 2 Fail
1 2 3 Pass
8 2 7 Pass
1 2 3 Pass
4 4 4 Fail
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_compound_columns_to_be_unique(
self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect that the columns are unique together, e.g. a multi-column primary key
Note that all instances of any duplicates are considered failed
For example::
A B C
1 1 2 Fail
1 2 3 Pass
1 1 2 Fail
2 2 2 Pass
3 2 3 Pass
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_multicolumn_sum_to_equal(
self,
column_list,
sum_total,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
""" Multi-Column Map Expectation
Expects that sum of all rows for a set of columns is equal to a specific value
Args:
column_list (List[str]): \
Set of columns to be checked
sum_total (int): \
expected sum of columns
"""
raise NotImplementedError
@staticmethod
def _parse_value_set(value_set):
parsed_value_set = [
parse(value) if isinstance(value, str) else value for value in value_set
]
return parsed_value_set
def attempt_allowing_relative_error(self) -> Union[bool, float]:
"""
Subclasses can override this method if the respective data source (e.g., Redshift) supports "approximate" mode.
In certain cases (e.g., for SparkDFDataset), a fraction between 0 and 1 (i.e., not only a boolean) is allowed.
"""
return False
| apache-2.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/process/cache.py | 4 | 2983 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.util import lru
from buildbot import config
from twisted.application import service
class CacheManager(config.ReconfigurableServiceMixin, service.Service):

    """
    A manager for a collection of caches, each for different types of objects
    and with potentially-overlapping key spaces.

    There is generally only one instance of this class, available at
    C{master.caches}.
    """

    # NOTE: this is Python 2 code (uses dict.iteritems below).

    # a cache of length one still has many benefits: it collects objects that
    # remain referenced elsewhere; it collapses simultaneous misses into one
    # miss function; and it will optimize repeated fetches of the same object.
    DEFAULT_CACHE_SIZE = 1

    def __init__(self):
        self.setName('caches')
        # per-cache max-size configuration; replaced wholesale on reconfig
        self.config = {}
        # cache_name -> AsyncLRUCache; caches persist for the service lifetime
        self._caches = {}

    def get_cache(self, cache_name, miss_fn):
        """
        Get an L{AsyncLRUCache} object with the given name.  If such an object
        does not exist, it will be created.  Since the cache is permanent, the
        first call's C{miss_fn} is the one the cache keeps; callers typically
        invoke this once, e.g., in C{startService}, and store the returned
        value indefinitely.

        @param cache_name: name of the cache (usually the name of the type of
        object it stores)
        @param miss_fn: miss function for the cache; see L{AsyncLRUCache}
        constructor.
        @returns: L{AsyncLRUCache} instance
        """
        try:
            return self._caches[cache_name]
        except KeyError:
            # First request for this name: size from config (or the default),
            # then create and remember the cache.
            max_size = self.config.get(cache_name, self.DEFAULT_CACHE_SIZE)
            assert max_size >= 1
            c = self._caches[cache_name] = lru.AsyncLRUCache(miss_fn, max_size)
            return c

    def reconfigService(self, new_config):
        """Apply new per-cache sizes from C{new_config.caches}, resizing every
        existing cache, then delegate to the reconfigurable-service mixin."""
        self.config = new_config.caches
        for name, cache in self._caches.iteritems():
            cache.set_max_size(new_config.caches.get(name,
                                        self.DEFAULT_CACHE_SIZE))

        return config.ReconfigurableServiceMixin.reconfigService(self,
                                                            new_config)

    def get_metrics(self):
        """Return a dict mapping each cache name to its hit/refhit/miss counts
        and configured maximum size (for metrics reporting)."""
        return dict([
            (n, dict(hits=c.hits, refhits=c.refhits,
                     misses=c.misses, max_size=c.max_size))
            for n, c in self._caches.iteritems()])
| gpl-2.0 |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/pytz/tzinfo.py | 380 | 19368 | '''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
_timedelta_cache = {}

def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    delta = _timedelta_cache.get(seconds)
    if delta is None:
        # First sighting of this offset: build and memoize it. Cached values
        # are timedelta instances, never None, so the sentinel check is safe.
        delta = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = delta
    return delta
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}

def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    dt = _datetime_cache.get(seconds)
    if dt is None:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096), so offset
        # from the cached epoch instead.
        dt = _epoch + timedelta(seconds=seconds)
        _datetime_cache[seconds] = dt
    return dt
_ttinfo_cache = {}

def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    ttinfo = _ttinfo_cache.get(args)
    if ttinfo is None:
        # (utcoffset, dst, tzname) — the two numeric fields are shared
        # timedelta instances via memorized_timedelta.
        ttinfo = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2],
        )
        _ttinfo_cache[args] = ttinfo
    return ttinfo
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
    '''Common base class for pytz timezone classes.

    Subclasses (StaticTzInfo, DstTzInfo) override the attributes below.
    '''

    # Overridden in subclass
    _utcoffset = None  # timedelta offset from UTC
    _tzname = None  # abbreviated timezone name
    zone = None  # zone name, e.g. 'Africa/Abidjan'

    def __str__(self):
        # The zone name is the canonical string form of a timezone.
        return self.zone
class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most locations have changed their
    offset at some point in their history
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if dt.tzinfo is not None and dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        # Shift the UTC wall time by the fixed offset and tag it with this zone.
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        # Static zones never observe daylight saving, so DST is always zero.
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        # With a constant offset there is no ambiguity, so simply attach self.
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        This is normally a no-op, as StaticTzInfo timezones never have
        ambiguous cases to correct:

        >>> from pytz import timezone
        >>> gmt = timezone('GMT')
        >>> isinstance(gmt, StaticTzInfo)
        True
        >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
        >>> gmt.normalize(dt) is dt
        True

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently normalize() also works:

        >>> la = timezone('America/Los_Angeles')
        >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> gmt.normalize(dt).strftime(fmt)
        '2011-05-07 08:02:03 GMT (+0000)'
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        # Aware datetime in another zone: convert into this one.
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
    '''A timezone that has a variable offset from UTC

    The offset might change if daylight saving time comes into effect,
    or at a point in history when the region decides to change their
    timezone definition.
    '''
    # Overridden in subclass
    _utc_transition_times = None  # Sorted list of DST transition times in UTC
    _transition_info = None  # [(utcoffset, dstoffset, tzname)] corresponding
                             # to _utc_transition_times entries
    zone = None

    # Set in __init__
    _tzinfos = None
    _dst = None  # DST offset

    def __init__(self, _inf=None, _tzinfos=None):
        # Two construction modes:
        #   - with _inf/_tzinfos: build one instance for a single
        #     transition-info tuple, sharing the zone-wide _tzinfos map;
        #   - without arguments: build the "root" instance for the zone's
        #     first transition and eagerly populate _tzinfos with one
        #     instance per distinct transition-info tuple.
        if _inf:
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = _inf
        else:
            _tzinfos = {}
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = self._transition_info[0]
            _tzinfos[self._transition_info[0]] = self
            for inf in self._transition_info[1:]:
                if inf not in _tzinfos:
                    _tzinfos[inf] = self.__class__(inf, _tzinfos)

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        # Any instance sharing this zone's _tzinfos map is acceptable, since
        # they all represent the same zone at different transition periods.
        if (dt.tzinfo is not None
            and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
            raise ValueError('fromutc: dt.tzinfo is not self')
        dt = dt.replace(tzinfo=None)
        # Find the last transition at or before dt (clamped to the first).
        idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
        inf = self._transition_info[idx]
        return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])

    def normalize(self, dt):
        '''Correct the timezone information on the given datetime

        If date arithmetic crosses DST boundaries, the tzinfo
        is not magically adjusted. This method normalizes the
        tzinfo to the correct one.

        To test, first we need to do some setup

        >>> from pytz import timezone
        >>> utc = timezone('UTC')
        >>> eastern = timezone('US/Eastern')
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

        We next create a datetime right on an end-of-DST transition point,
        the instant when the wallclocks are wound back one hour.

        >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
        >>> loc_dt = utc_dt.astimezone(eastern)
        >>> loc_dt.strftime(fmt)
        '2002-10-27 01:00:00 EST (-0500)'

        Now, if we subtract a few minutes from it, note that the timezone
        information has not changed.

        >>> before = loc_dt - timedelta(minutes=10)
        >>> before.strftime(fmt)
        '2002-10-27 00:50:00 EST (-0500)'

        But we can fix that by calling the normalize method

        >>> before = eastern.normalize(before)
        >>> before.strftime(fmt)
        '2002-10-27 01:50:00 EDT (-0400)'

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently, normalize() also works:

        >>> th = timezone('Asia/Bangkok')
        >>> am = timezone('Europe/Amsterdam')
        >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> am.normalize(dt).strftime(fmt)
        '2011-05-06 20:02:03 CEST (+0200)'
        '''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')

        # Convert dt in localtime to UTC
        offset = dt.tzinfo._utcoffset
        dt = dt.replace(tzinfo=None)
        dt = dt - offset
        # convert it back, and return it
        return self.fromutc(dt)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time.

        This method should be used to construct localtimes, rather
        than passing a tzinfo argument to a datetime constructor.

        is_dst is used to determine the correct timezone in the ambigous
        period at the end of daylight saving time.

        >>> from pytz import timezone
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> amdam = timezone('Europe/Amsterdam')
        >>> dt  = datetime(2004, 10, 31, 2, 0, 0)
        >>> loc_dt1 = amdam.localize(dt, is_dst=True)
        >>> loc_dt2 = amdam.localize(dt, is_dst=False)
        >>> loc_dt1.strftime(fmt)
        '2004-10-31 02:00:00 CEST (+0200)'
        >>> loc_dt2.strftime(fmt)
        '2004-10-31 02:00:00 CET (+0100)'
        >>> str(loc_dt2 - loc_dt1)
        '1:00:00'

        Use is_dst=None to raise an AmbiguousTimeError for ambiguous
        times at the end of daylight saving time

        >>> try:
        ...     loc_dt1 = amdam.localize(dt, is_dst=None)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        is_dst defaults to False

        >>> amdam.localize(dt) == amdam.localize(dt, False)
        True

        is_dst is also used to determine the correct timezone in the
        wallclock times jumped over at the start of daylight saving time.

        >>> pacific = timezone('US/Pacific')
        >>> dt = datetime(2008, 3, 9, 2, 0, 0)
        >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
        >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
        >>> ploc_dt1.strftime(fmt)
        '2008-03-09 02:00:00 PDT (-0700)'
        >>> ploc_dt2.strftime(fmt)
        '2008-03-09 02:00:00 PST (-0800)'
        >>> str(ploc_dt2 - ploc_dt1)
        '1:00:00'

        Use is_dst=None to raise a NonExistentTimeError for these skipped
        times.

        >>> try:
        ...     loc_dt1 = pacific.localize(dt, is_dst=None)
        ... except NonExistentTimeError:
        ...     print('Non-existent')
        Non-existent
        '''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')

        # Find the two best possibilities.
        # Probing one day either side of dt guarantees we land in two
        # (possibly identical) transition periods surrounding dt; each
        # candidate that round-trips back to the same wall-clock time is
        # a valid interpretation of dt.
        possible_loc_dt = set()
        for delta in [timedelta(days=-1), timedelta(days=1)]:
            loc_dt = dt + delta
            idx = max(0, bisect_right(
                self._utc_transition_times, loc_dt) - 1)
            inf = self._transition_info[idx]
            tzinfo = self._tzinfos[inf]
            loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
            if loc_dt.replace(tzinfo=None) == dt:
                possible_loc_dt.add(loc_dt)

        if len(possible_loc_dt) == 1:
            return possible_loc_dt.pop()

        # If there are no possibly correct timezones, we are attempting
        # to convert a time that never happened - the time period jumped
        # during the start-of-DST transition period.
        if len(possible_loc_dt) == 0:
            # If we refuse to guess, raise an exception.
            if is_dst is None:
                raise NonExistentTimeError(dt)

            # If we are forcing the pre-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock forward a few
            # hours.
            elif is_dst:
                return self.localize(
                    dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)

            # If we are forcing the post-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock back.
            else:
                return self.localize(
                    dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)

        # If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occuring during the end-of-DST transition.

        # If told to be strict, raise an exception since we have an
        # ambiguous case
        if is_dst is None:
            raise AmbiguousTimeError(dt)

        # Filter out the possiblilities that don't match the requested
        # is_dst
        filtered_possible_loc_dt = [
            p for p in possible_loc_dt
            if bool(p.tzinfo._dst) == is_dst
        ]

        # Hopefully we only have one possibility left. Return it.
        if len(filtered_possible_loc_dt) == 1:
            return filtered_possible_loc_dt[0]

        if len(filtered_possible_loc_dt) == 0:
            filtered_possible_loc_dt = list(possible_loc_dt)

        # If we get this far, we have in a wierd timezone transition
        # where the clocks have been wound back but is_dst is the same
        # in both (eg. Europe/Warsaw 1915 when they switched to CET).
        # At this point, we just have to guess unless we allow more
        # hints to be passed in (such as the UTC offset or abbreviation),
        # but that is just getting silly.
        #
        # Choose the earliest (by UTC) applicable timezone if is_dst=True
        # Choose the latest (by UTC) applicable timezone if is_dst=False
        # i.e., behave like end-of-DST transition
        dates = {}  # utc -> local
        for local_dt in filtered_possible_loc_dt:
            utc_time = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
            assert utc_time not in dates
            dates[utc_time] = local_dt
        # [min, max][not is_dst] selects min when is_dst is truthy,
        # max otherwise.
        return dates[[min, max][not is_dst](dates)]

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')
        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.utcoffset(ambiguous, is_dst=False)
        datetime.timedelta(-1, 73800)

        >>> tz.utcoffset(ambiguous, is_dst=True)
        datetime.timedelta(-1, 77400)

        >>> try:
        ...     tz.utcoffset(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            # Re-localize to pick the correct transition-period instance.
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._utcoffset
        else:
            return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.dst(normal)
        datetime.timedelta(0, 3600)
        >>> tz.dst(normal, is_dst=False)
        datetime.timedelta(0, 3600)
        >>> tz.dst(normal, is_dst=True)
        datetime.timedelta(0, 3600)

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.dst(ambiguous, is_dst=False)
        datetime.timedelta(0)
        >>> tz.dst(ambiguous, is_dst=True)
        datetime.timedelta(0, 3600)
        >>> try:
        ...     tz.dst(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._dst
        else:
            return self._dst

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.tzname(normal)
        'NDT'
        >>> tz.tzname(normal, is_dst=False)
        'NDT'
        >>> tz.tzname(normal, is_dst=True)
        'NDT'

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.tzname(ambiguous, is_dst=False)
        'NST'
        >>> tz.tzname(ambiguous, is_dst=True)
        'NDT'
        >>> try:
        ...     tz.tzname(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return self.zone
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._tzname
        else:
            return self._tzname

    def __repr__(self):
        if self._dst:
            dst = 'DST'
        else:
            dst = 'STD'
        # Positive offsets need an explicit '+'; negative timedeltas
        # render their own sign.
        if self._utcoffset > _notime:
            return '<DstTzInfo %r %s+%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )
        else:
            return '<DstTzInfo %r %s%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (
            self.zone,
            _to_seconds(self._utcoffset),
            _to_seconds(self._dst),
            self._tzname
        )
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zones implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo - just return it.
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. We need to determine which
    # of the list of tzinfo instances for this zone to use in order to
    # restore the state of any datetime instances using it correctly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    key = (utcoffset, dstoffset, tzname)
    if key in tz._tzinfos:
        return tz._tzinfos[key]
    # The particular state requested no longer exists: either the pickle is
    # corrupt, or the timezone database has been corrected violently enough
    # that this (utcoffset, dstoffset) pair is gone from the zone, or the
    # abbreviation has been changed.

    # See if we can find an entry differing only by tzname. Abbreviations
    # get changed from the initial guess by the database maintainers to
    # match reality when this information is discovered.
    for candidate in tz._tzinfos.values():
        if candidate._utcoffset == utcoffset and candidate._dst == dstoffset:
            return candidate

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back so datetime instances using this stale information
    # continue to behave exactly as they did before being pickled. This is
    # purely an overly paranoid safety net - it should never be needed in
    # real life.
    tz._tzinfos[key] = tz.__class__(key, tz._tzinfos)
    return tz._tzinfos[key]
| apache-2.0 |
ezequielpereira/Time-Line | libs64/wx/tools/dbg.py | 5 | 8768 | #----------------------------------------------------------------------------
# Name: dbg.py
# RCS-ID: $Id: dbg.py 39667 2006-06-11 00:13:05Z RD $
# Author: Will Sadkin
# Email: wsadkin@nameconnector.com
# Created: 07/11/2002
# Copyright: (c) 2002 by Will Sadkin, 2002
# License: wxWindows license
#----------------------------------------------------------------------------
# 12/21/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o V2.5 compatability update
#
"""
This module provides a useful debugging framework that supports
showing nesting of function calls and allows a program to contain
lots of debugging print statements that can easily be turned on
or off to debug the code. It also supports the ability to
have each function indent the debugging statements contained
within it, including those of any other function called within
its scope, thus allowing you to see in what order functions are
being called, and from where.
This capability is particularly useful in wxPython applications,
where exactly events occur that cause functions to be called is
not entirely clear, and because wxPython programs can't be run
from inside other debugging environments that have their own
message loops.
This module defines a Logger class, responsible for managing
debugging output. Each Logger instance can be given a name
at construction; if this is done, '<name>:' will precede each
logging output made by that Logger instance.
The log() function this class provides takes a set of positional
arguments that are printed in order if debugging is enabled
(just like print does), followed by a set of keyword arguments
that control the behavior of the log() function itself on subsequent
calls. The current keyword arguments are:
indent
When set to a value of 1, this increments the current
indentation level, causing all subsequent dbg() outputs to be
indented by 3 more spaces. When set to a value of 0,
this process is reversed, causing the indent to decrease by
3 spaces. The default indentation level is 0.
enable
When set to a value of 1, this turns on dbg() output for
for program importing this module, until told to do otherwise.
When set to a value of 0, dbg output is turned off. (dbg
output is off by default.)
suspend
When set to a value of 1, this increments the current
"suspension" level. This makes it possible for a function
to temporarily suspend its and any of its dependents'
potential outputs that use the same Logger instance.
When set to a value of 0, the suspension level is
decremented. When the value goes back to 0, potential
logging is resumed (actual output depends on the
"enable" status of the Logger instance in question.)
wxlog
When set to a value of 1, the output will be sent to the
active wxLog target.
stream
When set to a non-None value, the current output stream
(default of sys.stdout) is pushed onto a stack of streams,
and is replaced in the dbg system with the specified stream.
When called with a value of None, the previous stream will
be restored (if stacked.) If set to None without previously
changing it will result in no action being taken.
You can also call the log function implicitly on the Logger
instance, ie. you can type::
from wxPython.tools.dbg import Logger
dbg = Logger()
dbg('something to print')
Using this fairly simple mechanism, it is possible to get fairly
useful debugging output in a program. Consider the following
code example:
>>> d = {1:'a', 2:'dictionary', 3:'of', 4:'words'}
>>> dbg = dbg.Logger('module')
>>> dbg(enable=1)
module: dbg enabled
>>> def foo(d):
... dbg('foo', indent=1)
... bar(d)
... dbg('end of foo', indent=0)
...
>>> def bar(d):
... dbg('bar', indent=1)
... dbg('contents of d:', indent=1)
... l = d.items()
... l.sort()
... for key, value in l:
... dbg('%d =' % key, value)
... dbg(indent=0)
... dbg('end of bar', indent=0)
...
>>> foo(d)
module: foo
module: bar
module: contents of d:
module: 1 = a
module: 2 = dictionary
module: 3 = of
module: 4 = words
module: end of bar
module: end of foo
>>>
"""
class Logger:
    """Indentation-aware debug logger.

    Each instance may be given a name at construction; if so, '<name>:'
    precedes every line it emits. The log() method prints its positional
    arguments (when logging is enabled and not suspended) and then applies
    its keyword arguments ('indent', 'enable', 'suspend', 'wxlog',
    'stream') as state changes for subsequent calls.
    """

    def __init__(self, name=None):
        import sys
        self.name = name
        self._indent = 0                  # current number of indentations
        self._dbg = 0                     # enable/disable flag
        self._suspend = 0                 # suspend/resume counter for potential dbg output
        self._wxLog = 0                   # use wxLogMessage for debug output
        self._outstream = sys.stdout      # default output stream
        self._outstream_stack = []        # for restoration of streams as necessary

    def IsEnabled(self):
        """Return the enable flag (truthy when logging is on)."""
        # BUG FIX: original definition was missing ``self``, so every call
        # raised TypeError.
        return self._dbg

    def IsSuspended(self):
        """Return the current suspension level (0 = not suspended)."""
        # BUG FIX: original definition was missing ``self`` and returned the
        # bare name ``_suspend``, which raised NameError when called.
        return self._suspend

    def log(self, *args, **kwargs):
        """
        This function provides a useful framework for generating
        optional debugging output that can be displayed at an
        arbitrary level of indentation.
        """
        # Bail out early when disabled, unless the caller is (re)enabling.
        if not self._dbg and 'enable' not in kwargs:
            return

        if self._dbg and len(args) and not self._suspend:
            # (emulate print functionality; handle unicode as best as possible:)
            strs = []
            for arg in args:
                try:
                    strs.append(str(arg))
                except Exception:
                    strs.append(repr(arg))

            output = ' '.join(strs)
            if self.name:
                output = self.name + ': ' + output
            output = ' ' * 3 * self._indent + output

            if self._wxLog:
                from wxPython.wx import wxLogMessage    # (if not already imported)
                wxLogMessage(output)
            else:
                self._outstream.write(output + '\n')
                self._outstream.flush()
        # else do nothing

        # post process args:
        for kwarg, value in kwargs.items():
            if kwarg == 'indent':
                self.SetIndent(value)
            elif kwarg == 'enable':
                self.SetEnabled(value)
            elif kwarg == 'suspend':
                self.SetSuspend(value)
            elif kwarg == 'wxlog':
                self.SetWxLog(value)
            elif kwarg == 'stream':
                self.SetStream(value)

    # aliases for the log function
    dbg = log           # backwards compatible
    msg = log           #
    __call__ = log      # this one lets you 'call' the instance directly

    def SetEnabled(self, value):
        """Turn logging on (truthy value) or off, announcing the change."""
        if value:
            old_dbg = self._dbg
            self._dbg = 1
            if not old_dbg:
                self.dbg('dbg enabled')
        else:
            if self._dbg:
                self.dbg('dbg disabled')
            self._dbg = 0

    def SetSuspend(self, value):
        """Increment (truthy value) or decrement the suspension level."""
        if value:
            self._suspend += 1
        elif self._suspend > 0:
            self._suspend -= 1

    def SetIndent(self, value):
        """Increment (truthy value) or decrement the indentation level."""
        if value:
            self._indent += 1
        elif self._indent > 0:
            self._indent -= 1

    def SetWxLog(self, value):
        """When truthy, route output through wxLogMessage instead of a stream."""
        self._wxLog = value

    def SetStream(self, value):
        """Push a new output stream, or pop back to the previous one (None)."""
        if value:
            self._outstream_stack.append(self._outstream)
            self._outstream = value
        elif value is None and len(self._outstream_stack) > 0:
            self._outstream = self._outstream_stack.pop(-1)
#------------------------------------------------------------
# Self-test / demo harness: exercises the Logger through the wx logging
# targets. Requires a wxPython installation at runtime.
if __name__ == "__main__":
    import sys
    import wx

    # Send wx log output to stderr so it is distinguishable from stdout.
    wx.Log_SetActiveTarget( wx.LogStderr() )
    logger = Logger('module')
    dbg = logger.dbg
    dbg(enable=1)
    logger('test __call__ interface')
    dbg('testing wxLog output to stderr:', wxlog=1, indent=1)
    dbg('1,2,3...')
    dbg('testing wx.LogNull:')
    devnull = wx.LogNull()
    dbg('4,5,6...') # shouldn't print, according to doc...
    del devnull
    dbg('(resuming to wx.LogStdErr)', '7,8,9...', indent=0)
    dbg('disabling wx.Log output, switching to stderr:')
    dbg(wxlog=0, stream=sys.stderr)
    dbg(logger._outstream, 'switching back to stdout:')
    dbg(stream=None)
    dbg(logger._outstream )

    # NOTE(review): the parameter name ``str`` shadows the builtin; kept
    # as-is since this block is documentation-only.
    def foo(str):
        dbg('foo:', indent=1)
        dbg(str, indent=0)
    foo('testing dbg inside function')

    # Demonstrates using Logger as a mixin base class.
    class bar(Logger):
        def __init__(self, name):
            Logger.__init__(self, name)
        def enable(self, value):
            self.dbg(enable=value)
        def foo(self, str):
            self.dbg('foo:', indent=1)
            self.dbg(str, indent=0)
    f = bar('class mixin')
    f.foo("shouldn't print")
    f.enable(1)
    f.foo("should print")
    dbg('test completed.', enable=0)
    dbg('(double-checking ;-)')
| gpl-3.0 |
hightower8083/chimeraCL | examples/lpa_script_small.py | 1 | 2948 | import sys
from time import time
from copy import deepcopy
import numpy as np
from chimeraCL.methods.generic_methods_cl import Communicator
from chimeraCL.particles import Particles
from chimeraCL.solver import Solver
from chimeraCL.frame import Frame
from chimeraCL.laser import add_gausian_pulse
from chimeraCL.diagnostics import Diagnostics
from chimeraCL.pic_loop import PIC_loop
########################################
############ USER-END SETUP ############
### NB: mind numbers dtypes ############
########################################
# Simulation steps
Nsteps = 8000
# Diagnostics
diag_in = {'Interval': 1000,
'ScalarFields': ['rho', 'Ez'], }
# Grid
xmin, xmax = -43., 43.
rmin, rmax = 0., 36.
Nx, Nr, M = 900, 90, 1
# Laser
a0 = 3
Lx, w0 = 10., 12.
x0, x_foc = 0., 100.
# Plasma
dens = 7e18 / (1.1e21/0.8**2)
Npx, Npr, Npth = 2, 2, 4
# Frame (maganes plasma injection at right boundary)
frame_velocity = 1.
frameSteps = 20
dens_profiles = [{'coord': 'x',
'points': [-100, 43.1, 90, 5e5],
'values': [ 0, 0, 1, 1]}, ]
####################################################################
### SIMULATION CONSTRUCTOR (don't touch without asking me first) ###
####################################################################
comm = Communicator()
grid_in = {'Xmin': xmin, 'Xmax': xmax, 'Nx': Nx,
'Rmin': rmin, 'Rmax': rmax, 'Nr': Nr,
'M': M, 'DampCells': 50
}
laser_in = {
'k0': 1., 'a0': a0, 'x0': x0,
'Lx': Lx, 'R': w0, 'x_foc': x_foc}
grid_in['dt'] = (grid_in['Xmax']-grid_in['Xmin']) / grid_in['Nx']
solver = Solver(grid_in, comm)
add_gausian_pulse(solver, laser=laser_in)
eons_in = {'Nppc': (Npx, Npr, Npth),
'dx': solver.Args['dx'],
'dr': solver.Args['dr'],
'dt': solver.Args['dt'],
'dens': dens,
'charge': -1,
}
ions_in = deepcopy(eons_in)
ions_in['charge'] = 1
ions_in['Immobile'] = True
eons = Particles(eons_in, comm)
ions = Particles(ions_in, comm)
ions.Args['InjectorSource'] = eons
frame_in = {'Velocity': frame_velocity,
'dt': solver.Args['dt'],
'Steps': frameSteps,
'DensityProfiles': dens_profiles
}
frame = Frame(frame_in)
diag = Diagnostics(solver=solver, species=[eons, ], frame=frame,
configs_in = diag_in)
loop = PIC_loop(solvers=[solver, ], species=[eons, ions],
frames=[frame, ], diags = [diag, ])
########################################
######### RUN THE SIMULATION ###########
########################################
t0 = time()
while loop.it<Nsteps+1:
loop.step()
if np.mod(loop.it, 10) == 0:
sys.stdout.write("\rstep {:d} of {:d}".format(loop.it, Nsteps))
sys.stdout.flush()
comm.queue.finish()
t0 = time() - t0
print("\nTotal time is {:g} mins \nMean step time is {:g} ms ".\
format(t0/60., t0/Nsteps*1e3) )
| gpl-3.0 |
alexandrucoman/vbox-neutron-agent | neutron/tests/unit/plugins/embrane/test_embrane_neutron_plugin.py | 40 | 2774 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
from neutron.plugins.embrane.common import config # noqa
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.'
'EmbraneFakePlugin')
class EmbranePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base test case that runs the DB plugin suite against the fake
    Embrane plugin, with the heleosapi dependency stubbed out."""

    _plugin_name = PLUGIN_NAME

    def setUp(self):
        cfg.CONF.set_override('admin_password', "admin123", 'heleos')
        heleos_patch = mock.patch.dict(sys.modules, {'heleosapi': mock.Mock()})
        heleos_patch.start()
        # dict patches must be explicitly stopped
        self.addCleanup(heleos_patch.stop)
        super(EmbranePluginV2TestCase, self).setUp(self._plugin_name)
class TestEmbraneBasicGet(test_plugin.TestBasicGet, EmbranePluginV2TestCase):
    """Run the stock basic-GET tests against the Embrane fake plugin."""
class TestEmbraneV2HTTPResponse(test_plugin.TestV2HTTPResponse,
                                EmbranePluginV2TestCase):
    """Run the stock HTTP-response tests against the Embrane fake plugin."""
class TestEmbranePortsV2(test_plugin.TestPortsV2, EmbranePluginV2TestCase):
    """Port tests, with base-class tests that clash with the plugin's
    dynamic class type skipped."""

    # NOTE(review): ``self.skip`` is presumably provided by the OpenStack
    # base test class - confirm; stock unittest only has ``skipTest``.
    _SKIP_REASON = ("Temporary skipping due to incompatibility with the"
                    " plugin dynamic class type")

    def test_create_ports_bulk_emulated_plugin_failure(self):
        self.skip(self._SKIP_REASON)

    def test_recycle_expired_previously_run_within_context(self):
        self.skip(self._SKIP_REASON)

    def test_recycle_held_ip_address(self):
        self.skip(self._SKIP_REASON)
class TestEmbraneNetworksV2(test_plugin.TestNetworksV2,
                            EmbranePluginV2TestCase):
    """Network tests, with the incompatible base-class test skipped."""

    def test_create_networks_bulk_emulated_plugin_failure(self):
        self.skip("Temporary skipping due to incompatibility with the"
                  " plugin dynamic class type")
class TestEmbraneSubnetsV2(test_plugin.TestSubnetsV2,
                           EmbranePluginV2TestCase):
    """Subnet tests, with the incompatible base-class test skipped."""

    def test_create_subnets_bulk_emulated_plugin_failure(self):
        self.skip("Temporary skipping due to incompatibility with the"
                  " plugin dynamic class type")
| apache-2.0 |
Ayub-Khan/edx-platform | common/djangoapps/student/management/tests/test_transfer_students.py | 122 | 6240 | """
Tests the transfer student management command
"""
from django.conf import settings
from mock import patch, call
from opaque_keys.edx import locator
import unittest
import ddt
from shoppingcart.models import Order, CertificateItem # pylint: disable=import-error
from course_modes.models import CourseMode
from student.management.commands import transfer_students
from student.models import CourseEnrollment, UNENROLL_DONE, EVENT_NAME_ENROLLMENT_DEACTIVATED, \
EVENT_NAME_ENROLLMENT_ACTIVATED, EVENT_NAME_ENROLLMENT_MODE_CHANGED
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestTransferStudents(ModuleStoreTestCase):
    """Tests for transferring students between courses."""

    PASSWORD = 'test'
    # Flipped to True by assert_unenroll_signal the first (and only
    # expected) time the UNENROLL_DONE signal fires.
    signal_fired = False

    def setUp(self, **kwargs):
        """Connect a stub receiver, and analytics event tracking."""
        super(TestTransferStudents, self).setUp()
        UNENROLL_DONE.connect(self.assert_unenroll_signal)
        # Patch the analytics tracker so emitted events can be asserted on.
        patcher = patch('student.models.tracker')
        self.mock_tracker = patcher.start()
        self.addCleanup(patcher.stop)
        self.addCleanup(UNENROLL_DONE.disconnect, self.assert_unenroll_signal)

    def assert_unenroll_signal(self, skip_refund=False, **kwargs):  # pylint: disable=unused-argument
        """ Signal Receiver stub for testing that the unenroll signal was fired. """
        # The signal must fire exactly once, and with skip_refund=True so
        # the student keeps their purchased certificate.
        self.assertFalse(self.signal_fired)
        self.assertTrue(skip_refund)
        self.signal_fired = True

    def test_transfer_students(self):
        """ Verify the transfer student command works as intended. """
        student = UserFactory.create()
        student.set_password(self.PASSWORD)
        student.save()
        mode = 'verified'

        # Original Course
        original_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
        course = self._create_course(original_course_location)
        # Enroll the student in 'verified'
        CourseEnrollment.enroll(student, course.id, mode="verified")

        # Create and purchase a verified cert for the original course.
        self._create_and_purchase_verified(student, course.id)

        # New Course 1
        course_location_one = locator.CourseLocator('Org1', 'Course1', 'Run1')
        new_course_one = self._create_course(course_location_one)

        # New Course 2
        course_location_two = locator.CourseLocator('Org2', 'Course2', 'Run2')
        new_course_two = self._create_course(course_location_two)

        original_key = unicode(course.id)
        new_key_one = unicode(new_course_one.id)
        new_key_two = unicode(new_course_two.id)

        # Run the actual management command
        transfer_students.Command().handle(
            source_course=original_key, dest_course_list=new_key_one + "," + new_key_two
        )
        self.assertTrue(self.signal_fired)

        # Confirm the analytics event was emitted. Expected sequence:
        # deactivation in the source course, then activation plus mode
        # change in each destination course.
        self.mock_tracker.emit.assert_has_calls(  # pylint: disable=maybe-no-member
            [
                call(
                    EVENT_NAME_ENROLLMENT_ACTIVATED,
                    {'course_id': original_key, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_MODE_CHANGED,
                    {'course_id': original_key, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_DEACTIVATED,
                    {'course_id': original_key, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_ACTIVATED,
                    {'course_id': new_key_one, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_MODE_CHANGED,
                    {'course_id': new_key_one, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_ACTIVATED,
                    {'course_id': new_key_two, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_MODE_CHANGED,
                    {'course_id': new_key_two, 'user_id': student.id, 'mode': mode}
                )
            ]
        )
        self.mock_tracker.reset_mock()

        # Confirm the enrollment mode is verified on the new courses, and enrollment is enabled as appropriate.
        self.assertEquals((mode, False), CourseEnrollment.enrollment_mode_for_user(student, course.id))
        self.assertEquals((mode, True), CourseEnrollment.enrollment_mode_for_user(student, new_course_one.id))
        self.assertEquals((mode, True), CourseEnrollment.enrollment_mode_for_user(student, new_course_two.id))

        # Confirm the student has not be refunded.
        target_certs = CertificateItem.objects.filter(
            course_id=course.id, user_id=student, status='purchased', mode=mode
        )
        self.assertTrue(target_certs[0])
        self.assertFalse(target_certs[0].refund_requested_time)
        self.assertEquals(target_certs[0].order.status, 'purchased')

    def _create_course(self, course_location):
        """ Creates a course """
        return CourseFactory.create(
            org=course_location.org,
            number=course_location.course,
            run=course_location.run
        )

    def _create_and_purchase_verified(self, student, course_id):
        """ Creates a verified mode for the course and purchases it for the student. """
        course_mode = CourseMode(course_id=course_id,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=50)
        course_mode.save()
        # When there is no expiration date on a verified mode, the user can always get a refund
        cart = Order.get_cart_for_user(user=student)
        CertificateItem.add_to_order(cart, course_id, 50, 'verified')
        cart.purchase()
| agpl-3.0 |
tseaver/google-cloud-python | automl/google/cloud/automl_v1beta1/proto/service_pb2.py | 2 | 109317 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/automl_v1beta1/proto/service.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.automl_v1beta1.proto import (
annotation_payload_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2,
)
from google.cloud.automl_v1beta1.proto import (
annotation_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2,
)
from google.cloud.automl_v1beta1.proto import (
column_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2,
)
from google.cloud.automl_v1beta1.proto import (
dataset_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2,
)
from google.cloud.automl_v1beta1.proto import (
image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2,
)
from google.cloud.automl_v1beta1.proto import (
io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2,
)
from google.cloud.automl_v1beta1.proto import (
model_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2,
)
from google.cloud.automl_v1beta1.proto import (
model_evaluation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2,
)
from google.cloud.automl_v1beta1.proto import (
operations_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_operations__pb2,
)
from google.cloud.automl_v1beta1.proto import (
table_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/automl_v1beta1/proto/service.proto",
package="google.cloud.automl.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n\037com.google.cloud.automl.v1beta1B\013AutoMlProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
'\n/google/cloud/automl_v1beta1/proto/service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a/google/cloud/automl_v1beta1/proto/dataset.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a\x32google/cloud/automl_v1beta1/proto/table_spec.proto\x1a#google/longrunning/operations.proto\x1a google/protobuf/field_mask.proto\x1a\x17google/api/client.proto"]\n\x14\x43reateDatasetRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x35\n\x07\x64\x61taset\x18\x02 \x01(\x0b\x32$.google.cloud.automl.v1beta1.Dataset"!\n\x11GetDatasetRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\\\n\x13ListDatasetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"g\n\x14ListDatasetsResponse\x12\x36\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32$.google.cloud.automl.v1beta1.Dataset\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"~\n\x14UpdateDatasetRequest\x12\x35\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32$.google.cloud.automl.v1beta1.Dataset\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"$\n\x14\x44\x65leteDatasetRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"a\n\x11ImportDataRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12>\n\x0cinput_config\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig"c\n\x11\x45xportDataRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\routput_config\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.OutputConfig"(\n\x18GetAnnotationSpecRequest\x12\x0c\n\x04name\x18\x01 
\x01(\t"S\n\x13GetTableSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x8e\x01\n\x15ListTableSpecsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"n\n\x16ListTableSpecsResponse\x12;\n\x0btable_specs\x18\x01 \x03(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x85\x01\n\x16UpdateTableSpecRequest\x12:\n\ntable_spec\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"T\n\x14GetColumnSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x8f\x01\n\x16ListColumnSpecsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"q\n\x17ListColumnSpecsResponse\x12=\n\x0c\x63olumn_specs\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x88\x01\n\x17UpdateColumnSpecRequest\x12<\n\x0b\x63olumn_spec\x18\x01 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"W\n\x12\x43reateModelRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32".google.cloud.automl.v1beta1.Model"\x1f\n\x0fGetModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"Z\n\x11ListModelsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"`\n\x12ListModelsResponse\x12\x31\n\x05model\x18\x01 
\x03(\x0b\x32".google.cloud.automl.v1beta1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t""\n\x12\x44\x65leteModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xca\x02\n\x12\x44\x65ployModelRequest\x12\x84\x01\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32H.google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12\x81\x01\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32G.google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\tB\x1b\n\x19model_deployment_metadata"$\n\x14UndeployModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"o\n\x12\x45xportModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12K\n\routput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.ModelExportOutputConfig"\x87\x01\n\x1e\x45xportEvaluatedExamplesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12W\n\routput_config\x18\x03 \x01(\x0b\x32@.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig")\n\x19GetModelEvaluationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"d\n\x1bListModelEvaluationsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"\x7f\n\x1cListModelEvaluationsResponse\x12\x46\n\x10model_evaluation\x18\x01 \x03(\x0b\x32,.google.cloud.automl.v1beta1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xd9"\n\x06\x41utoMl\x12\xac\x01\n\rCreateDataset\x12\x31.google.cloud.automl.v1beta1.CreateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"B\x82\xd3\xe4\x93\x02<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\x12\x9d\x01\n\nGetDataset\x12..google.cloud.automl.v1beta1.GetDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{name=projects/*/locations/*/datasets/*}\x12\xae\x01\n\x0cListDatasets\x12\x30.google.cloud.automl.v1beta1.ListDatasetsRequest\x1a\x31.google.cloud.automl.v1beta1.ListDatasetsResponse"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{parent=projects/*/locations/*}/datasets\x12\xb4\x01\n\rUpdateDataset\x12\x31.google.cloud.automl.v1beta1.UpdateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"J\x82\xd3\xe4\x93\x02\x44\x32\x39/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\x12\x9c\x01\n\rDeleteDataset\x12\x31.google.cloud.automl.v1beta1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"9\x82\xd3\xe4\x93\x02\x33*1/v1beta1/{name=projects/*/locations/*/datasets/*}\x12\xa4\x01\n\nImportData\x12..google.cloud.automl.v1beta1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"G\x82\xd3\xe4\x93\x02\x41"</v1beta1/{name=projects/*/locations/*/datasets/*}:importData:\x01*\x12\xa4\x01\n\nExportData\x12..google.cloud.automl.v1beta1.ExportDataRequest\x1a\x1d.google.longrunning.Operation"G\x82\xd3\xe4\x93\x02\x41"</v1beta1/{name=projects/*/locations/*/datasets/*}:exportData:\x01*\x12\xc4\x01\n\x11GetAnnotationSpec\x12\x35.google.cloud.automl.v1beta1.GetAnnotationSpecRequest\x1a+.google.cloud.automl.v1beta1.AnnotationSpec"K\x82\xd3\xe4\x93\x02\x45\x12\x43/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\x12\xb0\x01\n\x0cGetTableSpec\x12\x30.google.cloud.automl.v1beta1.GetTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"F\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\x12\xc1\x
01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"F\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\x12\xcd\x01\n\x0fUpdateTableSpec\x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"]\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\x12\xc1\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"T\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\x12\xd2\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"T\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\x12\xe0\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"m\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\x12\x9d\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\x12\x95\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model"7\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\x12\xa6\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"7\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\x12\x96\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31*//v1bet
a1/{name=projects/*/locations/*/models/*}\x12\xa0\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"A\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\x01*\x12\xa6\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"C\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\x12\xa0\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"A\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\x12\xc9\x01\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"R\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\x12\xc6\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\x12\xd7\x01\n\x14ListModelEvaluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_operations__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.DESCRIPTOR,
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,
google_dot_api_dot_client__pb2.DESCRIPTOR,
],
)
_CREATEDATASETREQUEST = _descriptor.Descriptor(
name="CreateDatasetRequest",
full_name="google.cloud.automl.v1beta1.CreateDatasetRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.automl.v1beta1.CreateDatasetRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset",
full_name="google.cloud.automl.v1beta1.CreateDatasetRequest.dataset",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=725,
serialized_end=818,
)
_GETDATASETREQUEST = _descriptor.Descriptor(
name="GetDatasetRequest",
full_name="google.cloud.automl.v1beta1.GetDatasetRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.GetDatasetRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=820,
serialized_end=853,
)
_LISTDATASETSREQUEST = _descriptor.Descriptor(
name="ListDatasetsRequest",
full_name="google.cloud.automl.v1beta1.ListDatasetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.filter",
index=1,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.page_size",
index=2,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.page_token",
index=3,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=855,
serialized_end=947,
)
_LISTDATASETSRESPONSE = _descriptor.Descriptor(
name="ListDatasetsResponse",
full_name="google.cloud.automl.v1beta1.ListDatasetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="datasets",
full_name="google.cloud.automl.v1beta1.ListDatasetsResponse.datasets",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=949,
serialized_end=1052,
)
_UPDATEDATASETREQUEST = _descriptor.Descriptor(
name="UpdateDatasetRequest",
full_name="google.cloud.automl.v1beta1.UpdateDatasetRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="dataset",
full_name="google.cloud.automl.v1beta1.UpdateDatasetRequest.dataset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.automl.v1beta1.UpdateDatasetRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1054,
serialized_end=1180,
)
_DELETEDATASETREQUEST = _descriptor.Descriptor(
name="DeleteDatasetRequest",
full_name="google.cloud.automl.v1beta1.DeleteDatasetRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.DeleteDatasetRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1182,
serialized_end=1218,
)
_IMPORTDATAREQUEST = _descriptor.Descriptor(
name="ImportDataRequest",
full_name="google.cloud.automl.v1beta1.ImportDataRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.ImportDataRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="input_config",
full_name="google.cloud.automl.v1beta1.ImportDataRequest.input_config",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1220,
serialized_end=1317,
)
_EXPORTDATAREQUEST = _descriptor.Descriptor(
name="ExportDataRequest",
full_name="google.cloud.automl.v1beta1.ExportDataRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.ExportDataRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="output_config",
full_name="google.cloud.automl.v1beta1.ExportDataRequest.output_config",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1319,
serialized_end=1418,
)
_GETANNOTATIONSPECREQUEST = _descriptor.Descriptor(
name="GetAnnotationSpecRequest",
full_name="google.cloud.automl.v1beta1.GetAnnotationSpecRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.GetAnnotationSpecRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1420,
serialized_end=1460,
)
_GETTABLESPECREQUEST = _descriptor.Descriptor(
name="GetTableSpecRequest",
full_name="google.cloud.automl.v1beta1.GetTableSpecRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.GetTableSpecRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.automl.v1beta1.GetTableSpecRequest.field_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1462,
serialized_end=1545,
)
_LISTTABLESPECSREQUEST = _descriptor.Descriptor(
name="ListTableSpecsRequest",
full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.field_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.filter",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.page_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token",
index=4,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1548,
serialized_end=1690,
)
_LISTTABLESPECSRESPONSE = _descriptor.Descriptor(
name="ListTableSpecsResponse",
full_name="google.cloud.automl.v1beta1.ListTableSpecsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_specs",
full_name="google.cloud.automl.v1beta1.ListTableSpecsResponse.table_specs",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1692,
serialized_end=1802,
)
_UPDATETABLESPECREQUEST = _descriptor.Descriptor(
name="UpdateTableSpecRequest",
full_name="google.cloud.automl.v1beta1.UpdateTableSpecRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_spec",
full_name="google.cloud.automl.v1beta1.UpdateTableSpecRequest.table_spec",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.automl.v1beta1.UpdateTableSpecRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1805,
serialized_end=1938,
)
_GETCOLUMNSPECREQUEST = _descriptor.Descriptor(
name="GetColumnSpecRequest",
full_name="google.cloud.automl.v1beta1.GetColumnSpecRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.automl.v1beta1.GetColumnSpecRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.automl.v1beta1.GetColumnSpecRequest.field_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1940,
serialized_end=2024,
)
# Message descriptor for ListColumnSpecsRequest.
# NOTE(review): this file is protoc-generated output (post-formatted); do not
# edit by hand — regenerate from the .proto. Field ``number`` values are the
# wire-format tags, and serialized_start/end index this message's range inside
# the file descriptor's serialized proto.
_LISTCOLUMNSPECSREQUEST = _descriptor.Descriptor(
    name="ListColumnSpecsRequest",
    full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # parent: string, tag 1 (string fields default to an empty unicode string).
        _descriptor.FieldDescriptor(
            name="parent",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.parent",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # field_mask: message-typed, tag 2; its message_type is linked to
        # FieldMask after all descriptors are built (see the fixup section below).
        _descriptor.FieldDescriptor(
            name="field_mask",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.field_mask",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # filter: string, tag 3.
        _descriptor.FieldDescriptor(
            name="filter",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.filter",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # page_size: int32, tag 4.
        _descriptor.FieldDescriptor(
            name="page_size",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_size",
            index=3,
            number=4,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # page_token: string, tag 6 (tag numbering in the .proto skips 5 here).
        _descriptor.FieldDescriptor(
            name="page_token",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token",
            index=4,
            number=6,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2027,
    serialized_end=2170,
)
# Message descriptor for ListColumnSpecsResponse (protoc-generated; do not edit).
_LISTCOLUMNSPECSRESPONSE = _descriptor.Descriptor(
    name="ListColumnSpecsResponse",
    full_name="google.cloud.automl.v1beta1.ListColumnSpecsResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # column_specs: repeated (label=3) message field, tag 1; linked to the
        # ColumnSpec descriptor in the fixup section below.
        _descriptor.FieldDescriptor(
            name="column_specs",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsResponse.column_specs",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # next_page_token: string, tag 2.
        _descriptor.FieldDescriptor(
            name="next_page_token",
            full_name="google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2172,
    serialized_end=2285,
)
# Message descriptor for UpdateColumnSpecRequest (protoc-generated; do not edit).
_UPDATECOLUMNSPECREQUEST = _descriptor.Descriptor(
    name="UpdateColumnSpecRequest",
    full_name="google.cloud.automl.v1beta1.UpdateColumnSpecRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # column_spec: message field, tag 1; linked to ColumnSpec in the fixups.
        _descriptor.FieldDescriptor(
            name="column_spec",
            full_name="google.cloud.automl.v1beta1.UpdateColumnSpecRequest.column_spec",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # update_mask: message field, tag 2; linked to FieldMask in the fixups.
        _descriptor.FieldDescriptor(
            name="update_mask",
            full_name="google.cloud.automl.v1beta1.UpdateColumnSpecRequest.update_mask",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2288,
    serialized_end=2424,
)
# Message descriptor for CreateModelRequest (protoc-generated; do not edit).
_CREATEMODELREQUEST = _descriptor.Descriptor(
    name="CreateModelRequest",
    full_name="google.cloud.automl.v1beta1.CreateModelRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # parent: string, tag 1.
        _descriptor.FieldDescriptor(
            name="parent",
            full_name="google.cloud.automl.v1beta1.CreateModelRequest.parent",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # model: message field, tag 4; linked to the Model descriptor in the fixups.
        _descriptor.FieldDescriptor(
            name="model",
            full_name="google.cloud.automl.v1beta1.CreateModelRequest.model",
            index=1,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2426,
    serialized_end=2513,
)
# Message descriptor for GetModelRequest: a single string ``name`` field
# (protoc-generated; do not edit).
_GETMODELREQUEST = _descriptor.Descriptor(
    name="GetModelRequest",
    full_name="google.cloud.automl.v1beta1.GetModelRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.GetModelRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2515,
    serialized_end=2546,
)
# Message descriptor for ListModelsRequest (protoc-generated; do not edit).
_LISTMODELSREQUEST = _descriptor.Descriptor(
    name="ListModelsRequest",
    full_name="google.cloud.automl.v1beta1.ListModelsRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # parent: string, tag 1.
        _descriptor.FieldDescriptor(
            name="parent",
            full_name="google.cloud.automl.v1beta1.ListModelsRequest.parent",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # filter: string, tag 3 (tag 2 is not used by this message).
        _descriptor.FieldDescriptor(
            name="filter",
            full_name="google.cloud.automl.v1beta1.ListModelsRequest.filter",
            index=1,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # page_size: int32, tag 4.
        _descriptor.FieldDescriptor(
            name="page_size",
            full_name="google.cloud.automl.v1beta1.ListModelsRequest.page_size",
            index=2,
            number=4,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # page_token: string, tag 6.
        _descriptor.FieldDescriptor(
            name="page_token",
            full_name="google.cloud.automl.v1beta1.ListModelsRequest.page_token",
            index=3,
            number=6,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2548,
    serialized_end=2638,
)
# Message descriptor for ListModelsResponse (protoc-generated; do not edit).
_LISTMODELSRESPONSE = _descriptor.Descriptor(
    name="ListModelsResponse",
    full_name="google.cloud.automl.v1beta1.ListModelsResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # model: repeated (label=3) message field, tag 1; linked to the Model
        # descriptor in the fixup section below.
        _descriptor.FieldDescriptor(
            name="model",
            full_name="google.cloud.automl.v1beta1.ListModelsResponse.model",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # next_page_token: string, tag 2.
        _descriptor.FieldDescriptor(
            name="next_page_token",
            full_name="google.cloud.automl.v1beta1.ListModelsResponse.next_page_token",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2640,
    serialized_end=2736,
)
# Message descriptor for DeleteModelRequest: a single string ``name`` field
# (protoc-generated; do not edit).
_DELETEMODELREQUEST = _descriptor.Descriptor(
    name="DeleteModelRequest",
    full_name="google.cloud.automl.v1beta1.DeleteModelRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.DeleteModelRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2738,
    serialized_end=2772,
)
# Message descriptor for DeployModelRequest. The two *_model_deployment_metadata
# fields are members of the ``model_deployment_metadata`` oneof declared below;
# the oneof's fields list starts empty and is populated by the fixup statements
# later in this file. (protoc-generated; do not edit.)
_DEPLOYMODELREQUEST = _descriptor.Descriptor(
    name="DeployModelRequest",
    full_name="google.cloud.automl.v1beta1.DeployModelRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # image_object_detection_model_deployment_metadata: message field, tag 2.
        _descriptor.FieldDescriptor(
            name="image_object_detection_model_deployment_metadata",
            full_name="google.cloud.automl.v1beta1.DeployModelRequest.image_object_detection_model_deployment_metadata",
            index=0,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # image_classification_model_deployment_metadata: message field, tag 4.
        _descriptor.FieldDescriptor(
            name="image_classification_model_deployment_metadata",
            full_name="google.cloud.automl.v1beta1.DeployModelRequest.image_classification_model_deployment_metadata",
            index=1,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # name: string, tag 1 (declared after the oneof members in the .proto,
        # hence index=2 despite the lower tag number).
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.DeployModelRequest.name",
            index=2,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="model_deployment_metadata",
            full_name="google.cloud.automl.v1beta1.DeployModelRequest.model_deployment_metadata",
            index=0,
            containing_type=None,
            fields=[],
        )
    ],
    serialized_start=2775,
    serialized_end=3105,
)
# Message descriptor for UndeployModelRequest: a single string ``name`` field
# (protoc-generated; do not edit).
_UNDEPLOYMODELREQUEST = _descriptor.Descriptor(
    name="UndeployModelRequest",
    full_name="google.cloud.automl.v1beta1.UndeployModelRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.UndeployModelRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3107,
    serialized_end=3143,
)
# Message descriptor for ExportModelRequest (protoc-generated; do not edit).
_EXPORTMODELREQUEST = _descriptor.Descriptor(
    name="ExportModelRequest",
    full_name="google.cloud.automl.v1beta1.ExportModelRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # name: string, tag 1.
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.ExportModelRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # output_config: message field, tag 3; linked to ModelExportOutputConfig
        # in the fixup section below.
        _descriptor.FieldDescriptor(
            name="output_config",
            full_name="google.cloud.automl.v1beta1.ExportModelRequest.output_config",
            index=1,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3145,
    serialized_end=3256,
)
# Message descriptor for ExportEvaluatedExamplesRequest (protoc-generated; do not edit).
_EXPORTEVALUATEDEXAMPLESREQUEST = _descriptor.Descriptor(
    name="ExportEvaluatedExamplesRequest",
    full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # name: string, tag 1.
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # output_config: message field, tag 3; linked to
        # ExportEvaluatedExamplesOutputConfig in the fixup section below.
        _descriptor.FieldDescriptor(
            name="output_config",
            full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest.output_config",
            index=1,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3259,
    serialized_end=3394,
)
# Message descriptor for GetModelEvaluationRequest: a single string ``name``
# field (protoc-generated; do not edit).
_GETMODELEVALUATIONREQUEST = _descriptor.Descriptor(
    name="GetModelEvaluationRequest",
    full_name="google.cloud.automl.v1beta1.GetModelEvaluationRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.automl.v1beta1.GetModelEvaluationRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3396,
    serialized_end=3437,
)
# Message descriptor for ListModelEvaluationsRequest (protoc-generated; do not edit).
_LISTMODELEVALUATIONSREQUEST = _descriptor.Descriptor(
    name="ListModelEvaluationsRequest",
    full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # parent: string, tag 1.
        _descriptor.FieldDescriptor(
            name="parent",
            full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.parent",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # filter: string, tag 3.
        _descriptor.FieldDescriptor(
            name="filter",
            full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.filter",
            index=1,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # page_size: int32, tag 4.
        _descriptor.FieldDescriptor(
            name="page_size",
            full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_size",
            index=2,
            number=4,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # page_token: string, tag 6.
        _descriptor.FieldDescriptor(
            name="page_token",
            full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token",
            index=3,
            number=6,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3439,
    serialized_end=3539,
)
# Message descriptor for ListModelEvaluationsResponse (protoc-generated; do not edit).
_LISTMODELEVALUATIONSRESPONSE = _descriptor.Descriptor(
    name="ListModelEvaluationsResponse",
    full_name="google.cloud.automl.v1beta1.ListModelEvaluationsResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # model_evaluation: repeated (label=3) message field, tag 1; linked to
        # the ModelEvaluation descriptor in the fixup section below.
        _descriptor.FieldDescriptor(
            name="model_evaluation",
            full_name="google.cloud.automl.v1beta1.ListModelEvaluationsResponse.model_evaluation",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # next_page_token: string, tag 2.
        _descriptor.FieldDescriptor(
            name="next_page_token",
            full_name="google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3541,
    serialized_end=3668,
)
# ---------------------------------------------------------------------------
# Cross-reference fixups (protoc-generated). Message-typed fields are built
# above with message_type=None; here each is pointed at the descriptor it
# refers to (possibly from another generated module), and the oneof members of
# DeployModelRequest are attached to their OneofDescriptor. Statement order
# matters — every descriptor must exist before it is referenced.
# ---------------------------------------------------------------------------
_CREATEDATASETREQUEST.fields_by_name[
    "dataset"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET
)
_LISTDATASETSRESPONSE.fields_by_name[
    "datasets"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET
)
_UPDATEDATASETREQUEST.fields_by_name[
    "dataset"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET
)
_UPDATEDATASETREQUEST.fields_by_name[
    "update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_IMPORTDATAREQUEST.fields_by_name[
    "input_config"
].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._INPUTCONFIG
_EXPORTDATAREQUEST.fields_by_name[
    "output_config"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._OUTPUTCONFIG
)
_GETTABLESPECREQUEST.fields_by_name[
    "field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTTABLESPECSREQUEST.fields_by_name[
    "field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTTABLESPECSRESPONSE.fields_by_name[
    "table_specs"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC
)
_UPDATETABLESPECREQUEST.fields_by_name[
    "table_spec"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC
)
_UPDATETABLESPECREQUEST.fields_by_name[
    "update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_GETCOLUMNSPECREQUEST.fields_by_name[
    "field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTCOLUMNSPECSREQUEST.fields_by_name[
    "field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTCOLUMNSPECSRESPONSE.fields_by_name[
    "column_specs"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC
)
_UPDATECOLUMNSPECREQUEST.fields_by_name[
    "column_spec"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC
)
_UPDATECOLUMNSPECREQUEST.fields_by_name[
    "update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CREATEMODELREQUEST.fields_by_name[
    "model"
].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL
_LISTMODELSRESPONSE.fields_by_name[
    "model"
].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL
_DEPLOYMODELREQUEST.fields_by_name[
    "image_object_detection_model_deployment_metadata"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA
)
_DEPLOYMODELREQUEST.fields_by_name[
    "image_classification_model_deployment_metadata"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA
)
# Register both metadata fields as members of the model_deployment_metadata
# oneof: append each to the oneof's field list and set its containing_oneof.
_DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"].fields.append(
    _DEPLOYMODELREQUEST.fields_by_name[
        "image_object_detection_model_deployment_metadata"
    ]
)
_DEPLOYMODELREQUEST.fields_by_name[
    "image_object_detection_model_deployment_metadata"
].containing_oneof = _DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"]
_DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"].fields.append(
    _DEPLOYMODELREQUEST.fields_by_name["image_classification_model_deployment_metadata"]
)
_DEPLOYMODELREQUEST.fields_by_name[
    "image_classification_model_deployment_metadata"
].containing_oneof = _DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"]
_EXPORTMODELREQUEST.fields_by_name[
    "output_config"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._MODELEXPORTOUTPUTCONFIG
)
_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name[
    "output_config"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG
)
_LISTMODELEVALUATIONSRESPONSE.fields_by_name[
    "model_evaluation"
].message_type = (
    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION
)
# Attach every message descriptor to the file descriptor's lookup table, then
# register the file descriptor with the default symbol database so the
# messages are resolvable by full name. (protoc-generated; do not edit.)
DESCRIPTOR.message_types_by_name["CreateDatasetRequest"] = _CREATEDATASETREQUEST
DESCRIPTOR.message_types_by_name["GetDatasetRequest"] = _GETDATASETREQUEST
DESCRIPTOR.message_types_by_name["ListDatasetsRequest"] = _LISTDATASETSREQUEST
DESCRIPTOR.message_types_by_name["ListDatasetsResponse"] = _LISTDATASETSRESPONSE
DESCRIPTOR.message_types_by_name["UpdateDatasetRequest"] = _UPDATEDATASETREQUEST
DESCRIPTOR.message_types_by_name["DeleteDatasetRequest"] = _DELETEDATASETREQUEST
DESCRIPTOR.message_types_by_name["ImportDataRequest"] = _IMPORTDATAREQUEST
DESCRIPTOR.message_types_by_name["ExportDataRequest"] = _EXPORTDATAREQUEST
DESCRIPTOR.message_types_by_name["GetAnnotationSpecRequest"] = _GETANNOTATIONSPECREQUEST
DESCRIPTOR.message_types_by_name["GetTableSpecRequest"] = _GETTABLESPECREQUEST
DESCRIPTOR.message_types_by_name["ListTableSpecsRequest"] = _LISTTABLESPECSREQUEST
DESCRIPTOR.message_types_by_name["ListTableSpecsResponse"] = _LISTTABLESPECSRESPONSE
DESCRIPTOR.message_types_by_name["UpdateTableSpecRequest"] = _UPDATETABLESPECREQUEST
DESCRIPTOR.message_types_by_name["GetColumnSpecRequest"] = _GETCOLUMNSPECREQUEST
DESCRIPTOR.message_types_by_name["ListColumnSpecsRequest"] = _LISTCOLUMNSPECSREQUEST
DESCRIPTOR.message_types_by_name["ListColumnSpecsResponse"] = _LISTCOLUMNSPECSRESPONSE
DESCRIPTOR.message_types_by_name["UpdateColumnSpecRequest"] = _UPDATECOLUMNSPECREQUEST
DESCRIPTOR.message_types_by_name["CreateModelRequest"] = _CREATEMODELREQUEST
DESCRIPTOR.message_types_by_name["GetModelRequest"] = _GETMODELREQUEST
DESCRIPTOR.message_types_by_name["ListModelsRequest"] = _LISTMODELSREQUEST
DESCRIPTOR.message_types_by_name["ListModelsResponse"] = _LISTMODELSRESPONSE
DESCRIPTOR.message_types_by_name["DeleteModelRequest"] = _DELETEMODELREQUEST
DESCRIPTOR.message_types_by_name["DeployModelRequest"] = _DEPLOYMODELREQUEST
DESCRIPTOR.message_types_by_name["UndeployModelRequest"] = _UNDEPLOYMODELREQUEST
DESCRIPTOR.message_types_by_name["ExportModelRequest"] = _EXPORTMODELREQUEST
DESCRIPTOR.message_types_by_name[
    "ExportEvaluatedExamplesRequest"
] = _EXPORTEVALUATEDEXAMPLESREQUEST
DESCRIPTOR.message_types_by_name[
    "GetModelEvaluationRequest"
] = _GETMODELEVALUATIONREQUEST
DESCRIPTOR.message_types_by_name[
    "ListModelEvaluationsRequest"
] = _LISTMODELEVALUATIONSREQUEST
DESCRIPTOR.message_types_by_name[
    "ListModelEvaluationsResponse"
] = _LISTMODELEVALUATIONSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateDatasetRequest = _reflection.GeneratedProtocolMessageType(
"CreateDatasetRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATEDATASETREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset].
Attributes:
parent:
The resource name of the project to create the dataset for.
dataset:
The dataset to create.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateDatasetRequest)
),
)
_sym_db.RegisterMessage(CreateDatasetRequest)
GetDatasetRequest = _reflection.GeneratedProtocolMessageType(
"GetDatasetRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETDATASETREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset].
Attributes:
name:
The resource name of the dataset to retrieve.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetDatasetRequest)
),
)
_sym_db.RegisterMessage(GetDatasetRequest)
ListDatasetsRequest = _reflection.GeneratedProtocolMessageType(
"ListDatasetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTDATASETSREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
Attributes:
parent:
The resource name of the project from which to list datasets.
filter:
An expression for filtering the results of the request. -
``dataset_metadata`` - for existence of the case (e.g.
image\_classification\_dataset\_metadata:\*). Some examples of
using the filter are: -
``translation_dataset_metadata:*`` --> The dataset has
translation\_dataset\_metadata.
page_size:
Requested page size. Server may return fewer results than
requested. If unspecified, server will pick a default size.
page_token:
A token identifying a page of results for the server to return
Typically obtained via [ListDatasetsResponse.next\_page\_token
][google.cloud.automl.v1beta1.ListDatasetsResponse.next\_page\
_token] of the previous [AutoMl.ListDatasets][google.cloud.aut
oml.v1beta1.AutoMl.ListDatasets] call.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsRequest)
),
)
_sym_db.RegisterMessage(ListDatasetsRequest)
ListDatasetsResponse = _reflection.GeneratedProtocolMessageType(
"ListDatasetsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTDATASETSRESPONSE,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Response message for
[AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
Attributes:
datasets:
The datasets read.
next_page_token:
A token to retrieve next page of results. Pass to [ListDataset
sRequest.page\_token][google.cloud.automl.v1beta1.ListDatasets
Request.page\_token] to obtain that page.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsResponse)
),
)
_sym_db.RegisterMessage(ListDatasetsResponse)
UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType(
"UpdateDatasetRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEDATASETREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset]
Attributes:
dataset:
The dataset which replaces the resource on the server.
update_mask:
The update mask applies to the resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateDatasetRequest)
),
)
_sym_db.RegisterMessage(UpdateDatasetRequest)
DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType(
"DeleteDatasetRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETEDATASETREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset].
Attributes:
name:
The resource name of the dataset to delete.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteDatasetRequest)
),
)
_sym_db.RegisterMessage(DeleteDatasetRequest)
ImportDataRequest = _reflection.GeneratedProtocolMessageType(
"ImportDataRequest",
(_message.Message,),
dict(
DESCRIPTOR=_IMPORTDATAREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData].
Attributes:
name:
Required. Dataset name. Dataset must already exist. All
imported annotations and examples will be added.
input_config:
Required. The desired input location and its domain specific
semantics, if any.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataRequest)
),
)
_sym_db.RegisterMessage(ImportDataRequest)
ExportDataRequest = _reflection.GeneratedProtocolMessageType(
"ExportDataRequest",
(_message.Message,),
dict(
DESCRIPTOR=_EXPORTDATAREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData].
Attributes:
name:
Required. The resource name of the dataset.
output_config:
Required. The desired output location.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataRequest)
),
)
_sym_db.RegisterMessage(ExportDataRequest)
GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType(
"GetAnnotationSpecRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETANNOTATIONSPECREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec].
Attributes:
name:
The resource name of the annotation spec to retrieve.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetAnnotationSpecRequest)
),
)
_sym_db.RegisterMessage(GetAnnotationSpecRequest)
GetTableSpecRequest = _reflection.GeneratedProtocolMessageType(
"GetTableSpecRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETTABLESPECREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec].
Attributes:
name:
The resource name of the table spec to retrieve.
field_mask:
Mask specifying which fields to read.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetTableSpecRequest)
),
)
_sym_db.RegisterMessage(GetTableSpecRequest)
ListTableSpecsRequest = _reflection.GeneratedProtocolMessageType(
"ListTableSpecsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTABLESPECSREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
Attributes:
parent:
The resource name of the dataset to list table specs from.
field_mask:
Mask specifying which fields to read.
filter:
Filter expression, see go/filtering.
page_size:
Requested page size. The server can return fewer results than
requested. If unspecified, the server will pick a default
size.
page_token:
A token identifying a page of results for the server to
return. Typically obtained from the [ListTableSpecsResponse.ne
xt\_page\_token][google.cloud.automl.v1beta1.ListTableSpecsRes
ponse.next\_page\_token] field of the previous [AutoMl.ListTab
leSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]
call.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsRequest)
),
)
_sym_db.RegisterMessage(ListTableSpecsRequest)
ListTableSpecsResponse = _reflection.GeneratedProtocolMessageType(
"ListTableSpecsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTABLESPECSRESPONSE,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Response message for
[AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
Attributes:
table_specs:
The table specs read.
next_page_token:
A token to retrieve next page of results. Pass to [ListTableSp
ecsRequest.page\_token][google.cloud.automl.v1beta1.ListTableS
pecsRequest.page\_token] to obtain that page.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsResponse)
),
)
_sym_db.RegisterMessage(ListTableSpecsResponse)
UpdateTableSpecRequest = _reflection.GeneratedProtocolMessageType(
"UpdateTableSpecRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATETABLESPECREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec]
Attributes:
table_spec:
The table spec which replaces the resource on the server.
update_mask:
The update mask applies to the resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateTableSpecRequest)
),
)
_sym_db.RegisterMessage(UpdateTableSpecRequest)
GetColumnSpecRequest = _reflection.GeneratedProtocolMessageType(
"GetColumnSpecRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETCOLUMNSPECREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec].
Attributes:
name:
The resource name of the column spec to retrieve.
field_mask:
Mask specifying which fields to read.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetColumnSpecRequest)
),
)
_sym_db.RegisterMessage(GetColumnSpecRequest)
ListColumnSpecsRequest = _reflection.GeneratedProtocolMessageType(
"ListColumnSpecsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTCOLUMNSPECSREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
Attributes:
parent:
The resource name of the table spec to list column specs from.
field_mask:
Mask specifying which fields to read.
filter:
Filter expression, see go/filtering.
page_size:
Requested page size. The server can return fewer results than
requested. If unspecified, the server will pick a default
size.
page_token:
A token identifying a page of results for the server to
return. Typically obtained from the [ListColumnSpecsResponse.n
ext\_page\_token][google.cloud.automl.v1beta1.ListColumnSpecsR
esponse.next\_page\_token] field of the previous [AutoMl.ListC
olumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs
] call.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsRequest)
),
)
_sym_db.RegisterMessage(ListColumnSpecsRequest)
ListColumnSpecsResponse = _reflection.GeneratedProtocolMessageType(
"ListColumnSpecsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTCOLUMNSPECSRESPONSE,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Response message for
[AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
Attributes:
column_specs:
The column specs read.
next_page_token:
A token to retrieve next page of results. Pass to [ListColumnS
pecsRequest.page\_token][google.cloud.automl.v1beta1.ListColum
nSpecsRequest.page\_token] to obtain that page.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsResponse)
),
)
_sym_db.RegisterMessage(ListColumnSpecsResponse)
UpdateColumnSpecRequest = _reflection.GeneratedProtocolMessageType(
"UpdateColumnSpecRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATECOLUMNSPECREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec]
Attributes:
column_spec:
The column spec which replaces the resource on the server.
update_mask:
The update mask applies to the resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateColumnSpecRequest)
),
)
_sym_db.RegisterMessage(UpdateColumnSpecRequest)
CreateModelRequest = _reflection.GeneratedProtocolMessageType(
"CreateModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATEMODELREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel].
Attributes:
parent:
Resource name of the parent project where the model is being
created.
model:
The model to create.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelRequest)
),
)
_sym_db.RegisterMessage(CreateModelRequest)
GetModelRequest = _reflection.GeneratedProtocolMessageType(
"GetModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETMODELREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel].
Attributes:
name:
Resource name of the model.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelRequest)
),
)
_sym_db.RegisterMessage(GetModelRequest)
ListModelsRequest = _reflection.GeneratedProtocolMessageType(
"ListModelsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTMODELSREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
Attributes:
parent:
Resource name of the project, from which to list the models.
filter:
An expression for filtering the results of the request. -
``model_metadata`` - for existence of the case (e.g.
video\_classification\_model\_metadata:\*). - ``dataset_id``
- for = or !=. Some examples of using the filter are: -
``image_classification_model_metadata:*`` --> The model has
image\_classification\_model\_metadata. - ``dataset_id=5``
--> The model was created from a dataset with ID 5.
page_size:
Requested page size.
page_token:
A token identifying a page of results for the server to return
Typically obtained via [ListModelsResponse.next\_page\_token][
google.cloud.automl.v1beta1.ListModelsResponse.next\_page\_tok
en] of the previous [AutoMl.ListModels][google.cloud.automl.v1
beta1.AutoMl.ListModels] call.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsRequest)
),
)
_sym_db.RegisterMessage(ListModelsRequest)
ListModelsResponse = _reflection.GeneratedProtocolMessageType(
"ListModelsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTMODELSRESPONSE,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Response message for
[AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
Attributes:
model:
List of models in the requested page.
next_page_token:
A token to retrieve next page of results. Pass to [ListModelsR
equest.page\_token][google.cloud.automl.v1beta1.ListModelsRequ
est.page\_token] to obtain that page.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsResponse)
),
)
_sym_db.RegisterMessage(ListModelsResponse)
DeleteModelRequest = _reflection.GeneratedProtocolMessageType(
"DeleteModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETEMODELREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel].
Attributes:
name:
Resource name of the model being deleted.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteModelRequest)
),
)
_sym_db.RegisterMessage(DeleteModelRequest)
DeployModelRequest = _reflection.GeneratedProtocolMessageType(
"DeployModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DEPLOYMODELREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel].
Attributes:
model_deployment_metadata:
The per-domain specific deployment parameters.
image_object_detection_model_deployment_metadata:
Model deployment metadata specific to Image Object Detection.
image_classification_model_deployment_metadata:
Model deployment metadata specific to Image Classification.
name:
Resource name of the model to deploy.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelRequest)
),
)
_sym_db.RegisterMessage(DeployModelRequest)
UndeployModelRequest = _reflection.GeneratedProtocolMessageType(
"UndeployModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UNDEPLOYMODELREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel].
Attributes:
name:
Resource name of the model to undeploy.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelRequest)
),
)
_sym_db.RegisterMessage(UndeployModelRequest)
ExportModelRequest = _reflection.GeneratedProtocolMessageType(
"ExportModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_EXPORTMODELREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel].
Models need to be enabled for exporting, otherwise an error code will be
returned.
Attributes:
name:
Required. The resource name of the model to export.
output_config:
Required. The desired output location and configuration.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelRequest)
),
)
_sym_db.RegisterMessage(ExportModelRequest)
ExportEvaluatedExamplesRequest = _reflection.GeneratedProtocolMessageType(
"ExportEvaluatedExamplesRequest",
(_message.Message,),
dict(
DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples].
Attributes:
name:
Required. The resource name of the model whose evaluated
examples are to be exported.
output_config:
Required. The desired output location and configuration.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest)
),
)
_sym_db.RegisterMessage(ExportEvaluatedExamplesRequest)
GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType(
"GetModelEvaluationRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETMODELEVALUATIONREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation].
Attributes:
name:
Resource name for the model evaluation.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelEvaluationRequest)
),
)
_sym_db.RegisterMessage(GetModelEvaluationRequest)
ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType(
"ListModelEvaluationsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTMODELEVALUATIONSREQUEST,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Request message for
[AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
Attributes:
parent:
Resource name of the model to list the model evaluations for.
If modelId is set as "-", this will list model evaluations
from across all models of the parent location.
filter:
An expression for filtering the results of the request. -
``annotation_spec_id`` - for =, != or existence. See example
below for the last. Some examples of using the filter are:
- ``annotation_spec_id!=4`` --> The model evaluation was done
for annotation spec with ID different than 4. - ``NOT
annotation_spec_id:*`` --> The model evaluation was done for
aggregate of all annotation specs.
page_size:
Requested page size.
page_token:
A token identifying a page of results for the server to
return. Typically obtained via [ListModelEvaluationsResponse.n
ext\_page\_token][google.cloud.automl.v1beta1.ListModelEvaluat
ionsResponse.next\_page\_token] of the previous [AutoMl.ListMo
delEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEv
aluations] call.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsRequest)
),
)
_sym_db.RegisterMessage(ListModelEvaluationsRequest)
ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType(
"ListModelEvaluationsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTMODELEVALUATIONSRESPONSE,
__module__="google.cloud.automl_v1beta1.proto.service_pb2",
__doc__="""Response message for
[AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
Attributes:
model_evaluation:
List of model evaluations in the requested page.
next_page_token:
A token to retrieve next page of results. Pass to the [ListMod
elEvaluationsRequest.page\_token][google.cloud.automl.v1beta1.
ListModelEvaluationsRequest.page\_token] field of a new [AutoM
l.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.Lis
tModelEvaluations] request to obtain that page.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsResponse)
),
)
_sym_db.RegisterMessage(ListModelEvaluationsResponse)
DESCRIPTOR._options = None
_AUTOML = _descriptor.ServiceDescriptor(
name="AutoMl",
full_name="google.cloud.automl.v1beta1.AutoMl",
file=DESCRIPTOR,
index=0,
serialized_options=_b(
"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform"
),
serialized_start=3671,
serialized_end=8112,
methods=[
_descriptor.MethodDescriptor(
name="CreateDataset",
full_name="google.cloud.automl.v1beta1.AutoMl.CreateDataset",
index=0,
containing_service=None,
input_type=_CREATEDATASETREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET,
serialized_options=_b(
'\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset'
),
),
_descriptor.MethodDescriptor(
name="GetDataset",
full_name="google.cloud.automl.v1beta1.AutoMl.GetDataset",
index=1,
containing_service=None,
input_type=_GETDATASETREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET,
serialized_options=_b(
"\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}"
),
),
_descriptor.MethodDescriptor(
name="ListDatasets",
full_name="google.cloud.automl.v1beta1.AutoMl.ListDatasets",
index=2,
containing_service=None,
input_type=_LISTDATASETSREQUEST,
output_type=_LISTDATASETSRESPONSE,
serialized_options=_b(
"\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets"
),
),
_descriptor.MethodDescriptor(
name="UpdateDataset",
full_name="google.cloud.automl.v1beta1.AutoMl.UpdateDataset",
index=3,
containing_service=None,
input_type=_UPDATEDATASETREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET,
serialized_options=_b(
"\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset"
),
),
_descriptor.MethodDescriptor(
name="DeleteDataset",
full_name="google.cloud.automl.v1beta1.AutoMl.DeleteDataset",
index=4,
containing_service=None,
input_type=_DELETEDATASETREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
"\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}"
),
),
_descriptor.MethodDescriptor(
name="ImportData",
full_name="google.cloud.automl.v1beta1.AutoMl.ImportData",
index=5,
containing_service=None,
input_type=_IMPORTDATAREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\002A"</v1beta1/{name=projects/*/locations/*/datasets/*}:importData:\001*'
),
),
_descriptor.MethodDescriptor(
name="ExportData",
full_name="google.cloud.automl.v1beta1.AutoMl.ExportData",
index=6,
containing_service=None,
input_type=_EXPORTDATAREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\002A"</v1beta1/{name=projects/*/locations/*/datasets/*}:exportData:\001*'
),
),
_descriptor.MethodDescriptor(
name="GetAnnotationSpec",
full_name="google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec",
index=7,
containing_service=None,
input_type=_GETANNOTATIONSPECREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2._ANNOTATIONSPEC,
serialized_options=_b(
"\202\323\344\223\002E\022C/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
),
),
_descriptor.MethodDescriptor(
name="GetTableSpec",
full_name="google.cloud.automl.v1beta1.AutoMl.GetTableSpec",
index=8,
containing_service=None,
input_type=_GETTABLESPECREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC,
serialized_options=_b(
"\202\323\344\223\002@\022>/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}"
),
),
_descriptor.MethodDescriptor(
name="ListTableSpecs",
full_name="google.cloud.automl.v1beta1.AutoMl.ListTableSpecs",
index=9,
containing_service=None,
input_type=_LISTTABLESPECSREQUEST,
output_type=_LISTTABLESPECSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs"
),
),
_descriptor.MethodDescriptor(
name="UpdateTableSpec",
full_name="google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec",
index=10,
containing_service=None,
input_type=_UPDATETABLESPECREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC,
serialized_options=_b(
"\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec"
),
),
_descriptor.MethodDescriptor(
name="GetColumnSpec",
full_name="google.cloud.automl.v1beta1.AutoMl.GetColumnSpec",
index=11,
containing_service=None,
input_type=_GETCOLUMNSPECREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC,
serialized_options=_b(
"\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}"
),
),
_descriptor.MethodDescriptor(
name="ListColumnSpecs",
full_name="google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs",
index=12,
containing_service=None,
input_type=_LISTCOLUMNSPECSREQUEST,
output_type=_LISTCOLUMNSPECSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs"
),
),
_descriptor.MethodDescriptor(
name="UpdateColumnSpec",
full_name="google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec",
index=13,
containing_service=None,
input_type=_UPDATECOLUMNSPECREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC,
serialized_options=_b(
"\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec"
),
),
_descriptor.MethodDescriptor(
name="CreateModel",
full_name="google.cloud.automl.v1beta1.AutoMl.CreateModel",
index=14,
containing_service=None,
input_type=_CREATEMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model'
),
),
_descriptor.MethodDescriptor(
name="GetModel",
full_name="google.cloud.automl.v1beta1.AutoMl.GetModel",
index=15,
containing_service=None,
input_type=_GETMODELREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL,
serialized_options=_b(
"\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}"
),
),
_descriptor.MethodDescriptor(
name="ListModels",
full_name="google.cloud.automl.v1beta1.AutoMl.ListModels",
index=16,
containing_service=None,
input_type=_LISTMODELSREQUEST,
output_type=_LISTMODELSRESPONSE,
serialized_options=_b(
"\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models"
),
),
_descriptor.MethodDescriptor(
name="DeleteModel",
full_name="google.cloud.automl.v1beta1.AutoMl.DeleteModel",
index=17,
containing_service=None,
input_type=_DELETEMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
"\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}"
),
),
_descriptor.MethodDescriptor(
name="DeployModel",
full_name="google.cloud.automl.v1beta1.AutoMl.DeployModel",
index=18,
containing_service=None,
input_type=_DEPLOYMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*'
),
),
_descriptor.MethodDescriptor(
name="UndeployModel",
full_name="google.cloud.automl.v1beta1.AutoMl.UndeployModel",
index=19,
containing_service=None,
input_type=_UNDEPLOYMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*'
),
),
_descriptor.MethodDescriptor(
name="ExportModel",
full_name="google.cloud.automl.v1beta1.AutoMl.ExportModel",
index=20,
containing_service=None,
input_type=_EXPORTMODELREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*'
),
),
_descriptor.MethodDescriptor(
name="ExportEvaluatedExamples",
full_name="google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples",
index=21,
containing_service=None,
input_type=_EXPORTEVALUATEDEXAMPLESREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=_b(
'\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*'
),
),
_descriptor.MethodDescriptor(
name="GetModelEvaluation",
full_name="google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation",
index=22,
containing_service=None,
input_type=_GETMODELEVALUATIONREQUEST,
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION,
serialized_options=_b(
"\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}"
),
),
_descriptor.MethodDescriptor(
name="ListModelEvaluations",
full_name="google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations",
index=23,
containing_service=None,
input_type=_LISTMODELEVALUATIONSREQUEST,
output_type=_LISTMODELEVALUATIONSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations"
),
),
],
)
_sym_db.RegisterServiceDescriptor(_AUTOML)
DESCRIPTOR.services_by_name["AutoMl"] = _AUTOML
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
tepickering/mmtwfs | mmtwfs/f9topbox.py | 2 | 4738 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# coding=utf-8
"""
Classes and utilities for controlling components of the MMTO's F/9 topbox
"""
import socket
import logging
import logging.handlers
from .utils import srvlookup
log = logging.getLogger("F/9 TopBox")
log.setLevel(logging.INFO)
__all__ = ['CompMirror']
class CompMirror(object):
"""
Defines how to query and command the comparison mirror within the F/9 topbox
"""
def __init__(self, host=None, port=None):
# get host/port for topbox communication. if not specified, use srvlookup to get from MMTO DNS.
if host is None and port is None:
self.host, self.port = srvlookup("_lampbox._tcp.mmto.arizona.edu")
else:
self.host = host
self.port = port
# use this boolean to determine if commands are actually to be sent
self.connected = False
def connect(self):
"""
Set state to connected so that commands will be sent
"""
if self.host is not None and not self.connected:
sock = self.netsock()
if sock is None:
self.connected = False
else:
log.info("Successfully connected to F/9 topbox.")
self.connected = True
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def disconnect(self):
"""
Set state to disconnected
"""
self.connected = False
def netsock(self):
"""
Set up socket for communicating with the topbox
"""
try:
topbox_server = (self.host, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(topbox_server)
except Exception as e:
log.error(f"Error connecting to topbox server. Remaining disconnected...: {e}")
return None
return sock
def get_mirror(self):
"""
Query current status of the comparison mirror
"""
state = "N/A"
if self.connected:
sock = self.netsock()
sock.sendall(b"get_mirror\n")
result = sock.recv(4096).decode('utf8')
sock.shutdown(socket.SHUT_RDWR)
sock.close()
if "OUT" in result:
state = "out"
log.debug("Comparison mirror is OUT.")
if "IN" in result:
state = "in"
log.debug("Comparison mirror is IN.")
if "BUSY" in result:
state = "busy"
log.debug("Comparison mirror is BUSY.")
if "X" in result:
log.error("Error querying comparison mirror status.")
else:
log.warning("Topbox not connected. Can't get comparison mirror status.")
return state
def _move_mirror(self, cmd):
"""
Send network command to topbox to move the comparison mirror in or out
"""
state = "N/A"
if "in" in cmd or "out" in cmd:
if self.connected:
sock = self.netsock()
netcmd = f"set_mirror_exclusive {cmd}\n"
sock.sendall(netcmd.encode("utf8"))
result = sock.recv(4096).decode('utf8')
sock.shutdown(socket.SHUT_RDWR)
sock.close()
if "X" in result:
log.error(f"Error sending comparison mirror command: {cmd}.")
if "1" in result:
log.error(f"Comparison mirror command, {cmd}, timed out.")
if "0" in result:
log.info(f"Comparison mirror successfully moved {cmd}.")
state = cmd
else:
log.warning("Topbox not connected. Can't send motion command.")
else:
log.error(f"Invalid comparison mirror command, {cmd}, send to topbox. Must be 'in' or 'out'")
return state
def mirror_in(self):
"""
Sends command to move comparison mirror in
"""
state = self._move_mirror("in")
return state
def mirror_out(self):
    """Drive the comparison mirror out; return the resulting state string."""
    return self._move_mirror("out")
def toggle_mirror(self):
    """Flip the comparison mirror: in -> out, out -> in.

    Any other reported state (e.g. "busy" or "N/A") is logged and
    returned unchanged.
    """
    status = self.get_mirror()
    if status == "in":
        return self.mirror_out()
    if status == "out":
        return self.mirror_in()
    log.warning(f"Cannot toggle comparison mirror status, {status}.")
    return status
| gpl-3.0 |
Francis-Liu/animated-broccoli | nova/tests/unit/virt/vmwareapi/test_vim_util.py | 41 | 1539 | # Copyright (c) 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import vim_util
class VMwareVIMUtilTestCase(test.NoDBTestCase):
    """Tests for the vim_util helper functions against the fake VIM."""

    def setUp(self):
        super(VMwareVIMUtilTestCase, self).setUp()
        fake.reset()
        self.vim = fake.FakeVim()
        self.vim._login()

    def test_get_inner_objects(self):
        # Renamed from 'property' to avoid shadowing the Python builtin.
        properties = ['summary.name']
        # Get the fake datastores directly from the cluster.
        cluster_refs = fake._get_object_refs('ClusterComputeResource')
        cluster = fake._get_object(cluster_refs[0])
        expected_ds = cluster.datastore.ManagedObjectReference
        # Get the fake datastores using the inner-objects utility method.
        result = vim_util.get_inner_objects(
            self.vim, cluster_refs[0], 'datastore', 'Datastore', properties)
        datastores = [oc.obj for oc in result.objects]
        self.assertEqual(expected_ds, datastores)
| apache-2.0 |
gangadharkadam/office_erp | erpnext/selling/doctype/sms_center/sms_center.py | 3 | 2964 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr
from frappe import msgprint, _
from frappe.model.document import Document
from erpnext.setup.doctype.sms_settings.sms_settings import send_sms
class SMSCenter(Document):
    """Virtual doctype that builds receiver lists from Contacts, Leads,
    Employees or Sales Persons and dispatches an SMS to them."""

    def _escape(self, value):
        """Escape single quotes so *value* can sit inside a SQL string literal.

        The original code used value.replace("'", "\'"), but in Python
        "\'" is the same string as "'", so nothing was escaped at all and
        a name containing a quote broke (or injected into) the query.
        """
        return value.replace("'", "\\'")

    def create_receiver_list(self):
        """Populate self.receiver_list with one "name - mobile_no" line per
        receiver, according to the audience chosen in self.send_to."""
        rec, where_clause = '', ''
        if self.send_to == 'All Customer Contact':
            where_clause = self.customer and " and customer = '%s'" % \
                self._escape(self.customer) or " and ifnull(customer, '') != ''"
        if self.send_to == 'All Supplier Contact':
            where_clause = self.supplier and \
                " and ifnull(is_supplier, 0) = 1 and supplier = '%s'" % \
                self._escape(self.supplier) or " and ifnull(supplier, '') != ''"
        if self.send_to == 'All Sales Partner Contact':
            where_clause = self.sales_partner and \
                " and ifnull(is_sales_partner, 0) = 1 and sales_partner = '%s'" % \
                self._escape(self.sales_partner) or " and ifnull(sales_partner, '') != ''"

        if self.send_to in ['All Contact', 'All Customer Contact',
                'All Supplier Contact', 'All Sales Partner Contact']:
            rec = frappe.db.sql("""select CONCAT(ifnull(first_name,''), ' ', ifnull(last_name,'')),
                mobile_no from `tabContact` where ifnull(mobile_no,'')!='' and
                docstatus != 2 %s""" % where_clause)
        elif self.send_to == 'All Lead (Open)':
            rec = frappe.db.sql("""select lead_name, mobile_no from `tabLead` where
                ifnull(mobile_no,'')!='' and docstatus != 2 and status='Open'""")
        elif self.send_to == 'All Employee (Active)':
            where_clause = self.department and " and department = '%s'" % \
                self._escape(self.department) or ""
            where_clause += self.branch and " and branch = '%s'" % \
                self._escape(self.branch) or ""
            rec = frappe.db.sql("""select employee_name, cell_number from
                `tabEmployee` where status = 'Active' and docstatus < 2 and
                ifnull(cell_number,'')!='' %s""" % where_clause)
        elif self.send_to == 'All Sales Person':
            rec = frappe.db.sql("""select sales_person_name, mobile_no from
                `tabSales Person` where docstatus!=2 and ifnull(mobile_no,'')!=''""")

        # Join at the end instead of repeated string concatenation.
        self.receiver_list = ''.join(
            '%s - %s\n' % (d[0], d[1]) for d in rec)

    def get_receiver_nos(self):
        """Parse self.receiver_list back into a list of bare phone numbers."""
        receiver_nos = []
        if self.receiver_list:
            for d in self.receiver_list.split('\n'):
                receiver_no = d
                if '-' in d:
                    # rsplit so names that themselves contain '-' (e.g.
                    # "Jean-Paul - 123") don't truncate the number; the
                    # original split('-')[1] returned the wrong chunk.
                    receiver_no = receiver_no.rsplit('-', 1)[1]
                if receiver_no.strip():
                    receiver_nos.append(cstr(receiver_no).strip())
        else:
            msgprint(_("Receiver List is empty. Please create Receiver List"))
        return receiver_nos

    def send_sms(self):
        """Validate the message, then dispatch it to every parsed receiver."""
        if not self.message:
            msgprint(_("Please enter message before sending"))
            return
        receiver_list = self.get_receiver_nos()
        if receiver_list:
            send_sms(receiver_list, cstr(self.message))
| agpl-3.0 |
AustinRoy7/Pomodoro-timer | venv/Lib/keyword.py | 162 | 2211 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]

# NOTE: everything between the "#--start keywords--" and "#--end keywords--"
# marker comments is machine-generated by main() below; do not edit the
# entries by hand and do not change the marker lines themselves.
kwlist = [
#--start keywords--
        'False',
        'None',
        'True',
        'and',
        'as',
        'assert',
        'break',
        'class',
        'continue',
        'def',
        'del',
        'elif',
        'else',
        'except',
        'finally',
        'for',
        'from',
        'global',
        'if',
        'import',
        'in',
        'is',
        'lambda',
        'nonlocal',
        'not',
        'or',
        'pass',
        'raise',
        'return',
        'try',
        'while',
        'with',
        'yield',
#--end keywords--
        ]

# Bound method of a frozenset: gives O(1) membership tests for iskeyword().
iskeyword = frozenset(kwlist).__contains__
def main():
    """Regenerate the keyword list in *optfile* from the C grammar source."""
    import sys, re
    args = sys.argv[1:]
    iptfile = args and args[0] or "Python/graminit.c"
    if len(args) > 1:
        optfile = args[1]
    else:
        optfile = "Lib/keyword.py"

    # Load the output skeleton from the target, taking care to preserve
    # its newline convention.
    with open(optfile, newline='') as fp:
        skeleton = fp.readlines()
    nl = skeleton[0][len(skeleton[0].strip()):] if skeleton else '\n'

    # Scan the C source for quoted keyword names.
    keyword_lines = []
    strprog = re.compile('"([^"]+)"')
    with open(iptfile) as fp:
        for line in fp:
            if '{1, "' in line:
                match = strprog.search(line)
                if match:
                    keyword_lines.append(" '" + match.group(1) + "'," + nl)
    keyword_lines.sort()

    # Splice the sorted keywords between the marker comments.
    try:
        start = skeleton.index("#--start keywords--" + nl) + 1
        end = skeleton.index("#--end keywords--" + nl)
        skeleton[start:end] = keyword_lines
    except ValueError:
        sys.stderr.write("target does not contain format markers\n")
        sys.exit(1)

    # Write the regenerated file back out.
    with open(optfile, 'w', newline='') as fp:
        fp.writelines(skeleton)
if __name__ == "__main__":
    # Run the regeneration script when invoked directly.
    main()
| mit |
direvus/ansible | lib/ansible/module_utils/json_utils.py | 89 | 3293 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
for line in trailing_junk:
if line.strip():
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
break
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
| gpl-3.0 |
catapult-project/catapult | dashboard/dashboard/update_test_suites.py | 3 | 6804 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for fetching and updating a list of top-level tests."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import logging
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from dashboard.common import datastore_hooks
from dashboard.common import descriptor
from dashboard.common import request_handler
from dashboard.common import stored_object
from dashboard.common import namespaced_stored_object
from dashboard.common import utils
from dashboard.models import graph_data
# Cache key for the legacy dict-shaped suite listing built by
# _CreateTestSuiteDict().
_LIST_SUITES_CACHE_KEY = 'list_tests_get_test_suites'
# Cache key for the flat, sorted list of suite names built by
# _ListTestSuites().
TEST_SUITES_2_CACHE_KEY = 'test_suites_2'
@ndb.synctasklet
def FetchCachedTestSuites2():
    """Synchronous wrapper around FetchCachedTestSuites2Async."""
    suites = yield FetchCachedTestSuites2Async()
    raise ndb.Return(suites)
@ndb.tasklet
def FetchCachedTestSuites2Async():
    """Tasklet yielding the cached flat list of test suite names."""
    suites = yield namespaced_stored_object.GetAsync(TEST_SUITES_2_CACHE_KEY)
    raise ndb.Return(suites)
def FetchCachedTestSuites():
    """Fetches cached test suite data, rebuilding the cache when unset."""
    suites = namespaced_stored_object.Get(_LIST_SUITES_CACHE_KEY)
    if suites is None:
        # If the cached test suite list is not set, update it before
        # fetching. This is for convenience when testing sending of data
        # to a local instance.
        UpdateTestSuites(datastore_hooks.GetNamespace())
        suites = namespaced_stored_object.Get(_LIST_SUITES_CACHE_KEY)
    return suites
class UpdateTestSuitesHandler(request_handler.RequestHandler):
    """Request handler that refreshes the cached test suites info."""

    def get(self):
        """GET behaves exactly like POST for this handler."""
        self.post()

    def post(self):
        """Refreshes the cached test suites list for one visibility tier."""
        if self.request.get('internal_only') == 'true':
            logging.info('Going to update internal-only test suites data.')
            # Elevate the request so internal-only entities are visible.
            datastore_hooks.SetPrivilegedRequest()
            UpdateTestSuites(datastore_hooks.INTERNAL)
        else:
            logging.info('Going to update externally-visible test suites data.')
            UpdateTestSuites(datastore_hooks.EXTERNAL)
def UpdateTestSuites(permissions_namespace):
    """Rebuilds both cached test-suite structures for the given namespace."""
    logging.info('Updating test suite data for: %s', permissions_namespace)
    suite_dict = _CreateTestSuiteDict()
    dict_key = namespaced_stored_object.NamespaceKey(
        _LIST_SUITES_CACHE_KEY, permissions_namespace)
    stored_object.Set(dict_key, suite_dict)
    list_key = namespaced_stored_object.NamespaceKey(
        TEST_SUITES_2_CACHE_KEY, permissions_namespace)
    stored_object.Set(list_key, _ListTestSuites())
@ndb.tasklet
def _ListTestSuitesAsync(test_suites, partial_tests, parent_test=None):
    """Tasklet that accumulates test suite names into *test_suites*.

    Some test suites are composed of multiple test path components. See
    Descriptor. When a TestMetadata key doesn't contain enough test path
    components to compose a full test suite, add its key to partial_tests so
    that the caller can run another query with parent_test.

    Args:
        test_suites: set collecting complete suite names.
        partial_tests: set collecting keys that need a deeper query, or
            None (then unparseable paths are logged as errors instead).
        parent_test: optional TestMetadata key to scope the query to.
    """
    query = graph_data.TestMetadata.query()
    query = query.filter(graph_data.TestMetadata.parent_test == parent_test)
    # "== False" (not "is False") is required: ndb builds the datastore
    # filter by overloading the equality operator.
    query = query.filter(graph_data.TestMetadata.deprecated == False)
    keys = yield query.fetch_async(keys_only=True)
    for key in keys:
        test_path = utils.TestPath(key)
        desc = yield descriptor.Descriptor.FromTestPathAsync(test_path)
        if desc.test_suite:
            test_suites.add(desc.test_suite)
        elif partial_tests is not None:
            partial_tests.add(key)
        else:
            logging.error('Unable to parse "%s"', test_path)
@ndb.synctasklet
def _ListTestSuites():
    """Returns the sorted list of all non-deprecated test suite names."""
    test_suites = set()
    partial_tests = set()
    yield _ListTestSuitesAsync(test_suites, partial_tests)
    # Resolve keys whose paths were too short to name a full suite.
    yield [_ListTestSuitesAsync(test_suites, None, key) for key in partial_tests]
    raise ndb.Return(sorted(test_suites))
def _CreateTestSuiteDict():
    """Returns a dictionary with information about top-level tests.

    This method is used to generate the global JavaScript variable
    TEST_SUITES for the report page, which initially populates the select
    menus. Note that there will be multiple top level TestMetadata
    entities for each suite name, since each suite name appears under
    multiple bots.

    Returns:
        A dictionary of the form:
            {
                'my_test_suite': {
                    'mas': {'ChromiumPerf': {'mac': False, 'linux': False}},
                    'dep': True,
                    'des': 'A description.'
                },
                ...
            }
        Where 'mas', 'dep', and 'des' are abbreviations for 'masters',
        'deprecated', and 'description', respectively.
    """
    suites_by_name = collections.defaultdict(lambda: {'suites': []})
    for suite in _FetchSuites():
        suites_by_name[suite.test_name]['suites'].append(suite)

    # suites_by_name now maps suite name -> {'suites': [entities]}.
    # Fill in masters/deprecated/description for each name.
    for name, info in suites_by_name.items():
        entries = info['suites']
        info['mas'] = {}
        if entries:
            if entries[0].description:
                info['des'] = entries[0].description
            if all(entry.deprecated for entry in entries):
                info['dep'] = True
        for entry in entries:
            bots = info['mas'].setdefault(entry.master_name, {})
            if entry.bot_name not in bots:
                bots[entry.bot_name] = entry.deprecated
        # The entity list is no longer needed; drop it to free memory.
        del suites_by_name[name]['suites']
    return dict(suites_by_name)
def _FetchSuites():
    """Yields top-level TestMetadata entities with 'deprecated' and
    'description' projections, paging through the query 2000 at a time."""
    suite_query = graph_data.TestMetadata.query(
        graph_data.TestMetadata.parent_test == None)
    cursor = None
    more = True
    try:
        while more:
            page, cursor, more = suite_query.fetch_page(
                2000,
                start_cursor=cursor,
                projection=['deprecated', 'description'],
                use_cache=False,
                use_memcache=False)
            for suite in page:
                yield suite
    except datastore_errors.Timeout:
        # Give up on timeout; callers get whatever was yielded so far.
        logging.error('Timeout fetching test suites.')
        return
def _GetTestSubPath(key):
"""Gets the part of the test path after the suite, for the given test key.
For example, for a test with the test path 'MyMaster/bot/my_suite/foo/bar',
this should return 'foo/bar'.
Args:
key: The key of the TestMetadata entity.
Returns:
Slash-separated test path part after master/bot/suite.
"""
return '/'.join(p for p in key.string_id().split('/')[3:])
| bsd-3-clause |
sfriesel/libcloud | libcloud/test/compute/test_base.py | 42 | 4305 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.base import Response
from libcloud.common.base import Connection, ConnectionKey, ConnectionUserAndKey
from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver, StorageVolume
from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
from libcloud.compute.types import StorageVolumeState
from libcloud.test import MockResponse # pylint: disable-msg=E0611
class FakeDriver(object):
    """Minimal stand-in driver exposing only the ``type`` attribute."""
    type = 0
class BaseTests(unittest.TestCase):
    """Smoke tests: the base compute objects can be constructed."""

    def test_base_node(self):
        driver = FakeDriver()
        Node(id=0, name=0, state=0, public_ips=0, private_ips=0,
             driver=driver)

    def test_base_node_size(self):
        driver = FakeDriver()
        NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0,
                 driver=driver)

    def test_base_node_image(self):
        NodeImage(id=0, name=0, driver=FakeDriver())

    def test_base_storage_volume(self):
        StorageVolume(id="0", name="0", size=10, driver=FakeDriver(),
                      state=StorageVolumeState.AVAILABLE)

    def test_base_response(self):
        Response(MockResponse(status=200, body='foo'), ConnectionKey('foo'))

    def test_base_node_driver(self):
        NodeDriver('foo')

    def test_base_connection_key(self):
        ConnectionKey('foo')

    def test_base_connection_userkey(self):
        ConnectionUserAndKey('foo', 'bar')

    def test_base_connection_timeout(self):
        Connection(timeout=10)
class TestValidateAuth(unittest.TestCase):
    """Tests for NodeDriver._get_and_check_auth credential validation."""

    def test_get_auth_ssh(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['ssh_key']}
        credentials = NodeAuthSSHKey('pubkey...')
        self.assertEqual(credentials, driver._get_and_check_auth(credentials))

    def test_get_auth_ssh_but_given_password(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['ssh_key']}
        credentials = NodeAuthPassword('password')
        self.assertRaises(LibcloudError,
                          driver._get_and_check_auth, credentials)

    def test_get_auth_password(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['password']}
        credentials = NodeAuthPassword('password')
        self.assertEqual(credentials, driver._get_and_check_auth(credentials))

    def test_get_auth_password_but_given_ssh_key(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['password']}
        credentials = NodeAuthSSHKey('publickey')
        self.assertRaises(LibcloudError,
                          driver._get_and_check_auth, credentials)

    def test_get_auth_default_ssh_key(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['ssh_key']}
        self.assertEqual(None, driver._get_and_check_auth(None))

    def test_get_auth_default_password(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['password']}
        generated = driver._get_and_check_auth(None)
        self.assertTrue(isinstance(generated, NodeAuthPassword))

    def test_get_auth_default_no_feature(self):
        driver = NodeDriver('foo')
        self.assertEqual(None, driver._get_and_check_auth(None))

    def test_get_auth_generates_password_but_given_nonsense(self):
        driver = NodeDriver('foo')
        driver.features = {'create_node': ['generates_password']}
        self.assertRaises(LibcloudError,
                          driver._get_and_check_auth, "nonsense")

    def test_get_auth_no_features_but_given_nonsense(self):
        driver = NodeDriver('foo')
        self.assertRaises(LibcloudError,
                          driver._get_and_check_auth, "nonsense")
if __name__ == '__main__':
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
| apache-2.0 |
viger/docker | proxy/proxy/code/default/python27/1.0/lib/win32/cffi/model.py | 43 | 21110 | import types, sys
import weakref
from .lock import allocate_lock
# Type-qualifier bit flags.
Q_CONST = 0x01
Q_RESTRICT = 0x02
Q_VOLATILE = 0x04


def qualify(quals, replace_with):
    """Prefix *replace_with* with the C qualifier keywords set in *quals*."""
    text = replace_with
    if quals & Q_CONST:
        text = ' const ' + text.lstrip()
    if quals & Q_VOLATILE:
        text = ' volatile ' + text.lstrip()
    if quals & Q_RESTRICT:
        # It seems that __restrict is supported by gcc and msvc.
        # If you hit some different compiler, add a #define in
        # _cffi_include.h for it (and in its copies, documented there)
        text = ' __restrict ' + text.lstrip()
    return text
class BaseTypeByIdentity(object):
    """Base class for C type models, compared by object identity.

    Subclasses set ``c_name_with_marker``: the C name of the type with a
    single '&' marking where a declared variable name would be inserted
    (this keeps C's inside-out declarator syntax manageable).
    """
    is_array_type = False
    is_raw_function = False

    def get_c_name(self, replace_with='', context='a C file', quals=0):
        """Return the C name with *replace_with* substituted at the marker.

        *quals* is a bitmask of Q_CONST/Q_RESTRICT/Q_VOLATILE applied to
        the substituted text; *context* only appears in error messages.
        """
        result = self.c_name_with_marker
        assert result.count('&') == 1
        # some logic duplication with ffi.getctype()... :-(
        replace_with = replace_with.strip()
        if replace_with:
            if replace_with.startswith('*') and '&[' in result:
                # '*name' next to an array needs parentheses: (*name)[n]
                replace_with = '(%s)' % replace_with
            elif not replace_with[0] in '[(':
                replace_with = ' ' + replace_with
        replace_with = qualify(quals, replace_with)
        result = result.replace('&', replace_with)
        if '$' in result:
            # '$' appears in the placeholder names of unknown types.
            from .ffiplatform import VerificationError
            raise VerificationError(
                "cannot generate '%s' in %s: unknown type name"
                % (self._get_c_name(), context))
        return result

    def _get_c_name(self):
        # The C name with no declared variable: just drop the marker.
        return self.c_name_with_marker.replace('&', '')

    def has_c_name(self):
        # True when the type has a real C name (no '$' placeholder in it).
        return '$' not in self._get_c_name()

    def is_integer_type(self):
        return False

    def get_cached_btype(self, ffi, finishlist, can_delay=False):
        """Return the backend type object, building and caching it if needed."""
        try:
            BType = ffi._cached_btypes[self]
        except KeyError:
            BType = self.build_backend_type(ffi, finishlist)
            BType2 = ffi._cached_btypes.setdefault(self, BType)
            assert BType2 is BType
        return BType

    def __repr__(self):
        return '<%s>' % (self._get_c_name(),)

    def _get_items(self):
        # The (attribute, value) pairs that define this type's identity,
        # driven by the subclass's _attrs_ tuple.
        return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
    """C type model with structural equality based on ``_attrs_``."""

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self._get_items() == other._get_items()

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
    """Model of the C ``void`` type."""
    _attrs_ = ()

    def __init__(self):
        self.c_name_with_marker = 'void&'

    def build_backend_type(self, ffi, finishlist):
        return global_cache(self, ffi, 'new_void_type')


# Shared singleton instance used throughout the module.
void_type = VoidType()
class BasePrimitiveType(BaseType):
    """Common marker base for primitive and primitive-like C types."""
    pass
class PrimitiveType(BasePrimitiveType):
    """Model of a named primitive C type (int, double, size_t, ...)."""
    _attrs_ = ('name',)

    # Maps each accepted type name to its category:
    # 'c' = character, 'i' = integer, 'f' = floating point.
    ALL_PRIMITIVE_TYPES = {
        'char':               'c',
        'short':              'i',
        'int':                'i',
        'long':               'i',
        'long long':          'i',
        'signed char':        'i',
        'unsigned char':      'i',
        'unsigned short':     'i',
        'unsigned int':       'i',
        'unsigned long':      'i',
        'unsigned long long': 'i',
        'float':              'f',
        'double':             'f',
        'long double':        'f',
        '_Bool':              'i',
        # the following types are not primitive in the C sense
        'wchar_t':            'c',
        'int8_t':             'i',
        'uint8_t':            'i',
        'int16_t':            'i',
        'uint16_t':           'i',
        'int32_t':            'i',
        'uint32_t':           'i',
        'int64_t':            'i',
        'uint64_t':           'i',
        'int_least8_t':       'i',
        'uint_least8_t':      'i',
        'int_least16_t':      'i',
        'uint_least16_t':     'i',
        'int_least32_t':      'i',
        'uint_least32_t':     'i',
        'int_least64_t':      'i',
        'uint_least64_t':     'i',
        'int_fast8_t':        'i',
        'uint_fast8_t':       'i',
        'int_fast16_t':       'i',
        'uint_fast16_t':      'i',
        'int_fast32_t':       'i',
        'uint_fast32_t':      'i',
        'int_fast64_t':       'i',
        'uint_fast64_t':      'i',
        'intptr_t':           'i',
        'uintptr_t':          'i',
        'intmax_t':           'i',
        'uintmax_t':          'i',
        'ptrdiff_t':          'i',
        'size_t':             'i',
        'ssize_t':            'i',
        }

    def __init__(self, name):
        assert name in self.ALL_PRIMITIVE_TYPES
        self.name = name
        self.c_name_with_marker = name + '&'

    def is_char_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'

    def is_integer_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'

    def is_float_type(self):
        return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'

    def build_backend_type(self, ffi, finishlist):
        return global_cache(self, ffi, 'new_primitive_type', self.name)
class UnknownIntegerType(BasePrimitiveType):
    """An integer typedef whose size/signedness is resolved at compile time."""
    _attrs_ = ('name',)

    def __init__(self, name):
        self.name = name
        self.c_name_with_marker = name + '&'

    def is_integer_type(self):
        return True

    def build_backend_type(self, ffi, finishlist):
        # There is no backend representation before compilation.
        raise NotImplementedError("integer type '%s' can only be used after "
                                  "compilation" % self.name)
class UnknownFloatType(BasePrimitiveType):
    """A floating-point typedef whose layout is resolved at compile time."""
    _attrs_ = ('name', )

    def __init__(self, name):
        self.name = name
        self.c_name_with_marker = name + '&'

    def build_backend_type(self, ffi, finishlist):
        # There is no backend representation before compilation.
        raise NotImplementedError("float type '%s' can only be used after "
                                  "compilation" % self.name)
class BaseFunctionType(BaseType):
    """Common base for function and pointer-to-function type models."""
    _attrs_ = ('args', 'result', 'ellipsis', 'abi')

    def __init__(self, args, result, ellipsis, abi=None):
        self.args = args
        self.result = result
        self.ellipsis = ellipsis
        self.abi = abi
        #
        # Build "result(arg1, arg2, ...)" with the subclass's pattern and
        # '&' marking where a declared name would go.
        arg_names = [arg._get_c_name() for arg in self.args]
        if self.ellipsis:
            arg_names.append('...')
        if not arg_names:
            arg_names = ['void']
        replace_with = self._base_pattern % (', '.join(arg_names),)
        if abi is not None:
            # Inject the calling-convention keyword after the opening paren.
            replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
        self.c_name_with_marker = (
            self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
    # Corresponds to a C type like 'int(int)', which is the C type of
    # a function, but not a pointer-to-function. The backend has no
    # notion of such a type; it's used temporarily by parsing.
    _base_pattern = '(&)(%s)'
    is_raw_function = True

    def build_backend_type(self, ffi, finishlist):
        from . import api
        raise api.CDefError("cannot render the type %r: it is a function "
                            "type, not a pointer-to-function type" % (self,))

    def as_function_pointer(self):
        """Return the equivalent pointer-to-function type."""
        return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
class FunctionPtrType(BaseFunctionType):
    """A pointer-to-function type, e.g. 'int(*)(int)'."""
    _base_pattern = '(*&)(%s)'

    def build_backend_type(self, ffi, finishlist):
        result = self.result.get_cached_btype(ffi, finishlist)
        args = [tp.get_cached_btype(ffi, finishlist) for tp in self.args]
        abi_args = ()
        if self.abi == "__stdcall":
            if not self.ellipsis:    # __stdcall ignored for variadic funcs
                try:
                    abi_args = (ffi._backend.FFI_STDCALL,)
                except AttributeError:
                    pass
        return global_cache(self, ffi, 'new_function_type',
                            tuple(args), result, self.ellipsis, *abi_args)

    def as_raw_function(self):
        """Return the corresponding plain (non-pointer) function type."""
        return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
class PointerType(BaseType):
    """A pointer type, with optional const/volatile/restrict qualifiers."""
    _attrs_ = ('totype', 'quals')

    def __init__(self, totype, quals=0):
        self.totype = totype
        self.quals = quals
        suffix = qualify(quals, " *&")
        if totype.is_array_type:
            # Pointer-to-array needs parentheses: (*name)[n]
            suffix = "(%s)" % (suffix.lstrip(),)
        self.c_name_with_marker = totype.c_name_with_marker.replace('&', suffix)

    def build_backend_type(self, ffi, finishlist):
        BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
        return global_cache(self, ffi, 'new_pointer_type', BItem)
# Shared singleton: plain 'void *'.
voidp_type = PointerType(void_type)


def ConstPointerType(totype):
    """Shortcut for a 'const'-qualified pointer to *totype*."""
    return PointerType(totype, Q_CONST)


# Shared singleton: 'const void *'.
const_voidp_type = ConstPointerType(void_type)
class NamedPointerType(PointerType):
    """A pointer type hidden behind a typedef name."""
    _attrs_ = ('totype', 'name')

    def __init__(self, totype, name, quals=0):
        PointerType.__init__(self, totype, quals)
        self.name = name
        # The typedef name replaces the structural pointer spelling.
        self.c_name_with_marker = name + '&'
class ArrayType(BaseType):
    """An array type: fixed length, open length (None), or '...'."""
    _attrs_ = ('item', 'length')
    is_array_type = True

    def __init__(self, item, length):
        self.item = item
        self.length = length
        #
        if length is None:
            brackets = '&[]'
        elif length == '...':
            brackets = '&[/*...*/]'
        else:
            brackets = '&[%s]' % length
        self.c_name_with_marker = (
            self.item.c_name_with_marker.replace('&', brackets))

    def resolve_length(self, newlength):
        """Return a copy of this array type with a concrete length."""
        return ArrayType(self.item, newlength)

    def build_backend_type(self, ffi, finishlist):
        if self.length == '...':
            from . import api
            raise api.CDefError("cannot render the type %r: unknown length" %
                                (self,))
        self.item.get_cached_btype(ffi, finishlist)    # force the item BType
        BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
        return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)


# Shared singleton: open-length 'char[]'.
char_array_type = ArrayType(PrimitiveType('char'), None)
class StructOrUnionOrEnum(BaseTypeByIdentity):
    """Shared behaviour for struct, union and enum type models."""
    _attrs_ = ('name',)
    forcename = None

    def build_c_name_with_marker(self):
        # Either the forced name or "<kind> <name>", e.g. "struct foo".
        name = self.forcename or '%s %s' % (self.kind, self.name)
        self.c_name_with_marker = name + '&'

    def force_the_name(self, forcename):
        """Override the C spelling (e.g. for typedef'd anonymous types)."""
        self.forcename = forcename
        self.build_c_name_with_marker()

    def get_official_name(self):
        assert self.c_name_with_marker.endswith('&')
        return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
fixedlayout = None
completed = 0
partial = False
packed = False
def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
self.fldquals = fldquals
self.build_c_name_with_marker()
def has_anonymous_struct_fields(self):
if self.fldtypes is None:
return False
for name, type in zip(self.fldnames, self.fldtypes):
if name == '' and isinstance(type, StructOrUnion):
return True
return False
def enumfields(self):
fldquals = self.fldquals
if fldquals is None:
fldquals = (0,) * len(self.fldnames)
for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
self.fldbitsize, fldquals):
if name == '' and isinstance(type, StructOrUnion):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
yield (name, type, bitsize, quals)
def force_flatten(self):
# force the struct or union to have a declaration that lists
# directly all fields returned by enumfields(), flattening
# nested anonymous structs/unions.
names = []
types = []
bitsizes = []
fldquals = []
for name, type, bitsize, quals in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
fldquals.append(quals)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
self.fldquals = tuple(fldquals)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
can_delay)
if not can_delay:
self.finish_backend_type(ffi, finishlist)
return BType
def finish_backend_type(self, ffi, finishlist):
if self.completed:
if self.completed != 2:
raise NotImplementedError("recursive structure declaration "
"for '%s'" % (self.name,))
return
BType = ffi._cached_btypes[self]
#
self.completed = 1
#
if self.fldtypes is None:
pass # not completing it: it's an opaque struct
#
elif self.fixedlayout is None:
fldtypes = [tp.get_cached_btype(ffi, finishlist)
for tp in self.fldtypes]
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
sflags = 0
if self.packed:
sflags = 8 # SF_PACKED
ffi._backend.complete_struct_or_union(BType, lst, self,
-1, -1, sflags)
#
else:
fldtypes = []
fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
for i in range(len(self.fldnames)):
fsize = fieldsize[i]
ftype = self.fldtypes[i]
#
if isinstance(ftype, ArrayType) and ftype.length == '...':
# fix the length to match the total size
BItemType = ftype.item.get_cached_btype(ffi, finishlist)
nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
if nrest != 0:
self._verification_error(
"field '%s.%s' has a bogus size?" % (
self.name, self.fldnames[i] or '{}'))
ftype = ftype.resolve_length(nlen)
self.fldtypes = (self.fldtypes[:i] + (ftype,) +
self.fldtypes[i+1:])
#
BFieldType = ftype.get_cached_btype(ffi, finishlist)
if isinstance(ftype, ArrayType) and ftype.length is None:
assert fsize == 0
else:
bitemsize = ffi.sizeof(BFieldType)
if bitemsize != fsize:
self._verification_error(
"field '%s.%s' is declared as %d bytes, but is "
"really %d bytes" % (self.name,
self.fldnames[i] or '{}',
bitemsize, fsize))
fldtypes.append(BFieldType)
#
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
ffi._backend.complete_struct_or_union(BType, lst, self,
totalsize, totalalignment)
self.completed = 2
def _verification_error(self, msg):
from .ffiplatform import VerificationError
raise VerificationError(msg)
def check_not_partial(self):
if self.partial and self.fixedlayout is None:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
    def build_backend_type(self, ffi, finishlist):
        """Create the (still incomplete) backend type, queueing *self* on
        *finishlist* so finish_backend_type() fills the fields in later."""
        self.check_not_partial()
        finishlist.append(self)
        #
        return global_cache(self, ffi, 'new_%s_type' % self.kind,
                            self.get_official_name(), key=self)
class StructType(StructOrUnion):
    """A 'struct xxx' type."""
    kind = 'struct'
class UnionType(StructOrUnion):
    """A 'union xxx' type."""
    kind = 'union'
class EnumType(StructOrUnionOrEnum):
    """An 'enum xxx' type, with its enumerator names and values."""
    kind = 'enum'
    partial = False
    partial_resolved = False

    def __init__(self, name, enumerators, enumvalues, baseinttype=None):
        self.name = name
        self.enumerators = enumerators
        self.enumvalues = enumvalues
        self.baseinttype = baseinttype
        self.build_c_name_with_marker()

    def force_the_name(self, forcename):
        StructOrUnionOrEnum.force_the_name(self, forcename)
        if self.forcename is None:
            # Anonymous enum: derive a '$'-prefixed synthetic name from
            # the official one.
            name = self.get_official_name()
            self.forcename = '$' + name.replace(' ', '_')

    def check_not_partial(self):
        # A partial enum ("...") must have been resolved by verification.
        if self.partial and not self.partial_resolved:
            from . import ffiplatform
            raise ffiplatform.VerificationMissing(self._get_c_name())

    def build_backend_type(self, ffi, finishlist):
        self.check_not_partial()
        base_btype = self.build_baseinttype(ffi, finishlist)
        return global_cache(self, ffi, 'new_enum_type',
                            self.get_official_name(),
                            self.enumerators, self.enumvalues,
                            base_btype, key=self)

    def build_baseinttype(self, ffi, finishlist):
        """Pick the backend integer type underlying this enum.

        Uses the explicit base type when given; otherwise guesses the
        smallest of (unsigned) int / (unsigned) long whose range holds
        every enumerator value.
        """
        if self.baseinttype is not None:
            return self.baseinttype.get_cached_btype(ffi, finishlist)
        #
        from . import api
        if self.enumvalues:
            smallest_value = min(self.enumvalues)
            largest_value = max(self.enumvalues)
        else:
            import warnings
            warnings.warn("%r has no values explicitly defined; next version "
                          "will refuse to guess which integer type it is "
                          "meant to be (unsigned/signed, int/long)"
                          % self._get_c_name())
            smallest_value = largest_value = 0
        if smallest_value < 0: # needs a signed type
            sign = 1
            candidate1 = PrimitiveType("int")
            candidate2 = PrimitiveType("long")
        else:
            sign = 0
            candidate1 = PrimitiveType("unsigned int")
            candidate2 = PrimitiveType("unsigned long")
        btype1 = candidate1.get_cached_btype(ffi, finishlist)
        btype2 = candidate2.get_cached_btype(ffi, finishlist)
        size1 = ffi.sizeof(btype1)
        size2 = ffi.sizeof(btype2)
        # A candidate fits if every value lies inside its signed/unsigned
        # range: lower bound -2**(bits-1) (only reachable when signed),
        # upper bound 2**bits (unsigned) or 2**(bits-1) (signed).
        if (smallest_value >= ((-1) << (8*size1-1)) and
            largest_value < (1 << (8*size1-sign))):
            return btype1
        if (smallest_value >= ((-1) << (8*size2-1)) and
            largest_value < (1 << (8*size2-sign))):
            return btype2
        raise api.CDefError("%s values don't all fit into either 'long' "
                            "or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
    """Build an opaque StructType standing in for the unknown type *name*.

    *structname* defaults to '$<name>', marking it as synthetic.
    """
    if structname is None:
        structname = '$' + name
    result = StructType(structname, None, None, None)
    result.force_the_name(name)
    result.origin = "unknown_type"
    return result
def unknown_ptr_type(name, structname=None):
    """Build a NamedPointerType to an opaque struct for pointer type *name*.

    *structname* defaults to '$$<name>', marking it as synthetic.
    """
    if structname is None:
        structname = '$$' + name
    target = StructType(structname, None, None, None)
    return NamedPointerType(target, name)
# Protects insertions into the per-backend '__typecache' (see global_cache()).
global_lock = allocate_lock()
def global_cache(srctype, ffi, funcname, *args, **kwds):
    """Build a backend type via ``ffi._backend.<funcname>(*args)``, caching
    the result on the backend itself.

    The cache is a WeakValueDictionary stored as a '__typecache' attribute
    of the backend (module-level or class-level), keyed by the optional
    'key' keyword argument, or else by ``(funcname, args)``.  *srctype* is
    only used to enrich error messages.
    """
    key = kwds.pop('key', (funcname, args))
    assert not kwds
    try:
        return ffi._backend.__typecache[key]
    except KeyError:
        pass
    except AttributeError:
        # initialize the __typecache attribute, either at the module level
        # if ffi._backend is a module, or at the class level if ffi._backend
        # is some instance.
        if isinstance(ffi._backend, types.ModuleType):
            ffi._backend.__typecache = weakref.WeakValueDictionary()
        else:
            type(ffi._backend).__typecache = weakref.WeakValueDictionary()
    try:
        res = getattr(ffi._backend, funcname)(*args)
    except NotImplementedError as e:
        raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
    # note that setdefault() on WeakValueDictionary is not atomic
    # and contains a rare bug (http://bugs.python.org/issue19542);
    # we have to use a lock and do it ourselves
    cache = ffi._backend.__typecache
    with global_lock:
        res1 = cache.get(key)
        if res1 is None:
            cache[key] = res
            return res
        else:
            # another thread won the race: return its object so there is
            # only ever one live backend type per key
            return res1
def pointer_cache(ffi, BType):
    # '?' is a dummy source-type label: pointer types are cached purely by
    # their target BType, so no meaningful srctype is needed.
    return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
    """Prefix '<name>: ' onto the exception's message, in place.

    Only acts when the exception has at least one argument and its first
    argument is exactly a str; otherwise *e* is left untouched.
    """
    args = e.args
    if not args:
        return
    first = args[0]
    if type(first) is not str:
        return
    e.args = ('%s: %s' % (name, first),) + args[1:]
| mit |
dpassante/ansible | test/support/integration/plugins/lookup/hashi_vault.py | 22 | 11350 | # (c) 2015, Jonathan Davila <jonathan(at)davila.io>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: hashi_vault
author: Jonathan Davila <jdavila(at)ansible.com>
version_added: "2.0"
short_description: retrieve secrets from HashiCorp's vault
requirements:
- hvac (python library)
description:
- retrieve secrets from HashiCorp's vault
notes:
- Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
- As of Ansible 2.10, only the latest secret is returned when specifying a KV v2 path.
options:
secret:
description: query you are making.
required: True
token:
description: vault token.
env:
- name: VAULT_TOKEN
url:
description: URL to vault service.
env:
- name: VAULT_ADDR
default: 'http://127.0.0.1:8200'
username:
description: Authentication user name.
password:
description: Authentication password.
role_id:
description: Role id for a vault AppRole auth.
env:
- name: VAULT_ROLE_ID
secret_id:
description: Secret id for a vault AppRole auth.
env:
- name: VAULT_SECRET_ID
auth_method:
description:
- Authentication method to be used.
- C(userpass) is added in version 2.8.
env:
- name: VAULT_AUTH_METHOD
choices:
- userpass
- ldap
- approle
mount_point:
description: vault mount point, only required if you have a custom mount point.
default: ldap
ca_cert:
description: path to certificate to use for authentication.
aliases: [ cacert ]
validate_certs:
description: controls verification and validation of SSL certificates, mostly you only want to turn off with self signed ones.
type: boolean
default: True
namespace:
version_added: "2.8"
description: namespace where secrets reside. requires HVAC 0.7.0+ and Vault 0.11+.
"""
EXAMPLES = """
- debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
- name: Return all secrets from a path
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
- name: Vault that requires authentication via LDAP
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas url=http://myvault:8200')}}"
- name: Vault that requires authentication via username and password
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=mypas url=http://myvault:8200')}}"
- name: Using an ssl vault
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=https://myvault:8200 validate_certs=False')}}"
- name: using certificate auth
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hi:value token=xxxx-xxx-xxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem')}}"
- name: authenticate with a Vault app role
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid url=http://myvault:8200')}}"
- name: Return all secrets from a path in a namespace
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200 namespace=teama/admins')}}"
# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
- name: Return latest KV v2 secret from path
debug:
msg: "{{ lookup('hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
"""
RETURN = """
_raw:
description:
- secrets(s) requested
"""
import os
from ansible.errors import AnsibleError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
HAS_HVAC = False
try:
import hvac
HAS_HVAC = True
except ImportError:
HAS_HVAC = False
ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200'
if os.getenv('VAULT_ADDR') is not None:
ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR']
class HashiVault:
    """Small wrapper around an authenticated hvac Vault client.

    Keyword arguments come from the parsed lookup term string
    (``secret=... token=... url=... auth_method=...``), with the usual
    ``VAULT_*`` environment variables acting as fallbacks.
    """

    def __init__(self, **kwargs):
        self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR)
        self.namespace = kwargs.get('namespace', None)
        self.avail_auth_method = ['approle', 'userpass', 'ldap']

        # split secret arg, which has format 'secret/hello:value' into secret='secret/hello' and secret_field='value'
        s = kwargs.get('secret')
        if s is None:
            raise AnsibleError("No secret specified for hashi_vault lookup")

        s_f = s.rsplit(':', 1)
        self.secret = s_f[0]
        if len(s_f) >= 2:
            self.secret_field = s_f[1]
        else:
            self.secret_field = ''

        # True/False for plain SSL verification, or a CA cert path.
        # BUGFIX: this used to be computed twice (once before and once
        # after the secret parsing); once is enough.
        self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', ''))

        # If a particular backend is asked for (and its method exists) we call it, otherwise drop through to using
        # token auth. This means if a particular auth backend is requested and a token is also given, then we
        # ignore the token and attempt authentication against the specified backend.
        #
        # to enable a new auth backend, simply add a new 'def auth_<type>' method below.
        #
        self.auth_method = kwargs.get('auth_method', os.environ.get('VAULT_AUTH_METHOD'))
        if self.auth_method and self.auth_method != 'token':
            try:
                if self.namespace is not None:
                    self.client = hvac.Client(url=self.url, verify=self.verify, namespace=self.namespace)
                else:
                    self.client = hvac.Client(url=self.url, verify=self.verify)
                # prefixing with auth_ to limit which methods can be accessed
                getattr(self, 'auth_' + self.auth_method)(**kwargs)
            except AttributeError:
                raise AnsibleError("Authentication method '%s' not supported."
                                   " Available options are %r" % (self.auth_method, self.avail_auth_method))
        else:
            self.token = kwargs.get('token', os.environ.get('VAULT_TOKEN', None))
            if self.token is None and os.environ.get('HOME'):
                # Fall back to the Vault CLI's cached token file, if present.
                token_filename = os.path.join(
                    os.environ.get('HOME'),
                    '.vault-token'
                )
                if os.path.exists(token_filename):
                    with open(token_filename) as token_file:
                        self.token = token_file.read().strip()

            if self.token is None:
                raise AnsibleError("No Vault Token specified")

            if self.namespace is not None:
                self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify, namespace=self.namespace)
            else:
                self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify)

        if not self.client.is_authenticated():
            raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup")

    def get(self):
        """Read ``self.secret`` and return the requested field (or the whole
        data dict when no field was given).  Raises AnsibleError when the
        secret or field is missing."""
        data = self.client.read(self.secret)

        # Check response for KV v2 fields and flatten nested secret data.
        #
        # https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
        try:
            # sentinel field checks
            check_dd = data['data']['data']
            check_md = data['data']['metadata']
            # unwrap nested data
            data = data['data']
        except KeyError:
            pass

        if data is None:
            raise AnsibleError("The secret %s doesn't seem to exist for hashi_vault lookup" % self.secret)

        if self.secret_field == '':
            return data['data']

        if self.secret_field not in data['data']:
            raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (self.secret, self.secret_field))

        return data['data'][self.secret_field]

    def check_params(self, **kwargs):
        """Validate and return (username, password, mount_point) for the
        username/password style auth backends."""
        username = kwargs.get('username')
        if username is None:
            raise AnsibleError("Authentication method %s requires a username" % self.auth_method)

        password = kwargs.get('password')
        if password is None:
            raise AnsibleError("Authentication method %s requires a password" % self.auth_method)

        mount_point = kwargs.get('mount_point')

        return username, password, mount_point

    def auth_userpass(self, **kwargs):
        """Authenticate via the userpass backend."""
        username, password, mount_point = self.check_params(**kwargs)
        if mount_point is None:
            mount_point = 'userpass'

        self.client.auth_userpass(username, password, mount_point=mount_point)

    def auth_ldap(self, **kwargs):
        """Authenticate via the LDAP backend."""
        username, password, mount_point = self.check_params(**kwargs)
        if mount_point is None:
            mount_point = 'ldap'

        self.client.auth.ldap.login(username, password, mount_point=mount_point)

    def boolean_or_cacert(self, validate_certs, cacert):
        ''' return a bool or cacert '''
        # BUGFIX: this line above used to be a stray mis-quoted string
        # ('''' ... ''') placed *after* the first statement, so it was a
        # no-op expression rather than the docstring; behavior unchanged.
        validate_certs = boolean(validate_certs, strict=False)
        if validate_certs is True:
            if cacert != '':
                return cacert
            else:
                return True
        else:
            return False

    def auth_approle(self, **kwargs):
        """Authenticate via an AppRole role_id/secret_id pair."""
        role_id = kwargs.get('role_id', os.environ.get('VAULT_ROLE_ID', None))
        if role_id is None:
            raise AnsibleError("Authentication method app role requires a role_id")

        secret_id = kwargs.get('secret_id', os.environ.get('VAULT_SECRET_ID', None))
        if secret_id is None:
            raise AnsibleError("Authentication method app role requires a secret_id")

        self.client.auth_approle(role_id, secret_id)
class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        """Parse the lookup term string, connect to Vault, return secret(s).

        Each term is a whitespace-separated list of key=value pairs; only
        the first term configures the connection, and the same secret is
        fetched once per term.
        """
        if not HAS_HVAC:
            raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")

        vault_args = terms[0].split()
        vault_dict = {}
        ret = []

        for param in vault_args:
            try:
                # BUGFIX: split on the first '=' only, so values may
                # themselves contain '=' (e.g. base64-encoded tokens);
                # a bare split('=') raised ValueError for such values.
                key, value = param.split('=', 1)
            except ValueError:
                raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % terms)
            vault_dict[key] = value

        # accept 'ca_cert' as an alias for 'cacert'
        if 'ca_cert' in vault_dict.keys():
            vault_dict['cacert'] = vault_dict['ca_cert']
            vault_dict.pop('ca_cert', None)

        vault_conn = HashiVault(**vault_dict)

        for term in terms:
            key = term.split()[0]
            value = vault_conn.get()
            ret.append(value)

        return ret
| gpl-3.0 |
ajayuranakar/django-blog | lib/python2.7/site-packages/setuptools/tests/test_test.py | 286 | 3710 | # -*- coding: UTF-8 -*-
"""develop tests
"""
import sys
import os, shutil, tempfile, unittest
import tempfile
import site
from distutils.errors import DistutilsError
from setuptools.compat import StringIO
from setuptools.command.test import test
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
)
"""
NS_INIT = """# -*- coding: Latin-1 -*-
# Söme Arbiträry Ünicode to test Issüé 310
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
"""
# Make sure this is Latin-1 binary, before writing:
if sys.version_info < (3,):
    # On Python 2 the literal above is a UTF-8 byte string; round-trip it
    # through unicode so the Latin-1 encode below produces correct bytes.
    NS_INIT = NS_INIT.decode('UTF-8')
NS_INIT = NS_INIT.encode('Latin-1')
TEST_PY = """import unittest
class TestTest(unittest.TestCase):
def test_test(self):
print "Foo" # Should fail under Python 3 unless 2to3 is used
test_suite = unittest.makeSuite(TestTest)
"""
class TestTestTest(unittest.TestCase):
    """End-to-end test of the setuptools 'test' command on a namespace
    package with a non-ASCII __init__ and 2to3 conversion enabled.

    NOTE(review): the ``sys.version < "2.6"`` guards compare version
    *strings*; adequate for the 2.x versions this targets but fragile in
    general.  Tests silently no-op inside a virtualenv (sys.real_prefix).
    """

    def setUp(self):
        """Create a throwaway project tree and redirect site.USER_BASE/SITE."""
        if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
            return

        # Directory structure
        self.dir = tempfile.mkdtemp()
        os.mkdir(os.path.join(self.dir, 'name'))
        os.mkdir(os.path.join(self.dir, 'name', 'space'))
        os.mkdir(os.path.join(self.dir, 'name', 'space', 'tests'))
        # setup.py
        setup = os.path.join(self.dir, 'setup.py')
        f = open(setup, 'wt')
        f.write(SETUP_PY)
        f.close()
        self.old_cwd = os.getcwd()
        # name/__init__.py
        init = os.path.join(self.dir, 'name', '__init__.py')
        f = open(init, 'wb')
        f.write(NS_INIT)
        f.close()
        # name/space/__init__.py
        init = os.path.join(self.dir, 'name', 'space', '__init__.py')
        f = open(init, 'wt')
        f.write('#empty\n')
        f.close()
        # name/space/tests/__init__.py
        init = os.path.join(self.dir, 'name', 'space', 'tests', '__init__.py')
        f = open(init, 'wt')
        f.write(TEST_PY)
        f.close()

        os.chdir(self.dir)
        # Point the per-user site dirs at fresh temp dirs so the 'test'
        # command's --user install cannot touch the real user site.
        self.old_base = site.USER_BASE
        site.USER_BASE = tempfile.mkdtemp()
        self.old_site = site.USER_SITE
        site.USER_SITE = tempfile.mkdtemp()

    def tearDown(self):
        """Remove the project tree and restore cwd and site.* globals."""
        if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
            return

        os.chdir(self.old_cwd)
        shutil.rmtree(self.dir)
        shutil.rmtree(site.USER_BASE)
        shutil.rmtree(site.USER_SITE)
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site

    def test_test(self):
        """Run the 'test' command with use_2to3 and swallow its sys.exit."""
        if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
            return

        dist = Distribution(dict(
            name='foo',
            packages=['name', 'name.space', 'name.space.tests'],
            namespace_packages=['name'],
            test_suite='name.space.tests.test_suite',
            use_2to3=True,
            ))
        dist.script_name = 'setup.py'
        cmd = test(dist)
        cmd.user = 1
        cmd.ensure_finalized()
        cmd.install_dir = site.USER_SITE
        cmd.user = 1
        # Capture stdout so the inner test run's output doesn't pollute ours.
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            try: # try/except/finally doesn't work in Python 2.4, so we need nested try-statements.
                cmd.run()
            except SystemExit: # The test runner calls sys.exit, stop that making an error.
                pass
        finally:
            sys.stdout = old_stdout
| gpl-3.0 |
RanadeepPolavarapu/kuma | kuma/search/tests/test_serializers.py | 17 | 3169 | import mock
from nose.tools import ok_, eq_
from django.utils import translation
from kuma.wiki.search import WikiDocumentType
from . import ElasticTestCase
from ..fields import SearchQueryField, SiteURLField
from ..models import Filter, FilterGroup
from ..serializers import (DocumentSerializer, FilterSerializer,
FilterWithGroupSerializer)
class SerializerTests(ElasticTestCase):
    """Tests for the search filter and document serializers, backed by the
    wiki/search fixtures and a live Elasticsearch index."""
    fixtures = ElasticTestCase.fixtures + ['wiki/documents.json',
                                           'search/filters.json']

    def test_filter_serializer(self):
        """FilterWithGroupSerializer emits name/slug/tags/operator/group."""
        group = FilterGroup.objects.get(name='Group')
        filter_ = Filter.objects.create(name='Serializer', slug='serializer',
                                        group=group)
        filter_.tags.add('tag')
        filter_serializer = FilterWithGroupSerializer(filter_)
        eq_(filter_serializer.data, {
            'name': 'Serializer',
            'slug': 'serializer',
            'tags': ['tag'],
            'operator': 'OR',
            'group': {'name': 'Group', 'slug': 'group', 'order': 1},
            'shortcut': None})

    @mock.patch('kuma.search.serializers._')
    def test_filter_serializer_with_translations(self, _mock):
        """The filter name passes through gettext: mock it to return the
        Spanish translation and check it shows up in the payload."""
        _mock.return_value = u'Juegos'
        translation.activate('es')
        filter_ = Filter(name='Games', slug='games')
        serializer = FilterSerializer(filter_)
        eq_(serializer.data, {
            'name': u'Juegos',
            'slug': u'games',
            'shortcut': None})

    def test_document_serializer(self):
        """DocumentSerializer works both with many=True (list of dicts) and
        on a single search hit (one dict)."""
        search = WikiDocumentType.search()
        result = search.execute()
        doc_serializer = DocumentSerializer(result, many=True)
        list_data = doc_serializer.data
        eq_(len(list_data), 7)
        ok_(isinstance(list_data, list))
        ok_(1 in [data['id'] for data in list_data])

        doc_serializer = DocumentSerializer(result[0], many=False)
        dict_data = doc_serializer.data
        ok_(isinstance(dict_data, dict))
        eq_(dict_data['id'], result[0].id)

    def test_excerpt(self):
        """Highlighted summary matches surface as the 'excerpt' field."""
        search = WikiDocumentType.search()
        search = search.query('match', summary='CSS')
        search = search.highlight(*WikiDocumentType.excerpt_fields)
        result = search.execute()
        data = DocumentSerializer(result, many=True).data
        eq_(data[0]['excerpt'], u'A <em>CSS</em> article')
class FieldTests(ElasticTestCase):
    """Tests for the custom serializer fields."""

    def test_SearchQueryField(self):
        """SearchQueryField reads the 'q' query parameter off the request."""
        request = self.get_request('/?q=test')
        # APIRequestFactory doesn't actually return APIRequest objects
        # but standard HttpRequest objects due to the way it initializes
        # the request when APIViews are called
        request.QUERY_PARAMS = request.GET

        field = SearchQueryField()
        field.context = {'request': request}
        eq_(field.to_native(None), 'test')

    def test_SiteURLField(self):
        """SiteURLField reverses the named URL with attrs of the value obj."""
        class FakeValue(object):
            slug = 'Firefox'
            locale = 'de'

        field = SiteURLField('wiki.document', args=['slug'])
        value = field.to_native(FakeValue())
        ok_('/de/docs/Firefox' in value)
| mpl-2.0 |
gregdek/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_facts.py | 29 | 6807 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachineimage_facts
version_added: "2.1"
short_description: Get virtual machine image facts.
description:
- Get facts for virtual machine images.
options:
location:
description:
- Azure location value (ie. westus, eastus, eastus2, northcentralus, etc.). Supplying only a
location value will yield a list of available publishers for the location.
required: true
publisher:
description:
- Name of an image publisher. List image offerings associated with a particular publisher.
offer:
description:
- Name of an image offering. Combine with sku to see a list of available image versions.
sku:
description:
- Image offering SKU. Combine with offer to see a list of available versions.
version:
description:
- Specific version number of an image.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for a specific image
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
version: '7.1.20160308'
- name: List available versions
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
- name: List available offers
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
- name: List available publishers
azure_rm_virtualmachineimage_facts:
location: eastus
'''
RETURN = '''
azure_vmimages:
description: List of image dicts.
returned: always
type: list
example: []
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineImageFacts(AzureRMModuleBase):
    """Gather facts about Azure VM images.

    Depending on which of location/publisher/offer/sku/version are
    supplied, returns one specific image, the available versions, the
    available offers, or the available publishers.
    """

    def __init__(self, **kwargs):

        self.module_arg_spec = dict(
            location=dict(type='str', required=True),
            publisher=dict(type='str'),
            offer=dict(type='str'),
            sku=dict(type='str'),
            version=dict(type='str')
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_vmimages=[])
        )

        self.location = None
        self.publisher = None
        self.offer = None
        self.sku = None
        self.version = None

        super(AzureRMVirtualMachineImageFacts, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to the most specific query the given parameters allow."""

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.location and self.publisher and self.offer and self.sku and self.version:
            self.results['ansible_facts']['azure_vmimages'] = self.get_item()
        elif self.location and self.publisher and self.offer and self.sku:
            self.results['ansible_facts']['azure_vmimages'] = self.list_images()
        elif self.location and self.publisher:
            self.results['ansible_facts']['azure_vmimages'] = self.list_offers()
        elif self.location:
            self.results['ansible_facts']['azure_vmimages'] = self.list_publishers()

        return self.results

    def get_item(self):
        """Return the single image matching all five parameters, or []."""
        item = None
        result = []

        try:
            item = self.compute_client.virtual_machine_images.get(self.location,
                                                                  self.publisher,
                                                                  self.offer,
                                                                  self.sku,
                                                                  self.version)
        except CloudError:
            # not found: fall through and return the empty list
            pass

        if item:
            result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)]

        return result

    def _serialize_resources(self, response):
        # Shared serialization loop for the three list_* methods below;
        # tolerates a None response (query failed or returned nothing).
        results = []
        if response:
            for item in response:
                results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
                                                  enum_modules=AZURE_ENUM_MODULES))
        return results

    def list_images(self):
        """List available image versions for location/publisher/offer/sku."""
        response = None
        try:
            response = self.compute_client.virtual_machine_images.list(self.location,
                                                                       self.publisher,
                                                                       self.offer,
                                                                       self.sku,)
        except CloudError:
            pass
        except Exception as exc:
            self.fail("Failed to list images: {0}".format(str(exc)))

        return self._serialize_resources(response)

    def list_offers(self):
        """List available image offers for location/publisher."""
        response = None
        try:
            response = self.compute_client.virtual_machine_images.list_offers(self.location,
                                                                              self.publisher)
        except CloudError:
            pass
        except Exception as exc:
            self.fail("Failed to list offers: {0}".format(str(exc)))

        return self._serialize_resources(response)

    def list_publishers(self):
        """List available image publishers for the location."""
        response = None
        try:
            response = self.compute_client.virtual_machine_images.list_publishers(self.location)
        except CloudError:
            pass
        except Exception as exc:
            self.fail("Failed to list publishers: {0}".format(str(exc)))

        return self._serialize_resources(response)
def main():
    """Module entry point: instantiating the class runs the whole module."""
    AzureRMVirtualMachineImageFacts()


if __name__ == '__main__':
    main()
| gpl-3.0 |
laiy/Database_Project | third_party/nltk/sem/drt.py | 3 | 50337 | # Natural Language Toolkit: Discourse Representation Theory (DRT)
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import operator
from functools import reduce
from nltk.compat import string_types, python_2_unicode_compatible
from nltk.sem.logic import (APP, AbstractVariableExpression, AllExpression,
AndExpression, ApplicationExpression, BinaryExpression,
BooleanExpression, ConstantExpression, EqualityExpression,
EventVariableExpression, ExistsExpression, Expression,
FunctionVariableExpression, ImpExpression,
IndividualVariableExpression, LambdaExpression, Tokens,
LogicParser, NegatedExpression, OrExpression, Variable,
is_eventvar, is_funcvar, is_indvar, unique_variable)
# Import Tkinter-based modules if they are available
try:
# imports are fixed for Python 2.x by nltk.compat
from tkinter import Canvas
from tkinter import Tk
from tkinter.font import Font
from nltk.util import in_idle
except ImportError:
# No need to print a warning here, nltk.draw has already printed one.
pass
class DrtTokens(Tokens):
    """Token constants for the DRT parser, extending the base logic tokens
    with the DRS keyword, concatenation '+', 'PRO', brackets and ':'."""
    DRS = 'DRS'
    DRS_CONC = '+'
    PRONOUN = 'PRO'
    OPEN_BRACKET = '['
    CLOSE_BRACKET = ']'
    COLON = ':'

    PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET, COLON]

    SYMBOLS = Tokens.SYMBOLS + PUNCT

    TOKENS = Tokens.TOKENS + [DRS] + PUNCT
class DrtParser(LogicParser):
    """A lambda calculus expression parser."""

    def __init__(self):
        LogicParser.__init__(self)

        # Lower number = binds tighter; extends the base logic table with
        # the DRT-specific ':' (proposition) and '+' (DRS concatenation).
        self.operator_precedence = dict(
                           [(x,1) for x in DrtTokens.LAMBDA_LIST] + \
                           [(x,2) for x in DrtTokens.NOT_LIST] + \
                           [(APP,3)] + \
                           [(x,4) for x in DrtTokens.EQ_LIST+Tokens.NEQ_LIST] + \
                           [(DrtTokens.COLON,5)] + \
                           [(DrtTokens.DRS_CONC,6)] + \
                           [(x,7) for x in DrtTokens.OR_LIST] + \
                           [(x,8) for x in DrtTokens.IMP_LIST] + \
                           [(None,9)])

    def get_all_symbols(self):
        """This method exists to be overridden"""
        return DrtTokens.SYMBOLS

    def isvariable(self, tok):
        return tok not in DrtTokens.TOKENS

    def handle(self, tok, context):
        """This method is intended to be overridden for logics that
        use different operators or expressions"""
        if tok in DrtTokens.NOT_LIST:
            return self.handle_negation(tok, context)

        elif tok in DrtTokens.LAMBDA_LIST:
            return self.handle_lambda(tok, context)

        elif tok == DrtTokens.OPEN:
            # '([' introduces a DRS; a plain '(' is ordinary grouping
            if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET:
                return self.handle_DRS(tok, context)
            else:
                return self.handle_open(tok, context)

        elif tok.upper() == DrtTokens.DRS:
            self.assertNextToken(DrtTokens.OPEN)
            return self.handle_DRS(tok, context)

        elif self.isvariable(tok):
            # 'v:' introduces a proposition condition (v labels a DRS)
            if self.inRange(0) and self.token(0) == DrtTokens.COLON:
                return self.handle_prop(tok, context)
            else:
                return self.handle_variable(tok, context)

    def make_NegatedExpression(self, expression):
        return DrtNegatedExpression(expression)

    def handle_DRS(self, tok, context):
        # a DRS
        refs = self.handle_refs()
        if self.inRange(0) and self.token(0) == DrtTokens.COMMA: #if there is a comma (it's optional)
            self.token() # swallow the comma
        conds = self.handle_conds(context)
        self.assertNextToken(DrtTokens.CLOSE)
        return DRS(refs, conds, None)

    def handle_refs(self):
        """Parse the '[ref, ...]' discourse-referent list of a DRS."""
        self.assertNextToken(DrtTokens.OPEN_BRACKET)
        refs = []
        while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
            # Support expressions like: DRS([x y],C) == DRS([x,y],C)
            if refs and self.token(0) == DrtTokens.COMMA:
                self.token() # swallow the comma
            refs.append(self.get_next_token_variable('quantified'))
        self.assertNextToken(DrtTokens.CLOSE_BRACKET)
        return refs

    def handle_conds(self, context):
        """Parse the '[cond, ...]' condition list of a DRS."""
        self.assertNextToken(DrtTokens.OPEN_BRACKET)
        conds = []
        while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
            # Support expressions like: DRS([x y],C) == DRS([x, y],C)
            if conds and self.token(0) == DrtTokens.COMMA:
                self.token() # swallow the comma
            conds.append(self.process_next_expression(context))
        self.assertNextToken(DrtTokens.CLOSE_BRACKET)
        return conds

    def handle_prop(self, tok, context):
        """Parse a proposition condition of the form 'v:<DRS>'."""
        variable = self.make_VariableExpression(tok)
        self.assertNextToken(':')
        drs = self.process_next_expression(DrtTokens.COLON)
        return DrtProposition(variable, drs)

    def make_EqualityExpression(self, first, second):
        """This method serves as a hook for other logic parsers that
        have different equality expression classes"""
        return DrtEqualityExpression(first, second)

    def get_BooleanExpression_factory(self, tok):
        """This method serves as a hook for other logic parsers that
        have different boolean operators"""
        if tok == DrtTokens.DRS_CONC:
            return lambda first, second: DrtConcatenation(first, second, None)
        elif tok in DrtTokens.OR_LIST:
            return DrtOrExpression
        elif tok in DrtTokens.IMP_LIST:
            # DRT implication folds the consequent into the antecedent DRS
            def make_imp_expression(first, second):
                if isinstance(first, DRS):
                    return DRS(first.refs, first.conds, second)
                if isinstance(first, DrtConcatenation):
                    return DrtConcatenation(first.first, first.second, second)
                raise Exception('Antecedent of implication must be a DRS')
            return make_imp_expression
        else:
            return None

    def make_BooleanExpression(self, factory, first, second):
        return factory(first, second)

    def make_ApplicationExpression(self, function, argument):
        return DrtApplicationExpression(function, argument)

    def make_VariableExpression(self, name):
        return DrtVariableExpression(Variable(name))

    def make_LambdaExpression(self, variables, term):
        return DrtLambdaExpression(variables, term)
class DrtExpression(object):
    """
    This is the base abstract DRT Expression from which every DRT
    Expression extends.
    """

    # shared parser instance used by fromstring()
    _drt_parser = DrtParser()

    @classmethod
    def fromstring(cls, s):
        """Parse the string *s* into a DRT expression."""
        return cls._drt_parser.parse(s)

    def applyto(self, other):
        return DrtApplicationExpression(self, other)

    def __neg__(self):
        return DrtNegatedExpression(self)

    def __and__(self, other):
        # plain conjunction is not part of DRT; use '+' (concatenation)
        raise NotImplementedError()

    def __or__(self, other):
        return DrtOrExpression(self, other)

    def __gt__(self, other):
        # 'self > other' builds DRT implication: fold the consequent into
        # the antecedent DRS (mirrors DrtParser's implication factory)
        assert isinstance(other, DrtExpression)
        if isinstance(self, DRS):
            return DRS(self.refs, self.conds, other)
        if isinstance(self, DrtConcatenation):
            return DrtConcatenation(self.first, self.second, other)
        raise Exception('Antecedent of implication must be a DRS')

    def equiv(self, other, prover=None):
        """
        Check for logical equivalence.
        Pass the expression (self <-> other) to the theorem prover.
        If the prover says it is valid, then the self and other are equal.

        :param other: an ``DrtExpression`` to check equality against
        :param prover: a ``nltk.inference.api.Prover``
        """
        assert isinstance(other, DrtExpression)

        f1 = self.simplify().fol();
        f2 = other.simplify().fol();
        return f1.equiv(f2, prover)

    @property
    def type(self):
        # DRT expressions are untyped
        raise AttributeError("'%s' object has no attribute 'type'" %
                             self.__class__.__name__)

    def typecheck(self, signature=None):
        raise NotImplementedError()

    def __add__(self, other):
        # 'self + other' is DRS concatenation (merge)
        return DrtConcatenation(self, other, None)

    def get_refs(self, recursive=False):
        """
        Return the set of discourse referents in this DRS.
        :param recursive: bool Also find discourse referents in subterms?
        :return: list of ``Variable`` objects
        """
        raise NotImplementedError()

    def is_pronoun_function(self):
        """ Is self of the form "PRO(x)"? """
        return isinstance(self, DrtApplicationExpression) and \
               isinstance(self.function, DrtAbstractVariableExpression) and \
               self.function.variable.name == DrtTokens.PRONOUN and \
               isinstance(self.argument, DrtIndividualVariableExpression)

    def make_EqualityExpression(self, first, second):
        return DrtEqualityExpression(first, second)

    def make_VariableExpression(self, variable):
        return DrtVariableExpression(variable)

    def resolve_anaphora(self):
        return resolve_anaphora(self)

    def eliminate_equality(self):
        return self.visit_structured(lambda e: e.eliminate_equality(),
                                     self.__class__)

    def pretty_format(self):
        """
        Draw the DRS
        :return: the pretty print string
        """
        return '\n'.join(self._pretty())

    def pretty_print(self):
        print(self.pretty_format())

    def draw(self):
        DrsDrawer(self).draw()
@python_2_unicode_compatible
class DRS(DrtExpression, Expression):
    """A Discourse Representation Structure."""
    def __init__(self, refs, conds, consequent=None):
        """
        :param refs: list of ``DrtIndividualVariableExpression`` for the
            discourse referents
        :param conds: list of ``Expression`` for the conditions
        :param consequent: optional ``DrtExpression``; when given, this DRS
            is the antecedent of an implication with that consequent
        """
        self.refs = refs
        self.conds = conds
        self.consequent = consequent
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        if variable in self.refs:
            #if a bound variable is the thing being replaced
            if not replace_bound:
                return self
            else:
                i = self.refs.index(variable)
                if self.consequent:
                    consequent = self.consequent.replace(variable, expression, True, alpha_convert)
                else:
                    consequent = None
                return DRS(self.refs[:i]+[expression.variable]+self.refs[i+1:],
                           [cond.replace(variable, expression, True, alpha_convert)
                            for cond in self.conds],
                           consequent)
        else:
            if alpha_convert:
                # any bound variable that appears in the expression must
                # be alpha converted to avoid a conflict
                for ref in (set(self.refs) & expression.free()):
                    newvar = unique_variable(ref)
                    newvarex = DrtVariableExpression(newvar)
                    i = self.refs.index(ref)
                    if self.consequent:
                        consequent = self.consequent.replace(ref, newvarex, True, alpha_convert)
                    else:
                        consequent = None
                    # rebind self to the alpha-converted DRS and keep going
                    self = DRS(self.refs[:i]+[newvar]+self.refs[i+1:],
                               [cond.replace(ref, newvarex, True, alpha_convert)
                                for cond in self.conds],
                               consequent)
            #replace in the conditions
            if self.consequent:
                consequent = self.consequent.replace(variable, expression, replace_bound, alpha_convert)
            else:
                consequent = None
            return DRS(self.refs,
                       [cond.replace(variable, expression, replace_bound, alpha_convert)
                        for cond in self.conds],
                       consequent)
    def free(self):
        """:see: Expression.free()"""
        # Free variables of conditions (and consequent), minus the
        # referents this DRS binds.
        conds_free = reduce(operator.or_, [c.free() for c in self.conds], set())
        if self.consequent:
            conds_free.update(self.consequent.free())
        return conds_free - set(self.refs)
    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        if recursive:
            conds_refs = self.refs + sum((c.get_refs(True) for c in self.conds), [])
            if self.consequent:
                conds_refs.extend(self.consequent.get_refs(True))
            return conds_refs
        else:
            return self.refs
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        parts = list(map(function, self.conds))
        if self.consequent:
            parts.append(function(self.consequent))
        return combinator(parts)
    def visit_structured(self, function, combinator):
        """:see: Expression.visit_structured()"""
        consequent = (function(self.consequent) if self.consequent else None)
        return combinator(self.refs, list(map(function, self.conds)), consequent)
    def eliminate_equality(self):
        """Remove 'x = y' conditions by substituting one variable for the other."""
        drs = self
        i = 0
        while i < len(drs.conds):
            cond = drs.conds[i]
            if isinstance(cond, EqualityExpression) and \
               isinstance(cond.first, AbstractVariableExpression) and \
               isinstance(cond.second, AbstractVariableExpression):
                # Drop the equality condition (and the now-redundant
                # referent), substitute throughout, and restart the scan
                # since the substitution can expose new equalities.
                drs = DRS(list(set(drs.refs)-set([cond.second.variable])),
                          drs.conds[:i]+drs.conds[i+1:],
                          drs.consequent)
                if cond.second.variable != cond.first.variable:
                    drs = drs.replace(cond.second.variable, cond.first, False, False)
                    i = 0
                i -= 1
            i += 1
        conds = []
        for cond in drs.conds:
            new_cond = cond.eliminate_equality()
            new_cond_simp = new_cond.simplify()
            # keep the condition unless it simplified away to an empty DRS
            if not isinstance(new_cond_simp, DRS) or \
               new_cond_simp.refs or new_cond_simp.conds or \
               new_cond_simp.consequent:
                conds.append(new_cond)
        consequent = (drs.consequent.eliminate_equality() if drs.consequent else None)
        return DRS(drs.refs, conds, consequent)
    def fol(self):
        """Translate to first-order logic: conditions are conjoined, and the
        referents are universally quantified when this DRS is an implication
        antecedent, existentially quantified otherwise."""
        if self.consequent:
            accum = None
            if self.conds:
                accum = reduce(AndExpression, [c.fol() for c in self.conds])
            if accum:
                accum = ImpExpression(accum, self.consequent.fol())
            else:
                accum = self.consequent.fol()
            for ref in self.refs[::-1]:
                accum = AllExpression(ref, accum)
            return accum
        else:
            if not self.conds:
                raise Exception("Cannot convert DRS with no conditions to FOL.")
            accum = reduce(AndExpression, [c.fol() for c in self.conds])
            for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]):
                accum = ExistsExpression(ref, accum)
            return accum
    def _pretty(self):
        # Build the box: referents on top, a separator rule, then one row
        # per (non-blank) condition line.
        refs_line = ' '.join(self._order_ref_strings(self.refs))
        cond_lines = [cond for cond_line in [filter(lambda s: s.strip(), cond._pretty())
                                             for cond in self.conds]
                      for cond in cond_line]
        length = max([len(refs_line)] + list(map(len, cond_lines)))
        drs = ([' _' + '_' * length + '_ ',
                '| ' + refs_line.ljust(length) + ' |',
                '|-' + '-' * length + '-|'] +
               ['| ' + line.ljust(length) + ' |' for line in cond_lines] +
               ['|_' + '_' * length + '_|'])
        if self.consequent:
            return DrtBinaryExpression._assemble_pretty(drs, DrtTokens.IMP,
                                                        self.consequent._pretty())
        return drs
    def _order_ref_strings(self, refs):
        # Sort referent names by kind (other, event, function, individual);
        # within a kind, a numeric suffix sorts numerically and a bare name
        # (no suffix) sorts first via the -1 sentinel.
        strings = ["%s" % ref for ref in refs]
        ind_vars = []
        func_vars = []
        event_vars = []
        other_vars = []
        for s in strings:
            if is_indvar(s):
                ind_vars.append(s)
            elif is_funcvar(s):
                func_vars.append(s)
            elif is_eventvar(s):
                event_vars.append(s)
            else:
                other_vars.append(s)
        return sorted(other_vars) + \
               sorted(event_vars, key=lambda v: int([v[2:],-1][len(v[2:]) == 0])) + \
               sorted(func_vars, key=lambda v: (v[0], int([v[1:],-1][len(v[1:])==0]))) + \
               sorted(ind_vars, key=lambda v: (v[0], int([v[1:],-1][len(v[1:])==0])))
    def __eq__(self, other):
        r"""Defines equality modulo alphabetic variance.
        If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
        if isinstance(other, DRS):
            if len(self.refs) == len(other.refs):
                converted_other = other
                # Rename other's referents to self's, pairwise, then compare.
                for (r1, r2) in zip(self.refs, converted_other.refs):
                    varex = self.make_VariableExpression(r1)
                    converted_other = converted_other.replace(r2, varex, True)
                if self.consequent == converted_other.consequent and \
                   len(self.conds) == len(converted_other.conds):
                    for c1, c2 in zip(self.conds, converted_other.conds):
                        if not (c1 == c2):
                            return False
                    return True
        return False
    def __ne__(self, other):
        return not self == other
    __hash__ = Expression.__hash__
    def __str__(self):
        drs = '([%s],[%s])' % (','.join(self._order_ref_strings(self.refs)),
                               ', '.join("%s" % cond for cond in self.conds)) # map(str, self.conds)))
        if self.consequent:
            return DrtTokens.OPEN + drs + ' ' + DrtTokens.IMP + ' ' + \
                   "%s" % self.consequent + DrtTokens.CLOSE
        return drs
def DrtVariableExpression(variable):
    """
    This is a factory method that instantiates and returns a subtype of
    ``DrtAbstractVariableExpression`` appropriate for the given variable.
    """
    name = variable.name
    if is_indvar(name):
        return DrtIndividualVariableExpression(variable)
    if is_funcvar(name):
        return DrtFunctionVariableExpression(variable)
    if is_eventvar(name):
        return DrtEventVariableExpression(variable)
    # Anything that is not a recognised variable kind is a constant.
    return DrtConstantExpression(variable)
class DrtAbstractVariableExpression(DrtExpression, AbstractVariableExpression):
    """Common DRT behaviour for variable and constant expressions."""
    def fol(self):
        # An atomic variable is already a first-order expression.
        return self
    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        # Atomic expressions introduce no discourse referents.
        return []
    def _pretty(self):
        # Render the name on the centre (third) row, padded to four rows so
        # it lines up with box-drawn operands.
        text = "%s" % self
        pad = ' ' * len(text)
        return [pad, pad, text, pad]
    def eliminate_equality(self):
        # Nothing to eliminate inside an atomic expression.
        return self
# Concrete variable kinds: all behaviour comes from the
# DrtAbstractVariableExpression mixin plus the corresponding logic class.
class DrtIndividualVariableExpression(DrtAbstractVariableExpression, IndividualVariableExpression):
    pass
class DrtFunctionVariableExpression(DrtAbstractVariableExpression, FunctionVariableExpression):
    pass
class DrtEventVariableExpression(DrtIndividualVariableExpression, EventVariableExpression):
    pass
class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression):
    pass
@python_2_unicode_compatible
class DrtProposition(DrtExpression, Expression):
    """A propositional condition ``p:DRS`` labelling an embedded DRS with a
    variable."""
    def __init__(self, variable, drs):
        # variable: the proposition label; drs: the embedded DRS
        self.variable = variable
        self.drs = drs
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        if self.variable == variable:
            # Only another variable may stand in for a proposition label.
            assert isinstance(expression, DrtAbstractVariableExpression), "Can only replace a proposition label with a variable"
            return DrtProposition(expression.variable, self.drs.replace(variable, expression, replace_bound, alpha_convert))
        else:
            return DrtProposition(self.variable, self.drs.replace(variable, expression, replace_bound, alpha_convert))
    def eliminate_equality(self):
        return DrtProposition(self.variable, self.drs.eliminate_equality())
    def get_refs(self, recursive=False):
        # The label itself is not a discourse referent.
        return (self.drs.get_refs(True) if recursive else [])
    def __eq__(self, other):
        return self.__class__ == other.__class__ and \
               self.variable == other.variable and \
               self.drs == other.drs
    def __ne__(self, other):
        return not self == other
    __hash__ = Expression.__hash__
    def fol(self):
        return self.drs.fol()
    def _pretty(self):
        # Place 'label:' on the second row, aligned against the DRS box.
        drs_s = self.drs._pretty()
        blank = ' ' * len("%s" % self.variable)
        return ([blank + ' ' + line for line in drs_s[:1]] +
                ["%s" % self.variable + ':' + line for line in drs_s[1:2]] +
                [blank + ' ' + line for line in drs_s[2:]])
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        return combinator([function(self.drs)])
    def visit_structured(self, function, combinator):
        """:see: Expression.visit_structured()"""
        return combinator(self.variable, function(self.drs))
    def __str__(self):
        return 'prop(%s, %s)' % (self.variable, self.drs)
class DrtNegatedExpression(DrtExpression, NegatedExpression):
    """DRT negation of a term (typically a DRS)."""
    def fol(self):
        return NegatedExpression(self.term.fol())
    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        # Delegates to the negated term, preserving the recursion flag.
        return self.term.get_refs(recursive)
    def _pretty(self):
        # Prefix the term's rendering with a negation symbol drawn across
        # its third and fourth rows.
        term_lines = self.term._pretty()
        return ([' ' + line for line in term_lines[:2]] +
                ['__ ' + line for line in term_lines[2:3]] +
                [' | ' + line for line in term_lines[3:4]] +
                [' ' + line for line in term_lines[4:]])
class DrtLambdaExpression(DrtExpression, LambdaExpression):
    """DRT lambda abstraction over a term."""
    def alpha_convert(self, newvar):
        """Rename all occurrences of the variable introduced by this variable
        binder in the expression to ``newvar``.
        :param newvar: ``Variable``, for the new variable
        """
        return self.__class__(newvar, self.term.replace(self.variable,
                                                        DrtVariableExpression(newvar), True))
    def fol(self):
        return LambdaExpression(self.variable, self.term.fol())
    def _pretty(self):
        # Collapse directly-nested lambdas into one '\x y z.' prefix.
        variables = [self.variable]
        term = self.term
        while term.__class__ == self.__class__:
            variables.append(term.variable)
            term = term.term
        var_string = ' '.join("%s" % v for v in variables) + DrtTokens.DOT
        term_lines = term._pretty()
        blank = ' ' * len(var_string)
        return ([' ' + blank + line for line in term_lines[:1]] +
                [' \ ' + blank + line for line in term_lines[1:2]] +
                [' /\ ' + var_string + line for line in term_lines[2:3]] +
                [' ' + blank + line for line in term_lines[3:]])
class DrtBinaryExpression(DrtExpression, BinaryExpression):
    """Base class for DRT binary operators."""
    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        return self.first.get_refs(True) + self.second.get_refs(True) if recursive else []
    def _pretty(self):
        return DrtBinaryExpression._assemble_pretty(self._pretty_subex(self.first), self.getOp(), self._pretty_subex(self.second))
    @staticmethod
    def _assemble_pretty(first_lines, op, second_lines):
        # Pad both operands to equal height, then join them row by row with
        # the operator and surrounding parens on the centre (third) row.
        max_lines = max(len(first_lines), len(second_lines))
        first_lines = _pad_vertically(first_lines, max_lines)
        second_lines = _pad_vertically(second_lines, max_lines)
        blank = ' ' * len(op)
        first_second_lines = list(zip(first_lines, second_lines))
        return ([' ' + first_line + ' ' + blank + ' ' + second_line + ' ' for first_line, second_line in first_second_lines[:2]] +
                ['(' + first_line + ' ' + op + ' ' + second_line + ')' for first_line, second_line in first_second_lines[2:3]] +
                [' ' + first_line + ' ' + blank + ' ' + second_line + ' ' for first_line, second_line in first_second_lines[3:]])
    def _pretty_subex(self, subex):
        return subex._pretty()
class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression):
    """Base class for DRT boolean connectives."""
    pass
class DrtOrExpression(DrtBooleanExpression, OrExpression):
    """DRT disjunction."""
    def fol(self):
        return OrExpression(self.first.fol(), self.second.fol())
    def _pretty_subex(self, subex):
        # Strip the parens of a nested disjunction so chains render flat.
        if isinstance(subex, DrtOrExpression):
            return [line[1:-1] for line in subex._pretty()]
        return DrtBooleanExpression._pretty_subex(self, subex)
class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression):
    """DRT equality condition, e.g. ``x = y``."""
    def fol(self):
        return EqualityExpression(self.first.fol(), self.second.fol())
@python_2_unicode_compatible
class DrtConcatenation(DrtBooleanExpression):
    """DRS of the form '(DRS + DRS)'"""
    def __init__(self, first, second, consequent=None):
        """
        :param first: ``DrtExpression``, the first conjunct
        :param second: ``DrtExpression``, the second conjunct
        :param consequent: optional ``DrtExpression``; when given, this
            concatenation is the antecedent of an implication
        """
        DrtBooleanExpression.__init__(self, first, second)
        self.consequent = consequent
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        first = self.first
        second = self.second
        consequent = self.consequent
        # If variable is bound
        if variable in self.get_refs():
            if replace_bound:
                first = first.replace(variable, expression, replace_bound, alpha_convert)
                second = second.replace(variable, expression, replace_bound, alpha_convert)
                if consequent:
                    consequent = consequent.replace(variable, expression, replace_bound, alpha_convert)
        else:
            if alpha_convert:
                # alpha convert every ref that is free in 'expression'
                for ref in (set(self.get_refs(True)) & expression.free()):
                    v = DrtVariableExpression(unique_variable(ref))
                    first = first.replace(ref, v, True, alpha_convert)
                    second = second.replace(ref, v, True, alpha_convert)
                    if consequent:
                        consequent = consequent.replace(ref, v, True, alpha_convert)
            first = first.replace(variable, expression, replace_bound, alpha_convert)
            second = second.replace(variable, expression, replace_bound, alpha_convert)
            if consequent:
                consequent = consequent.replace(variable, expression, replace_bound, alpha_convert)
        return self.__class__(first, second, consequent)
    def eliminate_equality(self):
        #TODO: at some point. for now, simplify.
        drs = self.simplify()
        assert not isinstance(drs, DrtConcatenation)
        return drs.eliminate_equality()
    def simplify(self):
        """Merge the two sub-DRSs into a single DRS when both simplify to
        DRSs, alpha-converting any referents they share."""
        first = self.first.simplify()
        second = self.second.simplify()
        consequent = (self.consequent.simplify() if self.consequent else None)
        if isinstance(first, DRS) and isinstance(second, DRS):
            # For any ref that is in both 'first' and 'second'
            for ref in (set(first.get_refs(True)) & set(second.get_refs(True))):
                # alpha convert the ref in 'second' to prevent collision
                newvar = DrtVariableExpression(unique_variable(ref))
                second = second.replace(ref, newvar, True)
            return DRS(first.refs + second.refs, first.conds + second.conds, consequent)
        else:
            return self.__class__(first, second, consequent)
    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        refs = self.first.get_refs(recursive) + self.second.get_refs(recursive)
        if self.consequent and recursive:
            refs.extend(self.consequent.get_refs(True))
        return refs
    def getOp(self):
        return DrtTokens.DRS_CONC
    def __eq__(self, other):
        r"""Defines equality modulo alphabetic variance.
        If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
        if isinstance(other, DrtConcatenation):
            self_refs = self.get_refs()
            other_refs = other.get_refs()
            if len(self_refs) == len(other_refs):
                converted_other = other
                # Rename other's referents to self's, pairwise, then compare.
                for (r1,r2) in zip(self_refs, other_refs):
                    varex = self.make_VariableExpression(r1)
                    converted_other = converted_other.replace(r2, varex, True)
                return self.first == converted_other.first and \
                       self.second == converted_other.second and \
                       self.consequent == converted_other.consequent
        return False
    def __ne__(self, other):
        return not self == other
    __hash__ = DrtBooleanExpression.__hash__
    def fol(self):
        # Concatenation translates to conjunction, wrapped in an
        # implication when a consequent is present.
        e = AndExpression(self.first.fol(), self.second.fol())
        if self.consequent:
            e = ImpExpression(e, self.consequent.fol())
        return e
    def _pretty(self):
        drs = DrtBinaryExpression._assemble_pretty(self._pretty_subex(self.first),
                                                   self.getOp(),
                                                   self._pretty_subex(self.second))
        if self.consequent:
            # Bug fix: _pretty() takes no arguments, so the previous
            # self._pretty(self.consequent) raised TypeError; the consequent
            # must render itself.
            drs = DrtBinaryExpression._assemble_pretty(drs, DrtTokens.IMP,
                                                       self.consequent._pretty())
        return drs
    def _pretty_subex(self, subex):
        # Strip the parens of a nested concatenation so chains render flat.
        if isinstance(subex, DrtConcatenation):
            return [line[1:-1] for line in subex._pretty()]
        return DrtBooleanExpression._pretty_subex(self, subex)
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        if self.consequent:
            return combinator([function(self.first), function(self.second), function(self.consequent)])
        else:
            return combinator([function(self.first), function(self.second)])
    def __str__(self):
        first = self._str_subex(self.first)
        second = self._str_subex(self.second)
        drs = Tokens.OPEN + first + ' ' + self.getOp() \
              + ' ' + second + Tokens.CLOSE
        if self.consequent:
            return DrtTokens.OPEN + drs + ' ' + DrtTokens.IMP + ' ' + \
                   "%s" % self.consequent + DrtTokens.CLOSE
        return drs
    def _str_subex(self, subex):
        s = "%s" % subex
        # Drop the outer parens of a nested concatenation without a consequent.
        if isinstance(subex, DrtConcatenation) and subex.consequent is None:
            return s[1:-1]
        return s
class DrtApplicationExpression(DrtExpression, ApplicationExpression):
    """DRT application, e.g. a predicate condition ``sees(x,y)``."""
    def fol(self):
        return ApplicationExpression(self.function.fol(), self.argument.fol())
    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        return (self.function.get_refs(True) + self.argument.get_refs(True)
                if recursive else [])
    def _pretty(self):
        # Render as 'f(a1,a2,...)': pad all parts to equal height and put
        # the parenthesised argument list on the centre (third) row.
        function, args = self.uncurry()
        function_lines = function._pretty()
        args_lines = [arg._pretty() for arg in args]
        max_lines = max(map(len, [function_lines] + args_lines))
        function_lines = _pad_vertically(function_lines, max_lines)
        args_lines = [_pad_vertically(arg_lines, max_lines) for arg_lines in args_lines]
        func_args_lines = list(zip(function_lines, list(zip(*args_lines))))
        return ([func_line + ' ' + ' '.join(args_line) + ' ' for func_line, args_line in func_args_lines[:2]] +
                [func_line + '(' + ','.join(args_line) + ')' for func_line, args_line in func_args_lines[2:3]] +
                [func_line + ' ' + ' '.join(args_line) + ' ' for func_line, args_line in func_args_lines[3:]])
def _pad_vertically(lines, max_lines):
pad_line = [' ' * len(lines[0])]
return lines + pad_line * (max_lines - len(lines))
@python_2_unicode_compatible
class PossibleAntecedents(list, DrtExpression, Expression):
    """A list of candidate antecedent expressions for an unresolved pronoun."""
    def free(self):
        """Set of free variables."""
        return set(self)
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        result = PossibleAntecedents()
        for item in self:
            if item == variable:
                # Bug fix: the original appended to ``self`` here (and in the
                # else branch), mutating the receiver and always returning an
                # empty ``result``.  Accumulate into ``result`` instead.
                result.append(expression)
            else:
                result.append(item)
        return result
    def _pretty(self):
        # Render on the centre row, padded so it lines up with box operands.
        s = "%s" % self
        blank = ' ' * len(s)
        return [blank, blank, s]
    def __str__(self):
        return '[' + ','.join("%s" % it for it in self) + ']'
class AnaphoraResolutionException(Exception):
    """Raised when a pronoun cannot be resolved to any antecedent."""
    pass
def resolve_anaphora(expression, trail=None):
    """
    Resolve each pronoun condition ``PRO(x)`` in *expression* to an equality
    with a candidate antecedent drawn from the DRSs above it.

    :param expression: the ``DrtExpression`` to resolve
    :param trail: list of ancestor expressions already visited; used
        internally during recursion, callers normally omit it
    :raise AnaphoraResolutionException: if a pronoun resolves to nothing
    """
    # Idiom fix: avoid the shared mutable default argument (was trail=[]).
    # The old default was never mutated, so behaviour is unchanged.
    if trail is None:
        trail = []
    if isinstance(expression, ApplicationExpression):
        if expression.is_pronoun_function():
            possible_antecedents = PossibleAntecedents()
            for ancestor in trail:
                for ref in ancestor.get_refs():
                    refex = expression.make_VariableExpression(ref)
                    #==========================================================
                    # Don't allow resolution to itself or other types
                    #==========================================================
                    if refex.__class__ == expression.argument.__class__ and \
                       not (refex == expression.argument):
                        possible_antecedents.append(refex)
            # A unique candidate resolves directly; otherwise keep the whole
            # candidate list so the caller can see the ambiguity.
            if len(possible_antecedents) == 1:
                resolution = possible_antecedents[0]
            else:
                resolution = possible_antecedents
            return expression.make_EqualityExpression(expression.argument, resolution)
        else:
            r_function = resolve_anaphora(expression.function, trail + [expression])
            r_argument = resolve_anaphora(expression.argument, trail + [expression])
            return expression.__class__(r_function, r_argument)
    elif isinstance(expression, DRS):
        r_conds = []
        for cond in expression.conds:
            r_cond = resolve_anaphora(cond, trail + [expression])
            # if the condition is of the form '(x = [])' then raise exception
            if isinstance(r_cond, EqualityExpression):
                if isinstance(r_cond.first, PossibleAntecedents):
                    #Reverse the order so that the variable is on the left
                    r_cond.first, r_cond.second = r_cond.second, r_cond.first
                if isinstance(r_cond.second, PossibleAntecedents):
                    if not r_cond.second:
                        raise AnaphoraResolutionException("Variable '%s' does not "
                                "resolve to anything." % r_cond.first)
            r_conds.append(r_cond)
        if expression.consequent:
            consequent = resolve_anaphora(expression.consequent, trail + [expression])
        else:
            consequent = None
        return expression.__class__(expression.refs, r_conds, consequent)
    elif isinstance(expression, AbstractVariableExpression):
        return expression
    elif isinstance(expression, NegatedExpression):
        return expression.__class__(resolve_anaphora(expression.term, trail + [expression]))
    elif isinstance(expression, DrtConcatenation):
        if expression.consequent:
            consequent = resolve_anaphora(expression.consequent, trail + [expression])
        else:
            consequent = None
        return expression.__class__(resolve_anaphora(expression.first, trail + [expression]),
                                    resolve_anaphora(expression.second, trail + [expression]),
                                    consequent)
    elif isinstance(expression, BinaryExpression):
        return expression.__class__(resolve_anaphora(expression.first, trail + [expression]),
                                    resolve_anaphora(expression.second, trail + [expression]))
    elif isinstance(expression, LambdaExpression):
        return expression.__class__(expression.variable, resolve_anaphora(expression.term, trail + [expression]))
class DrsDrawer(object):
    """Renders a ``DrtExpression`` as nested boxes on a Tkinter canvas."""
    BUFFER = 3 #Space between elements
    TOPSPACE = 10 #Space above whole DRS
    OUTERSPACE = 6 #Space to the left, right, and bottom of the whole DRS
    def __init__(self, drs, size_canvas=True, canvas=None):
        """
        :param drs: ``DrtExpression``, The DRS to be drawn
        :param size_canvas: bool, True if the canvas size should be the exact size of the DRS
        :param canvas: ``Canvas`` The canvas on which to draw the DRS. If none is given, create a new canvas.
        """
        master = None
        if not canvas:
            master = Tk()
            master.title("DRT")
            font = Font(family='helvetica', size=12)
            if size_canvas:
                canvas = Canvas(master, width=0, height=0)
                canvas.font = font
                self.canvas = canvas
                # Dry-run visit with a zero-sized canvas to measure the
                # drawing, then allocate a canvas exactly large enough.
                (right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE)
                width = max(right+self.OUTERSPACE, 100)
                height = bottom+self.OUTERSPACE
                canvas = Canvas(master, width=width, height=height)#, bg='white')
            else:
                canvas = Canvas(master, width=300, height=300)
            canvas.pack()
            canvas.font = font
        self.canvas = canvas
        self.drs = drs
        self.master = master
    def _get_text_height(self):
        """Get the height of a line of text"""
        return self.canvas.font.metrics("linespace")
    def draw(self, x=OUTERSPACE, y=TOPSPACE):
        """Draw the DRS"""
        self._handle(self.drs, self._draw_command, x, y)
        if self.master and not in_idle():
            self.master.mainloop()
        else:
            return self._visit(self.drs, x, y)
    def _visit(self, expression, x, y):
        """
        Return the bottom-rightmost point without actually drawing the item
        :param expression: the item to visit
        :param x: the top of the current drawing area
        :param y: the left side of the current drawing area
        :return: the bottom-rightmost point
        """
        return self._handle(expression, self._visit_command, x, y)
    def _draw_command(self, item, x, y):
        """
        Draw the given item at the given location
        :param item: the item to draw
        :param x: the top of the current drawing area
        :param y: the left side of the current drawing area
        :return: the bottom-rightmost point
        """
        if isinstance(item, string_types):
            self.canvas.create_text(x, y, anchor='nw', font=self.canvas.font, text=item)
        elif isinstance(item, tuple):
            # item is the lower-right of a box
            (right, bottom) = item
            self.canvas.create_rectangle(x, y, right, bottom)
            horiz_line_y = y + self._get_text_height() + (self.BUFFER * 2) #the line separating refs from conds
            self.canvas.create_line(x, horiz_line_y, right, horiz_line_y)
        return self._visit_command(item, x, y)
    def _visit_command(self, item, x, y):
        """
        Return the bottom-rightmost point without actually drawing the item
        :param item: the item to visit
        :param x: the top of the current drawing area
        :param y: the left side of the current drawing area
        :return: the bottom-rightmost point
        """
        if isinstance(item, string_types):
            return (x + self.canvas.font.measure(item), y + self._get_text_height())
        elif isinstance(item, tuple):
            return item
    def _handle(self, expression, command, x=0, y=0):
        """
        :param expression: the expression to handle
        :param command: the function to apply, either _draw_command or _visit_command
        :param x: the top of the current drawing area
        :param y: the left side of the current drawing area
        :return: the bottom-rightmost point
        """
        if command == self._visit_command:
            #if we don't need to draw the item, then we can use the cached values
            try:
                #attempt to retrieve cached values
                right = expression._drawing_width + x
                bottom = expression._drawing_height + y
                return (right, bottom)
            except AttributeError:
                #the values have not been cached yet, so compute them
                pass
        if isinstance(expression, DrtAbstractVariableExpression):
            factory = self._handle_VariableExpression
        elif isinstance(expression, DRS):
            factory = self._handle_DRS
        elif isinstance(expression, DrtNegatedExpression):
            factory = self._handle_NegatedExpression
        elif isinstance(expression, DrtLambdaExpression):
            factory = self._handle_LambdaExpression
        elif isinstance(expression, BinaryExpression):
            factory = self._handle_BinaryExpression
        elif isinstance(expression, DrtApplicationExpression):
            factory = self._handle_ApplicationExpression
        elif isinstance(expression, PossibleAntecedents):
            factory = self._handle_VariableExpression
        elif isinstance(expression, DrtProposition):
            factory = self._handle_DrtProposition
        else:
            raise Exception(expression.__class__.__name__)
        (right, bottom) = factory(expression, command, x, y)
        #cache the values
        expression._drawing_width = right - x
        expression._drawing_height = bottom - y
        return (right, bottom)
    def _handle_VariableExpression(self, expression, command, x, y):
        return command("%s" % expression, x, y)
    def _handle_NegatedExpression(self, expression, command, x, y):
        # Find the width of the negation symbol
        right = self._visit_command(DrtTokens.NOT, x, y)[0]
        # Handle term
        (right, bottom) = self._handle(expression.term, command, right, y)
        # Handle variables now that we know the y-coordinate
        command(DrtTokens.NOT, x, self._get_centered_top(y, bottom - y, self._get_text_height()))
        return (right, bottom)
    def _handle_DRS(self, expression, command, x, y):
        left = x + self.BUFFER #indent the left side
        bottom = y + self.BUFFER #indent the top
        # Handle Discourse Referents
        if expression.refs:
            refs = ' '.join("%s"%r for r in expression.refs)
        else:
            refs = ' '
        (max_right, bottom) = command(refs, left, bottom)
        bottom += (self.BUFFER * 2)
        # Handle Conditions
        if expression.conds:
            for cond in expression.conds:
                (right, bottom) = self._handle(cond, command, left, bottom)
                max_right = max(max_right, right)
                bottom += self.BUFFER
        else:
            bottom += self._get_text_height() + self.BUFFER
        # Handle Box
        max_right += self.BUFFER
        return command((max_right, bottom), x, y)
    def _handle_ApplicationExpression(self, expression, command, x, y):
        function, args = expression.uncurry()
        if not isinstance(function, DrtAbstractVariableExpression):
            #It's not a predicate expression ("P(x,y)"), so leave arguments curried
            function = expression.function
            args = [expression.argument]
        # Get the max bottom of any element on the line
        function_bottom = self._visit(function, x, y)[1]
        max_bottom = max([function_bottom] + [self._visit(arg, x, y)[1] for arg in args])
        line_height = max_bottom - y
        # Handle 'function'
        function_drawing_top = self._get_centered_top(y, line_height, function._drawing_height)
        right = self._handle(function, command, x, function_drawing_top)[0]
        # Handle open paren
        centred_string_top = self._get_centered_top(y, line_height, self._get_text_height())
        right = command(DrtTokens.OPEN, right, centred_string_top)[0]
        # Handle each arg
        for (i,arg) in enumerate(args):
            arg_drawing_top = self._get_centered_top(y, line_height, arg._drawing_height)
            right = self._handle(arg, command, right, arg_drawing_top)[0]
            if i+1 < len(args):
                #since it's not the last arg, add a comma
                right = command(DrtTokens.COMMA + ' ', right, centred_string_top)[0]
        # Handle close paren
        right = command(DrtTokens.CLOSE, right, centred_string_top)[0]
        return (right, max_bottom)
    def _handle_LambdaExpression(self, expression, command, x, y):
        # Find the width of the lambda symbol and abstracted variables
        variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT
        right = self._visit_command(variables, x, y)[0]
        # Handle term
        (right, bottom) = self._handle(expression.term, command, right, y)
        # Handle variables now that we know the y-coordinate
        command(variables, x, self._get_centered_top(y, bottom - y, self._get_text_height()))
        return (right, bottom)
    def _handle_BinaryExpression(self, expression, command, x, y):
        # Get the full height of the line, based on the operands
        first_height = self._visit(expression.first, 0, 0)[1]
        second_height = self._visit(expression.second, 0, 0)[1]
        line_height = max(first_height, second_height)
        # Handle open paren
        centred_string_top = self._get_centered_top(y, line_height, self._get_text_height())
        right = command(DrtTokens.OPEN, x, centred_string_top)[0]
        # Handle the first operand
        first_height = expression.first._drawing_height
        (right, first_bottom) = self._handle(expression.first, command, right, self._get_centered_top(y, line_height, first_height))
        # Handle the operator
        right = command(' %s ' % expression.getOp(), right, centred_string_top)[0]
        # Handle the second operand
        second_height = expression.second._drawing_height
        (right, second_bottom) = self._handle(expression.second, command, right, self._get_centered_top(y, line_height, second_height))
        # Handle close paren
        right = command(DrtTokens.CLOSE, right, centred_string_top)[0]
        return (right, max(first_bottom, second_bottom))
    def _handle_DrtProposition(self, expression, command, x, y):
        # Draw the proposition label.  Bug fix: the label must be passed as
        # a string -- _draw_command/_visit_command only handle strings and
        # tuples, so passing the raw Variable returned None and the [0]
        # subscript raised TypeError.
        right = command("%s" % expression.variable, x, y)[0]
        # Handle the embedded DRS.  Bug fix: DrtProposition stores its body
        # in ``drs`` (there is no ``term`` attribute).
        (right, bottom) = self._handle(expression.drs, command, right, y)
        return (right, bottom)
    def _get_centered_top(self, top, full_height, item_height):
        """Get the y-coordinate of the point that a figure should start at if
        its height is 'item_height' and it needs to be centered in an area that
        starts at 'top' and is 'full_height' tall."""
        return top + (full_height - item_height) / 2
def demo():
    """Demonstrate parsing, FOL conversion, alpha conversion, anaphora
    resolution and pretty-printing of DRT expressions."""
    print('='*20 + 'TEST PARSE' + '='*20)
    dexpr = DrtExpression.fromstring
    print(dexpr(r'([x,y],[sees(x,y)])'))
    print(dexpr(r'([x],[man(x), walks(x)])'))
    print(dexpr(r'\x.\y.([],[sees(x,y)])'))
    print(dexpr(r'\x.([],[walks(x)])(john)'))
    print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))'))
    print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))'))
    print(dexpr(r'([x],[PRO(x), sees(John,x)])'))
    print(dexpr(r'([x],[man(x), -([],[walks(x)])])'))
    print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'))
    print('='*20 + 'Test fol()' + '='*20)
    print(dexpr(r'([x,y],[sees(x,y)])').fol())
    print('='*20 + 'Test alpha conversion and lambda expression equality' + '='*20)
    e1 = dexpr(r'\x.([],[P(x)])')
    print(e1)
    e2 = e1.alpha_convert(Variable('z'))
    print(e2)
    print(e1 == e2)
    print('='*20 + 'Test resolve_anaphora()' + '='*20)
    print(resolve_anaphora(dexpr(r'([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])')))
    print(resolve_anaphora(dexpr(r'([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])')))
    print(resolve_anaphora(dexpr(r'(([x,y],[]) + ([],[PRO(x)]))')))
    print('='*20 + 'Test pprint()' + '='*20)
    # NOTE(review): ``pprint`` is not defined anywhere in this file (only
    # ``pretty_print``/``pretty_format`` are).  Confirm the base Expression
    # class provides pprint, otherwise these calls should be pretty_print().
    dexpr(r"([],[])").pprint()
    dexpr(r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])").pprint()
    dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pprint()
    dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pprint()
    dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pprint()
def test_draw():
    """Render a selection of DRT expressions in Tk windows, one at a time.
    Skipped (via nose SkipTest) when tkinter is unavailable."""
    try:
        from tkinter import Tk
    except ImportError:
        from nose import SkipTest
        raise SkipTest("tkinter is required, but it's not available.")
    expressions = [
        r'x',
        r'([],[])',
        r'([x],[])',
        r'([x],[man(x)])',
        r'([x,y],[sees(x,y)])',
        r'([x],[man(x), walks(x)])',
        r'\x.([],[man(x), walks(x)])',
        r'\x y.([],[sees(x,y)])',
        r'([],[(([],[walks(x)]) + ([],[runs(x)]))])',
        r'([x],[man(x), -([],[walks(x)])])',
        r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'
    ]
    for e in expressions:
        d = DrtExpression.fromstring(e)
        d.draw()
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    demo()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.