repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
h2educ/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier whose scores improve with more training data.

    The training score decays linearly from 2 towards 1 while the test
    score climbs linearly from 0 towards 1 as the fitted subset grows.
    """

    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        """Remember the subset and its size; no real learning happens."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        fraction = float(self.train_sizes) / self.n_max_train_sizes
        return 2. - fraction if self._is_training_data(X) else fraction

    def _is_training_data(self, X):
        # Identity check: the exact array object passed to fit().
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that additionally supports ``partial_fit``."""

    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        self.x = None

    def partial_fit(self, X, y=None, **params):
        """Accumulate the batch size and remember one sample of the batch."""
        self.train_sizes += X.shape[0]
        self.x = X[0]

    def _is_training_data(self, X):
        # The remembered sample identifies the training set by membership.
        return self.x in X
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier whose scores are fully determined by ``param``."""

    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        """Record the training subset and its size."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        """Return ``param`` on training data and ``1 - param`` otherwise."""
        if self._is_training_data(X):
            return self.param
        return 1 - self.param

    def _is_training_data(self, X):
        return X is self.X_subset
def test_learning_curve():
    # Batch learning curve on the improving mock: the averaged scores must
    # follow the mock's linear schedule exactly and no warning may be raised.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
        if len(w) > 0:
            raise RuntimeError("Unexpected warning: %r" % w[0].message)
    # 10 requested train sizes x 3 CV folds.
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    # Mock schedule: train score 2 -> 1, test score 0 -> 1 as size grows.
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
    # Same expectations as test_learning_curve, but with y=None to cover
    # the unsupervised code path.
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    # verbose=1 must emit progress lines tagged "[learning_curve]" to stdout.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Temporarily capture stdout; restored in the finally block so a failing
    # call cannot leave stdout redirected for later tests.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    """learning_curve must refuse incremental mode without partial_fit."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # MockImprovingEstimator does not implement partial_fit(), so asking
    # for exploit_incremental_learning has to raise a ValueError.
    assert_raises(ValueError, learning_curve, MockImprovingEstimator(1),
                  X, y, exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    # The partial_fit-based path must reproduce the same linear schedule
    # as the batch path on the incremental mock.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    # Incremental path with y=None: unsupervised + partial_fit combination.
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    # With a real estimator supporting partial_fit (and deterministic
    # settings: one iteration, no shuffling) the incremental and batch
    # code paths must produce identical curves.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)
    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    """Every train_sizes specification outside the valid range must raise."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Fractions must lie in (0, 1]; absolute counts in [1, n_max_train].
    bad_ranges = ([0, 1], [0.0, 1.0], [0.1, 1.1], [0, 20], [1, 21])
    for train_sizes in bad_ranges:
        assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                      train_sizes=train_sizes)
def test_learning_curve_remove_duplicate_sample_sizes():
    # Three requested fractions collapse to the integer sizes [1, 2];
    # the duplicate removal must be reported with a RuntimeWarning.
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    # Passing an explicit KFold CV object (instead of an int) must yield
    # exactly the same curve as the default integer cv.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n=30, n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    # Sweeping "param" over [0, 1]: the mock scores `param` on training
    # data and `1 - param` on validation data, so the averaged curves must
    # equal the parameter range and its complement. No warnings allowed.
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
        if len(w) > 0:
            raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
ttm/oscEmRede | venv/lib/python2.7/site-packages/networkx/readwrite/adjlist.py | 28 | 8490 | # -*- coding: utf-8 -*-
"""
**************
Adjacency List
**************
Read and write NetworkX graphs as adjacency lists.
Adjacency list format is useful for graphs without data associated
with nodes or edges and for nodes that can be meaningfully represented
as strings.
Format
------
The adjacency list format consists of lines with node labels. The
first label in a line is the source node. Further labels in the line
are considered target nodes and are added to the graph along with an edge
between the source node and target node.
The graph with edges a-b, a-c, d-e can be represented as the following
adjacency list (anything following the # in a line is a comment)::
a b c # source target target
d e
"""
__author__ = '\n'.join(['Aric Hagberg <hagberg@lanl.gov>',
'Dan Schult <dschult@colgate.edu>',
'Loïc Séguin-C. <loicseguin@gmail.com>'])
# Copyright (C) 2004-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['generate_adjlist',
'write_adjlist',
'parse_adjlist',
'read_adjlist']
from networkx.utils import make_str, open_file
import networkx as nx
def generate_adjlist(G, delimiter=' '):
    """Generate a single line of the graph G in adjacency list format.

    Parameters
    ----------
    G : NetworkX graph

    delimiter : string, optional
       Separator for node labels

    Returns
    -------
    lines : string
        Lines of data in adjlist format.

    Examples
    --------
    >>> G = nx.lollipop_graph(4, 3)
    >>> for line in nx.generate_adjlist(G):
    ...     print(line)
    0 1 2 3
    1 2 3
    2 3
    3 4
    4 5
    5 6
    6

    See Also
    --------
    write_adjlist, read_adjlist
    """
    directed = G.is_directed()
    multigraph = G.is_multigraph()
    seen = set()
    for source, nbrdict in G.adjacency_iter():
        parts = [make_str(source)]
        for target, edgedata in nbrdict.items():
            # In the undirected case each edge is emitted only once, on the
            # line of whichever endpoint is visited first.
            if not directed and target in seen:
                continue
            if multigraph:
                # One entry per parallel edge.
                parts.extend(make_str(target) for _ in edgedata.values())
            else:
                parts.append(make_str(target))
        if not directed:
            seen.add(source)
        yield delimiter.join(parts)
@open_file(1,mode='wb')
def write_adjlist(G, path, comments="#", delimiter=' ', encoding = 'utf-8'):
    """Write graph G in single-line adjacency-list format to path.

    Parameters
    ----------
    G : NetworkX graph

    path : string or file
       Filename or file handle for data output.
       Filenames ending in .gz or .bz2 will be compressed.

    comments : string, optional
       Marker for comment lines

    delimiter : string, optional
       Separator for node labels

    encoding : string, optional
       Text encoding.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_adjlist(G,"test.adjlist")

    The path can be a filehandle or a string with the name of the file. If a
    filehandle is provided, it has to be opened in 'wb' mode.

    >>> fh=open("test.adjlist",'wb')
    >>> nx.write_adjlist(G, fh)

    Notes
    -----
    This format does not store graph, node, or edge data.

    See Also
    --------
    read_adjlist, generate_adjlist
    """
    # Imported locally to keep the module namespace clean; only the header
    # below needs them.
    import sys
    import time
    # Header records the generating command line, the UTC timestamp and the
    # graph name as comment lines.
    pargs=comments + " ".join(sys.argv) + '\n'
    header = (pargs
              + comments + " GMT %s\n" % (time.asctime(time.gmtime()))
              + comments + " %s\n" % (G.name))
    # `path` is guaranteed to be a binary-mode file handle by @open_file,
    # hence the explicit encode() calls.
    path.write(header.encode(encoding))
    for line in generate_adjlist(G, delimiter):
        line+='\n'
        path.write(line.encode(encoding))
def parse_adjlist(lines, comments = '#', delimiter = None,
                  create_using = None, nodetype = None):
    """Parse lines of a graph adjacency list representation.

    Parameters
    ----------
    lines : list or iterator of strings
        Input data in adjlist format

    create_using: NetworkX graph container
       Use given NetworkX graph for holding nodes or edges.

    nodetype : Python type, optional
       Convert nodes to this type.

    comments : string, optional
       Marker for comment lines

    delimiter : string, optional
       Separator for node labels.  The default is whitespace.

    Returns
    -------
    G: NetworkX graph
        The graph corresponding to the lines in adjacency list format.

    Examples
    --------
    >>> lines = ['1 2 5',
    ...          '2 3 4',
    ...          '3 5',
    ...          '4',
    ...          '5']
    >>> G = nx.parse_adjlist(lines, nodetype = int)
    >>> G.nodes()
    [1, 2, 3, 4, 5]
    >>> G.edges()
    [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)]

    See Also
    --------
    read_adjlist
    """
    if create_using is None:
        G=nx.Graph()
    else:
        try:
            G=create_using
            G.clear()
        except AttributeError:
            # Narrowed from a bare except: only a container without clear()
            # should be reported as "not a graph".
            raise TypeError("Input graph is not a NetworkX graph type")
    for line in lines:
        # Strip trailing comments and skip blank lines.
        p=line.find(comments)
        if p>=0:
            line = line[:p]
        if not len(line):
            continue
        vlist=line.strip().split(delimiter)
        u=vlist.pop(0)
        # convert types
        if nodetype is not None:
            try:
                u=nodetype(u)
            except (TypeError, ValueError):
                raise TypeError("Failed to convert node (%s) to type %s"\
                                %(u,nodetype))
        G.add_node(u)
        if nodetype is not None:
            try:
                # A list comprehension (not a lazy map) so that conversion
                # errors surface here and are reported as TypeError, instead
                # of leaking out of add_edges_from() on Python 3.
                vlist=[nodetype(v) for v in vlist]
            except (TypeError, ValueError):
                raise TypeError("Failed to convert nodes (%s) to type %s"\
                                %(','.join(vlist),nodetype))
        G.add_edges_from([(u, v) for v in vlist])
    return G
@open_file(0,mode='rb')
def read_adjlist(path, comments="#", delimiter=None, create_using=None,
                 nodetype=None, encoding = 'utf-8'):
    """Read graph in adjacency list format from path.

    Parameters
    ----------
    path : string or file
       Filename or file handle to read.
       Filenames ending in .gz or .bz2 will be uncompressed.

    create_using: NetworkX graph container
       Use given NetworkX graph for holding nodes or edges.

    nodetype : Python type, optional
       Convert nodes to this type.

    comments : string, optional
       Marker for comment lines

    delimiter : string, optional
       Separator for node labels.  The default is whitespace.

    Returns
    -------
    G: NetworkX graph
        The graph corresponding to the lines in adjacency list format.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_adjlist(G, "test.adjlist")
    >>> G=nx.read_adjlist("test.adjlist")

    The path can be a filehandle or a string with the name of the file. If a
    filehandle is provided, it has to be opened in 'rb' mode.

    >>> fh=open("test.adjlist", 'rb')
    >>> G=nx.read_adjlist(fh)

    Filenames ending in .gz or .bz2 will be compressed.

    >>> nx.write_adjlist(G,"test.adjlist.gz")
    >>> G=nx.read_adjlist("test.adjlist.gz")

    The optional nodetype is a function to convert node strings to nodetype.

    For example

    >>> G=nx.read_adjlist("test.adjlist", nodetype=int)

    will attempt to convert all nodes to integer type.

    Since nodes must be hashable, the function nodetype must return hashable
    types (e.g. int, float, str, frozenset - or tuples of those, etc.)

    The optional create_using parameter is a NetworkX graph container.
    The default is Graph(), an undirected graph.  To read the data as
    a directed graph use

    >>> G=nx.read_adjlist("test.adjlist", create_using=nx.DiGraph())

    Notes
    -----
    This format does not store graph or node data.

    See Also
    --------
    write_adjlist
    """
    # @open_file hands us a binary handle; decode lazily, line by line,
    # and delegate all actual parsing to parse_adjlist.
    lines = (line.decode(encoding) for line in path)
    return parse_adjlist(lines,
                         comments = comments,
                         delimiter = delimiter,
                         create_using = create_using,
                         nodetype = nodetype)
# fixture for nose tests
def teardown_module(module):
import os
os.unlink('test.adjlist')
os.unlink('test.adjlist.gz')
| gpl-3.0 |
xin3liang/platform_external_chromium_org | tools/site_compare/commands/scrape.py | 189 | 1832 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command for scraping images from a URL or list of URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
  """Inserts the command and arguments into a command line for parsing.

  Args:
    cmdline: the command_line parser to register the "scrape" command with.
  """
  cmd = cmdline.AddCommand(
    ["scrape"],
    "Scrapes an image from a URL or series of URLs.",
    None,
    ExecuteScrape)
  # Shared flags for iterating over browsers/URLs (see browser_iterate).
  browser_iterate.SetupIterationCommandLine(cmd)
  cmd.AddArgument(
    ["-log", "--logfile"], "File to write text output", type="string")
  cmd.AddArgument(
    ["-out", "--outdir"], "Directory to store scrapes", type="string", required=True)
def ExecuteScrape(command):
  """Executes the Scrape command.

  Args:
    command: parsed command line carrying --outdir and the optional --logfile.
  """

  def ScrapeResult(url, proc, wnd, result):
    """Capture and save the scrape for a single URL."""
    if log_file:
      log_file.write(result)
    # Scrape the page's window and save it as a .bmp named after the URL.
    image = windowing.ScrapeWindow(wnd)
    filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
    image.save(filename)

  log_file = open(command["--logfile"], "w") if command["--logfile"] else None
  try:
    browser_iterate.Iterate(command, ScrapeResult)
  finally:
    # Always close the log file, even if iteration fails part-way; the
    # original only closed it on the success path.
    if log_file:
      log_file.close()
| bsd-3-clause |
suitai/MyTweetApp | bin/tweet-download-media.py | 1 | 1697 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import getopt
import cuitweet
import download
### Functions
def check_optlist(optlist):
    """Translate getopt short flags into a dict keyed by descriptive names."""
    flag_names = {
        "-c": "count",
        "-s": "since_id",
        "-m": "max_id",
        "-o": "out_dir",
        "-k": "key_file"
    }
    option = {}
    for flag, value in optlist:
        if flag in flag_names:
            option[flag_names[flag]] = value
        else:
            assert False, "unhandled option"
    return option
def tweet_get_favorite_media(args, optlist):
opt = check_optlist(optlist)
out_dir = opt['out_dir'] if opt.has_key('out_dir') else "./"
key_file = opt['key_file'] if opt.has_key('key_file') else ("tweet_keys.yaml")
Tw = cuitweet.Tweet(key_file)
tweets = Tw.get_favorite(opt)
num = len(tweets)
first_id = tweets[0][u'id']
last_id = tweets[-1][u'id']
for tw in tweets:
print "<https://twitter.com/%s/status/%d>" % (tw[u'user'][u'screen_name'], tw[u'id'])
if "media" in tw[u'entities']:
for media in tw[u'entities'][u'media']:
try:
download.download(media[u'media_url'], out_dir)
except download.DownloadError as detail:
print detail
print ""
return num, first_id, last_id
def main():
    """Entry point: parse command-line flags, then fetch favourite media."""
    argv = sys.argv
    try:
        optlist, positional = getopt.getopt(argv[1:], 'c:k:m:o:s:')
    except getopt.GetoptError as err:
        sys.exit("GetoptError: %s" % err)
    # Keep the script name as positional argument 0, mirroring sys.argv.
    positional.insert(0, argv[0])
    tweet_get_favorite_media(positional, optlist)
### Execute
# Script entry point.
if __name__ == "__main__":
    main()
| mit |
PeterDing/iScript | yunpan.360.cn.py | 19 | 12433 | #!/usr/bin/env python2
# vim: set fileencoding=utf8
import os
import sys
from getpass import getpass
import requests
import urllib
import json
import re
import time
import argparse
import random
import md5
############################################################
# wget exit status
# NOTE: the codes are shifted left by 8 because os.system() returns the
# raw POSIX wait status (exit code in the high byte) on this platform.
wget_es = {
    0: "No problems occurred.",
    2: "User interference.",
    1<<8: "Generic error code.",
    2<<8: "Parse error - for instance, when parsing command-line " \
        "optio.wgetrc or .netrc...",
    3<<8: "File I/O error.",
    4<<8: "Network failure.",
    5<<8: "SSL verification failure.",
    6<<8: "Username/password authentication failure.",
    7<<8: "Protocol errors.",
    8<<8: "Server issued an error response."
}
############################################################

s = '\x1b[%d;%dm%s\x1b[0m'  # terminual color template

# Persisted session cookies for yunpan.360.cn live in the user's home dir.
cookie_file = os.path.join(os.path.expanduser('~'), '.360.cookies')

# Browser-like default headers sent with every request of the session.
headers = {
    "Accept":"text/html,application/xhtml+xml,application/xml; " \
        "q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding":"text/html",
    "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2",
    "Content-Type":"application/x-www-form-urlencoded",
    "Referer":"http://yunpan.360.cn/",
    "X-Requested-With":"XMLHttpRequest",
    "User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 "\
        "(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
}

# Single shared HTTP session used by the whole script.
ss = requests.session()
ss.headers.update(headers)
class yunpan360(object):
    """Client for the 360 cloud drive (yunpan.360.cn) web API.

    Wraps login, cookie persistence, directory listing and downloading
    through the module-level ``requests`` session ``ss``.  Download options
    come from the module-level ``args`` set by ``main()``.
    """

    def init(self):
        # Restore a previously saved session from ``cookie_file`` and verify
        # it is still accepted by the server; exit with a hint otherwise.
        if os.path.exists(cookie_file):
            try:
                t = json.loads(open(cookie_file).read())
                ss.cookies.update(t.get('cookies', t))
                if not self.check_login():
                    print s % (1, 91, ' !! cookie is invalid, please login\n')
                    sys.exit(1)
            except:
                # Unreadable/corrupt cookie file: truncate it and ask the
                # user to log in again.
                g = open(cookie_file, 'w')
                g.close()
                print s % (1, 97, ' please login')
                sys.exit(1)
        else:
            print s % (1, 91, ' !! cookie_file is missing, please login')
            sys.exit(1)

    def get_path(self, url):
        # The share URL carries the remote path URL-encoded after a '#'.
        # Returns '/' (drive root) when no fragment is present.
        url = urllib.unquote_plus(url)
        f = re.search(r'#(.+?)(&|$)', url)
        if f:
            return f.group(1)
        else:
            return '/'

    def check_login(self):
        # Probe a logged-in-only URL; on success the redirect target reveals
        # the per-user API host, which is cached on self.apihost.
        #print s % (1, 97, '\n -- check_login')
        url = 'http://yunpan.360.cn/user/login?st=774'
        r = ss.get(url)
        self.save_cookies()
        if r.ok:
            #print s % (1, 92, ' -- check_login success\n')
            # get apihost
            self.apihost = re.search(r'http://(.+?)/', r.url).group(1).encode('utf8')
            self.save_cookies()
            return True
        else:
            print s % (1, 91, ' -- check_login fail\n')
            return False

    def login(self, username, password):
        """Authenticate against login.360.cn and persist the session cookies."""
        print s % (1, 97, '\n -- login')
        # get token
        params = {
            "o": "sso",
            "m": "getToken",
            "func": "QHPass.loginUtils.tokenCallback",
            "userName": username,
            "rand": random.random()
        }
        url = 'https://login.360.cn'
        r = ss.get(url, params=params)
        token = re.search(r'token":"(.+?)"', r.content).group(1)
        # now log in; the password is sent as its MD5 digest, as the web
        # client does.
        params = {
            "o": "sso",
            "m": "login",
            "requestScema": "http",
            "from": "pcw_cloud",
            "rtype": "data",
            "func": "QHPass.loginUtils.loginCallback",
            "userName": username,
            "pwdmethod": 1,
            "isKeepAlive": 0,
            "token": token,
            "captFlag": 1,
            "captId": "i360",
            "captCode": "",
            "lm": 0,
            "validatelm": 0,
            "password": md5.new(password).hexdigest(),
            "r": int(time.time()*1000)
        }
        url = 'https://login.360.cn'
        ss.get(url, params=params)
        self.save_cookies()

    def save_cookies(self):
        # Snapshot the session cookies to disk so later runs can reuse them.
        with open(cookie_file, 'w') as g:
            c = {'cookies': ss.cookies.get_dict()}
            g.write(json.dumps(c, indent=4, sort_keys=True))

    def get_dlink(self, i):
        """Ask the API for the direct download URL of one file entry ``i``.

        Returns None when the server reports an error (errno != 0).
        """
        data = 'nid=%s&fname=%s&' % (i['nid'].encode('utf8'), \
            urllib.quote_plus(i['path'].encode('utf8')))
        apiurl = 'http://%s/file/download' % self.apihost
        r = ss.post(apiurl, data=data)
        j = r.json()
        if j['errno'] == 0:
            dlink = j['data']['download_url'].encode('utf8')
            return dlink

    def fix_json(self, ori):
        # 360 returns data that is not actually valid JSON, so the entries
        # are picked apart with regular expressions instead of json.loads.
        jdata = re.search(r'data:\s*\[.+?\]', ori).group()
        jlist = re.split(r'\}\s*,\s*\{', jdata)
        jlist = [l for l in jlist if l.strip()]
        j = []
        for item in jlist:
            nid = re.search(r',nid: \'(\d+)\'', item)
            path = re.search(r',path: \'(.+?)\',nid', item)
            name = re.search(r'oriName: \'(.+?)\',path', item)
            isdir = 'isDir: ' in item
            if nid:
                t = {
                    'nid': nid.group(1),
                    # Un-escape single quotes embedded in names/paths.
                    'path': path.group(1).replace("\\'", "'"),
                    'name': name.group(1).replace("\\'", "'"),
                    'isdir': 1 if isdir else 0
                }
                j.append(t)
        return j

    def get_infos(self):
        """Walk self.path recursively, downloading every matching file."""
        apiurl = 'http://%s/file/list' % self.apihost
        data = "type" + "=2" + "&" \
            "t" + "=%s" % random.random() + "&" \
            "order" + "=asc" + "&" \
            "field" + "=file_name" + "&" \
            "path" + "=%s" + "&" \
            "page" + "=0" + "&" \
            "page_size" + "=10000" + "&" \
            "ajax" + "=1"
        # Breadth-first traversal: sub-directories are appended to dir_loop
        # while it is being iterated.
        dir_loop = [self.path]
        # base_dir is stripped from remote paths to build local file paths.
        base_dir = os.path.split(self.path[:-1])[0] if self.path[-1] == '/' \
            and self.path != '/' else os.path.split(self.path)[0]
        for d in dir_loop:
            # NOTE(review): `data` is overwritten here, consuming its '%s'
            # placeholder — a second loop iteration (i.e. any sub-directory)
            # would interpolate into an already-formatted string. Verify.
            data = data % urllib.quote_plus(d)
            r = ss.post(apiurl, data=data)
            j = self.fix_json(r.text.strip())
            if j:
                if args.type_:
                    # Keep directories plus files whose suffix matches -t.
                    j = [x for x in j if x['isdir'] \
                        or x['name'][-len(args.type_):] \
                        == unicode(args.type_)]
                total_file = len([i for i in j if not i['isdir']])
                if args.from_ - 1:
                    # Skip the first (from_ - 1) entries, per the -f flag.
                    j = j[args.from_-1:] if args.from_ else j
                nn = args.from_
                for i in j:
                    if i['isdir']:
                        dir_loop.append(i['path'].encode('utf8'))
                    else:
                        # Mirror the remote layout below the current
                        # working directory.
                        t = i['path'].encode('utf8')
                        t = t.replace(base_dir, '')
                        t = t[1:] if t[0] == '/' else t
                        t = os.path.join(os.getcwd(), t)
                        infos = {
                            'file': t,
                            'dir_': os.path.split(t)[0],
                            'dlink': self.get_dlink(i),
                            'name': i['name'].encode('utf8'),
                            'apihost': self.apihost,
                            'nn': nn,
                            'total_file': total_file
                        }
                        nn += 1
                        self.download(infos)
            else:
                print s % (1, 91, ' error: get_infos')
                sys.exit(0)

    @staticmethod
    def download(infos):
        """Fetch one file with wget or aria2c (per the -a flag).

        NOTE: 360 does not support resuming interrupted downloads, so the
        transfer always restarts from scratch.
        """
        ## make dirs
        if not os.path.exists(infos['dir_']):
            os.makedirs(infos['dir_'])
        else:
            # Already fully downloaded (the .tmp suffix is only removed on
            # success) -- skip.
            if os.path.exists(infos['file']):
                return 0
        # Random terminal colour for this file's progress line.
        num = random.randint(0, 7) % 8
        col = s % (2, num + 90, infos['file'])
        infos['nn'] = infos['nn'] if infos.get('nn') else 1
        infos['total_file'] = infos['total_file'] if infos.get('total_file') else 1
        print '\n ++ 正在下载: #', s % (1, 97, infos['nn']), '/', s % (1, 97, infos['total_file']), '#', col
        # The session cookies must be forwarded to the external downloader.
        cookie = '; '.join(['%s=%s' % (x, y) for x, y in ss.cookies.items()]).encode('utf8')
        if args.aria2c:
            if args.limit:
                cmd = 'aria2c -c -s10 -x10 ' \
                    '--max-download-limit %s ' \
                    '-o "%s.tmp" -d "%s" ' \
                    '--user-agent "%s" ' \
                    '--header "Cookie:%s" ' \
                    '--header "Referer:http://%s/" "%s"' \
                    % (args.limit, infos['name'], infos['dir_'],\
                       headers['User-Agent'], cookie, infos['apihost'], infos['dlink'])
            else:
                cmd = 'aria2c -c -s10 -x10 ' \
                    '-o "%s.tmp" -d "%s" --user-agent "%s" ' \
                    '--header "Cookie:%s" ' \
                    '--header "Referer:http://%s/" "%s"' \
                    % (infos['name'], infos['dir_'], headers['User-Agent'], \
                       cookie, infos['apihost'], infos['dlink'])
        else:
            if args.limit:
                cmd = 'wget -c --limit-rate %s ' \
                    '-O "%s.tmp" --user-agent "%s" ' \
                    '--header "Cookie:%s" ' \
                    '--header "Referer:http://%s/" "%s"' \
                    % (args.limit, infos['file'], headers['User-Agent'], \
                       cookie, infos['apihost'], infos['dlink'])
            else:
                cmd = 'wget -c -O "%s.tmp" --user-agent "%s" ' \
                    '--header "Cookie:%s" ' \
                    '--header "Referer:http://%s/" "%s"' \
                    % (infos['file'], headers['User-Agent'], \
                       cookie, infos['apihost'], infos['dlink'])
        status = os.system(cmd)
        if status != 0: # other http-errors, such as 302.
            # Translate the raw wait status via the wget_es table and abort.
            wget_exit_status_info = wget_es[status]
            print('\n\n ---### \x1b[1;91mERROR\x1b[0m ==> '\
                '\x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' \
                % (status, wget_exit_status_info))
            print s % (1, 91, ' ===> '), cmd
            sys.exit(1)
        else:
            # Success: promote the temp file to its final name.
            os.rename('%s.tmp' % infos['file'], infos['file'])

    def exists(self, filepath):
        # Not implemented.
        pass

    def upload(self, path, dir_):
        # Not implemented.
        pass

    def addtask(self):
        # Not implemented.
        pass

    def do(self):
        """Entry point for a single URL: list and download everything."""
        self.get_infos()
def main(argv):
    """Parse the command line and dispatch to login/signout/download."""
    if len(argv) <= 1:
        sys.exit()

    ######################################################
    # for argparse
    p = argparse.ArgumentParser(description='download from yunpan.360.com')
    p.add_argument('xxx', type=str, nargs='*', \
        help='命令对象.')
    p.add_argument('-a', '--aria2c', action='store_true', \
        help='download with aria2c')
    p.add_argument('-p', '--play', action='store_true', \
        help='play with mpv')
    p.add_argument('-f', '--from_', action='store', \
        default=1, type=int, \
        help='从第几个开始下载,eg: -f 42')
    p.add_argument('-t', '--type_', action='store', \
        default=None, type=str, \
        help='要下载的文件的后缀,eg: -t mp3')
    p.add_argument('-l', '--limit', action='store', \
        default=None, type=str, help='下载速度限制,eg: -l 100k')
    # Parsed options are published module-wide; yunpan360 reads them.
    global args
    args = p.parse_args(argv[1:])
    xxx = args.xxx

    if xxx[0] == 'login' or xxx[0] == 'g':
        # Credentials may come from the command line or be prompted for.
        if len(xxx[1:]) < 1:
            username = raw_input(s % (1, 97, ' username: '))
            password = getpass(s % (1, 97, ' password: '))
        elif len(xxx[1:]) == 1:
            username = xxx[1]
            password = getpass(s % (1, 97, ' password: '))
        elif len(xxx[1:]) == 2:
            username = xxx[1]
            password = xxx[2]
        else:
            print s % (1, 91, ' login\n login username\n login username password')
        x = yunpan360()
        x.login(username, password)
        is_signin = x.check_login()
        if is_signin:
            print s % (1, 92, ' ++ login succeeds.')
        else:
            print s % (1, 91, ' login failes')
    elif xxx[0] == 'signout':
        # Signing out simply truncates the saved cookie file.
        g = open(cookie_file, 'w')
        g.close()
    else:
        # Every remaining positional argument is treated as a share URL.
        urls = xxx
        x = yunpan360()
        x.init()
        for url in urls:
            x.path = x.get_path(url)
            x.do()
# Script entry point.
if __name__ == '__main__':
    argv = sys.argv
    main(argv)
| mit |
toanalien/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/urls_unittest.py | 124 | 4004 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from .urls import parse_bug_id, parse_attachment_id
class URLsTest(unittest.TestCase):
    """Unit tests for the bug-id and attachment-id URL parsing helpers."""

    def test_parse_bug_id(self):
        # FIXME: These would be all better as doctests
        # Accepted forms: the short /b/NNNNN alias (also embedded in
        # surrounding text) and show_bug.cgi with id first.
        self.assertEqual(12345, parse_bug_id("http://webkit.org/b/12345"))
        self.assertEqual(12345, parse_bug_id("foo\n\nhttp://webkit.org/b/12345\nbar\n\n"))
        self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345"))
        self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345&ctype=xml"))
        self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345&ctype=xml&excludefield=attachmentdata"))
        self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345excludefield=attachmentdata&ctype=xml"))

        # Our url parser is super-fragile, but at least we're testing it.
        # Wrong host, or id not in the leading query position, is rejected.
        self.assertIsNone(parse_bug_id("http://www.webkit.org/b/12345"))
        self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
        self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345&excludefield=attachmentdata"))
        self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&excludefield=attachmentdata&id=12345"))
        self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&ctype=xml&id=12345"))
        self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&id=12345&ctype=xml"))

    def test_parse_attachment_id(self):
        # attachment.cgi links must parse regardless of the trailing action.
        self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=review"))
        self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=edit"))
        self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=prettypatch"))
        self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=diff"))
        # Direct attachment links are hosted from per-bug subdomains:
        self.assertEqual(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345"))
        # Make sure secure attachment URLs work too.
        self.assertEqual(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345&t=Bqnsdkl9fs"))
| bsd-3-clause |
davidhawkes11/p3w | p3w_05.0c.4Challenge.py | 1 | 1649 | # Code to instruct the computer to generate two user's dice roll values and display output to the screen.
# Input: the two players' names (strings) and the preferred number of sides on the dice (an integer).
# Output: 100 numbered lines, each showing one randomly generated dice roll for each player.
# The above text is commentary. The actual program starts below:
import random # provides this program's access to the Python 3.5 builtin 'random' module.
print ("Python 3.0 Workbook\nStudent Work Booklet\nStudent Activity p3w_05.0c.4Challenge\n")
print ("A program to instruct the computer to generate 100 dice roll values and display output to the screen.\n" )
print ("The program allows two users to enter a value for the preferred number of sides on the dice.\n" )
print ("This is best solved using a while loop:\n")
user1 = input("Who is the first player? ") # asks for the name of user 1
user2 = input("Who is the second player? ") # asks for the name of user 2
counter1 = 1 # roll number for player 1; the loop runs while counter1 <= 100
counter2 = 1 # roll number for player 2.
             # BUGFIX: this was initialised to 2, which contradicted the
             # original comment ("initialises the pretest while loop to 1")
             # and mislabelled every one of player 2's rolls by one.
sides = int(input("Enter the number of sides you would like to have on the dice: "))
while counter1 <= 100: # sets the upper limit of the loop (100 rolls)
    print(user1, "'s first dice roll ", counter1 ," is ", random.randint(1, sides) ," and ", user2 ,"'s dice roll", counter2 ,"is", random.randint(1, sides))
    counter1 = counter1 + 1 # increments the counter
    counter2 = counter2 + 1 # increments the counter
print ("\nProgram successfully terminated." )
| mit |
tximikel/kuma | vendor/packages/logilab/astng/exceptions.py | 27 | 1650 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""this module contains exceptions used in the astng library
"""
__doctype__ = "restructuredtext en"
# Exception hierarchy defined below:
#
#   ASTNGError
#    +-- ASTNGBuildingException
#    +-- ResolveError
#    |    +-- NotFoundError
#    |    +-- InferenceError
#    |         +-- UnresolvableName
#    +-- NoDefault
class ASTNGError(Exception):
    """base exception class for all astng related exceptions"""

class ASTNGBuildingException(ASTNGError):
    """exception class when we are unable to build an astng representation"""

class ResolveError(ASTNGError):
    """base class of astng resolution/inference error"""

class NotFoundError(ResolveError):
    """raised when we are unable to resolve a name"""

class InferenceError(ResolveError):
    """raised when we are unable to infer a node"""

class UnresolvableName(InferenceError):
    """raised when we are unable to resolve a name"""

class NoDefault(ASTNGError):
    """raised by function's `default_value` method when an argument has
    no default value
    """
| mpl-2.0 |
msdubov/AST-text-analysis | east/asts/utils.py | 2 | 1163 | # -*- coding: utf-8 -*
from east import consts
def index(array, key, start=0):
    """Return the position of the first occurrence of *key* in *array*
    at or after *start*.

    NOTE: deliberately performs no boundary check (optimization); an
    IndexError propagates if *key* is absent.
    """
    position = start
    while True:
        if array[position] == key:
            return position
        position += 1
def match_strings(str1, str2):
    """
    Returns the largest index i such that str1[:i] == str2[:i]
    """
    matched = 0
    # zip stops at the shorter string, so no explicit length check needed.
    for ch1, ch2 in zip(str1, str2):
        if ch1 != ch2:
            break
        matched += 1
    return matched
def make_unique_endings(strings_collection):
    """
    Make each string in the collection end with a unique character.
    Essential for correct building of a generalized annotated suffix tree.
    Returns the updated strings collection, encoded in Unicode.

    NOTE(review): the upper bound on the collection size (stated in the
    original as "max strings_collection ~ 1.100.000") presumably comes from
    the number of available codepoints after
    consts.String.UNICODE_SPECIAL_SYMBOLS_START -- confirm against consts.

    NOTE(review): this is Python-2-only code -- str.decode() below does not
    exist on Python 3 str objects.
    """
    res = []
    for i in range(len(strings_collection)):
        # NOTE(msdubov): a trick to handle 'narrow' python installation issues.
        # Build the terminator as a \UXXXXXXXX escape sequence (zero-padded
        # to 8 hex digits) and decode it, instead of calling unichr() which
        # cannot represent astral codepoints on narrow builds.
        hex_code = hex(consts.String.UNICODE_SPECIAL_SYMBOLS_START+i)
        hex_code = r"\U" + "0" * (8 - len(hex_code) + 2) + hex_code[2:]
        res.append(strings_collection[i] + hex_code.decode("unicode-escape"))
    return res
| mit |
lbjay/cds-invenio | modules/websubmit/lib/functions/Register_Referee_Decision.py | 4 | 9034 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Register a referee's decision on a document (i.e. it is approved or
rejected) in the submission approvals database (sbmAPPROVALS).
"""
__revision__ = "$Id$"
import cgi
import os.path
from invenio.config import CFG_SITE_SUPPORT_EMAIL
from invenio.websubmit_functions.Shared_Functions import ParamFromFile
from invenio.websubmit_config import InvenioWebSubmitFunctionError, \
InvenioWebSubmitFunctionStop
from invenio.websubmit_dblayer import get_simple_approval_status, \
update_approval_request_status
def Register_Referee_Decision(parameters, curdir, form, user_info=None):
    """
    A referee may either "approve" or "reject" a refereed document.
    The referee's decision is stored in a file in the submission's working
    directory and it is this function's job to read the contents of that
    file and update the status of the document's entry in the approvals
    table (sbmAPPROVAL) to be either "approved" or "rejected" depending
    upon the referee's decision.
    @param parameters: (dictionary) - the WebSubmit function parameters;
        must contain:
          + decision_file: (string) - the name of the file (within the
            submission's working directory) in which the referee's
            decision is to be found.
    @param curdir: (string) - the submission's current working directory.
    @param form: (dictionary) - the submitted form fields; this function
        reads the 'doctype' field.
    @param user_info: (dictionary) - information about the current user
        (not used by this function).
    NOTE: A referee's decision _MUST_ be either "approve" or "reject".
       If not, an InvenioWebSubmitFunctionError will be raised.
       If a document's "approval status" is not "waiting" at the
       time of the referee's decision, the decision will not be
       taken into account and the submission will be halted.
       (This is because it's not appropriate to approve a document
       that has already been approved or rejected, has been
       withdrawn, etc.)
    @return: empty string.
    @Exceptions raised: InvenioWebSubmitFunctionError on unexpected error.
                        InvenioWebSubmitFunctionStop in the case where the
                        approval should be stopped for whatever reason.
                        (E.g. when it has already been approved.)
    """
    # NOTE(review): 'rn' (the document's report number) is not defined in
    # this module -- presumably it is injected into the global namespace by
    # the WebSubmit engine before this function runs; confirm against the
    # engine code.
    global rn
    doctype = form['doctype']
    ########
    ## Get the parameters from the list:
    ########
    ## Get the name of the "decision" file and read its value:
    ########
    decision = "" ## variable to hold the referee's decision
    try:
        decision_file = parameters["decision_file"]
    except KeyError:
        ## No value given for the decision file:
        decision_file = None
    else:
        if decision_file is not None:
            ## basename() guards against path components smuggled into the
            ## configured file name.
            decision_file = os.path.basename(decision_file).strip()
            if decision_file == "":
                decision_file = None
    if decision_file is None:
        ## Unable to obtain the name of the file in which the referee's
        ## decision is stored. Halt.
        err_msg = "Error in Register_Referee_Decision: Function was not " \
                  "configured with a valid value for decision_file - the " \
                  "file in which the referee's decision is stored. " \
                  "The referee's decision has not been processed for " \
                  "[%s]. Please inform the administrator." \
                  % rn
        raise InvenioWebSubmitFunctionError(err_msg)
    ## Read in the referee's decision (lower-cased for the comparison below):
    decision = ParamFromFile("%s/%s" % (curdir, decision_file)).lower()
    ##
    ########
    if decision not in ("approve", "reject"):
        ## Invalid value for the referee's decision.
        err_msg = "Error in Register_Referee_Decision: The value for the " \
                  "referee's decision (%s) was invalid. Please inform the " \
                  "administrator." % decision
        raise InvenioWebSubmitFunctionError(err_msg)
    ##
    ## Get the status of the approval request for this document from the DB:
    document_status = get_simple_approval_status(doctype, rn)
    if document_status is None:
        ## No information about this document in the approval database.
        ## Its approval has never been requested.
        msg = """
<br />
<div>
<span style="color: red;">Note:</span> No details about an approval request
for the document [%s] have been found in the database.<br />
Before a decision can be made about it, a request for its approval must have
been submitted.<br />
If you feel that there is a problem, please contact <%s>, quoting the
document's report number.
</div>""" % (cgi.escape(rn), cgi.escape(CFG_SITE_SUPPORT_EMAIL))
        raise InvenioWebSubmitFunctionStop(msg)
    elif document_status in ("approved", "rejected"):
        ## If a document was already approved or rejected, halt the approval
        ## process with a message for the referee:
        msg = """
<br />
<div>
<span style="color: red;">Note:</span> The document [%s] has
already been %s.<br />
There is nothing more to be done in this case and your decision
has <b>NOT</b> been taken into account.<br />
If you believe this to be an error, please contact <%s>, quoting the<br />
document's report-number [%s] and describing the problem.
</div>""" % (cgi.escape(rn), \
             cgi.escape(document_status), \
             cgi.escape(CFG_SITE_SUPPORT_EMAIL), \
             cgi.escape(rn))
        raise InvenioWebSubmitFunctionStop(msg)
    elif document_status == "withdrawn":
        ## Somebody had withdrawn the approval request for this document
        ## before the referee made this decision. Halt the approval process
        ## with a message for the referee:
        msg = """
<br />
<div>
<span style="color: red;">Note:</span> The request for the approval of the
document [%s] had been withdrawn prior to the submission of your
decision.<br />
Before a decision can be made regarding its status, a new request for its
approval must be submitted by the author.<br />
Your decision has therefore <b>NOT</b> been taken into account.<br />
If you believe this to be an error, please contact <%s>, quoting the
document's report-number [%s] and describing the problem.
</div>
""" % (cgi.escape(rn), \
       cgi.escape(CFG_SITE_SUPPORT_EMAIL), \
       cgi.escape(rn))
        raise InvenioWebSubmitFunctionStop(msg)
    elif document_status == "waiting":
        ## The document is awaiting approval. Register the referee's decision:
        ## (note the mapping: decision "approve"/"reject" -> DB status
        ## "approved"/"rejected")
        if decision == "approve":
            ## Register the approval:
            update_approval_request_status(doctype, \
                                           rn, \
                                           note="",
                                           status="approved")
        else:
            ## Register the rejection:
            update_approval_request_status(doctype, \
                                           rn, \
                                           note="",
                                           status="rejected")
        ## Now retrieve the status of the document once more and check that
        ## it is either approved or rejected. If not, the decision couldn't
        ## be registered and an error should be raised.
        status_after_update = get_simple_approval_status(doctype, rn)
        if status_after_update not in ("approved", "rejected"):
            msg = "Error in Register_Referee_Decision function: It was " \
                  "not possible to update the approvals database when " \
                  "trying to register the referee's descision of [%s] " \
                  "for the document [%s]. Please report this this " \
                  "problem to [%s], quoting the document's " \
                  "report-number [%s]." \
                  % (decision, rn, CFG_SITE_SUPPORT_EMAIL, rn)
            raise InvenioWebSubmitFunctionError(msg)
    else:
        ## The document had an unrecognised "status". Halt with an error.
        msg = "Error in Register_Referee_Decision function: The " \
              "document [%s] has an unknown approval status " \
              "[%s]. Unable to process the referee's decision. Please " \
              "report this problem to [%s], quoting the document's " \
              "report-number [%s] and describing the problem." \
              % (rn, document_status, CFG_SITE_SUPPORT_EMAIL, rn)
        raise InvenioWebSubmitFunctionError(msg)
    ## Finished.
    return ""
| gpl-2.0 |
IsaacShelton/Adept | include/TOKEN/generate_c.py | 1 | 24522 | #!/usr/bin/python3
# Rather than explicitly write out the values for each token by hand,
# This file takes care of the dirty work of manually maintaining token
# IDs so that we can add/remove tokens more easily
# This file takes care of automatically sorting and positioning
# tokens in the correct order
# In order to add or remove tokens from the program,
# all you have to do is edit the 'tokens' array
# later in this file
# If you want multiple token names to share the
# same value, you can add token aliases by editing
# the 'token_aliases' array later in this file
from enum import IntEnum, auto, unique
import time
import os
@unique
class TokenType(IntEnum):
    # Broad lexical category of a token. Concrete values come from auto()
    # and are never written into the generated C files; only the grouping
    # matters (KEYWORD tokens are later sorted to the end of the list).
    NONE = auto()
    WORD = auto()
    KEYWORD = auto()
    OPERATOR = auto()
    LITERAL = auto()
    POLYMORPH = auto()
    PREPROCESSOR = auto()
@unique
class ExtraDataFormat(IntEnum):
    # Describes what extra payload a token carries besides its id.
    # Each value is encoded as a single letter ('a', 'b', ...) in the
    # generated global_token_extra_format_table, via
    # extra_data_format_encode_offset (ord('a') - 1).
    ID_ONLY = auto()      # no payload beyond the token id
    C_STRING = auto()     # payload stored as a C string
    LEN_STRING = auto()   # payload stored as a length + string pair
    MEMORY = auto()       # payload stored as a raw memory block
class Token:
    """One lexer token definition.

    short_name        -- fragment used to build the C macro name
                         (TOKEN_<SHORT_NAME upper-cased>).
    token_type        -- the token's TokenType category.
    extra_data_format -- the token's ExtraDataFormat payload kind.
    long_name         -- human-readable name; when None it is derived from
                         short_name by replacing underscores with spaces.
    """

    def __init__(self, short_name, token_type, extra_data_format, long_name):
        self.short_name = short_name
        self.token_type = token_type
        self.extra_data_format = extra_data_format
        # FIX: compare against None with 'is' rather than '==' (identity,
        # not equality, is the correct test for the None sentinel).
        if long_name is None:
            self.long_name = short_name.replace("_", " ")
        else:
            self.long_name = long_name

    def definition(self, value):
        """Return the '#define TOKEN_<NAME> 0x........' line for this token."""
        return token_definition_string(self.short_name, value)
def token_definition_string(short_name, value):
    """Format one aligned '#define TOKEN_<NAME> 0x<value>' header line.

    Alignment is based on the module-level tokens_max_short_name so that
    all generated hex values line up in one column.
    """
    global tokens_max_short_name
    column_width = tokens_max_short_name + 6
    macro_name = ("TOKEN_" + short_name.upper()).ljust(column_width)
    return "#define {0} {1}\n".format(macro_name, "0x%0.8X" % value)
# The master token table. Columns: short_name, TokenType, ExtraDataFormat,
# long_name (None => derived from short_name). List order determines the
# generated token ids, except that KEYWORD entries are re-sorted to the end
# of the list (alphabetically) immediately after this definition.
tokens = [
    Token("none" , TokenType.NONE , ExtraDataFormat.ID_ONLY , None ),
    Token("word" , TokenType.WORD , ExtraDataFormat.C_STRING , None ),
    Token("string" , TokenType.LITERAL , ExtraDataFormat.LEN_STRING , None ),
    Token("cstring" , TokenType.LITERAL , ExtraDataFormat.C_STRING , None ),
    Token("add" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("subtract" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("multiply" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("divide" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("equals" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("notequals" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("lessthan" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("greaterthan" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("lessthaneq" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("greaterthaneq" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("not" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("open" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("close" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("begin" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("end" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("newline" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    # Numeric literals carry their value as a raw memory payload:
    Token("byte" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("ubyte" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("short" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("ushort" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("int" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("uint" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("long" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("ulong" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("usize" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("float" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("double" , TokenType.LITERAL , ExtraDataFormat.MEMORY , None ),
    Token("member" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("address" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("next" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("bracket_open" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("bracket_close" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("modulus" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("generic_int" , TokenType.LITERAL , ExtraDataFormat.ID_ONLY , None ),
    Token("generic_float" , TokenType.LITERAL , ExtraDataFormat.ID_ONLY , None ),
    Token("add_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("subtract_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("multiply_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("divide_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("modulus_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("bit_and_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise and assign" ),
    Token("bit_or_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise or assign" ),
    Token("bit_xor_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise xor assign" ),
    Token("bit_ls_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise left shift assign" ),
    Token("bit_rs_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise right shift assign" ),
    Token("bit_lgc_ls_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise logical left shift assign" ),
    Token("bit_lgc_rs_assign" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise logical right shift assign"),
    Token("ellipsis" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("uberand" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "uber and" ),
    Token("uberor" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "uber or" ),
    Token("terminate_join" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "terminate join" ),
    Token("colon" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("bit_or" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise or" ),
    Token("bit_xor" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise xor" ),
    Token("bit_lshift" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise left shift" ),
    Token("bit_rshift" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise right shift" ),
    Token("bit_complement" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("bit_lgc_lshift" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise logical left shift" ),
    Token("bit_lgc_rshift" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "bitwise logical right shift" ),
    Token("associate" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("meta" , TokenType.PREPROCESSOR , ExtraDataFormat.C_STRING , None ),
    Token("polymorph" , TokenType.POLYMORPH , ExtraDataFormat.C_STRING , None ),
    Token("maybe" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("increment" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("decrement" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("toggle" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("strong_arrow" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , "strong arrow" ),
    Token("range" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("gives" , TokenType.OPERATOR , ExtraDataFormat.ID_ONLY , None ),
    Token("polycount" , TokenType.LITERAL , ExtraDataFormat.C_STRING , None ),
    # KEYWORD entries (kept alphabetical; they end up at the bottom of the
    # list after the sort that follows this table):
    Token("POD" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "POD keyword" ),
    Token("alias" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "alias keyword" ),
    Token("and" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "and keyword" ),
    Token("as" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "as keyword" ),
    Token("at" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "at keyword" ),
    Token("break" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "break keyword" ),
    Token("case" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "case keyword" ),
    Token("cast" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "cast keyword" ),
    Token("const" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "const keyword" ),
    Token("continue" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "continue keyword" ),
    Token("def" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "def keyword" ),
    Token("default" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "default keyword" ),
    Token("defer" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "defer keyword" ),
    Token("define" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "define keyword" ),
    Token("delete" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "delete keyword" ),
    Token("each" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "each keyword" ),
    Token("else" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "else keyword" ),
    Token("enum" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "enum keyword" ),
    Token("exhaustive" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "exhaustive keyword" ),
    Token("external" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "external keyword" ),
    Token("fallthrough" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "fallthrough keyword" ),
    Token("false" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "false keyword" ),
    Token("for" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "for keyword" ),
    Token("foreign" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "foreign keyword" ),
    Token("func" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "func keyword" ),
    Token("funcptr" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "funcptr keyword" ),
    Token("global" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "global keyword" ),
    Token("if" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "if keyword" ),
    Token("implicit" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "implicit keyword" ),
    Token("import" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "import keyword" ),
    Token("in" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "in keyword" ),
    Token("inout" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "inout keyword" ),
    Token("llvm_asm" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "llvm_asm keyword" ),
    Token("namespace" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "namespace keyword" ),
    Token("new" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "new keyword" ),
    Token("null" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "null keyword" ),
    Token("or" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "or keyword" ),
    Token("out" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "out keyword" ),
    Token("packed" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "packed keyword" ),
    Token("pragma" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "pragma keyword" ),
    Token("private" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "private keyword" ),
    Token("public" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "public keyword" ),
    Token("repeat" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "repeat keyword" ),
    Token("return" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "return keyword" ),
    Token("sizeof" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "sizeof keyword" ),
    Token("static" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "static keyword" ),
    Token("stdcall" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "stdcall keyword" ),
    Token("struct" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "struct keyword" ),
    Token("switch" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "switch keyword" ),
    Token("thread_local" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "thread_local keyword" ),
    Token("true" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "true keyword" ),
    Token("typeinfo" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "typeinfo keyword" ),
    Token("undef" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "undef keyword" ),
    Token("union" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "union keyword" ),
    Token("unless" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "unless keyword" ),
    Token("until" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "until keyword" ),
    Token("using" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "using keyword" ),
    Token("va_arg" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "va_arg keyword" ),
    Token("va_copy" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "va_copy keyword" ),
    Token("va_end" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "va_end keyword" ),
    Token("va_start" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "va_start keyword" ),
    Token("verbatim" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "verbatim keyword" ),
    Token("while" , TokenType.KEYWORD , ExtraDataFormat.ID_ONLY , "while keyword" )
]
# Calculate longest 'short name' of tokens (used to align generated #defines).
tokens_max_short_name = max((len(token.short_name) for token in tokens), default=0)

# Push keywords to bottom of list, and make sure they are sorted alphabetically
def is_not_keyword_otherwise_alphabetical(token):
    if token.token_type is TokenType.KEYWORD:
        return token.short_name
    return ""

tokens.sort(key=is_not_keyword_otherwise_alphabetical)

# Index of the first keyword token after sorting (0 when there are none).
beginning_of_keywords = next(
    (i for i, token in enumerate(tokens) if token.token_type == TokenType.KEYWORD),
    0)
class TokenAlias:
    """A second macro name that resolves to an existing token's id.

    points_to names the short_name of the token whose id the alias shares.
    """

    def __init__(self, short_name, long_name, points_to):
        self.short_name = short_name
        if long_name is None:
            self.long_name = short_name.replace("_", " ")
        else:
            self.long_name = long_name
        self.points_to = points_to

    def definition(self):
        """Return the '#define' line mapping this alias onto its target's id."""
        for position, token in enumerate(tokens):
            if token.short_name == self.points_to:
                return token_definition_string(self.short_name, position)
        raise RuntimeError("TokenAlias.definition() failed to resolve destination '{0}'".format(self.short_name))
# Aliases share an id with an existing token ('bit_and' reuses 'address',
# since '&' serves as both operators in the grammar of the generated lexer
# -- NOTE(review): presumably; confirm against the lexer).
token_aliases = [
    TokenAlias("bit_and", "bitwise and", "address")
]

# Verbatim text appended to the generated header (kept for compatibility):
old_pkg_tokendata = """
// DEPRECATED: Pre-lexed files will probably be removed in the future.
// Used in place of common sequences in packages.
// Not recognized by parser.
#define TOKEN_PKG_MIN TOKEN_PKG_WBOOL
#define TOKEN_PKG_WBOOL 0x0000000D0
#define TOKEN_PKG_WBYTE 0x0000000D1
#define TOKEN_PKG_WDOUBLE 0x0000000D2
#define TOKEN_PKG_WFLOAT 0x0000000D3
#define TOKEN_PKG_WINT 0x0000000D4
#define TOKEN_PKG_WLONG 0x0000000D5
#define TOKEN_PKG_WSHORT 0x0000000D6
#define TOKEN_PKG_WUBYTE 0x0000000D7
#define TOKEN_PKG_WUINT 0x0000000D8
#define TOKEN_PKG_WULONG 0x0000000D9
#define TOKEN_PKG_WUSHORT 0x0000000DA
#define TOKEN_PKG_WUSIZE 0x0000000DB
#define TOKEN_PKG_MAX TOKEN_PKG_WUSIZE
"""

# ExtraDataFormat value 1 encodes as 'a', 2 as 'b', and so on, in the
# generated global_token_extra_format_table string.
extra_data_format_encode_offset = ord('a') - 1
def generate_header(filename):
    """Emit token_data.h: one #define per token (plus aliases), the table
    bounds, the extra-data format codes, and the extern declarations that
    token_data.c fulfils.
    """
    prologue = "\n// This file was auto-generated by 'include/TOKEN/generate_c.py'\n\n#ifndef _ISAAC_TOKEN_DATA_H\n#define _ISAAC_TOKEN_DATA_H\n\n"
    iteration_version = "#define TOKEN_ITERATION_VERSION 0x%0.8X\n\n" % int(time.time())
    epilogue = old_pkg_tokendata + "\n#endif // _ISAAC_TOKEN_DATA_H\n"

    with open(filename, "w") as out:
        out.write(prologue)
        out.write(iteration_version)
        # One #define per token (id == position in the list), then aliases.
        for value, token in enumerate(tokens):
            out.write(token.definition(value))
        for token_alias in token_aliases:
            out.write(token_alias.definition())
        out.write("\n")
        out.write("#define MAX_LEX_TOKEN 0x%0.8X\n" % (len(tokens) - 1))
        out.write("#define BEGINNING_OF_KEYWORD_TOKENS 0x%0.8X\n" % beginning_of_keywords)
        out.write("\n")
        # Same encoding generate_source() uses for the format table letters.
        for fmt in (ExtraDataFormat.ID_ONLY, ExtraDataFormat.C_STRING,
                    ExtraDataFormat.LEN_STRING, ExtraDataFormat.MEMORY):
            out.write("#define TOKEN_EXTRA_DATA_FORMAT_%s 0x%0.8X\n"
                      % (fmt.name, extra_data_format_encode_offset + int(fmt)))
        out.write("\n")
        out.write("extern const char *global_token_name_table[];\n")
        out.write("extern const char global_token_extra_format_table[];\n")
        out.write("\n")
        out.write("extern const char *global_token_keywords_list[];\n")
        out.write("extern unsigned long long global_token_keywords_list_length;\n")
        out.write(epilogue)
    print("[done] Generated token_data.h")
def generate_source(filename):
    """Emit token_data.c: the token name table, the extra-data format
    string, and the keyword list (with its length) used by the lexer.
    """
    head = "\n// This file was auto-generated by 'include/TOKEN/generate_c.py'\n\n"

    # Width of the longest long name, used to align the per-entry comments.
    longest_long_name = max((len(token.long_name) for token in tokens), default=0)

    last_index = len(tokens) - 1
    # 'with' guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(filename, "w") as f:
        f.write(head)

        f.write("const char *global_token_name_table[] = {\n")
        for i, token in enumerate(tokens):
            padding = " " * (longest_long_name - len(token.long_name) + 1)
            comment = "// 0x%0.8X" % i
            comment_and_padding = padding + comment
            # BUGFIX: the original tested 'i != len(tokens)', which is always
            # true for a valid index, so every entry - including the last -
            # got a trailing comma (legal in C, but clearly unintended).
            f.write(" \"" + token.long_name + "\"" + ("," if i != last_index else "") + comment_and_padding + "\n")
        f.write("};\n")
        f.write("\n")

        # One letter per token encoding its ExtraDataFormat ('a' = ID_ONLY...).
        f.write("const char global_token_extra_format_table[] = \"")
        for token in tokens:
            f.write(chr(extra_data_format_encode_offset + int(token.extra_data_format)))
        f.write("\";\n")
        f.write("\n")

        # Keywords are sorted to the end of 'tokens', so the last keyword is
        # also the last token overall; same BUGFIX as above for the comma.
        num_keywords = 0
        f.write("const char *global_token_keywords_list[] = {\n")
        for i, token in enumerate(tokens):
            if token.token_type != TokenType.KEYWORD:
                continue
            f.write(" \"" + token.short_name + "\"" + ("," if i != last_index else "") + "\n")
            num_keywords += 1
        f.write("};\n")
        f.write("\n")
        f.write("unsigned long long global_token_keywords_list_length = {0};\n".format(num_keywords))
    print("[done] Generated token_data.c")
def main():
    """Regenerate token_data.h / token_data.c relative to this script."""
    previous_dir = os.getcwd()
    # The output paths below are relative to this script's own directory.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    generate_header("token_data.h")
    generate_source("../../src/TOKEN/token_data.c")
    os.chdir(previous_dir)

main()
| gpl-3.0 |
fragarco/pybomber | src/gamelib/sprites.py | 1 | 3207 | import pygame
from .viewer import Viewer
from .loader import Loader
class Base(pygame.sprite.Sprite):
    """Static base tile sprite; pixmaps are cached once per colour variant
    in a class-level dictionary shared by all instances."""

    images = {}

    def __init__(self):
        super().__init__()

    def init(self, type):
        # Lazily populate the shared pixmap cache on first use.
        if not Base.images:
            for colour, asset in (("WHITE", "assets/sprites/base0.png"),
                                  ("YELLOW", "assets/sprites/base1.png"),
                                  ("ORANGE", "assets/sprites/base2.png")):
                Base.images[colour] = Loader.load_pixmap(asset)
        self.image = Base.images[type]
        self.rect = self.image.get_rect()
class Floor(pygame.sprite.Sprite):
    """Static floor tile sprite; pixmaps are cached once per colour variant
    in a class-level dictionary shared by all instances."""

    images = {}

    def __init__(self):
        super().__init__()

    def init(self, type):
        # Lazily populate the shared pixmap cache on first use.
        if not Floor.images:
            for colour, asset in (("WHITE", "assets/sprites/floor0.png"),
                                  ("YELLOW", "assets/sprites/floor1.png"),
                                  ("ORANGE", "assets/sprites/floor2.png")):
                Floor.images[colour] = Loader.load_pixmap(asset)
        self.image = Floor.images[type]
        self.rect = self.image.get_rect()
class Plane(pygame.sprite.Sprite):
    """Aircraft that flies right-to-left, re-entering one row lower after
    leaving the screen; cycles through three frames (150 ms each)."""

    images = []

    def __init__(self):
        super().__init__()
        self.velocity = -0.25  # horizontal speed, pixels per unit of time_passed

    def init(self):
        if not Plane.images:
            Plane.images = [
                Loader.load_png("assets/animations/plane/plane1.png"),
                Loader.load_png("assets/animations/plane/plane2.png"),
                Loader.load_png("assets/animations/plane/plane3.png"),
            ]
        self.image_ind = 0
        self.image = Plane.images[0]
        self.rect = self.image.get_rect()
        self.playing_time = 0

    def init_pos(self):
        # Start just inside the right edge, 40 px from the top.
        self.rect.top = 40
        self.rect.left = Viewer.Width - self.rect.width

    def update(self, time_passed):
        dx = round(self.velocity * time_passed)
        self.rect.move_ip(dx, 0)
        if self.rect.left < -self.rect.width:
            # Fully off the left edge: wrap to the right, one row lower.
            self.rect.left = Viewer.Width
            self.rect.top += self.rect.height
        frame = int((self.playing_time / 150) % 3)
        self.image_ind = frame
        self.image = Plane.images[frame]
        self.playing_time += time_passed
class Explosion(pygame.sprite.Sprite):
    """One-shot explosion animation; removes itself after the last frame."""

    images = []

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)

    def init(self):
        """Load the five animation frames (once) and reset animation state."""
        if not Explosion.images:
            for asset in ("assets/animations/explosion/exp1.png",
                          "assets/animations/explosion/exp2.png",
                          "assets/animations/explosion/exp3.png",
                          "assets/animations/explosion/exp4.png",
                          "assets/animations/explosion/exp5.png"):
                Explosion.images.append(Loader.load_png(asset))
        self.image_ind = 0
        self.image = Explosion.images[0]
        self.rect = Explosion.images[0].get_rect()
        self.playing_time = 0

    def update(self, time_passed):
        """Advance one frame every 150 ms; kill the sprite once frames run out."""
        frame = int(self.playing_time / 150)
        self.image_ind = frame
        self.playing_time += time_passed
        if frame < len(Explosion.images):
            self.image = Explosion.images[frame]
        else:
            # past the last frame: the animation is over
            self.kill()
class Bomb(pygame.sprite.Sprite):
    """Falling bomb sprite; disappears near the bottom of the viewport."""

    image = None

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # vertical speed in pixels per millisecond
        self.velocity = 0.25

    def init(self, x, y):
        """Load the shared image on first use and drop the bomb at (x, y)."""
        if Bomb.image is None:
            Bomb.image = Loader.load_png("assets/sprites/bomb.png")
        self.image = Bomb.image
        self.rect = self.image.get_rect()
        self.rect.left = x
        self.rect.top = y

    def update(self, time_passed):
        """Fall downwards; remove the sprite once it nears the bottom edge."""
        self.rect.top += round(time_passed * self.velocity)
        if self.rect.top > Viewer.Height - 10:
            self.kill()
| mit |
aneeshusa/android-quill | jni/libhpdf-2.3.0RC2/if/python/demo/text_demo.py | 32 | 10572 | ###
## * << Haru Free PDF Library 2.0.0 >> -- text_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the relative suffix with the literal string '\\..',
    which is only a path separator on Windows (and a backslash escape hazard
    in a non-raw string); os.path.join with os.pardir is portable.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
from math import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    Registered with HPDF_New; uses the module-global ``pdf`` handle because
    the C callback signature carries no document pointer of its own.
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
def show_stripe_pattern(page, x, y):
    """Draw horizontal blue stripes over the 'ABCabc123' sample text area.

    Used behind the clipping-mode samples; leaves the line width at 2.5.
    """
    for iy in range(0, 50, 3):
        HPDF_Page_SetRGBStroke(page, 0.0, 0.0, 0.5)
        HPDF_Page_SetLineWidth(page, 1)
        HPDF_Page_MoveTo(page, x, y + iy)
        HPDF_Page_LineTo(page, x + HPDF_Page_TextWidth(page, "ABCabc123"),
                         y + iy)
        HPDF_Page_Stroke(page)
    HPDF_Page_SetLineWidth(page, 2.5)
def show_description(page, x, y, text):
    """Print *text* as a small black caption just below (x, y).

    Saves the page's current font size and fill colour first, and restores
    them afterwards so the caller's drawing state is unchanged (the text
    rendering mode, however, is left at HPDF_FILL).
    """
    fsize = HPDF_Page_GetCurrentFontSize(page)
    font = HPDF_Page_GetCurrentFont(page)
    c = HPDF_Page_GetRGBFill(page)
    HPDF_Page_BeginText(page)
    HPDF_Page_SetRGBFill(page, 0, 0, 0)
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL)
    HPDF_Page_SetFontAndSize(page, font, 10)
    HPDF_Page_TextOut(page, x, y - 12, text)
    HPDF_Page_EndText(page)
    # restore the caller's font size and fill colour
    HPDF_Page_SetFontAndSize(page, font, fsize)
    HPDF_Page_SetRGBFill(page, c.r, c.g, c.b)
def main():
    """Build the libharu "Text Demo" PDF.

    Demonstrates font sizes, per-character colouring, the text rendering
    modes (fill/stroke/clipping), rotated/skewed/scaled text, and
    char/word spacing.  Writes the PDF next to the script.
    """
    global pdf  # shared with error_handler, which frees it on failure
    page_title = "Text Demo"
    samp_text = "abcdefgABCDEFG123!#$%&+-@?"
    samp_text2 = "The quick brown fox jumps over the lazy dog."
    # output file: same basename as the script, .pdf extension
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    # set compression mode
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont(pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage(pdf)
    # draw grid to the page
    print_grid(pdf, page)
    # print the lines of the page.
    HPDF_Page_SetLineWidth(page, 1)
    HPDF_Page_Rectangle(page, 50, 50, HPDF_Page_GetWidth(page) - 100,
                        HPDF_Page_GetHeight(page) - 110)
    HPDF_Page_Stroke(page)
    # print the title of the page (with positioning center).
    HPDF_Page_SetFontAndSize(page, font, 24)
    tw = HPDF_Page_TextWidth(page, page_title)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, (HPDF_Page_GetWidth(page) - tw) / 2,
                      HPDF_Page_GetHeight(page) - 50, page_title)
    HPDF_Page_EndText(page)
    HPDF_Page_BeginText(page)
    HPDF_Page_MoveTextPos(page, 60, HPDF_Page_GetHeight(page) - 60)
    # font size samples: 8pt growing by 1.5x up to <60pt
    fsize = 8
    while (fsize < 60):
        # set style and size of font.
        HPDF_Page_SetFontAndSize(page, font, fsize)
        # set the position of the text.
        HPDF_Page_MoveTextPos(page, 0, -5 - fsize)
        # measure the number of characters which included in the page.
        buf = samp_text
        length = HPDF_Page_MeasureText(page, samp_text,
                                       HPDF_Page_GetWidth(page) - 120, HPDF_FALSE, NULL)
        # truncate the text.
        # NOTE(review): '%*s\0' left-pads with spaces to *length* rather than
        # truncating to *length* characters as the C demo does -- confirm.
        buf = '%*s\0' % (int(length), buf)
        HPDF_Page_ShowText(page, buf)
        # print the description.
        HPDF_Page_MoveTextPos(page, 0, -10)
        HPDF_Page_SetFontAndSize(page, font, 8)
        buf = "Fontsize=%.0f" % fsize
        HPDF_Page_ShowText(page, buf)
        fsize *= 1.5
    # font color: three gradients over the sample text
    HPDF_Page_SetFontAndSize(page, font, 8)
    HPDF_Page_MoveTextPos(page, 0, -30)
    HPDF_Page_ShowText(page, "Font color")
    HPDF_Page_SetFontAndSize(page, font, 18)
    HPDF_Page_MoveTextPos(page, 0, -20)
    length = len(samp_text)
    # red -> green gradient, one character at a time
    for i in range(length):
        buf = [None, None]
        r = i / float(length)
        g = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill(page, r, g, 0.0)
        HPDF_Page_ShowText(page, buf)
    HPDF_Page_MoveTextPos(page, 0, -25)
    # red -> blue gradient
    for i in range(length):
        buf = [None, None]
        r = i / float(length)
        b = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill(page, r, 0.0, b)
        HPDF_Page_ShowText(page, buf)
    HPDF_Page_MoveTextPos(page, 0, -25)
    # blue -> green gradient
    for i in range(length):
        buf = [None, None]
        b = i / float(length)
        g = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill(page, 0.0, g, b)
        HPDF_Page_ShowText(page, buf)
    HPDF_Page_EndText(page)
    ypos = 450
    #
    # Font rendering mode
    #
    HPDF_Page_SetFontAndSize(page, font, 32)
    HPDF_Page_SetRGBFill(page, 0.5, 0.5, 0.0)
    HPDF_Page_SetLineWidth(page, 1.5)
    # PDF_FILL
    show_description(page, 60, ypos,
                     "RenderingMode=PDF_FILL")
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos, "ABCabc123")
    HPDF_Page_EndText(page)
    # PDF_STROKE
    show_description(page, 60, ypos - 50,
                     "RenderingMode=PDF_STROKE")
    HPDF_Page_SetTextRenderingMode(page, HPDF_STROKE)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 50, "ABCabc123")
    HPDF_Page_EndText(page)
    # PDF_FILL_THEN_STROKE
    show_description(page, 60, ypos - 100,
                     "RenderingMode=PDF_FILL_THEN_STROKE")
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL_THEN_STROKE)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 100, "ABCabc123")
    HPDF_Page_EndText(page)
    # PDF_FILL_CLIPPING: text clips the stripe pattern drawn afterwards
    show_description(page, 60, ypos - 150,
                     "RenderingMode=PDF_FILL_CLIPPING")
    HPDF_Page_GSave(page)
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL_CLIPPING)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 150, "ABCabc123")
    HPDF_Page_EndText(page)
    show_stripe_pattern(page, 60, ypos - 150)
    HPDF_Page_GRestore(page)
    # PDF_STROKE_CLIPPING
    show_description(page, 60, ypos - 200,
                     "RenderingMode=PDF_STROKE_CLIPPING")
    HPDF_Page_GSave(page)
    HPDF_Page_SetTextRenderingMode(page, HPDF_STROKE_CLIPPING)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 200, "ABCabc123")
    HPDF_Page_EndText(page)
    show_stripe_pattern(page, 60, ypos - 200)
    HPDF_Page_GRestore(page)
    # PDF_FILL_STROKE_CLIPPING
    show_description(page, 60, ypos - 250,
                     "RenderingMode=PDF_FILL_STROKE_CLIPPING")
    HPDF_Page_GSave(page)
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL_STROKE_CLIPPING)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 250, "ABCabc123")
    HPDF_Page_EndText(page)
    show_stripe_pattern(page, 60, ypos - 250)
    HPDF_Page_GRestore(page)
    # Reset text attributes
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL)
    HPDF_Page_SetRGBFill(page, 0, 0, 0)
    HPDF_Page_SetFontAndSize(page, font, 30)
    #
    # Rotating text
    #
    angle1 = 30  # A rotation of 30 degrees.
    # NOTE(review): under Python 2 `angle1 / 180` is integer division and
    # evaluates to 0, zeroing the rotation; needs 180.0 there -- confirm
    # which interpreter this demo targets.
    rad1 = angle1 / 180 * 3.141592  # Calculate the radian value.
    show_description(page, 320, ypos - 60, "Rotating text")
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextMatrix(page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1),
                            330, ypos - 60)
    HPDF_Page_ShowText(page, "ABCabc123")
    HPDF_Page_EndText(page)
    #
    # Skewing text.
    #
    show_description(page, 320, ypos - 120, "Skewing text")
    HPDF_Page_BeginText(page)
    angle1 = 10
    angle2 = 20
    # NOTE(review): same Python-2 integer-division concern as above.
    rad1 = angle1 / 180 * 3.141592
    rad2 = angle2 / 180 * 3.141592
    HPDF_Page_SetTextMatrix(page, 1, tan(rad1), tan(rad2), 1, 320, ypos - 120)
    HPDF_Page_ShowText(page, "ABCabc123")
    HPDF_Page_EndText(page)
    #
    # scaling text (X direction)
    #
    show_description(page, 320, ypos - 175, "Scaling text (X direction)")
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextMatrix(page, 1.5, 0, 0, 1, 320, ypos - 175)
    HPDF_Page_ShowText(page, "ABCabc12")
    HPDF_Page_EndText(page)
    #
    # scaling text (Y direction)
    #
    show_description(page, 320, ypos - 250, "Scaling text (Y direction)")
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextMatrix(page, 1, 0, 0, 2, 320, ypos - 250)
    HPDF_Page_ShowText(page, "ABCabc123")
    HPDF_Page_EndText(page)
    #
    # char spacing, word spacing
    #
    show_description(page, 60, 140, "char-spacing 0")
    show_description(page, 60, 100, "char-spacing 1.5")
    show_description(page, 60, 60, "char-spacing 1.5, word-spacing 2.5")
    HPDF_Page_SetFontAndSize(page, font, 20)
    HPDF_Page_SetRGBFill(page, 0.1, 0.3, 0.1)
    # char-spacing 0
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, 140, samp_text2)
    HPDF_Page_EndText(page)
    # char-spacing 1.5
    HPDF_Page_SetCharSpace(page, 1.5)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, 100, samp_text2)
    HPDF_Page_EndText(page)
    # char-spacing 1.5, word-spacing 2.5
    HPDF_Page_SetWordSpace(page, 2.5)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, 60, samp_text2)
    HPDF_Page_EndText(page)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main() | gpl-3.0 |
erwindl0/python-rpc | org.eclipse.triquetrum.python.service/scripts/scisoftpy/python/pybeans.py | 1 | 7553 | ###
# Copyright 2011 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import numpy #@UnresolvedImport @UnusedImport
import pyscisoft
exception = Exception
class guibean(dict):
    """dict-backed bean carrying GUI state exchanged with the plotting side.

    NOTE(review): presumably keyed by the GUI parameter tokens defined
    below (``parameters``) -- verify against the callers.
    """
    def __init__(self):
        pass
class _parameters(object):
    """Enumeration-like registry of GUI parameter name tokens.

    Each attribute created in ``__init__`` is a ``_parametershelper`` whose
    ``str()`` is the bean-side name (e.g. "PlotMode") and whose ``repr()``
    is the python-side attribute name.  The registries are class-level and
    therefore shared; this is fine because a single module-level singleton
    (``parameters``) is instantiated below.
    """
    _params = []              # helpers in registration order
    _str_to_params = dict()   # bean-side name -> helper

    def _register(self, param):
        # called by each helper's constructor to enrol itself
        self._params.append(param)
        self._str_to_params[str(param)] = param

    class _parametershelper(object):
        # Small named token that registers itself with the outer registry.
        def __init__(self, outer, rep, name):
            self._rep = rep
            self._name = name
            outer._register(self)
        def __str__(self):
            return self._name
        def __repr__(self):
            return self._rep

    def __init__(self):
        self.plotmode = self._parametershelper(self, "plotmode", "PlotMode")
        self.title = self._parametershelper(self, "title", "Title")
        self.roi = self._parametershelper(self, "roi", "ROI")
        self.roilist = self._parametershelper(self, "roilist", "ROIList")
        self.roiclearall = self._parametershelper(self, "roiclearall", "ROIClearAll")
        self.plotid = self._parametershelper(self, "plotid", "PlotID")
        self.plotop = self._parametershelper(self, "plotop", "PlotOp")
        self.fileop = self._parametershelper(self, "fileop", "FileOp")
        self.filename = self._parametershelper(self, "filename", "Filename")
        self.fileselect = self._parametershelper(self, "fileselect", "FileList")
        self.dispview = self._parametershelper(self, "dispview", "DisplayOnView")
        self.imagegridxpos = self._parametershelper(self, "imagegridxpos", "IGridX")
        self.imagegridypos = self._parametershelper(self, "imagegridypos", "IGridY")
        self.imagegridsize = self._parametershelper(self, "imagegridsize", "IGridSize")
        self.metadatanodepath = self._parametershelper(self, "metadatanodepath", "NodePath")
        self.treenodepath = self._parametershelper(self, "treenodepath", "TreeNodePath")
        self.gridpreferences = self._parametershelper(self, "GRIDPREFERENCES", "GridPrefs")
        self.imagegridstore = self._parametershelper(self, "imagegridstore", "ImageGridStore")
        self.volumeheadersize = self._parametershelper(self, "volumeheadersize", "RawVolumeHeaderSize")
        self.volumevoxeltype = self._parametershelper(self, "volumevoxeltype", "RawVolumeVoxelType")
        self.volumexdim = self._parametershelper(self, "volumexdim", "RawVolumeVoxelXDim")
        self.volumeydim = self._parametershelper(self, "volumeydim", "RawVolumeVoxelYDim")
        self.volumezdim = self._parametershelper(self, "volumezdim", "RawVolumeVoxelZDim")
        self.imagegridliveview = self._parametershelper(self, "imagegridliveview", "ImageGridLiveView")
        self.fittedpeaks = self._parametershelper(self, "fittedpeaks", "FittedPeaks")
        self.masking = self._parametershelper(self, "masking", "Masking")
        self.calibrationpeaks = self._parametershelper(self, "calibrationpeaks", "CalibrationPeaks")
        self.calibrationfunctionncd = self._parametershelper(self, "calibrationfunctionncd", "CalibrationFunction")
        self.onedfile = self._parametershelper(self, "onedfile", "OneDFile")
        self.axisop = self._parametershelper(self, "axisop", "AxisOp")

    def get(self, parametername):
        '''Return the GUIParameter with the given name, or return None for no matching'''
        return self._str_to_params.get(parametername)
parameters = _parameters()
class _plotmode(object):
    """Enumeration-like registry of GUI plot modes (mirrors GuiPlotMode).

    Same structure as ``_parameters`` above: class-level registries shared
    by a single module-level singleton (``plotmode``).
    """
    _modes = []             # helpers in registration order
    _str_to_modes = dict()  # bean-side name -> helper

    def _register(self, mode):
        # called by each helper's constructor to enrol itself
        self._modes.append(mode)
        self._str_to_modes[str(mode)] = mode

    class _plotmodehelper(object):
        # Small named token that registers itself with the outer registry.
        def __init__(self, outer, rep, name):
            self._rep = rep
            self._name = name
            outer._register(self)
        def __str__(self):
            return self._name
        def __repr__(self):
            return self._rep

    def __init__(self):
        self.oned = self._plotmodehelper(self, "oned", "ONED")
        self.oned_threed = self._plotmodehelper(self, "oned_threed", "ONED_THREED")
        self.twod = self._plotmodehelper(self, "twod", "TWOD")
        self.surf2d = self._plotmodehelper(self, "surf2d", "SURF2D")
        self.scatter2d = self._plotmodehelper(self, "scatter2d", "SCATTER2D")
        self.scatter3d = self._plotmodehelper(self, "scatter3d", "SCATTER3D")
        self.multi2d = self._plotmodehelper(self, "multi2d", "MULTI2D")
        self.imgexpl = self._plotmodehelper(self, "imgexpl", "IMGEXPL")
        self.volume = self._plotmodehelper(self, "volume", "VOLUME")
        self.empty = self._plotmodehelper(self, "empty", "EMPTY")

    def get(self, modename):
        '''Return the GuiPlotMode with the given name, or return None for no matching'''
        return self._str_to_modes.get(modename)
plotmode = _plotmode()
class axismapbean(object):
    """Bean describing how dataset axes map onto plot axes."""

    _DATA_KEYS = None  # (no extra keys; kept for symmetry with siblings)
    _AXIS_ID = 'axisID'
    _AXIS_NAMES = 'axisNames'

    DIRECT = 0
    FULL = 1

    XAXIS = "X-Axis"
    YAXIS = "Y-Axis"
    ZAXIS = "Z-Axis"
    XAXIS2 = "2nd X-Axis"

    def __init__(self, axisID=None, axisNames=None):
        """Create a bean; *axisID* defaults to a fresh empty list.

        The original used a mutable default (``axisID=[]``), so every bean
        created without an explicit argument shared ONE list and mutating
        one bean's axisID silently leaked into all the others.
        """
        self.axisID = [] if axisID is None else axisID
        self.axisNames = axisNames

    def __eq__(self, other):
        return (isinstance(other, axismapbean)
                and self.axisID == other.axisID and self.axisNames == other.axisNames)

    def __ne__(self, other):
        return not self.__eq__(other)

    # mutable, not hashable
    __hash__ = None

    def __repr__(self):
        return "axismapbean(%s)" % self.__dict__.__repr__()
class datasetwithaxisinformation(object):
    """Bean pairing a dataset with the axis mapping used to plot it."""

    _DATA = "data"
    _AXIS_MAP = "axisMap"

    def __init__(self, data=None, axisMap=None):
        '''
        data should be an numpy.ndarray
        axisMap should be an axismapbean
        '''
        self.data = data
        self.axisMap = axisMap

    def __eq__(self, other):
        if not isinstance(other, datasetwithaxisinformation):
            return False
        if self.axisMap != other.axisMap:
            return False
        # dataset comparison is delegated to the scisoft helper
        return pyscisoft.equaldataset(self.data, other.data)

    def __ne__(self, other):
        return not self.__eq__(other)

    # mutable, not hashable
    __hash__ = None

    def __repr__(self):
        return "datasetWithAxisInformation(%s)" % self.__dict__.__repr__()
class databean(object):
    """Bean pairing a list of plot datasets with named axis datasets."""

    _DATA = "data"
    _AXIS_DATA = "axisData"

    def __init__(self, data=None, axisData=None):
        # fresh containers per instance; falsy arguments are replaced too,
        # matching the original ``data or []`` semantics
        self.data = data or []
        self.axisData = axisData or dict()

    def __eq__(self, other):
        if not isinstance(other, databean) or self.data != other.data:
            return False
        # .items() instead of the Python-2-only .iteritems(), so the class
        # also works under Python 3 (identical behaviour under Python 2)
        for k, v in self.axisData.items():
            if not pyscisoft.equaldataset(v, other.axisData.get(k)):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    # mutable, not hashable -- now consistent with the sibling bean classes,
    # which all set __hash__ = None alongside __eq__
    __hash__ = None

    def __repr__(self):
        return self.__dict__.__repr__()
| epl-1.0 |
chienlieu2017/it_management | odoo/odoo/addons/base/ir/ir_cron.py | 5 | 13870 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import threading
import time
import psycopg2
import pytz
from datetime import datetime
from dateutil.relativedelta import relativedelta
import odoo
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
BASE_VERSION = odoo.modules.load_information_from_description_file('base')['version']
def str2tuple(s):
    """Safely evaluate the cron record's textual ``args`` field into a tuple."""
    return safe_eval('tuple(%s)' % (s or ''))

# Maps an ``interval_type`` selection value to a factory producing the
# relativedelta added between two consecutive scheduled runs.
# NOTE(review): 'work_days' is treated exactly like plain 'days' here --
# confirm whether weekend skipping was ever intended.
_intervalTypes = {
    'work_days': lambda interval: relativedelta(days=interval),
    'days': lambda interval: relativedelta(days=interval),
    'hours': lambda interval: relativedelta(hours=interval),
    'weeks': lambda interval: relativedelta(days=7*interval),
    'months': lambda interval: relativedelta(months=interval),
    'minutes': lambda interval: relativedelta(minutes=interval),
}
class ir_cron(models.Model):
    """ Model describing cron jobs (also called actions or tasks).

    A job row stores what to call (model/method/args), who runs it
    (user_id) and the schedule (nextcall + interval + numbercall).
    Execution uses two cursors per job: one holding a row lock on the
    ir_cron record and one running the job itself.
    """

    # TODO: perhaps in the future we could consider a flag on ir.cron jobs
    # that would cause database wake-up even if the database has not been
    # loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something)
    # See also odoo.cron
    _name = "ir.cron"
    _order = 'name'

    name = fields.Char(required=True)
    user_id = fields.Many2one('res.users', string='User', default=lambda self: self.env.user, required=True)
    active = fields.Boolean(default=True)
    interval_number = fields.Integer(default=1, help="Repeat every x.")
    interval_type = fields.Selection([('minutes', 'Minutes'),
                                      ('hours', 'Hours'),
                                      ('work_days', 'Work Days'),
                                      ('days', 'Days'),
                                      ('weeks', 'Weeks'),
                                      ('months', 'Months')], string='Interval Unit', default='months')
    numbercall = fields.Integer(string='Number of Calls', default=1, help='How many times the method is called,\na negative number indicates no limit.')
    doall = fields.Boolean(string='Repeat Missed', help="Specify if missed occurrences should be executed when the server restarts.")
    nextcall = fields.Datetime(string='Next Execution Date', required=True, default=fields.Datetime.now, help="Next planned execution date for this job.")
    model = fields.Char(string='Object', help="Model name on which the method to be called is located, e.g. 'res.partner'.")
    function = fields.Char(string='Method', help="Name of the method to be called when this job is processed.")
    args = fields.Text(string='Arguments', help="Arguments to be passed to the method, e.g. (uid,).")
    priority = fields.Integer(default=5, help='The priority of the job, as an integer: 0 means higher priority, 10 means lower priority.')

    @api.constrains('args')
    def _check_args(self):
        # args must parse as a tuple literal (see str2tuple above)
        try:
            for this in self:
                str2tuple(this.args)
        except Exception:
            raise ValidationError(_('Invalid arguments'))

    @api.multi
    def method_direct_trigger(self):
        """Run each selected job immediately, as the job's configured user."""
        for cron in self:
            self.sudo(user=cron.user_id.id)._callback(cron.model, cron.function, cron.args, cron.id)
        return True

    @api.model
    def _handle_callback_exception(self, model_name, method_name, args, job_id, job_exception):
        """ Method called when an exception is raised by a job.

        Simply logs the exception and rollback the transaction.

        :param model_name: model name on which the job method is located.
        :param method_name: name of the method to call when this job is processed.
        :param args: arguments of the method (without the usual self, cr, uid).
        :param job_id: job id.
        :param job_exception: exception raised by the job.
        """
        self._cr.rollback()
        _logger.exception("Call of self.env[%r].%s(*%r) failed in Job %s",
                          model_name, method_name, args, job_id)

    @api.model
    def _callback(self, model_name, method_name, args, job_id):
        """ Run the method associated to a given job

        It takes care of logging and exception handling.

        :param model_name: model name on which the job method is located.
        :param method_name: name of the method to call when this job is processed.
        :param args: arguments of the method (without the usual self, cr, uid).
        :param job_id: job id.
        """
        try:
            args = str2tuple(args)
            if self.pool != self.pool.check_signaling():
                # the registry has changed, reload self in the new registry
                self.env.reset()
                self = self.env()[self._name]
            if model_name in self.env:
                model = self.env[model_name]
                if hasattr(model, method_name):
                    log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
                    odoo.netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (self._cr.dbname, self._uid, '*', model_name, method_name)+tuple(args), depth=log_depth)
                    if _logger.isEnabledFor(logging.DEBUG):
                        start_time = time.time()
                    getattr(model, method_name)(*args)
                    if _logger.isEnabledFor(logging.DEBUG):
                        end_time = time.time()
                        _logger.debug('%.3fs (%s, %s)', end_time - start_time, model_name, method_name)
                    # propagate cache invalidation to the other workers
                    self.pool.signal_caches_change()
                else:
                    _logger.warning("Method '%s.%s' does not exist.", model_name, method_name)
            else:
                _logger.warning("Model %r does not exist.", model_name)
        except Exception, e:
            self._handle_callback_exception(model_name, method_name, args, job_id, e)

    @classmethod
    def _process_job(cls, job_cr, job, cron_cr):
        """ Run a given job taking care of the repetition.

        :param job_cr: cursor to use to execute the job, safe to commit/rollback
        :param job: job to be run (as a dictionary).
        :param cron_cr: cursor holding lock on the cron job row, to use to update the next exec date,
            must not be committed/rolled back!
        """
        try:
            with api.Environment.manage():
                cron = api.Environment(job_cr, job['user_id'], {})[cls._name]
                # Use the user's timezone to compare and compute datetimes,
                # otherwise unexpected results may appear. For instance, adding
                # 1 month in UTC to July 1st at midnight in GMT+2 gives July 30
                # instead of August 1st!
                now = fields.Datetime.context_timestamp(cron, datetime.now())
                nextcall = fields.Datetime.context_timestamp(cron, fields.Datetime.from_string(job['nextcall']))
                numbercall = job['numbercall']

                # catch up on every missed occurrence; actually execute only
                # once unless doall is set
                ok = False
                while nextcall < now and numbercall:
                    if numbercall > 0:
                        numbercall -= 1
                    if not ok or job['doall']:
                        cron._callback(job['model'], job['function'], job['args'], job['id'])
                    if numbercall:
                        nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
                    ok = True
                addsql = ''
                if not numbercall:
                    # call budget exhausted: deactivate the job
                    addsql = ', active=False'
                cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s",
                                (fields.Datetime.to_string(nextcall.astimezone(pytz.UTC)), numbercall, job['id']))
                cron.invalidate_cache()
        finally:
            job_cr.commit()
            cron_cr.commit()

    @classmethod
    def _acquire_job(cls, db_name):
        # TODO remove 'check' argument from addons/base_action_rule/base_action_rule.py
        """ Try to process one cron job.

        This selects in database all the jobs that should be processed. It then
        tries to lock each of them and, if it succeeds, run the cron job (if it
        doesn't succeed, it means the job was already locked to be taken care
        of by another thread) and return.

        If a job was processed, returns True, otherwise returns False.
        """
        db = odoo.sql_db.db_connect(db_name)
        threading.current_thread().dbname = db_name
        cr = db.cursor()
        jobs = []
        try:
            # Make sure the database we poll has the same version as the code of base
            cr.execute("SELECT 1 FROM ir_module_module WHERE name=%s AND latest_version=%s", ('base', BASE_VERSION))
            if cr.fetchone():
                # Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
                cr.execute("""SELECT * FROM ir_cron
                              WHERE numbercall != 0
                                  AND active AND nextcall <= (now() at time zone 'UTC')
                              ORDER BY priority""")
                jobs = cr.dictfetchall()
            else:
                _logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION)
        except psycopg2.ProgrammingError, e:
            if e.pgcode == '42P01':
                # Class 42 — Syntax Error or Access Rule Violation; 42P01: undefined_table
                # The table ir_cron does not exist; this is probably not an OpenERP database.
                _logger.warning('Tried to poll an undefined table on database %s.', db_name)
            else:
                raise
        except Exception:
            _logger.warning('Exception in cron:', exc_info=True)
        finally:
            cr.close()

        for job in jobs:
            lock_cr = db.cursor()
            try:
                # Try to grab an exclusive lock on the job row from within the task transaction
                # Restrict to the same conditions as for the search since the job may have already
                # been run by an other thread when cron is running in multi thread
                lock_cr.execute("""SELECT *
                                   FROM ir_cron
                                   WHERE numbercall != 0
                                      AND active
                                      AND nextcall <= (now() at time zone 'UTC')
                                      AND id=%s
                                   FOR UPDATE NOWAIT""",
                                (job['id'],), log_exceptions=False)

                locked_job = lock_cr.fetchone()
                if not locked_job:
                    _logger.debug("Job `%s` already executed by another process/thread. skipping it", job['name'])
                    continue
                # Got the lock on the job row, run its code
                _logger.debug('Starting job `%s`.', job['name'])
                job_cr = db.cursor()
                try:
                    registry = odoo.registry(db_name)
                    registry[cls._name]._process_job(job_cr, job, lock_cr)
                except Exception:
                    _logger.exception('Unexpected exception while processing cron job %r', job)
                finally:
                    job_cr.close()

            except psycopg2.OperationalError, e:
                if e.pgcode == '55P03':
                    # Class 55: Object not in prerequisite state; 55P03: lock_not_available
                    _logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
                    continue
                else:
                    # Unexpected OperationalError
                    raise
            finally:
                # we're exiting due to an exception while acquiring the lock
                lock_cr.close()

        if hasattr(threading.current_thread(), 'dbname'):  # cron job could have removed it as side-effect
            del threading.current_thread().dbname

    @api.multi
    def _try_lock(self):
        """Try to grab a dummy exclusive write-lock to the rows with the given ids,
           to make sure a following write() or unlink() will not block due
           to a process currently executing those cron tasks"""
        try:
            self._cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
                             [tuple(self.ids)], log_exceptions=False)
        except psycopg2.OperationalError:
            self._cr.rollback()  # early rollback to allow translations to work for the user feedback
            raise UserError(_("Record cannot be modified right now: "
                              "This cron task is currently being executed and may not be modified "
                              "Please try again in a few minutes"))

    @api.multi
    def write(self, vals):
        self._try_lock()
        return super(ir_cron, self).write(vals)

    @api.multi
    def unlink(self):
        self._try_lock()
        return super(ir_cron, self).unlink()

    @api.multi
    def try_write(self, values):
        # best-effort write: silently skip if the rows are currently locked
        try:
            with self._cr.savepoint():
                self._cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
                                 [tuple(self.ids)], log_exceptions=False)
        except psycopg2.OperationalError:
            pass
        else:
            return super(ir_cron, self).write(values)
        return False

    @api.model
    def toggle(self, model, domain):
        # activate the job iff at least one record matches the domain
        active = bool(self.env[model].search_count(domain))
        return self.try_write({'active': active})
| gpl-3.0 |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/vcs/__init__.py | 536 | 8748 | """Handles all VCS (version control) support"""
import os
import shutil
from pip.backwardcompat import urlparse, urllib
from pip.log import logger
from pip.util import (display_path, backup_dir, find_command,
rmtree, ask_path_exists)
__all__ = ['vcs', 'get_src_requirement']
class VcsSupport(object):
    """Registry of the available version-control backends."""

    _registry = {}
    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']

    def __init__(self):
        # Register more schemes with urlparse for various version control systems
        urlparse.uses_netloc.extend(self.schemes)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment
        if getattr(urlparse, 'uses_fragment', None):
            urlparse.uses_fragment.extend(self.schemes)
        super(VcsSupport, self).__init__()

    def __iter__(self):
        return iter(self._registry)

    @property
    def backends(self):
        return list(self._registry.values())

    @property
    def dirnames(self):
        """Control directory names (e.g. '.git') of all registered backends."""
        return [backend.dirname for backend in self.backends]

    @property
    def all_schemes(self):
        """URL schemes understood by any registered backend."""
        return [scheme for backend in self.backends for scheme in backend.schemes]

    def register(self, cls):
        """Add a backend class to the registry (keyed by its ``name``)."""
        if not hasattr(cls, 'name'):
            logger.warn('Cannot register VCS %s' % cls.__name__)
            return
        self._registry.setdefault(cls.name, cls)

    def unregister(self, cls=None, name=None):
        """Remove a backend, either by name or by class."""
        if name in self._registry:
            del self._registry[name]
        elif cls in self._registry.values():
            del self._registry[cls.name]
        else:
            logger.warn('Cannot unregister because no class or name given')

    def get_backend_name(self, location):
        """
        Return the name of the version control backend if found at given
        location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
        """
        for vc_type in self._registry.values():
            if os.path.exists(os.path.join(location, vc_type.dirname)):
                return vc_type.name
        return None

    def get_backend(self, name):
        """Look up a backend class by (case-insensitive) name, or None."""
        return self._registry.get(name.lower())

    def get_backend_from_location(self, location):
        """Backend class for the checkout at *location*, or None."""
        name = self.get_backend_name(location)
        return self.get_backend(name) if name else None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
    def __init__(self, url=None, *args, **kwargs):
        # *url* is the (possibly "vcs+proto://...") repository URL; the
        # backend executable path is resolved lazily (see the ``cmd``
        # property).  Cooperative super() call for mixin-friendly MRO.
        self.url = url
        self._cmd = None
        super(VersionControl, self).__init__(*args, **kwargs)
    def _filter(self, line):
        # Default log-level mapping for a line of subprocess output;
        # subclasses can override to demote noisy lines.
        return (logger.INFO, line)
def _is_local_repository(self, repo):
"""
posix absolute paths start with os.path.sep,
win32 ones ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or drive
    @property
    def cmd(self):
        """Path to the VCS executable, located on first access and cached."""
        if self._cmd is not None:
            return self._cmd
        command = find_command(self.name)
        logger.info('Found command %r at %r' % (self.name, command))
        self._cmd = command
        return command
    def get_url_rev(self):
        """
        Returns the correct repository URL and revision by parsing the given
        repository URL
        """
        error_message = (
            "Sorry, '%s' is a malformed VCS url. "
            "The format is <vcs>+<protocol>://<url>, "
            "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
        assert '+' in self.url, error_message % self.url
        # drop the "<vcs>+" prefix, then split an optional "@rev" suffix
        # off the path component of the remaining URL
        url = self.url.split('+', 1)[1]
        scheme, netloc, path, query, frag = urlparse.urlsplit(url)
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        # rebuild without the fragment (e.g. "#egg=...")
        url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
        return url, rev
    def get_info(self, location):
        """
        Returns (url, revision), where both are strings
        """
        # *location* must be the checkout root, not the control directory
        assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
        return self.get_url(location), self.get_revision(location)
    def normalize_url(self, url):
        """
        Normalize a URL for comparison by unquoting it and removing any trailing slash.
        """
        return urllib.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
    def parse_vcs_bundle_file(self, content):
        """
        Takes the contents of the bundled text file that explains how to revert
        the stripped off version control data of the given package and returns
        the URL and revision of it.

        Abstract: each concrete backend must override.
        """
        raise NotImplementedError
    def obtain(self, dest):
        """
        Called when installing or updating an editable package, takes the
        source path of the checkout.

        Abstract: each concrete backend must override.
        """
        raise NotImplementedError
def switch(self, dest, url, rev_options):
    """
    Switch the repo at ``dest`` to point to ``URL``.

    Abstract: concrete VCS backends must override this.
    """
    # Bug fix: the original raised ``NotImplemented`` (the comparison
    # singleton), which is not an exception class and itself raises a
    # TypeError instead of the intended NotImplementedError.
    raise NotImplementedError
def update(self, dest, rev_options):
    """
    Update an already-existing repo to the given ``rev_options``.

    Abstract: concrete VCS backends must override this.
    """
    raise NotImplementedError
def check_destination(self, dest, url, rev_options, rev_display):
    """
    Prepare a location to receive a checkout/clone.
    Return True if the location is ready for (and requires) a
    checkout/clone, False otherwise.
    """
    checkout = True
    prompt = False
    if os.path.exists(dest):
        # Something already lives at dest; never clone over it blindly.
        checkout = False
        if os.path.exists(os.path.join(dest, self.dirname)):
            # dest is already a checkout of this VCS type.
            existing_url = self.get_url(dest)
            if self.compare_urls(existing_url, url):
                # Same repository: just update it to the requested revision.
                logger.info('%s in %s exists, and has correct URL (%s)' %
                            (self.repo_name.title(), display_path(dest),
                             url))
                logger.notify('Updating %s %s%s' %
                              (display_path(dest), self.repo_name,
                               rev_display))
                self.update(dest, rev_options)
            else:
                # Same VCS but a different URL: ask the user what to do.
                logger.warn('%s %s in %s exists with URL %s' %
                            (self.name, self.repo_name,
                             display_path(dest), existing_url))
                prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
                          ('s', 'i', 'w', 'b'))
        else:
            # Directory exists but is not a checkout of this VCS at all.
            logger.warn('Directory %s already exists, '
                        'and is not a %s %s.' %
                        (dest, self.name, self.repo_name))
            prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
        if prompt:
            logger.warn('The plan is to install the %s repository %s' %
                        (self.name, url))
            response = ask_path_exists('What to do? %s' % prompt[0],
                                       prompt[1])
            if response == 's':
                logger.notify('Switching %s %s to %s%s' %
                              (self.repo_name, display_path(dest), url,
                               rev_display))
                self.switch(dest, url, rev_options)
            elif response == 'i':
                # do nothing
                pass
            elif response == 'w':
                # Wipe: remove the conflicting tree and clone fresh.
                logger.warn('Deleting %s' % display_path(dest))
                rmtree(dest)
                checkout = True
            elif response == 'b':
                # Backup: move the old tree aside, then clone fresh.
                dest_dir = backup_dir(dest)
                logger.warn('Backing up %s to %s'
                            % (display_path(dest), dest_dir))
                shutil.move(dest, dest_dir)
                checkout = True
    return checkout
def unpack(self, location):
    # Clean, unconditional checkout: wipe anything at location, then clone.
    if os.path.exists(location):
        rmtree(location)
    self.obtain(location)
def get_src_requirement(self, dist, location, find_tags=False):
    # Abstract: backends return an editable requirement string
    # ("<vcs>+<url>@<rev>#egg=<name>") for the checkout at location.
    raise NotImplementedError
def get_src_requirement(dist, location, find_tags):
    """Module-level dispatcher: pick the VCS backend that owns *location*
    and ask it for an editable requirement string for *dist*; fall back to
    the plain requirement when no backend recognizes the directory."""
    version_control = vcs.get_backend_from_location(location)
    if version_control:
        return version_control().get_src_requirement(dist, location, find_tags)
    logger.warn('cannot determine version of editable source in %s (is not SVN checkout, Git clone, Mercurial clone or Bazaar branch)' % location)
    return dist.as_requirement()
| lgpl-3.0 |
coxmediagroup/googleads-python-lib | examples/dfp/v201411/network_service/make_test_network.py | 4 | 1901 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a test network.
You do not need to have a DFP account to run this example, but you do need to
have a Google account (created at http://www.google.com/accounts/newaccount
if you currently don't have one) that is not associated with any other DFP test
networks. Once this network is created, you can supply the network code in your
settings to make calls to other services.
Alternatively, if you do not wish to run this example, you can create a test
network at:
https://dfp-playground.appspot.com
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Create a DFP test network and print its code and sign-in URL."""
    # The NetworkService exposes the MakeTestNetwork operation.
    network_service = client.GetService('NetworkService', version='v201411')
    # Ask DFP to provision a brand-new test network for this account.
    network = network_service.MakeTestNetwork()
    network_code = network['networkCode']
    # Report the outcome to the user.
    print ('Test network with network code \'%s\' and display name \'%s\' '
           'created.' % (network_code, network['displayName']))
    print ('You may now sign in at http://www.google.com/dfp/main?networkCode=%s'
           % network_code)
if __name__ == '__main__':
    # Initialize client object.
    # LoadFromStorage() reads credentials/settings from the default
    # googleads storage file in the user's home directory.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/encodings/iso8859_10.py | 15 | 3900 | """ Python Character Mapping Codec generated from '8859-10.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Charmap codec for ISO 8859-10, driven by the module-level maps."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the Codec/StreamWriter bases.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the Codec/StreamReader bases.
    pass
### encodings module API
def getregentry():
    # Registration tuple consumed by the encodings package's search function.
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
# Start from the identity mapping for all 256 byte values, then override
# only the positions where ISO 8859-10 (Latin-6) differs from Latin-1.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
    0x00a2: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
    0x00a3: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
    0x00a4: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
    0x00a5: 0x0128, # LATIN CAPITAL LETTER I WITH TILDE
    0x00a6: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
    0x00a8: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
    0x00a9: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
    0x00aa: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
    0x00ab: 0x0166, # LATIN CAPITAL LETTER T WITH STROKE
    0x00ac: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
    0x00ae: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
    0x00af: 0x014a, # LATIN CAPITAL LETTER ENG
    0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
    0x00b2: 0x0113, # LATIN SMALL LETTER E WITH MACRON
    0x00b3: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
    0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON
    0x00b5: 0x0129, # LATIN SMALL LETTER I WITH TILDE
    0x00b6: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
    0x00b8: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
    0x00b9: 0x0111, # LATIN SMALL LETTER D WITH STROKE
    0x00ba: 0x0161, # LATIN SMALL LETTER S WITH CARON
    0x00bb: 0x0167, # LATIN SMALL LETTER T WITH STROKE
    0x00bc: 0x017e, # LATIN SMALL LETTER Z WITH CARON
    0x00bd: 0x2015, # HORIZONTAL BAR
    0x00be: 0x016b, # LATIN SMALL LETTER U WITH MACRON
    0x00bf: 0x014b, # LATIN SMALL LETTER ENG
    0x00c0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
    0x00c7: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
    0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
    0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
    0x00cc: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
    0x00d1: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
    0x00d2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
    0x00d7: 0x0168, # LATIN CAPITAL LETTER U WITH TILDE
    0x00d9: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
    0x00e0: 0x0101, # LATIN SMALL LETTER A WITH MACRON
    0x00e7: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
    0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
    0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
    0x00ec: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
    0x00f1: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
    0x00f2: 0x014d, # LATIN SMALL LETTER O WITH MACRON
    0x00f7: 0x0169, # LATIN SMALL LETTER U WITH TILDE
    0x00f9: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
    0x00ff: 0x0138, # LATIN SMALL LETTER KRA
})

### Encoding Map

# The encoding map is the mechanical inverse of the decoding map.
encoding_map = codecs.make_encoding_map(decoding_map)
| mit |
Clean-Cole/pysftp | tests/test_chmod.py | 1 | 1145 | '''test pysftp.Connection.chmod - uses py.test'''
from __future__ import print_function
# pylint: disable = W0142
# pylint: disable=E1101
from common import *
import pytest
def test_chmod_not_exist(psftp):
    '''verify error if trying to chmod something that isn't there'''
    # A missing remote path surfaces as an IOError from the SFTP layer.
    with pytest.raises(IOError):
        psftp.chmod('i-do-not-exist.txt', 666)
@skip_if_ci
def test_chmod_simple(lsftp):
    '''test basic chmod with octal mode represented by an int'''
    new_mode = 744 # user=rwx g=r o=r
    with tempfile_containing('') as fname:
        base_fname = os.path.split(fname)[1]
        # Upload a scratch file, change its mode, read the mode back, and
        # remove the remote copy again.
        org_attrs = lsftp.put(fname)
        lsftp.chmod(base_fname, new_mode)
        new_attrs = lsftp.stat(base_fname)
        lsftp.remove(base_fname)
        # that the new mod 744 is as we wanted
        assert pysftp.st_mode_to_int(new_attrs.st_mode) == new_mode
        # that we actually changed something
        assert new_attrs.st_mode != org_attrs.st_mode
def test_chmod_fail_ro(psftp):
    '''test chmod against read-only server'''
    # The read-only test server rejects attribute changes with IOError.
    new_mode = 440
    fname = 'readme.txt'
    with pytest.raises(IOError):
        psftp.chmod(fname, new_mode)
| bsd-3-clause |
azumimuo/family-xbmc-addon | script.module.simple.downloader/lib/SimpleDownloader.py | 8 | 32985 | '''
Simple Downloader plugin for XBMC
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import urllib2
import os
import time
import subprocess
import DialogDownloadProgress
#http://wiki.openelec.tv/index.php?title=How_to_make_OpenELEC_addon_-_on_MuMuDVB_sample
class SimpleDownloader():
    """Queue-based downloader for XBMC add-ons: plain http/ftp via urllib2,
    streams via rtmpdump/vlc/mplayer subprocesses."""
    # Shared progress dialog handle; empty unicode string means "no dialog".
    dialog = u""

    def __init__(self):
        self.version = u"1.9.4"
        self.plugin = u"SimpleDownloader-" + self.version
        # Prefer objects injected by the hosting add-on's __main__ module so
        # the downloader shares its logger/settings; fall back to importing
        # the real (or dummy) modules when running standalone.
        if hasattr(sys.modules["__main__"], "common"):
            self.common = sys.modules["__main__"].common
        else:
            import CommonFunctions
            self.common = CommonFunctions
        self.common.log("")
        try:
            import StorageServer
            self.cache = StorageServer.StorageServer("Downloader")
        except:
            import storageserverdummy as StorageServer
            self.cache = StorageServer.StorageServer("Downloader")
        if hasattr(sys.modules["__main__"], "xbmcaddon"):
            self.xbmcaddon = sys.modules["__main__"].xbmcaddon
        else:
            import xbmcaddon
            self.xbmcaddon = xbmcaddon
        self.settings = self.xbmcaddon.Addon(id='script.module.simple.downloader')
        if hasattr(sys.modules["__main__"], "xbmc"):
            self.xbmc = sys.modules["__main__"].xbmc
        else:
            import xbmc
            self.xbmc = xbmc
        if hasattr(sys.modules["__main__"], "xbmcvfs"):
            self.xbmcvfs = sys.modules["__main__"].xbmcvfs
        else:
            try:
                import xbmcvfs
                self.xbmcvfs = xbmcvfs
            except ImportError:
                import xbmcvfsdummy as xbmcvfs
                self.xbmcvfs = xbmcvfs
        if hasattr(sys.modules["__main__"], "dbglevel"):
            self.dbglevel = sys.modules["__main__"].dbglevel
        else:
            self.dbglevel = 3
        if hasattr(sys.modules["__main__"], "dbg"):
            self.dbg = sys.modules["__main__"].dbg
        else:
            self.dbg = True
        self.language = self.settings.getLocalizedString
        self.hide_during_playback = self.settings.getSetting("hideDuringPlayback") == "true"
        # Setting is an index 0-9; map it to 1000-10000 ms.
        self.notification_length = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10][int(self.settings.getSetting("notification_length"))] * 1000
        # External binaries are configurable, with sane PATH-based defaults.
        if self.settings.getSetting("rtmp_binary"):
            self.rtmp_binary = self.settings.getSetting("rtmp_binary")
        else:
            self.rtmp_binary = "rtmpdump"
        if self.settings.getSetting("vlc_binary"):
            self.vlc_binary = self.settings.getSetting("vlc_binary")
        else:
            self.vlc_binary = "vlc"
        if self.settings.getSetting("mplayer_binary"):
            self.mplayer_binary = self.settings.getSetting("mplayer_binary")
        else:
            self.mplayer_binary = "mplayer"
        # Worker threads spawned by _run_async, keyed by thread name.
        self.__workersByName = {}
        # In-progress downloads land in the add-on profile dir first.
        self.temporary_path = self.xbmc.translatePath(self.settings.getAddonInfo("profile"))
        if not self.xbmcvfs.exists(self.temporary_path):
            self.common.log("Making path structure: " + repr(self.temporary_path))
            self.xbmcvfs.mkdir(self.temporary_path)
        # State of the currently running download (holds the Popen handle).
        self.cur_dl = {}
        self.common.log("Done")
def download(self, filename, params={}, async=True):
    """Public entry point: queue *filename* (with download options in
    *params*) and start/join the queue consumer.

    NOTE(review): this module targets Python 2 - ``async`` is a reserved
    word from Python 3.7 on, and ``params={}`` is a shared mutable
    default; both are kept for interface compatibility.
    """
    self.common.log("", 5)
    if async:
        self.common.log("Async", 5)
        self._run_async(self._startDownload, filename, params)
    else:
        self.common.log("Normal", 5)
        self._startDownload(filename, params)
    self.common.log("Done", 5)
def _startDownload(self, filename, params={}):
    """Queue the item; if no other instance holds the global downloader
    lock, also become the consumer that drains the whole queue."""
    self.common.log("", 5)
    if self.cache.lock("SimpleDownloaderLock"):
        self.common.log("Downloader not active, initializing downloader.")
        self._addItemToQueue(filename, params)
        self._processQueue()
        self.cache.unlock("SimpleDownloaderLock")
    else:
        # Another consumer is running; it will pick this item up.
        self.common.log("Downloader is active, Queueing item.")
        self._addItemToQueue(filename, params)
    self.common.log("Done", 5)
def _setPaths(self, filename, params={}):
    """Store the in-progress (temp) and final paths for *filename* in
    *params* and delete any stale files at either location."""
    self.common.log(filename, 5)
    # Check utf-8 stuff here
    params["path_incomplete"] = os.path.join(self.temporary_path.decode("utf-8"), self.common.makeUTF8(filename))
    params["path_complete"] = os.path.join(params["download_path"].decode("utf-8"), self.common.makeUTF8(filename))
    self.common.log(params["path_incomplete"], 5)
    self.common.log(params["path_complete"], 5)
    if self.xbmcvfs.exists(params["path_complete"]):
        self.common.log("Removing existing %s" % repr(params["path_complete"]))
        self.xbmcvfs.delete(params["path_complete"])
    if self.xbmcvfs.exists(params["path_incomplete"]):
        self.common.log("Removing incomplete %s" % repr(params["path_incomplete"]))
        self.xbmcvfs.delete(params["path_incomplete"])
    self.common.log("Done", 5)
def _processQueue(self):
    """Drain the persisted queue: dispatch each item to the URL or stream
    downloader, move finished files into place, and stop when the queue
    is empty or a download reports "aborted" (status 300)."""
    self.common.log("")
    item = self._getNextItemFromQueue()
    if item:
        (filename, item) = item
        if item:
            # NOTE(review): an empty params dict is falsy and ends the loop
            # below - confirm callers always pass a populated params dict.
            if not self.dialog:
                self.dialog = DialogDownloadProgress.DownloadProgress()
                self.dialog.create(self.language(201), "")
        while item:
            status = 500
            self._setPaths(filename, item)
            if not "url" in item:
                self.common.log("URL missing : %s" % repr(item))
            elif item["url"].find("ftp") > -1 or item["url"].find("http") > -1:
                # Plain http/ftp transfer via urllib2.
                status = self._downloadURL(filename, item)
            else:
                # Probably a stream; probe for a capture tool first.
                self._detectStream(filename, item)
                if "cmd_call" in item:
                    status = self._downloadStream(filename, item)
                else:
                    self._showMessage(self.language(301), filename)
            if status == 200:
                if self.xbmcvfs.exists(item["path_incomplete"]):
                    self.common.log("Moving %s to %s" % (repr(item["path_incomplete"]), repr(item["path_complete"])))
                    self.xbmcvfs.rename(item["path_incomplete"], item["path_complete"])
                    self._showMessage(self.language(203), filename)
                else:
                    self.common.log("Download complete, but file %s not found" % repr(item["path_incomplete"]))
                    self._showMessage(self.language(204), "ERROR")
            elif status != 300:
                self.common.log("Failure: " + repr(item) + " - " + repr(status))
                self._showMessage(self.language(204), self.language(302))
            if status == 300:
                # 300 == aborted: stop consuming, leave the item queued.
                item = False
            else:
                self._removeItemFromQueue(filename)
                item = self._getNextItemFromQueue()
                if item:
                    (filename, item) = item
    self.common.log("Finished download queue.")
    self.cache.set("StopQueue", "")
    if self.dialog:
        self.dialog.close()
        self.common.log("Closed dialog")
        self.dialog = u""
def _runCommand(self, args):
    """Spawn *args* as a subprocess with stderr merged into stdout.

    The Popen handle is stashed in self.cur_dl so _stopCurrentDownload()
    can abort it; returns the process, or False if spawning failed.
    """
    self.common.log(" ".join(args))
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        self.cur_dl["proc"] = proc
    except:
        self.common.log("Couldn't run command")
        return False
    else:
        self.common.log("Returning process", 5)
        return proc
def _readPipe(self, proc):
    """Return the process's remaining stdout, or '' on any failure."""
    self.common.log("", 50)
    try:
        return proc.communicate()[0]
    except:
        return ""
def _rtmpDetectArgs(self, probe_args, item):
get = item.get
self.common.log("")
if get("url"):
probe_args += ["--rtmp", get("url")]
elif get("rtmp"):
probe_args += ["--rtmp", get("url")]
if get("host"):
probe_args += ["--host", get("host")]
if get("port"):
probe_args += ["--port", get("port")]
if get("protocol"):
probe_args += ["--protocol", get("protocol")]
if get("app"):
probe_args += ["--app", get("app")]
if get("tcUrl"):
probe_args += ["--tcUrl", get("tcUrl")]
if get("pageUrl"):
probe_args += ["--pageUrl", get("pageUrl")]
if get("swfUrl"):
probe_args += ["--swfUrl", get("swfUrl")]
if get("flashVer"):
probe_args += ["--flashVer", get("flashVer")]
if get("auth"):
probe_args += ["--auth", get("auth")]
if get("conn"):
probe_args += ["--conn", get("conn")]
if get("playpath"):
probe_args += ["--playpath", get("playpath")]
if get("playlist"):
probe_args += ["--playlist"]
if get("live"):
probe_args += ["--live"]
if get("subscribe"):
probe_args += ["--subscribe", get("subscribe")]
if get("resume"):
probe_args += ["--resume"]
if get("skip"):
probe_args += ["--skip", get("skip")]
if get("start"):
probe_args += ["--start", get("start")]
if get("stop") and "--stop" not in probe_args:
probe_args += ["--stop", str(get("stop"))]
elif get("duration") and "--stop" not in probe_args:
probe_args += ["--stop", str(get("duration"))]
if get("buffer"):
probe_args += ["--buffer", get("buffer")]
if get("timeout"):
probe_args += ["--timeout", get("timeout")]
if get("token"):
probe_args += ["--token", get("token")]
if get("swfhash"):
probe_args += ["--swfhash", get("swfhash")]
if get("swfsize"):
probe_args += ["--swfsize", get("swfsize")]
if get("player_url"):
probe_args += ["--swfVfy", get("player_url")]
elif get("swfVfy"):
probe_args += ["--swfVfy", get("player_url")]
if get("swfAge"):
probe_args += ["--swfAge", get("swfAge")]
self.common.log("Done: " + repr(probe_args))
return probe_args
def _detectStream(self, filename, item):
    """Probe rtmpdump, then vlc, then mplayer against item["url"].

    The first tool that successfully opens the stream fills in
    item["cmd_call"] (the full download command) and, when detectable,
    item["total_size"]; total_size defaults to 0 when nothing is learned.
    Each probe runs the tool for ~1 second and parses its output.
    """
    get = item.get
    self.common.log(get("url"))
    # RTMPDump
    if get("url").find("rtmp") > -1 or get("use_rtmpdump"):
        self.common.log("Trying rtmpdump")
        # Detect filesize
        probe_args = [self.rtmp_binary, "--stop", "1"]
        probe_args = self._rtmpDetectArgs(probe_args, item)
        proc = self._runCommand(probe_args)
        if proc:
            output = ""
            now = time.time()
            # Collect output for at most ~15s or until the tool reports
            # that the transfer started.
            while not proc.poll():
                temp_output = self._readPipe(proc)
                output += temp_output
                if now + 15 < time.time() or output.find("Starting") > -1:
                    self.common.log("Breaking, duration: " + repr(time.time() - now))
                    break
            if output.find("Starting") > -1: # If download actually started
                if output.find("filesize") > -1:
                    item["total_size"] = int(float(output[output.find("filesize") + len("filesize"):output.find("\n", output.find("filesize"))]))
                elif get("live"):
                    item["total_size"] = 0
                cmd_call = self._rtmpDetectArgs([self.rtmp_binary], item)
                cmd_call += ["--flv", item["path_incomplete"]]
                item["cmd_call"] = cmd_call
            try:
                proc.kill()
            except:
                pass
    # VLC
    # Fix getting filesize
    if ("total_size" not in item and "cmd_call" not in item) or get("use_vlc"):
        self.common.log("Trying vlc")
        # Detect filesize
        probe_args = [self.vlc_binary, "-I", "dummy", "-v", "-v", "--stop-time", "1", "--sout", "file/avi:" + item["path_incomplete"], item["url"], "vlc://quit"]
        proc = self._runCommand(probe_args)
        if proc:
            output = ""
            now = time.time()
            while not proc.poll():
                temp_output = self._readPipe(proc)
                output += temp_output
                if now + 15 < time.time() or output.find(get("url") + "' successfully opened") > -1:
                    self.common.log("Breaking, duration: " + repr(time.time() - now))
                    break
            if output.find(get("url") + "' successfully opened") > -1:
                # "and False": media_length parsing is deliberately disabled.
                if output.find("media_length:") > -1 and False:
                    item["total_size"] = int(float(output[output.find("media_length:") + len("media_length:"):output.find("s", output.find("media_length:"))]))
                elif get("live"):
                    item["total_size"] = 0
                # Download args
                cmd_call = [self.vlc_binary, "-v", "-v", "-I", "dummy", "--sout", "file/avi:" + get("path_incomplete")]
                if "duration" in item:
                    cmd_call += ["--stop-time", str(get("duration"))]
                cmd_call += [get("url"), "vlc://quit"]
                item["cmd_call"] = cmd_call
            try:
                proc.kill()
            except:
                pass
    # Mplayer
    # -endpos doesn't work with dumpstream.
    if ("total_size" not in item and "cmd_call" not in item) or get("use_mplayer"):
        self.common.log("Trying mplayer")
        # Detect filesize
        probe_args = [self.mplayer_binary, "-v", "-endpos", "1", "-vo", "null", "-ao", "null", get("url")]
        proc = self._runCommand(probe_args)
        if proc:
            output = ""
            now = time.time()
            while not proc.poll():
                temp_output = self._readPipe(proc)
                output += temp_output
                if now + 15 < time.time() or output.find("Starting playback") > -1:
                    self.common.log("Breaking, duration: " + repr(time.time() - now))
                    break
            if output.find("Starting playback") > -1:
                if output.find("filesize") > -1:
                    item["total_size"] = int(float(output[output.find("filesize: ") + len("filesize: "):output.find("\n", output.find("filesize: "))]))
                elif get("live"):
                    item["total_size"] = 0
                item["cmd_call"] = [self.mplayer_binary, "-v", "-dumpstream", "-dumpfile", item["path_incomplete"], get("url")]
            try:
                proc.kill()
            except:
                pass
    # Always leave total_size defined for the download loop.
    if not "total_size" in item:
        item["total_size"] = 0
def _stopCurrentDownload(self):
    """Kill the subprocess backing the in-progress stream download, if any."""
    self.common.log("")
    if "proc" in self.cur_dl:
        self.common.log("Killing: " + repr(self.cur_dl))
        proc = self.cur_dl["proc"]
        try:
            proc.kill()
            self.common.log("Killed")
        except:
            # Process may already have exited; nothing more to do.
            self.common.log("Couldn't kill")
    self.common.log("Done")
def _downloadStream(self, filename, item):
    """Run item["cmd_call"] and watch the output file until the download
    finishes, stalls out, or is aborted.

    Progress is inferred from the growing file size (the tools write the
    stream to item["path_incomplete"] themselves). Returns 200 on
    success, 300 when aborted via params["quit"], 500 on failure.
    """
    get = item.get
    self.common.log(filename)
    self.common.log(get("cmd_call"))
    same_bytes_count = 0
    retval = 1
    params = {"bytes_so_far": 0, "mark": 0.0, "queue_mark": 0.0, "obytes_so_far": 0}
    item["percent"] = 0.1
    item["old_percent"] = -1
    delay = 0.3
    stall_timeout = self.settings.getSetting("stall_timeout")
    proc = self._runCommand(get("cmd_call"))
    output = ""
    if proc:
        while proc.returncode == None and "quit" not in params:
            temp_output = proc.stdout.read(23)
            # Keep only a bounded amount of tool output for error reports.
            if len(output) > 10000:
                output = output[0:500] + "\r\n\r\n\r\n"
            output += temp_output
            if self.xbmcvfs.exists(item["path_incomplete"]):
                params["bytes_so_far"] = os.path.getsize(item["path_incomplete"])
            if params["mark"] == 0.0 and params["bytes_so_far"] > 0:
                # First byte seen: start the transfer clock.
                params["mark"] = time.time()
                self.common.log("Mark set")
            if params["bytes_so_far"] == params["obytes_so_far"]:
                # File did not grow since last check: possible stall.
                if same_bytes_count == 0:
                    now = time.time()
                same_bytes_count += 1
                #if delay < 3:
                # delay = delay * 1.2
                if same_bytes_count >= 300 and (item["total_size"] != 0 or params["bytes_so_far"] != 0) and (now + int(stall_timeout) < time.time()):
                    # Long stall past stall_timeout: treat as complete.
                    self.common.log("Download complete. Same bytes for 300 times in a row.")
                    if (item["total_size"] > 0 and item["total_size"] * 0.998 < params["bytes_so_far"]):
                        self.common.log("Size disrepancy: " + str(item["total_size"] - params["bytes_so_far"]))
                    retval = 0
                    break
                else:
                    self.common.log("Sleeping: " + str(delay) + " - " + str(params["bytes_so_far"]), 5)
                    time.sleep(delay)
                    continue
            else:
                same_bytes_count = 0
                #if delay > 0.3:
                # delay = delay * 0.8
                self.common.log("Bytes updated: " + str(delay) + " - " + str(params["bytes_so_far"]), 5)
            self.common.log("bytes_so_far : " + str(params["bytes_so_far"]), 5)
            self._generatePercent(item, params)
            if "duration" in item and repr(get("cmd_call")).find("mplayer") > -1 and item["percent"] > 105:
                # mplayer's dumpstream ignores -endpos; enforce duration here.
                self.common.log("Mplayer over percentage %s. Killing! " % repr(item["percent"]))
                retval = 0
                proc.kill()
                break
            if item["percent"] > item["old_percent"] or time.time() - params["queue_mark"] > 3:
                self._updateProgress(filename, item, params)
                item["old_percent"] = item["percent"]
            if params["bytes_so_far"] >= item["total_size"] and item["total_size"] != 0:
                self.common.log("Download complete. Matched size")
                retval = 0
                break
            # "and False": duration-based completion is deliberately disabled.
            if "duration" in item and params["mark"] > 0.0 and (params["mark"] + int(get("duration")) + 10 < time.time()) and False:
                self.common.log("Download complete. Over duration.")
                retval = 0
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those.
            if (item["total_size"] != 0 and get("url").find("rtmp") > -1 and item["total_size"] * 0.998 < params["bytes_so_far"]):
                self.common.log("Download complete. Size disrepancy: " + str(item["total_size"] - params["bytes_so_far"]) + " - " + str(same_bytes_count))
                retval = 0
                break
            params["obytes_so_far"] = params["bytes_so_far"]
        try:
            output += proc.stdout.read()
            proc.kill()
        except:
            pass
    if "quit" in params:
        self.common.log("Download aborted.")
        return 300
    if retval == 1:
        self.common.log("Download failed, binary output: %s" % output)
        return 500
    self.common.log("Done")
    return 200
def _downloadURL(self, filename, item):
    """Download item["url"] over http/ftp (urllib2) into the temp path.

    Returns 200 on success, 300 when aborted via params["quit"], 500 on
    any failure (the partially written file is left behind).
    """
    self.common.log(filename)
    url = urllib2.Request(item["url"])
    if "useragent" in item:
        url.add_header("User-Agent", item["useragent"])
    else:
        url.add_header("User-Agent", self.common.USERAGENT)
    if "cookie" in item:
        if item["cookie"]!=False :
            url.add_header("Cookie", item["cookie"])
    file = self.common.openFile(item["path_incomplete"], "wb")
    con = urllib2.urlopen(url)
    item["total_size"] = 0
    chunk_size = 1024 * 8
    if con.info().getheader("Content-Length").strip():
        item["total_size"] = int(con.info().getheader("Content-Length").strip())
    params = {"bytes_so_far": 0, "mark": 0.0, "queue_mark": 0.0, "obytes_so_far": 0}
    item["percent"] = 0.1
    item["old_percent"] = -1
    try:
        while "quit" not in params:
            chunk = con.read(chunk_size)
            file.write(chunk)
            params["bytes_so_far"] += len(chunk)
            if params["mark"] == 0.0 and params["bytes_so_far"] > 0:
                # First byte seen: start the transfer clock.
                params["mark"] = time.time()
                self.common.log("Mark set")
            self._generatePercent(item, params)
            self.common.log("recieved chunk: %s - %s" % ( repr(item["percent"] > item["old_percent"]), repr(time.time() - params["queue_mark"])), 4)
            if item["percent"] > item["old_percent"] or time.time() - params["queue_mark"] > 30:
                # Bug fix: the old code called _updateProgress(...) directly
                # (synchronously) and handed its None return value to
                # _run_async as the thread target. Pass the callable and its
                # arguments instead, as _run_async expects.
                self._run_async(self._updateProgress, filename, item, params)
                item["old_percent"] = item["percent"]
            params["obytes_so_far"] = params["bytes_so_far"]
            if not chunk:
                break
        self.common.log("Loop done")
        con.close()
        file.close()
    except:
        self.common.log("Download failed.")
        # Best-effort cleanup of both handles before reporting the error.
        try:
            con.close()
        except:
            self.common.log("Failed to close download stream")
        try:
            file.close()
        except:
            self.common.log("Failed to close file handle")
        self._showMessage(self.language(204), "ERROR")
        return 500
    if "quit" in params:
        self.common.log("Download aborted.")
        return 300
    self.common.log("Done")
    return 200
def _convertSecondsToHuman(self, seconds):
seconds = int(seconds)
if seconds < 60:
return "~%ss" % (seconds)
elif seconds < 3600:
return "~%sm" % (seconds / 60)
def _generatePercent(self, item, params):
    """Update item["percent"] from the best information available.

    Preference order: real byte ratio (known total_size), elapsed time vs
    item["duration"], then a free-running "barber pole" that wraps at 100.
    Recomputed at most every 0.2s (tracked via item["last_delta"]).
    """
    self.common.log("", 5)
    get = params.get
    iget = item.get
    new_delta = False
    if "last_delta" in item:
        if time.time() - item["last_delta"] > 0.2:
            new_delta = True
    else:
        item["last_delta"] = 0.0
        new_delta = True
    if item["total_size"] > 0 and new_delta:
        # Exact progress: bytes on disk vs expected total.
        self.common.log("total_size", 5)
        item["percent"] = float(get("bytes_so_far")) / float(item["total_size"]) * 100
    elif iget("duration") and get("mark") != 0.0 and new_delta:
        # Time-based progress for streams with a known duration.
        time_spent = time.time() - get("mark")
        item["percent"] = time_spent / int(iget("duration")) * 100
        self.common.log("Time spent: %s. Duration: %s. Time left: %s (%s)" % (int(time_spent), int(iget("duration")),
                        int(int(iget("duration")) - time_spent),
                        self._convertSecondsToHuman(int(iget("duration")) - time_spent)), 5)
    elif new_delta:
        # Unknown size and duration: advance by elapsed time, wrapping.
        self.common.log("cycle - " + str(time.time() - item["last_delta"]), 5)
        delta = time.time() - item["last_delta"]
        if delta > 10 or delta < 0:
            delta = 5
        item["percent"] = iget("old_percent") + delta
        if item["percent"] >= 100:
            item["percent"] -= 100
        item["old_percent"] = item["percent"]
    if new_delta:
        item["last_delta"] = time.time()
def _getQueue(self):
    """Return the queue as a list of (filename, params) tuples.

    The queue is persisted as repr()-ed Python and rebuilt with eval();
    anything unparsable yields an empty container.
    NOTE(review): eval() on cached text is only safe while this module is
    the sole writer of the "SimpleDownloaderQueue" cache key.
    """
    self.common.log("")
    queue = self.cache.get("SimpleDownloaderQueue")
    try:
        items = eval(queue)
    except:
        items = {}
    self.common.log("Done: " + str(len(items)))
    return items
def _updateProgress(self, filename, item, params):
    """Refresh the on-screen progress dialog for the current download.

    Re-reads the queue from cache at most every 1.5s (cached on self in
    between) and hides the dialog during fullscreen video playback.
    """
    self.common.log("", 3)
    get = params.get
    iget = item.get
    queue = False
    new_mark = time.time()
    if new_mark == get("mark"):
        speed = 0
    else:
        # Average KB/s since the first byte arrived.
        speed = int((get("bytes_so_far") / 1024) / (new_mark - get("mark")))
    if new_mark - get("queue_mark") > 1.5:
        queue = self.cache.get("SimpleDownloaderQueue")
        self.queue = queue
    elif hasattr(self, "queue"):
        queue = self.queue
    self.common.log("eval queue", 2)
    try:
        items = eval(queue)
    except:
        items = {}
    if new_mark - get("queue_mark") > 1.5:
        heading = u"[%s] %sKb/s (%.2f%%)" % (len(items), speed, item["percent"])
        self.common.log("Updating %s - %s" % (heading, self.common.makeUTF8(filename)), 2)
    params["queue_mark"] = new_mark
    if self.xbmc.Player().isPlaying() and self.xbmc.getCondVisibility("VideoPlayer.IsFullscreen"):
        # Don't obscure fullscreen playback with the progress dialog.
        if self.dialog:
            self.dialog.close()
            self.dialog = u""
    else:
        if not self.dialog:
            self.dialog = DialogDownloadProgress.DownloadProgress()
            self.dialog.create(self.language(201), "")
        heading = u"[%s] %s - %.2f%%" % (len(items), self.language(202), item["percent"])
        if iget("Title"):
            self.dialog.update(percent=item["percent"], heading=heading, label=iget("Title"))
        else:
            self.dialog.update(percent=item["percent"], heading=heading, label=filename)
    self.common.log("Done", 3)
#============================= Download Queue =================================
def _getNextItemFromQueue(self):
    """Peek at the head of the queue under the queue lock.

    Returns the first (filename, params) tuple, False when the queue is
    empty, or None (implicitly) when the lock could not be acquired.
    The item is NOT removed; callers do that after a successful download.
    """
    if self.cache.lock("SimpleDownloaderQueueLock"):
        items = []
        queue = self.cache.get("SimpleDownloaderQueue")
        self.common.log("queue loaded : " + repr(queue))
        if queue:
            try:
                items = eval(queue)
            except:
                items = False
        item = {}
        if len(items) > 0:
            item = items[0]
            self.common.log("returning : " + item[0])
        self.cache.unlock("SimpleDownloaderQueueLock")
        if items:
            return item
        else:
            return False
    else:
        self.common.log("Couldn't aquire lock")
def _addItemToQueue(self, filename, params={}):
    """Append (filename, params) to the persisted queue under the lock.

    If the filename is already queued it is removed and re-inserted at
    position 1 (just behind the item currently downloading).
    """
    if self.cache.lock("SimpleDownloaderQueueLock"):
        items = []
        if filename:
            queue = self.cache.get("SimpleDownloaderQueue")
            self.common.log("queue loaded : " + repr(queue), 3)
            if queue:
                try:
                    items = eval(queue)
                except:
                    items = []
            append = True
            for index, item in enumerate(items):
                (item_id, item) = item
                if item_id == filename:
                    # Already queued: drop the old entry, re-queue up front.
                    append = False
                    del items[index]
                    break
            if append:
                items.append((filename, params))
                self.common.log("Added: " + filename + " to queue - " + str(len(items)))
            else:
                items.insert(1, (filename, params)) # 1 or 0?
                self.common.log("Moved " + filename + " to front of queue. - " + str(len(items)))
            self.cache.set("SimpleDownloaderQueue", repr(items))
        self.cache.unlock("SimpleDownloaderQueueLock")
        self.common.log("Done")
    else:
        self.common.log("Couldn't lock")
def _removeItemFromQueue(self, filename):
    """Delete the queue entry matching *filename* and persist the result,
    all under the queue lock."""
    if self.cache.lock("SimpleDownloaderQueueLock"):
        items = []
        queue = self.cache.get("SimpleDownloaderQueue")
        self.common.log("queue loaded : " + repr(queue), 3)
        if queue:
            try:
                items = eval(queue)
            except:
                items = []
            for index, item in enumerate(items):
                (item_id, item) = item
                if item_id == filename:
                    # NOTE(review): deletes while iterating without break -
                    # fine for a single match, skips the following entry
                    # if duplicates ever exist.
                    del items[index]
                    self.cache.set("SimpleDownloaderQueue", repr(items))
                    self.common.log("Removed: " + filename + " from queue")
        self.cache.unlock("SimpleDownloaderQueueLock")
        self.common.log("Done")
    else:
        self.common.log("Exception")
def movieItemToPosition(self, filename, position):
    """Move an already-queued item to *position* in the queue (public API;
    the "movie" spelling is kept for backward compatibility).

    Position 0 is refused because that slot belongs to the item currently
    being downloaded.
    """
    if position > 0 and self.cache.lock("SimpleDownloaderQueueLock"):
        items = []
        if filename:
            queue = self.cache.get("SimpleDownloaderQueue")
            self.common.log("queue loaded : " + repr(queue), 3)
            if queue:
                try:
                    items = eval(queue)
                except:
                    items = []
                self.common.log("pre items: %s " % repr(items), 3)
                for index, item in enumerate(items):
                    (item_id, item) = item
                    if item_id == filename:
                        print "FOUND ID"
                        del items[index]
                        items = items[:position] + [(filename, item)] + items[position:]
                        break
                self.common.log("post items: %s " % repr(items), 3)
                self.cache.set("SimpleDownloaderQueue", repr(items))
        self.cache.unlock("SimpleDownloaderQueueLock")
        self.common.log("Done")
    else:
        self.common.log("Couldn't lock")
def isRTMPInstalled(self):
    """Return True when the rtmpdump binary can be launched on this system."""
    cmd = ["rtmpdump", "-V"]
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # rtmpdump prints its banner on stderr.
        banner = proc.communicate()[1]
        return banner.find("RTMPDump") > -1
    except:
        # Binary missing (or banner unreadable): report not installed.
        return False
def isVLCInstalled(self):
    """Return True when the vlc binary can be launched on this system."""
    basic_args = ["vlc", "--version"]
    try:
        p = subprocess.Popen(basic_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # vlc prints its version banner on stdout.
        output = p.communicate()[0]
        self.common.log(repr(output))
        return output.find("VLC") > -1
    except:
        return False
def isMPlayerInstalled(self):
    """Return True when the mplayer binary can be launched on this system."""
    basic_args = ["mplayer"]
    try:
        p = subprocess.Popen(basic_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # mplayer prints its banner on stdout even without arguments.
        output = p.communicate()[0]
        self.common.log(repr(output))
        return output.find("MPlayer") > -1
    except:
        return False
def _run_async(self, func, *args, **kwargs):
    """Run func(*args, **kwargs) on a worker thread.

    The Thread object is kept in self.__workersByName (keyed by thread
    name) and also returned to the caller.
    """
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    self.__workersByName[worker.getName()] = worker
    worker.start()
    return worker
# Shows a more user-friendly notification
def _showMessage(self, heading, message):
    """Pop up an XBMC notification with the given heading and message."""
    body = self.common.makeUTF8(message)
    builtin = u'XBMC.Notification("%s", "%s", %s)' % (heading, body, self.notification_length)
    self.xbmc.executebuiltin(builtin.encode("utf-8"))
| gpl-2.0 |
salguarnieri/intellij-community | python/helpers/pep8.py | 63 | 81919 | #!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
# Copyright (C) 2014 Ian Lee <ianlee1521@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8.
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
from __future__ import with_statement
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
__version__ = '1.6.2'
# Comma-separated glob patterns skipped by default when walking directories.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox'
# Error codes ignored unless the user selects them explicitly.
DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704'
# Per-user configuration file location (platform dependent).
try:
    if sys.platform == 'win32':
        USER_CONFIG = os.path.expanduser(r'~\.pep8')
    else:
        USER_CONFIG = os.path.join(
            os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'),
            'pep8'
        )
except ImportError:
    USER_CONFIG = None
# Project-level configuration files searched upward from the checked path.
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
# 'print' is a keyword in Python 2, kept here for cross-version checking.
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
# ERRORTOKEN is triggered by backticks in Python 3
SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
# Pre-compiled regexes shared by the per-line check functions below.
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'\b(None|False|True)?\s*([=!]=)'
                                     r'\s*(?(1)|(None|False|True))\b')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""Never mix tabs and spaces.
    The most popular way of indenting Python is with spaces only. The
    second-most popular way is with tabs only. Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively. When
    invoking the Python command line interpreter with the -t option, it issues
    warnings about code that illegally mixes tabs and spaces. When using -tt
    these warnings become errors. These options are highly recommended!
    Okay: if a == 0:\n a = 1\n b = 1
    E101: if a == 0:\n a = 1\n\tb = 1
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    # Report the first indent character that disagrees with the file's
    # dominant indent character; None when the indent is consistent.
    mismatches = (
        (offset, "E101 indentation contains mixed spaces and tabs")
        for offset, char in enumerate(indent)
        if char != indent_char
    )
    return next(mismatches, None)
def tabs_obsolete(physical_line):
    r"""For new projects, spaces-only are strongly recommended over tabs.
    Okay: if True:\n return
    W191: if True:\n\treturn
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    tab_offset = indent.find('\t')
    # find() returns -1 when the indent is tab-free; only a real hit
    # produces a warning.
    if tab_offset != -1:
        return tab_offset, "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""Trailing whitespace is superfluous.
    The warning returned varies on whether the line itself is blank, for easier
    filtering for those who want to indent their blank lines.
    Okay: spam(1)\n#
    W291: spam(1) \n#
    W293: class Foo(object):\n \n bang = 12
    """
    # Drop the line terminator characters (LF, CR, form feed) first so the
    # newline itself never counts as trailing whitespace.
    for terminator in ('\n', '\r', '\x0c'):
        physical_line = physical_line.rstrip(terminator)
    stripped = physical_line.rstrip(' \t\v')
    if stripped == physical_line:
        return None
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number, total_lines):
    r"""Trailing blank lines are superfluous.
    Okay: spam(1)
    W391: spam(1)\n
    However the last line should end with a new line (warning W292).
    """
    # Only the final physical line of the file can trigger either warning.
    if line_number != total_lines:
        return None
    stripped = physical_line.rstrip()
    if not stripped:
        return 0, "W391 blank line at end of file"
    if stripped == physical_line:
        # Nothing was stripped, so the line has no terminating newline.
        return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length, multiline):
    r"""Limit all lines to a maximum of 79 characters.
    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to have
    several windows side-by-side. The default wrapping on such devices looks
    ugly. Therefore, please limit all lines to a maximum of 79 characters.
    For flowing long blocks of text (docstrings or comments), limiting the
    length to 72 characters is recommended.
    Reports error E501.
    """
    line = physical_line.rstrip()
    length = len(line)
    if length <= max_line_length or noqa(line):
        return None
    # Special case for long URLs in multi-line docstrings or comments,
    # but still report the error when the 72 first chars are whitespaces.
    chunks = line.split()
    single_long_token = len(chunks) == 1 and multiline
    long_comment_token = len(chunks) == 2 and chunks[0] == '#'
    if ((single_long_token or long_comment_token) and
            len(line) - len(chunks[-1]) < max_line_length - 7):
        return None
    if hasattr(line, 'decode'):  # Python 2
        # The line could contain multi-byte characters
        try:
            length = len(line.decode('utf-8'))
        except UnicodeError:
            pass
    if length > max_line_length:
        return (max_line_length, "E501 line too long "
                "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                blank_before, previous_logical, previous_indent_level):
    r"""Separate top-level function and class definitions with two blank lines.
    Method definitions inside a class are separated by a single blank line.
    Extra blank lines may be used (sparingly) to separate groups of related
    functions. Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).
    Use blank lines in functions, sparingly, to indicate logical sections.
    Okay: def a():\n pass\n\n\ndef b():\n pass
    Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass
    E301: class Foo:\n b = 0\n def bar():\n pass
    E302: def a():\n pass\n\ndef b(n):\n pass
    E303: def a():\n pass\n\n\n\ndef b(n):\n pass
    E303: def a():\n\n\n\n pass
    E304: @decorator\n\ndef a():\n pass
    """
    if line_number < 3 and not previous_logical:
        return  # Don't expect blank lines before the first line
    if previous_logical.startswith('@'):
        # A decorator must sit directly on top of what it decorates.
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
        return
    if blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
        return
    if not logical_line.startswith(('def ', 'class ', '@')):
        return
    if indent_level:
        # Method inside a class: one blank line expected, unless the
        # previous line opened the scope or was a docstring.
        separated = (blank_before or previous_indent_level < indent_level or
                     DOCSTRING_REGEX.match(previous_logical))
        if not separated:
            yield 0, "E301 expected 1 blank line, found 0"
    elif blank_before != 2:
        yield 0, "E302 expected 2 blank lines, found %d" % blank_before
def extraneous_whitespace(logical_line):
    r"""Avoid extraneous whitespace.
    Avoid extraneous whitespace in these situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.
    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E201: spam(ham[ 1], {eggs: 2})
    E201: spam(ham[1], { eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E202: spam(ham[1 ], {eggs: 2})
    E202: spam(ham[1], {eggs: 2 })
    E203: if x == 4: print x, y; x, y = y , x
    E203: if x == 4: print x, y ; x, y = y, x
    E203: if x == 4 : print x, y; x, y = y, x
    """
    # Each match is two characters: either "([{ " (open bracket + space)
    # or " )]},;:" (space + closer/punctuation).
    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(logical_line):
        text = match.group()
        char = text.strip()
        found = match.start()
        if text.endswith(' '):
            # whitespace right after an opening bracket
            yield found + 1, "E201 whitespace after '%s'" % char
        elif logical_line[found - 1] != ',':
            code = 'E202' if char in '}])' else 'E203'
            yield found, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
    r"""Avoid extraneous whitespace around keywords.
    Okay: True and False
    E271: True and False
    E272: True and False
    E273: True and\tFalse
    E274: True\tand False
    """
    for match in KEYWORD_REGEX.finditer(logical_line):
        before, after = match.groups()
        # Check the gap on each side of the keyword with its own pair of
        # error codes; a tab outranks the multiple-spaces warning.
        sides = (
            (before, match.start(1), "E274 tab before keyword",
             "E272 multiple spaces before keyword"),
            (after, match.start(2), "E273 tab after keyword",
             "E271 multiple spaces after keyword"),
        )
        for gap, offset, tab_msg, spaces_msg in sides:
            if '\t' in gap:
                yield offset, tab_msg
            elif len(gap) > 1:
                yield offset, spaces_msg
def missing_whitespace(logical_line):
    r"""Each comma, semicolon or colon should be followed by whitespace.
    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    line = logical_line
    for index, char in enumerate(line[:-1]):
        if char not in ',;:' or line[index + 1] in WHITESPACE:
            continue
        before = line[:index]
        if char == ':' and before.count('[') > before.count(']') and \
                before.rfind('{') < before.rfind('['):
            continue  # Slice syntax, no space required
        if char == ',' and line[index + 1] == ')':
            continue  # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""Use 4 spaces per indentation level.
    For really old code that you don't want to mess up, you can continue to
    use 8-space tabs.
    Okay: a = 1
    Okay: if a == 0:\n a = 1
    E111: a = 1
    E114: # a = 1
    Okay: for item in items:\n pass
    E112: for item in items:\npass
    E115: for item in items:\n# Hi\n pass
    Okay: a = 1\nb = 2
    E113: a = 1\n b = 2
    E116: a = 1\n # b = 2
    """
    # Comment-only lines report the parallel E114-E116 codes instead of
    # E111-E113, with a " (comment)" suffix on the message.
    if logical_line:
        shift, suffix = 0, ""
    else:
        shift, suffix = 3, " (comment)"

    def _issue(num, text):
        return 0, "E11%d %s%s" % (num + shift, text, suffix)

    if indent_level % 4:
        yield _issue(1, "indentation is not a multiple of four")
    indent_expect = previous_logical.endswith(':')
    if indent_expect and indent_level <= previous_indent_level:
        yield _issue(2, "expected an indented block")
    elif not indent_expect and indent_level > previous_indent_level:
        yield _issue(3, "unexpected indentation")
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa, verbose):
    r"""Continuation lines indentation.
    Continuation lines should align wrapped elements either vertically
    using Python's implicit line joining inside parentheses, brackets
    and braces, or using a hanging indent.
    When using a hanging indent these considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
      continuation line.
    Okay: a = (\n)
    E123: a = (\n    )
    Okay: a = (\n    42)
    E121: a = (\n   42)
    E122: a = (\n42)
    E123: a = (\n    42\n    )
    E124: a = (24,\n     42\n)
    E125: if (\n    b):\n    pass
    E126: a = (\n        42)
    E127: a = (24,\n      42)
    E128: a = (24,\n    42)
    E129: if (a or\n    b):\n    pass
    E131: a = (\n    42\n 24)
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return
    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')
    row = depth = 0
    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # for each depth, collect a list of opening rows
    open_rows = [[0]]
    # for each depth, memorize the hanging indentation
    hangs = [None]
    # visual indents: maps a column to True (bracket-anchored), str
    # (string/comment-anchored) or a token text to line up with
    indent_chances = {}
    last_indent = tokens[0][2]
    visual_indent = None
    last_token_multiline = False
    # for each depth, memorize the visual indent column
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())
    for token_type, text, start, end, line in tokens:
        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = not last_token_multiline and token_type not in NEWLINE
        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())
            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level
            # identify closing bracket
            close_bracket = (token_type == tokenize.OP and text in ']})')
            # is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])
            # is there any chance of visual indent?
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))
            # The elif-chain below classifies this continuation line into
            # exactly one of the E12x cases (or accepts it).
            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # visual indent is broken
                    yield (start, "E128 continuation line "
                           "under-indented for visual indent")
            elif hanging_indent or (indent_next and rel_indent[row] == 8):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
                hangs[depth] = hang
            elif visual_indent is True:
                # visual indent is verified
                indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif not close_bracket and hangs[depth]:
                    error = "E131", "unaligned for hanging indent"
                else:
                    hangs[depth] = hang
                    if hang > 4:
                        error = "E126", "over-indented for hanging indent"
                    else:
                        error = "E121", "under-indented for hanging indent"
                yield start, "%s continuation line %s" % error
        # look for visual indenting
        if (parens[row] and
                token_type not in (tokenize.NL, tokenize.COMMENT) and
                not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == 4
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)
        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text
        last_token_multiline = (start[0] != end[0])
        if last_token_multiline:
            rel_indent[end[0] - first_row] = rel_indent[row]
    # After the last token: the final continuation line must not collide
    # with the indentation of the block the ':' introduces.
    if indent_next and expand_indent(line) == indent_level + 4:
        pos = (start[0], indent[0] + 4)
        if visual_indent:
            code = "E129 visually indented line"
        else:
            code = "E125 continuation line"
        yield pos, "%s with same indent as next logical line" % code
def whitespace_before_parameters(logical_line, tokens):
    r"""Avoid extraneous whitespace.
    Avoid extraneous whitespace in the following situations:
    - before the open parenthesis that starts the argument list of a
      function call.
    - before the open parenthesis that starts an indexing or slicing.
    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index in range(1, len(tokens)):
        token_type, text, start, end, __ = tokens[index]
        opens_args = token_type == tokenize.OP and text in '(['
        gap_before = start != prev_end
        callable_before = prev_type == tokenize.NAME or prev_text in '}])'
        # "class A (B):" is legal, so never flag right after `class`,
        # and keywords like `return (...)` are always fine.
        not_class_header = index < 2 or tokens[index - 2][1] != 'class'
        if (opens_args and gap_before and callable_before and
                not_class_header and not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type, prev_text, prev_end = token_type, text, end
def whitespace_around_operator(logical_line):
    r"""Avoid extraneous whitespace around an operator.
    Okay: a = 12 + 3
    E221: a = 4 + 5
    E222: a = 4 + 5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    for match in OPERATOR_REGEX.finditer(logical_line):
        before, after = match.groups()
        # Each side of the operator gets its own error-code pair; a tab
        # outranks the multiple-spaces warning.
        sides = (
            (before, match.start(1), "E223 tab before operator",
             "E221 multiple spaces before operator"),
            (after, match.start(2), "E224 tab after operator",
             "E222 multiple spaces after operator"),
        )
        for gap, offset, tab_msg, spaces_msg in sides:
            if '\t' in gap:
                yield offset, tab_msg
            elif len(gap) > 1:
                yield offset, spaces_msg
def missing_whitespace_around_operator(logical_line, tokens):
    r"""Surround operators with a single space on either side.
    - Always surround these binary operators with a single space on
      either side: assignment (=), augmented assignment (+=, -= etc.),
      comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
      Booleans (and, or, not).
    - If operators with different priorities are used, consider adding
      whitespace around the operators with the lowest priorities.
    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]
    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    parens = 0
    # need_space is a tri-state carried to the *next* token:
    #   False       -> nothing pending
    #   True        -> previous token was a mandatory-space operator
    #   (pos, bool) -> previous token was an optional-space operator seen
    #                  at pos; bool records whether a space preceded it
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in SKIP_COMMENTS:
            continue
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                elif prev_text != '**':
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if (prev_text in '}])' if prev_type == tokenize.OP
                        else prev_text not in KEYWORDS):
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None
            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""Avoid extraneous whitespace after a comma or a colon.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    for match in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        matched = match.group()
        punct = matched[0]
        # Report the position just after the punctuation character.
        position = match.start() + 1
        if '\t' in matched:
            yield position, "E242 tab after '%s'" % punct
        else:
            yield position, "E241 multiple spaces after '%s'" % punct
def whitespace_around_named_parameter_equals(logical_line, tokens):
    r"""Don't use spaces around the '=' sign in function arguments.
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.
    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    Okay: boolean(a == b)
    Okay: boolean(a != b)
    Okay: boolean(a <= b)
    Okay: boolean(a >= b)
    Okay: def foo(arg: int = 42):
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    parens = 0
    # no_space is set when a keyword '=' was just seen, so the *next*
    # token can be checked for an unwanted gap after the '='.
    no_space = False
    prev_end = None
    # True while inside an annotated def argument (between ':' and ','),
    # where spaces around '=' are allowed.
    annotated_func_arg = False
    in_def = logical_line.startswith('def')
    message = "E251 unexpected spaces around keyword / parameter equals"
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.NL:
            continue
        if no_space:
            no_space = False
            if start != prev_end:
                # Gap after the '='.
                yield (prev_end, message)
        if token_type == tokenize.OP:
            if text == '(':
                parens += 1
            elif text == ')':
                parens -= 1
            elif in_def and text == ':' and parens == 1:
                annotated_func_arg = True
            elif parens and text == ',' and parens == 1:
                annotated_func_arg = False
            elif parens and text == '=' and not annotated_func_arg:
                no_space = True
                if start != prev_end:
                    # Gap before the '='.
                    yield (prev_end, message)
            if not parens:
                annotated_func_arg = False
        prev_end = end
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.
    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.
    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).
    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            # Anything before the '#' on this physical line makes it an
            # inline comment.
            inline_comment = line[:start[1]].strip()
            if inline_comment:
                if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                    yield (prev_end,
                           "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            # bad_prefix is the first character after stripping leading '#'
            # (e.g. '!' for a shebang, '#' for '###'), or False when the
            # comment starts with a clean '# '.
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                # A shebang ('#!') is only tolerated on the first line.
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
def imports_on_separate_lines(logical_line):
    r"""Imports should usually be on separate lines.
    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: from myclas import MyClass
    Okay: from foo.bar.yourclass import YourClass
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    if not logical_line.startswith('import '):
        return
    comma = logical_line.find(',')
    # A semicolon before the comma means the comma belongs to a second
    # statement on the line, not to the import itself.
    if comma > -1 and ';' not in logical_line[:comma]:
        yield comma, "E401 multiple imports on one line"
def module_imports_on_top_of_file(
        logical_line, indent_level, checker_state, noqa):
    r"""Imports are always put at the top of the file, just after any module
    comments and docstrings, and before module globals and constants.
    Okay: import os
    Okay: # this is a comment\nimport os
    Okay: '''this is a module docstring'''\nimport os
    Okay: r'''this is a module docstring'''\nimport os
    Okay: try:\n import x\nexcept:\n pass\nelse:\n pass\nimport y
    Okay: try:\n import x\nexcept:\n pass\nfinally:\n pass\nimport y
    E402: a=1\nimport os
    E402: 'One string'\n"Two string"\nimport os
    E402: a=1\nfrom sys import x
    Okay: if x:\n import os
    """
    def _opens_string_literal(text):
        # Strip an optional u/b prefix, then an optional r prefix, then
        # look for a quote character.
        if text[0] in 'uUbB':
            text = text[1:]
        if text and text[0] in 'rR':
            text = text[1:]
        return text and (text[0] == '"' or text[0] == "'")

    # Indented code (conditional imports), blank/comment lines and noqa
    # lines never participate in this check.
    if indent_level or noqa or not logical_line:
        return
    line = logical_line
    if line.startswith('import ') or line.startswith('from '):
        if checker_state.get('seen_non_imports', False):
            yield 0, "E402 module level import not at top of file"
    elif any(line.startswith(kw)
             for kw in ('try', 'except', 'else', 'finally')):
        # Allow try, except, else, finally keywords intermixed with
        # imports in order to support conditional importing.
        return
    elif _opens_string_literal(line):
        # The first literal is a docstring, allow it. Otherwise, report error.
        if checker_state.get('seen_docstring', False):
            checker_state['seen_non_imports'] = True
        else:
            checker_state['seen_docstring'] = True
    else:
        checker_state['seen_non_imports'] = True
def compound_statements(logical_line):
    r"""Compound statements (on the same line) are generally discouraged.
    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements.
    Also avoid folding such long lines!
    Always use a def statement instead of an assignment statement that
    binds a lambda expression directly to a name.
    Okay: if foo == 'blah':\n do_blah_thing()
    Okay: do_one()
    Okay: do_two()
    Okay: do_three()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: if foo == 'blah': do_blah_thing()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    E703: do_four();  # useless semicolon
    E704: def f(x): return 2*x
    E731: f = lambda x: 2*x
    """
    line = logical_line
    last_char = len(line) - 1
    colon = line.find(':')
    while -1 < colon < last_char:
        before = line[:colon]
        # A colon inside unbalanced {, [ or ( belongs to a dict literal,
        # a slice or an annotation, not to a compound statement.
        balanced = (before.count('{') <= before.count('}') and
                    before.count('[') <= before.count(']') and
                    before.count('(') <= before.count(')'))
        if balanced:
            lambda_kw = LAMBDA_REGEX.search(before)
            if lambda_kw:
                before = line[:lambda_kw.start()].rstrip()
                if before[-1:] == '=' and isidentifier(before[:-1].strip()):
                    yield 0, ("E731 do not assign a lambda expression, use a "
                              "def")
                break
            if before.startswith('def '):
                yield 0, "E704 multiple statements on one line (def)"
            else:
                yield colon, "E701 multiple statements on one line (colon)"
        colon = line.find(':', colon + 1)
    semicolon = line.find(';')
    while -1 < semicolon:
        if semicolon < last_char:
            yield semicolon, "E702 multiple statements on one line (semicolon)"
        else:
            yield semicolon, "E703 statement ends with a semicolon"
        semicolon = line.find(';', semicolon + 1)
def explicit_line_join(logical_line, tokens):
    r"""Avoid explicit line join between brackets.
    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces. Long lines can be
    broken over multiple lines by wrapping expressions in parentheses. These
    should be used in preference to using a backslash for line continuation.
    E502: aaa = [123, \\n 123]
    E502: aaa = ("bbb " \\n "ccc")
    Okay: aaa = [123,\n 123]
    Okay: aaa = ("bbb "\n "ccc")
    Okay: aaa = "bbb " \\n "ccc"
    Okay: aaa = 123 # \\
    """
    prev_start = prev_end = parens = 0
    in_comment = False
    backslash = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            in_comment = True
        # A new physical line while inside brackets, right after a line
        # ending in '\', means the backslash was unnecessary.
        if start[0] != prev_start and parens and backslash and not in_comment:
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            # Remember where this physical line's trailing backslash is, if any.
            if line.rstrip('\r\n').endswith('\\'):
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in ')]}':
                parens -= 1
def break_around_binary_operator(logical_line, tokens):
    r"""
    Avoid breaks before binary operators.
    The preferred place to break around a binary operator is after the
    operator, not before it.
    W503: (width == 0\n + height == 0)
    W503: (width == 0\n and height == 0)
    Okay: (width == 0 +\n height == 0)
    Okay: foo(\n -x)
    Okay: foo(x\n [])
    Okay: x = '''\n''' + ''
    Okay: foo(x,\n -y)
    Okay: foo(x, # comment\n -y)
    """
    def _is_binary_op(ttype, tok_text):
        # The % character is strictly speaking a binary operator, but the
        # common usage seems to be to put it next to the format parameters,
        # after a line break.
        if ttype != tokenize.OP and tok_text not in ('and', 'or'):
            return False
        return tok_text not in "()[]{},:.;@=%"
    broke_line = False
    after_opener = True
    for ttype, tok_text, start, _end, _line in tokens:
        if ttype == tokenize.COMMENT:
            continue
        if ('\n' in tok_text or '\r' in tok_text) and ttype != tokenize.STRING:
            # A NL/NEWLINE token marks a physical line break.
            broke_line = True
            continue
        if broke_line and not after_opener and _is_binary_op(ttype, tok_text):
            yield start, "W503 line break before binary operator"
        # After an opening bracket or separator, a leading +/- is unary.
        after_opener = tok_text in '([{,;'
        broke_line = False
def comparison_to_singleton(logical_line, noqa):
    r"""Comparison to singletons should use "is" or "is not".
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.
    Okay: if arg is not None:
    E711: if arg != None:
    E711: if None == arg:
    E712: if arg == True:
    E712: if False == arg:
    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value. The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    if noqa:
        return
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    # The singleton may appear on either side of the comparison.
    singleton = match.group(1) or match.group(3)
    same = match.group(2) == '=='
    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    code = 'E711' if singleton == 'None' else 'E712'
    # Suggest the truthiness shortcut only when it is equivalent.
    if same:
        nonzero = singleton == 'True'
    else:
        nonzero = singleton == 'False'
    msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield match.start(2), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_negative(logical_line):
    r"""Negative comparison should be done using "not in" and "is not".
    Okay: if x not in y:\n    pass
    Okay: assert (X in Y or X is Z)
    Okay: if not (X in Y):\n    pass
    Okay: zz = x is not y
    E713: Z = not X in Y
    E713: if not X.B in Y:\n    pass
    E714: if not X is Y:\n    pass
    E714: Z = not X.B is Y
    """
    match = COMPARE_NEGATIVE_REGEX.search(logical_line)
    if not match:
        return
    offset = match.start(1)
    # group(2) is the comparison keyword: 'in' or 'is'.
    if match.group(2) == 'in':
        yield offset, "E713 test for membership should be 'not in'"
    else:
        yield offset, "E714 test for object identity should be 'is not'"
def comparison_type(logical_line, noqa):
    r"""Object type comparisons should always use isinstance().
    Do not compare types directly.
    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    if noqa:
        return
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(1)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        return  # Allow comparison for types which are not obvious
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
    r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
    Okay: if "alph" in d:\n    print d["alph"]
    W601: assert d.has_key('alph')
    """
    if noqa:
        return
    offset = logical_line.find('.has_key(')
    if offset != -1:
        yield offset, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    r"""When raising an exception, use "raise ValueError('message')".
    The older form is removed in Python 3.
    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    # Lines matching RERAISE_COMMA_REGEX are deliberately exempt.
    if RERAISE_COMMA_REGEX.match(logical_line):
        return
    yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    r"""New code should always use != instead of <>.
    The older syntax is removed in Python 3.
    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    offset = logical_line.find('<>')
    if offset != -1:
        yield offset, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    r"""Backticks are removed in Python 3: use repr() instead.
    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    offset = logical_line.find('`')
    if offset != -1:
        yield offset, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
# Version-specific helpers: on Python 2, str.encode() returns a str equal to
# the original, so this comparison selects the Python 2 implementations.
if '' == ''.encode():
    # Python 2: implicit encoding.
    def readlines(filename):
        """Read the source code."""
        with open(filename, 'rU') as f:
            return f.readlines()
    # Python 2 str has no isidentifier(); emulate it with a regex match.
    isidentifier = re.compile(r'[a-zA-Z_]\w*$').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):
        """Read the source code."""
        try:
            with open(filename, 'rb') as f:
                # Honor the PEP 263 coding cookie; detect_encoding consumes
                # up to two lines, which are returned and decoded manually.
                (coding, lines) = tokenize.detect_encoding(f.readline)
                f = TextIOWrapper(f, coding, line_buffering=True)
                return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            # Fall back if file encoding is improperly declared
            with open(filename, encoding='latin-1') as f:
                return f.readlines()
    isidentifier = str.isidentifier
    def stdin_get_value():
        # Read stdin as text, ignoring undecodable bytes.
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
# Matcher for the magic "# noqa" / "# nopep8" comment (case-insensitive).
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
    r"""Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('   \t')
    8
    >>> expand_indent('    \t')
    8
    >>> expand_indent('\t\t')
    16
    """
    if '\t' not in line:
        # Fast path: no tabs, the indent is just the leading whitespace.
        return len(line) - len(line.lstrip())
    width = 0
    for char in line:
        if char == '\t':
            # Advance to the next multiple of 8.
            width = width // 8 * 8 + 8
        elif char == ' ':
            width += 1
        else:
            break
    return width
def mute_string(text):
    """Replace contents with 'xxx' to prevent syntax matching.
    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    # Skip any string modifiers (e.g. u or r) before the opening quote.
    body_start = text.index(text[-1]) + 1
    body_end = len(text) - 1
    # Widen the quotes for triple-quoted strings.
    if text[-3:] in ('"""', "'''"):
        body_start += 2
        body_end -= 2
    masked = 'x' * (body_end - body_start)
    return text[:body_start] + masked + text[body_end:]
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # Map each file named in the diff to the set of new-file row numbers
    # touched by its hunks.
    changed = {}
    current_path = None
    remaining = None
    for diff_line in diff.splitlines():
        if remaining:
            # Inside a hunk: every line that is not a removal advances the
            # position in the new file.
            if not diff_line.startswith('-'):
                remaining -= 1
            continue
        if diff_line.startswith('@@ '):
            hunk = HUNK_REGEX.match(diff_line)
            # Missing groups default to a length/start of 1.
            (row, remaining) = [int(group or '1')
                                for group in hunk.groups()]
            changed[current_path].update(range(row, row + remaining))
        elif diff_line.startswith('+++'):
            current_path = diff_line[4:].split('\t', 1)[0]
            # Strip the conventional "b/" prefix of git diffs.
            if current_path.startswith('b/'):
                current_path = current_path[2:]
            changed[current_path] = set()
    return dict((os.path.join(parent, fname), rows)
                for (fname, rows) in changed.items()
                if rows and filename_match(fname, patterns))
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.
    Return a list of absolute paths.
    """
    if not value:
        return []
    if isinstance(value, list):
        # Already parsed; pass through untouched.
        return value
    def _normalize(piece):
        piece = piece.strip()
        if '/' in piece:
            # Anchor multi-component paths to the parent directory.
            piece = os.path.abspath(os.path.join(parent, piece))
        return piece.rstrip('/')
    return [_normalize(piece) for piece in value.split(',')]
def filename_match(filename, patterns, default=True):
    """Check if patterns contains a pattern that matches filename.
    If patterns is unspecified, this always returns True.
    """
    if patterns:
        return any(fnmatch(filename, pattern) for pattern in patterns)
    return default
def _is_eol_token(token):
    # True when the token ends a physical line: either a NEWLINE/NL token,
    # or the rest of the source line after the token is only an escaped
    # newline ("\\\n").
    return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n'
if COMMENT_WITH_NL:
    # COMMENT_WITH_NL is set (elsewhere) when the tokenizer returns comment
    # tokens with the trailing newline attached; then a comment that spans
    # the whole physical line also ends it.  The default argument binds the
    # plain implementation above before it is shadowed.
    def _is_eol_token(token, _eol_token=_is_eol_token):
        return _eol_token(token) or (token[0] == tokenize.COMMENT and
                                     token[1] == token[4])
##############################################################################
# Framework to run all checks
##############################################################################
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
"""Register a new check object."""
def _add_check(check, kind, codes, args):
if check in _checks[kind]:
_checks[kind][check][0].extend(codes or [])
else:
_checks[kind][check] = (codes or [''], args)
if inspect.isfunction(check):
args = inspect.getargspec(check)[0]
if args and args[0] in ('physical_line', 'logical_line'):
if codes is None:
codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
_add_check(check, args[0], codes, args)
elif inspect.isclass(check):
if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
_add_check(check, 'tree', codes, None)
def init_checks_registry():
    """Register all globally visible functions.
    The first argument name is either 'physical_line' or 'logical_line'.
    """
    current_module = inspect.getmodule(register_check)
    members = inspect.getmembers(current_module, inspect.isfunction)
    for _unused_name, candidate in members:
        # register_check ignores functions that are not checks.
        register_check(candidate)
init_checks_registry()
class Checker(object):
    """Load a Python source file, tokenize it, check coding style."""
    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        # Either an options object is supplied, or keyword arguments are
        # forwarded to a fresh StyleGuide to build one -- never both.
        if options is None:
            options = StyleGuide(kwargs).options
        else:
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.multiline = False # in a multiline string?
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        # Dictionary where a checker can store its custom state.
        self._checker_states = {}
        # Source selection: explicit lines, stdin ('-' or no filename),
        # or read from disk (I/O errors are deferred and reported as E902).
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                (exc_type, exc) = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        # Cache the bound method; it is called for every error.
        self.report_error = self.report.error
    def report_invalid_syntax(self):
        """Check if the syntax is valid."""
        # Extract (row, col) from the pending exception's args when present;
        # default to the start of the file otherwise.
        (exc_type, exc) = sys.exc_info()[:2]
        if len(exc.args) > 1:
            offset = exc.args[1]
            if len(offset) > 2:
                offset = offset[1:3]
        else:
            offset = (1, 0)
        self.report_error(offset[0], offset[1] or 0,
                          'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                          self.report_invalid_syntax)
    def readline(self):
        """Get the next line from the input buffer."""
        if self.line_number >= self.total_lines:
            return ''
        line = self.lines[self.line_number]
        self.line_number += 1
        # Remember the first indentation character seen (space or tab);
        # indentation checks compare against it.
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        return line
    def run_check(self, check, argument_names):
        """Run a check plugin."""
        # Each argument name is looked up as an attribute of this checker.
        arguments = []
        for name in argument_names:
            arguments.append(getattr(self, name))
        return check(*arguments)
    def init_checker_state(self, name, argument_names):
        """ Prepares a custom state for the specific checker plugin."""
        if 'checker_state' in argument_names:
            self.checker_state = self._checker_states.setdefault(name, {})
    def check_physical(self, line):
        """Run all physical checks on a raw input line."""
        self.physical_line = line
        for name, check, argument_names in self._physical_checks:
            self.init_checker_state(name, argument_names)
            result = self.run_check(check, argument_names)
            if result is not None:
                (offset, text) = result
                self.report_error(self.line_number, offset, text, check)
                # E101 (mixed indentation) resets the reference indent char.
                if text[:4] == 'E101':
                    self.indent_char = line[0]
    def build_tokens_line(self):
        """Build a logical line from tokens."""
        # 'mapping' records (offset-in-logical-line, (row, col)) pairs so
        # check_logical can translate logical offsets back to positions.
        logical = []
        comments = []
        length = 0
        prev_row = prev_col = mapping = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                # Mask string contents so checks cannot match inside them.
                text = mute_string(text)
            if prev_row:
                (start_row, start_col) = start
                if prev_row != start_row: # different row
                    prev_text = self.lines[prev_row - 1][prev_col - 1]
                    # Joining rows: insert a space unless the break is
                    # directly between brackets.
                    if prev_text == ',' or (prev_text not in '{[(' and
                                            text not in '}])'):
                        text = ' ' + text
                elif prev_col != start_col: # different column
                    text = line[prev_col:start_col] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (prev_row, prev_col) = end
        self.logical_line = ''.join(logical)
        # noqa applies when any comment on the line matches the noqa regex.
        self.noqa = comments and noqa(''.join(comments))
        return mapping
    def check_logical(self):
        """Build a line from tokens and run all logical checks on it."""
        self.report.increment_logical_line()
        mapping = self.build_tokens_line()
        if not mapping:
            return
        (start_row, start_col) = mapping[0][1]
        start_line = self.lines[start_row - 1]
        self.indent_level = expand_indent(start_line[:start_col])
        if self.blank_before < self.blank_lines:
            self.blank_before = self.blank_lines
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print('   ' + name)
            self.init_checker_state(name, argument_names)
            for offset, text in self.run_check(check, argument_names) or ():
                if not isinstance(offset, tuple):
                    # Integer offsets are relative to the logical line;
                    # translate them to (row, col) via the token mapping.
                    for token_offset, pos in mapping:
                        if offset <= token_offset:
                            break
                    offset = (pos[0], pos[1] + offset - token_offset)
                self.report_error(offset[0], offset[1], text, check)
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
        self.blank_lines = 0
        self.tokens = []
    def check_ast(self):
        """Build the file's AST and run all AST checks."""
        try:
            tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
        except (SyntaxError, TypeError):
            return self.report_invalid_syntax()
        for name, cls, __ in self._ast_checks:
            checker = cls(tree, self.filename)
            for lineno, offset, text, check in checker.run():
                # Honor per-line noqa comments for AST-based checks too.
                if not self.lines or not noqa(self.lines[lineno - 1]):
                    self.report_error(lineno, offset, text, check)
    def generate_tokens(self):
        """Tokenize the file, run physical line checks and yield tokens."""
        if self._io_error:
            self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
        tokengen = tokenize.generate_tokens(self.readline)
        try:
            for token in tokengen:
                if token[2][0] > self.total_lines:
                    return
                self.maybe_check_physical(token)
                yield token
        except (SyntaxError, tokenize.TokenError):
            self.report_invalid_syntax()
    def maybe_check_physical(self, token):
        """If appropriate (based on token), check current physical line(s)."""
        # Called after every token, but act only on end of line.
        if _is_eol_token(token):
            # Obviously, a newline token ends a single physical line.
            self.check_physical(token[4])
        elif token[0] == tokenize.STRING and '\n' in token[1]:
            # Less obviously, a string that contains newlines is a
            # multiline string, either triple-quoted or with internal
            # newlines backslash-escaped. Check every physical line in the
            # string *except* for the last one: its newline is outside of
            # the multiline string, so we consider it a regular physical
            # line, and will check it like any other physical line.
            #
            # Subtleties:
            # - we don't *completely* ignore the last line; if it contains
            #   the magical "# noqa" comment, we disable all physical
            #   checks for the entire multiline string
            # - have to wind self.line_number back because initially it
            #   points to the last line of the string, and we want
            #   check_physical() to give accurate feedback
            if noqa(token[4]):
                return
            self.multiline = True
            self.line_number = token[2][0]
            for line in token[1].split('\n')[:-1]:
                self.check_physical(line + '\n')
                self.line_number += 1
            self.multiline = False
    def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        # Track bracket depth: newlines inside brackets (NL tokens) do not
        # terminate a logical line.
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        # A real NEWLINE ends the logical line.
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            # Flush any trailing tokens not terminated by a newline.
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results()
class BaseReport(object):
    """Collect the results of the checks."""
    # Subclasses set this True to echo each file name before its first error.
    print_filename = False
    def __init__(self, options):
        self._benchmark_keys = options.benchmark_keys
        self._ignore_code = options.ignore_code
        # Results
        self.elapsed = 0
        self.total_errors = 0
        # Counters are pre-seeded with the benchmark keys (files, lines...);
        # error codes are added lazily as they occur.
        self.counters = dict.fromkeys(self._benchmark_keys, 0)
        self.messages = {}
    def start(self):
        """Start the timer."""
        self._start_time = time.time()
    def stop(self):
        """Stop the timer."""
        self.elapsed = time.time() - self._start_time
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self.filename = filename
        self.lines = lines
        self.expected = expected or ()
        self.line_offset = line_offset
        self.file_errors = 0
        self.counters['files'] += 1
        self.counters['physical lines'] += len(lines)
    def increment_logical_line(self):
        """Signal a new logical line."""
        self.counters['logical lines'] += 1
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = text[:4]
        if self._ignore_code(code):
            return
        if code in self.counters:
            self.counters[code] += 1
        else:
            # First occurrence of this code: remember its message text.
            self.counters[code] = 1
            self.messages[code] = text[5:]
        # Don't care about expected errors or warnings
        if code in self.expected:
            return
        if self.print_filename and not self.file_errors:
            print(self.filename)
        self.file_errors += 1
        self.total_errors += 1
        return code
    def get_file_results(self):
        """Return the count of errors and warnings for this file."""
        return self.file_errors
    def get_count(self, prefix=''):
        """Return the total count of errors and warnings."""
        # Iterating over self.messages (not counters) skips benchmark keys.
        return sum([self.counters[key]
                    for key in self.messages if key.startswith(prefix)])
    def get_statistics(self, prefix=''):
        """Get statistics for message codes that start with the prefix.
        prefix='' matches all errors and warnings
        prefix='E' matches all errors
        prefix='W' matches all warnings
        prefix='E4' matches all errors that have to do with imports
        """
        return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
                for key in sorted(self.messages) if key.startswith(prefix)]
    def print_statistics(self, prefix=''):
        """Print overall statistics (number of errors and warnings)."""
        for line in self.get_statistics(prefix):
            print(line)
    def print_benchmark(self):
        """Print benchmark numbers."""
        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
        if self.elapsed:
            for key in self._benchmark_keys:
                print('%-7d %s per second (%d total)' %
                      (self.counters[key] / self.elapsed, key,
                       self.counters[key]))
class FileReport(BaseReport):
    """Collect the results of the checks and print only the filenames."""
    # BaseReport.error prints the filename once, before the first error.
    print_filename = True
class StandardReport(BaseReport):
    """Collect and print the results of the checks."""
    def __init__(self, options):
        super(StandardReport, self).__init__(options)
        # options.format may be a named template or a custom format string.
        self._fmt = REPORT_FORMAT.get(options.format.lower(),
                                      options.format)
        self._repeat = options.repeat
        self._show_source = options.show_source
        self._show_pep8 = options.show_pep8
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        # Errors are buffered and printed sorted in get_file_results().
        self._deferred_print = []
        return super(StandardReport, self).init_file(
            filename, lines, expected, line_offset)
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = super(StandardReport, self).error(line_number, offset,
                                                 text, check)
        # Without --repeat, only the first occurrence of a code is printed.
        if code and (self.counters[code] == 1 or self._repeat):
            self._deferred_print.append(
                (line_number, offset, code, text[5:], check.__doc__))
        return code
    def get_file_results(self):
        """Print the result and return the overall count for this file."""
        self._deferred_print.sort()
        for line_number, offset, code, text, doc in self._deferred_print:
            print(self._fmt % {
                'path': self.filename,
                'row': self.line_offset + line_number, 'col': offset + 1,
                'code': code, 'text': text,
            })
            if self._show_source:
                if line_number > len(self.lines):
                    line = ''
                else:
                    line = self.lines[line_number - 1]
                print(line.rstrip())
                # Caret marker under the offending column.
                print(re.sub(r'\S', ' ', line[:offset]) + '^')
            if self._show_pep8 and doc:
                print('    ' + doc.strip())
            # stdout is block buffered when not stdout.isatty().
            # line can be broken where buffer boundary since other processes
            # write to same file.
            # flush() after print() to avoid buffer boundary.
            # Typical buffer size is 8192. line written safely when
            # len(line) < 8192.
            sys.stdout.flush()
        return self.file_errors
class DiffReport(StandardReport):
    """Collect and print the results for the changed lines only."""
    def __init__(self, options):
        super(DiffReport, self).__init__(options)
        # Mapping of filename -> set of line numbers present in the diff.
        self._selected = options.selected_lines
    def error(self, line_number, offset, text, check):
        # Report only errors on lines that the unified diff touched.
        if line_number in self._selected[self.filename]:
            return super(DiffReport, self).error(
                line_number, offset, text, check)
        return None
class StyleGuide(object):
    """Initialize a PEP-8 instance with few options."""
    def __init__(self, *args, **kwargs):
        # build options from the command line
        self.checker_class = kwargs.pop('checker_class', Checker)
        parse_argv = kwargs.pop('parse_argv', False)
        config_file = kwargs.pop('config_file', False)
        parser = kwargs.pop('parser', None)
        # build options from dict
        options_dict = dict(*args, **kwargs)
        arglist = None if parse_argv else options_dict.get('paths', None)
        options, self.paths = process_options(
            arglist, parse_argv, config_file, parser)
        # Explicit keyword options override anything parsed above.
        if options_dict:
            options.__dict__.update(options_dict)
            if 'paths' in options_dict:
                self.paths = options_dict['paths']
        self.runner = self.input_file
        self.options = options
        if not options.reporter:
            options.reporter = BaseReport if options.quiet else StandardReport
        options.select = tuple(options.select or ())
        if not (options.select or options.ignore or
                options.testsuite or options.doctest) and DEFAULT_IGNORE:
            # The default choice: ignore controversial checks
            options.ignore = tuple(DEFAULT_IGNORE.split(','))
        else:
            # Ignore all checks which are not explicitly selected
            options.ignore = ('',) if options.select else tuple(options.ignore)
        options.benchmark_keys = BENCHMARK_KEYS[:]
        options.ignore_code = self.ignore_code
        options.physical_checks = self.get_checks('physical_line')
        options.logical_checks = self.get_checks('logical_line')
        options.ast_checks = self.get_checks('tree')
        self.init_report()
    def init_report(self, reporter=None):
        """Initialize the report instance."""
        self.options.report = (reporter or self.options.reporter)(self.options)
        return self.options.report
    def check_files(self, paths=None):
        """Run all checks on the paths."""
        if paths is None:
            paths = self.paths
        report = self.options.report
        runner = self.runner
        report.start()
        try:
            for path in paths:
                if os.path.isdir(path):
                    self.input_dir(path)
                elif not self.excluded(path):
                    runner(path)
        except KeyboardInterrupt:
            print('... stopped')
        report.stop()
        return report
    def input_file(self, filename, lines=None, expected=None, line_offset=0):
        """Run all checks on a Python source file."""
        if self.options.verbose:
            print('checking %s' % filename)
        fchecker = self.checker_class(
            filename, lines=lines, options=self.options)
        return fchecker.check_all(expected=expected, line_offset=line_offset)
    def input_dir(self, dirname):
        """Check all files in this directory and all subdirectories."""
        dirname = dirname.rstrip('/')
        if self.excluded(dirname):
            return 0
        counters = self.options.report.counters
        verbose = self.options.verbose
        filepatterns = self.options.filename
        runner = self.runner
        for root, dirs, files in os.walk(dirname):
            if verbose:
                print('directory ' + root)
            counters['directories'] += 1
            # Prune excluded subdirectories in place so os.walk skips them.
            for subdir in sorted(dirs):
                if self.excluded(subdir, root):
                    dirs.remove(subdir)
            for filename in sorted(files):
                # contain a pattern that matches?
                if ((filename_match(filename, filepatterns) and
                     not self.excluded(filename, root))):
                    runner(os.path.join(root, filename))
    def excluded(self, filename, parent=None):
        """Check if the file should be excluded.
        Check if 'options.exclude' contains a pattern that matches filename.
        """
        if not self.options.exclude:
            return False
        # Try the basename first, then the full (absolute) path.
        basename = os.path.basename(filename)
        if filename_match(basename, self.options.exclude):
            return True
        if parent:
            filename = os.path.join(parent, filename)
        filename = os.path.abspath(filename)
        return filename_match(filename, self.options.exclude)
    def ignore_code(self, code):
        """Check if the error code should be ignored.
        If 'options.select' contains a prefix of the error code,
        return False.  Else, if 'options.ignore' contains a prefix of
        the error code, return True.
        """
        if len(code) < 4 and any(s.startswith(code)
                                 for s in self.options.select):
            return False
        return (code.startswith(self.options.ignore) and
                not code.startswith(self.options.select))
    def get_checks(self, argument_name):
        """Get all the checks for this category.
        Find all globally visible functions where the first argument name
        starts with argument_name and which contain selected tests.
        """
        checks = []
        for check, attrs in _checks[argument_name].items():
            (codes, args) = attrs
            # Keep the check if at least one of its codes is not ignored.
            if any(not (code and self.ignore_code(code)) for code in codes):
                checks.append((check.__name__, check, args))
        return sorted(checks)
def get_parser(prog='pep8', version=__version__):
    """Create and populate the OptionParser for the command line."""
    parser = OptionParser(prog=prog, version=version,
                          usage="%prog [options] input ...")
    # Options that may also be set from a configuration file.
    parser.config_options = [
        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
        'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
        'show-source', 'statistics', 'verbose']
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help="print status messages, or debug with -vv")
    parser.add_option('-q', '--quiet', default=0, action='count',
                      help="report only file names, or nothing with -qq")
    parser.add_option('-r', '--repeat', default=True, action='store_true',
                      help="(obsolete) show all occurrences of the same error")
    parser.add_option('--first', action='store_false', dest='repeat',
                      help="show first occurrence of each error")
    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                      help="exclude files or directories which match these "
                           "comma separated patterns (default: %default)")
    parser.add_option('--filename', metavar='patterns', default='*.py',
                      help="when parsing directories, only check filenames "
                           "matching these comma separated patterns "
                           "(default: %default)")
    parser.add_option('--select', metavar='errors', default='',
                      help="select errors and warnings (e.g. E,W6)")
    parser.add_option('--ignore', metavar='errors', default='',
                      help="skip errors and warnings (e.g. E4,W) "
                           "(default: %s)" % DEFAULT_IGNORE)
    parser.add_option('--show-source', action='store_true',
                      help="show source code for each error")
    parser.add_option('--show-pep8', action='store_true',
                      help="show text of PEP 8 for each error "
                           "(implies --first)")
    parser.add_option('--statistics', action='store_true',
                      help="count errors and warnings")
    parser.add_option('--count', action='store_true',
                      help="print total number of errors and warnings "
                           "to standard error and set exit code to 1 if "
                           "total is not null")
    parser.add_option('--max-line-length', type='int', metavar='n',
                      default=MAX_LINE_LENGTH,
                      help="set maximum allowed line length "
                           "(default: %default)")
    parser.add_option('--hang-closing', action='store_true',
                      help="hang closing bracket instead of matching "
                           "indentation of opening bracket's line")
    parser.add_option('--format', metavar='format', default='default',
                      help="set the error format [default|pylint|<custom>]")
    parser.add_option('--diff', action='store_true',
                      help="report only lines changed according to the "
                           "unified diff received on STDIN")
    # Testing options are only offered when the test suite ships alongside.
    group = parser.add_option_group("Testing Options")
    if os.path.exists(TESTSUITE_PATH):
        group.add_option('--testsuite', metavar='dir',
                         help="run regression tests from dir")
        group.add_option('--doctest', action='store_true',
                         help="run doctest on myself")
    group.add_option('--benchmark', action='store_true',
                     help="measure processing speed")
    return parser
def read_config(options, args, arglist, parser):
    """Read and parse configurations
    If a config file is specified on the command line with the "--config"
    option, then only it is used for configuration.
    Otherwise, the user configuration (~/.config/pep8) and any local
    configurations in the current directory or above will be merged together
    (in that order) using the read method of ConfigParser.
    """
    config = RawConfigParser()
    cli_conf = options.config
    local_dir = os.curdir
    if cli_conf and os.path.isfile(cli_conf):
        # An explicit --config file takes precedence over everything else.
        if options.verbose:
            print('cli configuration: %s' % cli_conf)
        config.read(cli_conf)
    else:
        if USER_CONFIG and os.path.isfile(USER_CONFIG):
            if options.verbose:
                print('user configuration: %s' % USER_CONFIG)
            config.read(USER_CONFIG)
        # Walk up from the common prefix of the input paths, looking for
        # the first directory that holds a project config file.
        parent = tail = args and os.path.abspath(os.path.commonprefix(args))
        while tail:
            if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG):
                local_dir = parent
                if options.verbose:
                    print('local configuration: in %s' % parent)
                break
            (parent, tail) = os.path.split(parent)
    pep8_section = parser.prog
    if config.has_section(pep8_section):
        # Map each option name to its type/action so values can be coerced.
        option_list = dict([(o.dest, o.type or o.action)
                            for o in parser.option_list])
        # First, read the default values
        (new_options, __) = parser.parse_args([])
        # Second, parse the configuration
        for opt in config.options(pep8_section):
            if opt.replace('_', '-') not in parser.config_options:
                print("  unknown option '%s' ignored" % opt)
                continue
            if options.verbose > 1:
                print("  %s = %s" % (opt, config.get(pep8_section, opt)))
            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            if opt_type in ('int', 'count'):
                value = config.getint(pep8_section, opt)
            elif opt_type == 'string':
                value = config.get(pep8_section, opt)
                if normalized_opt == 'exclude':
                    # Exclusion paths are relative to the config location.
                    value = normalize_paths(value, local_dir)
            else:
                assert opt_type in ('store_true', 'store_false')
                value = config.getboolean(pep8_section, opt)
            setattr(new_options, normalized_opt, value)
        # Third, overwrite with the command-line options
        options.doctest = options.testsuite = False
        (options, __) = parser.parse_args(arglist, values=new_options)
    return options
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None):
    """Process options passed either via arglist or via command line args.

    Passing in the ``config_file`` parameter allows other tools, such as
    flake8 to specify their own options to be processed in pep8.

    Returns an ``(options, args)`` tuple, where ``args`` is the list of
    paths to check.
    """
    if not parser:
        parser = get_parser()
    # Make sure the parser has a --config option even when a caller
    # supplied its own parser instance.
    if not parser.has_option('--config'):
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed. Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location")
    # Don't read the command line if the module is used as a library.
    if not arglist and not parse_argv:
        arglist = []
    # If parse_argv is True and arglist is None, arguments are
    # parsed from the command line (sys.argv)
    (options, args) = parser.parse_args(arglist)
    options.reporter = None
    if options.ensure_value('testsuite', False):
        args.append(options.testsuite)
    elif not options.ensure_value('doctest', False):
        if parse_argv and not args:
            # With no explicit paths, default to '.' only when a project
            # config file (or --diff) makes that a sensible choice.
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
    # quiet == 1 means "report file names only" when run from the CLI.
    options.reporter = parse_argv and options.quiet == 1 and FileReport
    # Comma-separated option values become lists (empty stays falsy).
    options.filename = options.filename and options.filename.split(',')
    options.exclude = normalize_paths(options.exclude)
    options.select = options.select and options.select.split(',')
    options.ignore = options.ignore and options.ignore.split(',')
    if options.diff:
        # In diff mode only the lines touched by the diff are checked.
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)
    return options, args
def _main():
    """Parse options and run checks on Python source."""
    import signal

    # Handle "Broken pipe" gracefully: exit quietly instead of a traceback
    # when e.g. the output is piped to ``head``.
    try:
        signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
    except AttributeError:
        pass    # not supported on Windows

    pep8style = StyleGuide(parse_argv=True)
    options = pep8style.options
    if options.doctest or options.testsuite:
        # Self-test mode: run pep8's own test suite instead of user code.
        from testsuite.support import run_tests
        report = run_tests(pep8style)
    else:
        report = pep8style.check_files()
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()
    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        # Non-zero exit status signals that style errors were found.
        sys.exit(1)

if __name__ == '__main__':
    _main()
| apache-2.0 |
mbauskar/tele-frappe | frappe/modules/import_file.py | 13 | 3318 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, json
from frappe.modules import get_module_path, scrub_dt_dn
from frappe.utils import get_datetime_str
def import_files(module, dt=None, dn=None, force=False):
	"""Import one fixture file or a batch of them.

	``module`` is either a single module name (used together with ``dt``
	and ``dn``) or a list of ``[module, dt, dn]`` triples.

	Returns the result of :func:`import_file` for the single case, or a
	list of such results for the batch case.
	"""
	# isinstance() also accepts list subclasses, unlike ``type(...) is list``.
	if isinstance(module, list):
		return [import_file(m[0], m[1], m[2], force=force) for m in module]
	return import_file(module, dt, dn, force=force)
def import_file(module, dt, dn, force=False):
	"""Sync one doc from its JSON fixture file.

	Returns whatever :func:`import_file_by_path` returns: False when the
	stored timestamp already matches the file (nothing to do).
	"""
	file_path = get_file_path(module, dt, dn)
	return import_file_by_path(file_path, force)
def get_file_path(module, dt, dn):
	"""Return the on-disk JSON path for document ``dn`` of doctype ``dt``.

	The layout is ``<module path>/<doctype>/<docname>/<docname>.json``,
	with ``dt``/``dn`` scrubbed into file-system-safe names first.
	"""
	dt, dn = scrub_dt_dn(dt, dn)
	# A single join is equivalent to the nested joins used previously.
	return os.path.join(get_module_path(module), dt, dn, dn + ".json")
def import_file_by_path(path, force=False, data_import=False):
	"""Import the doc(s) stored in the JSON file at ``path``.

	Returns True when the docs were (re)imported, False when the stored
	"modified" timestamp already matches the file, and None when the
	file is missing.
	"""
	frappe.flags.in_import = True
	try:
		docs = read_doc_from_file(path)
	except IOError:
		print path + " missing"
		return
	if docs:
		if not isinstance(docs, list):
			docs = [docs]
		for doc in docs:
			if not force:
				# check if timestamps match; skip the whole file if so
				db_modified = frappe.db.get_value(doc['doctype'], doc['name'], 'modified')
				if db_modified and doc.get('modified')==get_datetime_str(db_modified):
					return False
			original_modified = doc.get("modified")
			import_doc(doc, force=force, data_import=data_import)
			if original_modified:
				# since there is a new timestamp on the file, update timestamp in
				# the database as well so the next sync can skip this file
				if doc["doctype"] == doc["name"] and doc["name"]!="DocType":
					# single doctypes store their fields in tabSingles
					frappe.db.sql("""update tabSingles set value=%s where field="modified" and doctype=%s""",
						(original_modified, doc["name"]))
				else:
					frappe.db.sql("update `tab%s` set modified=%s where name=%s" % \
						(doc['doctype'], '%s', '%s'),
						(original_modified, doc['name']))
	frappe.flags.in_import = False
	return True
def read_doc_from_file(path):
	"""Load and return the parsed JSON document stored at ``path``.

	Raises IOError when the file does not exist and re-raises ValueError
	(after printing the offending path) when the content is not valid
	JSON.
	"""
	doc = None
	if os.path.exists(path):
		with open(path, 'r') as f:
			try:
				doc = json.loads(f.read())
			except ValueError:
				# surface which file is broken before re-raising
				print "bad json: {0}".format(path)
				raise
	else:
		raise IOError, '%s missing' % path
	return doc
# Per-doctype fields whose database value must survive a re-import
# (unless force=True): e.g. a user-disabled Report stays disabled.
ignore_values = {
	"Report": ["disabled"],
	"Print Format": ["disabled"]
}

# Child tables that are never overwritten from fixture files.
ignore_doctypes = ["Page Role", "DocPerm"]
def import_doc(docdict, force=False, data_import=False):
	"""Insert ``docdict`` as a fresh document, replacing any stored copy.

	Unless ``force`` is given, fields listed in ``ignore_values`` and
	child tables listed in ``ignore_doctypes`` keep the values of the
	currently stored version.  Validation/permission checks are skipped
	except when ``data_import`` is True.
	"""
	frappe.flags.in_import = True
	docdict["__islocal"] = 1
	doc = frappe.get_doc(docdict)
	ignore = []
	if frappe.db.exists(doc.doctype, doc.name):
		old_doc = frappe.get_doc(doc.doctype, doc.name)
		if doc.doctype in ignore_values and not force:
			# update ignore values: carry protected fields over from the
			# stored version
			for key in ignore_values.get(doc.doctype) or []:
				doc.set(key, old_doc.get(key))
		# update ignored docs into new doc
		for df in doc.meta.get_table_fields():
			if df.options in ignore_doctypes and not force:
				doc.set(df.fieldname, [])
				ignore.append(df.options)
		# delete old version so the insert below recreates it
		frappe.delete_doc(doc.doctype, doc.name, force=1, ignore_doctypes=ignore, for_reload=True)
	doc.flags.ignore_children_type = ignore
	doc.flags.ignore_links = True
	if not data_import:
		doc.flags.ignore_validate = True
		doc.flags.ignore_permissions = True
		doc.flags.ignore_mandatory = True
	doc.insert()
	frappe.flags.in_import = False
| mit |
lino-framework/xl | lino_xl/lib/addresses/__init__.py | 1 | 1343 | # Copyright 2014-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""See :doc:`/specs/addresses`.
.. autosummary::
:toctree:
choicelists
Some unit test cases are
:mod:`lino.projects.min2.tests.test_addresses`.
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
    "The descriptor for this plugin. See :class:`lino.core.Plugin`."

    verbose_name = _("Addresses")

    # Model whose instances own addresses.  Given as a string reference
    # here; resolved to the actual model class at site startup.
    partner_model = 'contacts.Partner'

    needs_plugins = ['lino.modlib.checkdata']

    def on_site_startup(self, site):
        # Resolve partner_model from a string into a model class (no-op
        # when the plugin is disabled via partner_model = None).
        super(Plugin, self).on_site_startup(site)
        if self.partner_model is None:
            return
        from lino_xl.lib.addresses.mixins import AddressOwner
        self.partner_model = site.models.resolve(self.partner_model)
        # TODO: raise an exception if the specified model does not
        # implement AddressOwner. Currently it causes failures in book
        # if not issubclass(self.partner_model, AddressOwner):
        #     raise Exception("partner_model is not an AddressOwner")

    def setup_explorer_menu(self, site, user_type, m):
        # Add the address tables under the contacts menu group.
        # mg = self.get_menu_group()
        mg = site.plugins.contacts
        m = m.add_menu(mg.app_label, mg.verbose_name)
        m.add_action('addresses.AddressTypes')
        m.add_action('addresses.Addresses')
| bsd-2-clause |
brianmay/spud | spud/serializers.py | 1 | 33434 | # spud - keep track of photos
# Copyright (C) 2008-2013 Brian May
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
import base64
import mimetypes
import os
import shutil
import pytz
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.db import transaction
from django.db.models import Max
from rest_framework import exceptions
from rest_framework import fields as f
from rest_framework import serializers
from rest_framework.utils import html
from . import media, models
class BinaryField(serializers.Field):
    """Serializer field that transports raw bytes as base64 text."""

    def to_internal_value(self, data):
        # Incoming JSON gives us ASCII text; decode it back to raw bytes.
        encoded = data.encode('ASCII')
        return base64.decodebytes(encoded)

    def to_representation(self, value):
        # Raw bytes go out as (newline-wrapped) base64.
        return base64.encodebytes(value)
class CharField(f.CharField):
    """CharField variant for HTML form input."""
    # HTML forms submit a missing value as ""; treat that as None
    # instead of DRF's default empty string.
    default_empty_html = None
class ListSerializer(serializers.ListSerializer):
    """List serializer that forwards the current request to its child."""

    def set_request(self, request):
        child_field = self.child
        # Only our own serializer classes understand set_request().
        if isinstance(child_field, (ModelSerializer, ListSerializer)):
            child_field.set_request(request)
class ModelSerializer(serializers.ModelSerializer):
    """Model serializer whose nested children can receive the request."""

    def set_request(self, request):
        # Propagate the request to every nested (list) serializer so
        # per-user field filtering can happen recursively.
        for field in self.fields.values():
            if isinstance(field, (ModelSerializer, ListSerializer)):
                field.set_request(request)
class PhotoFileSerializer(ModelSerializer):
    """Serialize one photo_file row, including its public URL."""
    # Computed URL of the file rather than the raw path.
    url = f.URLField(source="get_url")

    class Meta:
        model = models.photo_file
        fields = ['id', 'url', 'size_key', 'width', 'height', 'mime_type', 'is_video', 'photo']
class PhotoFileListSerializer(ListSerializer):
    """Serialize photo files grouped into a dict keyed by size_key."""
    child = PhotoFileSerializer()

    def to_representation(self, value):
        grouped = {}
        for photo_file in value:
            bucket = grouped.setdefault(photo_file.size_key, [])
            bucket.append(self.child.to_representation(photo_file))
        return grouped
class PhotoTitleField(CharField):
    """Photo title that falls back to the file name when empty."""

    def get_attribute(self, obj):
        title = super(PhotoTitleField, self).get_attribute(obj)
        # An empty/None title is replaced by the photo's file name.
        return title if title else obj.name
class NestedPhotoPlaceSerializer(ModelSerializer):
    """Minimal place representation embedded inside a photo."""
    class Meta:
        model = models.place
        fields = [
            'id', 'title',
        ]
        list_serializer_class = ListSerializer
class NestedPhotoSerializer(ModelSerializer):
    """Compact photo representation embedded inside other objects."""
    title = PhotoTitleField(required=False, allow_null=True)
    place = NestedPhotoPlaceSerializer(read_only=True)
    # Dict of thumbnail files grouped by size_key.
    thumbs = PhotoFileListSerializer(
        source="get_thumbs", read_only=True)

    class Meta:
        model = models.photo
        fields = [
            'id', 'title', 'description', 'datetime', 'utc_offset', 'place',
            'action', 'thumbs',
        ]
        list_serializer_class = ListSerializer
class UserSerializer(ModelSerializer):
    """Serialize Django auth users."""
    class Meta:
        model = User
        fields = [
            'id', 'username', 'first_name', 'last_name', 'email', 'groups'
        ]
        list_serializer_class = ListSerializer
class GroupSerializer(ModelSerializer):
    """Serialize Django auth groups."""
    class Meta:
        model = Group
        fields = ['id', 'name']
        list_serializer_class = ListSerializer
class NestedAlbumSerializer(ModelSerializer):
    """Compact album representation embedded inside other objects."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.album
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class AlbumSerializer(ModelSerializer):
    """Full album representation, including its ancestry chain."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedAlbumSerializer(
        source="list_ascendants", many=True, read_only=True)

    def set_request(self, request):
        """Hide the 'revised' fields from non-staff users."""
        super(AlbumSerializer, self).set_request(request)
        if not request.user.is_staff:
            del self.fields['revised']
            del self.fields['revised_utc_offset']

    class Meta:
        model = models.album
        list_serializer_class = ListSerializer
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants', 'title',
            'description', 'sort_name', 'sort_order',
            'revised', 'revised_utc_offset', 'parent',
        ]
class NestedCategorySerializer(ModelSerializer):
    """Compact category representation embedded inside other objects."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.category
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class CategorySerializer(ModelSerializer):
    """Full category representation, including its ancestry chain."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedCategorySerializer(
        source="list_ascendants", many=True, read_only=True)

    class Meta:
        model = models.category
        list_serializer_class = ListSerializer
        # NOTE(review): 'cover_photo' appears twice in this list —
        # presumably harmless redundancy; verify before removing.
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants', 'title',
            'description', 'sort_name', 'sort_order', 'parent', 'cover_photo'
        ]
class NestedPlaceSerializer(ModelSerializer):
    """Compact place representation embedded inside other objects."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.place
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class PlaceSerializer(ModelSerializer):
    """Full place representation; street addresses are staff-only."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedPlaceSerializer(
        source="list_ascendants", many=True, read_only=True)

    def set_request(self, request):
        """Hide the street address fields from non-staff users."""
        super(PlaceSerializer, self).set_request(request)
        if not request.user.is_staff:
            del self.fields['address']
            del self.fields['address2']

    class Meta:
        model = models.place
        list_serializer_class = ListSerializer
        # NOTE(review): 'cover_photo' appears twice in this list —
        # presumably harmless redundancy; verify before removing.
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants', 'title',
            'address', 'address2', 'city', 'state', 'postcode', 'country',
            'url', 'urldesc', 'notes', 'parent', 'cover_photo', 'description',
        ]
class PersonTitleField(CharField):
    """Render a person's title as the string form of the person object."""

    def get_attribute(self, obj):
        # Hand the whole object through so to_representation() can
        # stringify it.
        return obj

    def to_representation(self, value):
        # str()-style formatting of the person instance.
        return "%s" % value
class NestedPersonSerializer(ModelSerializer):
    """Compact person representation embedded inside other objects."""
    title = PersonTitleField(read_only=True)
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        # BUG FIX: this serializer represents person rows, but Meta.model
        # was models.photo, so DRF resolved the declared fields against
        # the wrong table.  Every sibling Nested*Serializer points at its
        # own model (e.g. NestedFeedbackSerializer -> models.feedback).
        model = models.person
        fields = [
            'id', 'title', 'cover_photo', 'cover_photo_pk',
        ]
        list_serializer_class = ListSerializer
class PersonSerializer(ModelSerializer):
    """Full person representation, including family relations.

    All personal details and family links are removed for non-staff
    users in set_request().
    """
    title = PersonTitleField(read_only=True)
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable *_pk fields accept primary keys for their read-only
    # nested counterparts.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    home = PlaceSerializer(read_only=True)
    home_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.place.objects.all(), source="home",
        required=False, allow_null=True)
    work = PlaceSerializer(read_only=True)
    work_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.place.objects.all(), source="work",
        required=False, allow_null=True)
    mother = NestedPersonSerializer(read_only=True)
    mother_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="mother",
        required=False, allow_null=True)
    father = NestedPersonSerializer(read_only=True)
    father_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="father",
        required=False, allow_null=True)
    spouse = NestedPersonSerializer(read_only=True)
    spouse_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="spouse",
        required=False, allow_null=True)
    # Derived family relations, all read-only.
    spouses = NestedPersonSerializer(many=True, read_only=True)
    grandparents = NestedPersonSerializer(many=True, read_only=True)
    uncles_aunts = NestedPersonSerializer(many=True, read_only=True)
    parents = NestedPersonSerializer(many=True, read_only=True)
    siblings = NestedPersonSerializer(many=True, read_only=True)
    cousins = NestedPersonSerializer(many=True, read_only=True)
    children = NestedPersonSerializer(many=True, read_only=True)
    nephews_nieces = NestedPersonSerializer(many=True, read_only=True)
    grandchildren = NestedPersonSerializer(many=True, read_only=True)
    ascendants = NestedPersonSerializer(
        source="list_ascendants", many=True, read_only=True)

    def set_request(self, request):
        """Strip all private details for non-staff users."""
        super(PersonSerializer, self).set_request(request)
        if not request.user.is_staff:
            del self.fields['sex']
            del self.fields['dob']
            del self.fields['dod']
            del self.fields['home']
            del self.fields['home_pk']
            del self.fields['work']
            del self.fields['work_pk']
            del self.fields['father']
            del self.fields['father_pk']
            del self.fields['mother']
            del self.fields['mother_pk']
            del self.fields['spouse']
            del self.fields['spouse_pk']
            del self.fields['spouses']
            del self.fields['grandparents']
            del self.fields['uncles_aunts']
            del self.fields['parents']
            del self.fields['siblings']
            del self.fields['cousins']
            del self.fields['children']
            del self.fields['nephews_nieces']
            del self.fields['grandchildren']
            del self.fields['notes']
            del self.fields['email']
            del self.fields['ascendants']

    class Meta:
        model = models.person
        list_serializer_class = ListSerializer
        # NOTE(review): 'cover_photo' appears twice in this list —
        # presumably harmless redundancy; verify before removing.
        fields = [
            'id', 'title', 'description',
            'cover_photo', 'cover_photo_pk',
            'home', 'home_pk',
            'work', 'work_pk',
            'mother', 'mother_pk',
            'father', 'father_pk',
            'spouse', 'spouse_pk', 'spouses',
            'grandparents',
            'uncles_aunts',
            'parents',
            'siblings',
            'cousins',
            'children',
            'nephews_nieces',
            'grandchildren',
            'ascendants',
            'first_name', 'last_name', 'middle_name',
            'called', 'sex', 'dob', 'dod', 'notes', 'email',
            'cover_photo'
        ]
class PersonListSerializer(ListSerializer):
    """Read-only ordered list of the persons attached to a photo.

    ``value`` is the photo's photo_person link set; each link is
    unwrapped to its person before serialization.
    """
    child = PersonSerializer()

    def get_value(self, dictionary):
        if html.is_html_input(dictionary):
            return dictionary.getlist(self.field_name)
        return dictionary.get(self.field_name, None)

    def to_internal_value(self, data):
        # Writes go through PersonPkListSerializer instead.
        raise NotImplementedError()

    def to_representation(self, value):
        return [self.child.to_representation(link.person)
                for link in value.all()]
class PersonPkListSerializer(ListSerializer):
    """Accepts and emits an ordered list of person primary keys.

    Incoming pks are validated and converted to photo_person attribute
    dicts carrying a 1-based ``position``.
    """
    child = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all())

    def get_value(self, dictionary):
        if html.is_html_input(dictionary):
            return dictionary.getlist(self.field_name)
        return dictionary.get(self.field_name, None)

    def to_internal_value(self, data):
        entries = []
        # enumerate from 1 so positions are 1-based.
        for position, raw_pk in enumerate(data, 1):
            try:
                person_pk = int(raw_pk)
            except ValueError:
                raise exceptions.ValidationError(
                    "Person '%s' is not integer." % raw_pk)
            try:
                models.person.objects.get(pk=person_pk)
            except models.person.DoesNotExist:
                raise exceptions.ValidationError(
                    "Person '%s' does not exist." % person_pk)
            entries.append({
                'person_id': person_pk,
                'position': position,
            })
        return entries

    def to_representation(self, value):
        return [link.person_id for link in value.all()]
class NestedFeedbackSerializer(ModelSerializer):
    """Compact feedback representation embedded inside other objects."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        allow_null=True,
        style={'base_template': 'input.html'})

    class Meta:
        model = models.feedback
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'rating', 'comment',
            'user_name', 'user_email', 'user_url',
            'submit_datetime', 'utc_offset',
            'ip_address', 'is_public', 'is_removed',
            'user',
        ]
        list_serializer_class = ListSerializer
class FeedbackSerializer(ModelSerializer):
    """Full feedback representation, including its ancestry chain."""
    cover_photo = NestedPhotoSerializer(read_only=True)
    # Writable counterpart of cover_photo: accepts a photo primary key.
    cover_photo_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="cover_photo",
        allow_null=True,
        style={'base_template': 'input.html'})
    ascendants = NestedFeedbackSerializer(
        source="list_ascendants", many=True, read_only=True)

    class Meta:
        model = models.feedback
        list_serializer_class = ListSerializer
        # Submission time is set by the server, never by the client.
        extra_kwargs = {
            'submit_datetime': {'read_only': True},
            'utc_offset': {'read_only': True},
        }
        # NOTE(review): 'cover_photo' appears twice in this list —
        # presumably harmless redundancy; verify before removing.
        fields = [
            'id', 'cover_photo', 'cover_photo_pk', 'ascendants',
            'rating', 'comment', 'user_name', 'user_email', 'user_url',
            'submit_datetime', 'utc_offset', 'ip_address', 'is_public',
            'is_removed', 'cover_photo', 'parent', 'user'
        ]
class PhotoRelationSerializer(ModelSerializer):
    """Serialize a directed relation between two photos.

    Each side carries its own description (desc_1 / desc_2).
    """
    class Meta:
        model = models.photo_relation
        list_serializer_class = ListSerializer
        fields = [
            'id',
            'photo_1', 'photo_1_pk', 'desc_1',
            'photo_2', 'photo_2_pk', 'desc_2',
        ]
    photo_1 = NestedPhotoSerializer(read_only=True)
    photo_1_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="photo_1",
        allow_null=True,
        style={'base_template': 'input.html'})
    photo_2 = NestedPhotoSerializer(read_only=True)
    photo_2_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.photo.objects.all(), source="photo_2",
        allow_null=True,
        style={'base_template': 'input.html'})
# Server-local timezone derived from Django's TIME_ZONE setting.
default_timezone = pytz.timezone(settings.TIME_ZONE)
class PhotoListSerializer(ListSerializer):
    """Photo list that can annotate each entry with its relation to a
    reference photo (context key ``related_photo_pk``)."""

    def to_representation(self, data):
        # iterable = data.all() if isinstance(data, models.Manager) else data
        iterable = data
        results = []
        for photo in iterable.all():
            result = self.child.to_representation(photo)
            if 'related_photo_pk' in self.context:
                related_photo_pk = self.context['related_photo_pk']
                # Look up a relation in either direction; if both exist,
                # the second (photo -> reference) description wins.
                try:
                    pr = photo.relations_2.get(photo_1__id=related_photo_pk)
                    result['relation'] = pr.desc_2
                except models.photo_relation.DoesNotExist:
                    pass
                try:
                    pr = photo.relations_1.get(photo_2__id=related_photo_pk)
                    result['relation'] = pr.desc_1
                except models.photo_relation.DoesNotExist:
                    pass
            results.append(result)
        return results
class CreatePhotoSerializer(ModelSerializer):
    """Serializer used when uploading a new photo.

    ``validate()`` checks the uploaded file (media type, sha256 transfer
    hash, duplicates) and prepares the photo_file attributes;
    ``create()`` copies the file into ``settings.IMAGE_PATH``, merges
    EXIF data and creates the photo row plus its m2m links inside a
    transaction.
    """
    orig_url = f.URLField(source="get_orig_url", read_only=True)
    title = PhotoTitleField(required=False, allow_null=True)
    albums_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.album.objects.all(), source="albums",
        many=True, required=False,
        style={'base_template': 'input.html'})
    categorys_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.category.objects.all(), source="categorys",
        many=True, required=False,
        style={'base_template': 'input.html'})
    place_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.place.objects.all(), source="place",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    persons_pk = PersonPkListSerializer(
        source="photo_person_set", required=False, allow_null=True)
    photographer_pk = serializers.PrimaryKeyRelatedField(
        queryset=models.person.objects.all(), source="photographer",
        required=False, allow_null=True,
        style={'base_template': 'input.html'})
    # Client-supplied sha256 of the upload (base64); used to detect
    # corrupted transfers.
    sha256_hash = BinaryField(write_only=True)

    def validate(self, attrs):
        """Validate the uploaded file and derive photo_file attributes."""
        if 'photo' not in self.initial_data:
            raise exceptions.ValidationError('Photo was not supplied.')
        file_obj = self.initial_data['photo']

        if settings.IMAGE_PATH is None:
            raise exceptions.PermissionDenied(
                'This site does not support uploads.')

        # if file_obj.size > options["maxfilesize"]:
        #     raise exceptions.ValidationError('Maximum file size exceeded.')

        try:
            m = media.get_media(file_obj.name, file_obj)
        except media.UnknownMediaType:
            raise exceptions.ValidationError('File type not supported.')

        width, height = m.get_size()
        photo_dir = models.photo.build_photo_dir(attrs['datetime'], attrs['utc_offset'])
        new_name = file_obj.name
        sha256_hash = m.get_sha256_hash()
        mime_type, _ = mimetypes.guess_type(new_name)
        is_video = m.is_video()
        size_key = "orig"

        # Verify the file survived the transfer intact.
        if attrs['sha256_hash'] != sha256_hash:
            raise exceptions.ValidationError(
                "File received with incorrect sha256 hash")
        del attrs['sha256_hash']

        # Compute the destination directory before the duplicate check so
        # the check runs against the real target location.
        new_dir = models.photo_file.build_dir(is_video, size_key, photo_dir)
        # BUG FIX: get_conflicts() used to be called with the builtin
        # ``dir`` function instead of the destination directory.
        dups = models.photo_file.get_conflicts(new_dir, new_name, size_key, sha256_hash)
        if dups.count() > 0:
            raise exceptions.ValidationError(
                'File already exists in db at %s.'
                % ",".join([str(d.id) for d in dups]))

        models.photo_file.check_filename_free(new_dir, new_name)

        pf = {
            'size_key': size_key,
            'width': width,
            'height': height,
            'mime_type': mime_type,
            'dir': new_dir,
            'name': new_name,
            'is_video': is_video,
            'sha256_hash': sha256_hash,
            'num_bytes': file_obj.size,
        }
        attrs['photo_file_set'] = [pf]
        attrs['name'] = new_name
        return attrs

    def create(self, validated_attrs):
        """Copy the upload into place and create the photo row."""
        if 'photo' not in self.initial_data:
            raise exceptions.ValidationError('Photo file not supplied')
        file_obj = self.initial_data['photo']

        validated_attrs['action'] = 'R'
        pf = validated_attrs['photo_file_set'][0]
        dir = pf['dir']
        name = pf['name']
        dst = os.path.join(settings.IMAGE_PATH, dir, name)

        # Go ahead and do stuff
        print("importing to %s" % dst)
        umask = os.umask(0o022)
        try:
            if not os.path.lexists(os.path.dirname(dst)):
                os.makedirs(os.path.dirname(dst), 0o755)
            with open(dst, "wb") as dst_file_obj:
                file_obj.seek(0)
                shutil.copyfileobj(file_obj, dst_file_obj)
        finally:
            os.umask(umask)

        try:
            # Merge EXIF metadata with the validated attributes; explicit
            # attributes win over EXIF values.
            m = media.get_media(dst)
            exif = m.get_normalized_exif()
            assert 'datetime' not in exif
            exif.update(validated_attrs)
            validated_attrs = exif

            with transaction.atomic():
                m2m_attrs = self._pop_m2m_attrs(validated_attrs)
                print(validated_attrs)
                instance = models.photo.objects.create(**validated_attrs)
                self._process_m2m(instance, m2m_attrs)

            print("imported %s/%s as %d" % (dir, name, instance.pk))
            return instance
        except Exception:
            # Don't leave an orphaned file behind on failure.
            print("deleting failed import %s" % dst)
            os.remove(dst)
            raise

    def _pop_m2m_attrs(self, validated_attrs):
        """Remove and return the m2m attributes from validated_attrs."""
        return {
            'albums': validated_attrs.pop("albums", None),
            'categorys': validated_attrs.pop("categorys", None),
            'persons': validated_attrs.pop("photo_person_set", None),
            'photo_file_set': validated_attrs.pop("photo_file_set", []),
        }

    def _process_m2m(self, instance, m2m_attrs):
        """Create the m2m link rows for a freshly created photo."""
        albums = m2m_attrs["albums"]
        categorys = m2m_attrs["categorys"]
        persons = m2m_attrs["persons"]
        photo_file_set = m2m_attrs["photo_file_set"]

        print("albums", albums)
        print("categorys", categorys)
        print("persons", persons)
        print("photo_file_set", photo_file_set)

        if albums is not None:
            for value in albums:
                models.photo_album.objects.create(
                    photo=instance, album=value)
                del value
        if categorys is not None:
            for value in categorys:
                models.photo_category.objects.create(
                    photo=instance, category=value)
                del value
        if persons is not None:
            for person in persons:
                models.photo_person.objects.create(
                    photo=instance, **person)
                del person
        for pf in photo_file_set:
            instance.photo_file_set.create(**pf)
        return instance

    class Meta:
        model = models.photo
        list_serializer_class = PhotoListSerializer
        fields = [
            'id', 'orig_url', 'sha256_hash', 'title',
            'albums_pk', 'categorys_pk', 'persons_pk',
            'place_pk', 'photographer_pk',
            'title', 'view', 'rating',
            'description', 'utc_offset', 'datetime', 'camera_make',
            'camera_model', 'flash_used', 'focal_length', 'exposure',
            'compression', 'aperture', 'level', 'iso_equiv', 'metering_mode',
            'focus_dist', 'ccd_width', 'comment',
            'photographer',
            'relations'
        ]
class PhotoSerializer(ModelSerializer):
orig_url = f.URLField(source="get_orig_url", read_only=True)
title = PhotoTitleField(required=False, allow_null=True)
albums = AlbumSerializer(many=True, read_only=True)
albums_pk = serializers.PrimaryKeyRelatedField(
queryset=models.album.objects.all(), source="albums",
many=True, required=False,
style={'base_template': 'input.html'})
add_albums_pk = serializers.PrimaryKeyRelatedField(
queryset=models.album.objects.all(), write_only=True,
many=True, required=False,
style={'base_template': 'input.html'})
rem_albums_pk = serializers.PrimaryKeyRelatedField(
queryset=models.album.objects.all(), write_only=True,
many=True, required=False,
style={'base_template': 'input.html'})
categorys = CategorySerializer(many=True, read_only=True)
categorys_pk = serializers.PrimaryKeyRelatedField(
queryset=models.category.objects.all(), source="categorys",
many=True, required=False,
style={'base_template': 'input.html'})
add_categorys_pk = serializers.PrimaryKeyRelatedField(
queryset=models.category.objects.all(), write_only=True,
many=True, required=False,
style={'base_template': 'input.html'})
rem_categorys_pk = serializers.PrimaryKeyRelatedField(
queryset=models.category.objects.all(), write_only=True,
many=True, required=False,
style={'base_template': 'input.html'})
place = PlaceSerializer(read_only=True)
place_pk = serializers.PrimaryKeyRelatedField(
queryset=models.place.objects.all(), source="place",
required=False, allow_null=True,
style={'base_template': 'input.html'})
persons = PersonListSerializer(
child=NestedPersonSerializer(),
source="photo_person_set", read_only=True)
persons_pk = PersonPkListSerializer(
source="photo_person_set", required=False, allow_null=True)
add_persons_pk = PersonPkListSerializer(
required=False, write_only=True, allow_null=True)
rem_persons_pk = PersonPkListSerializer(
required=False, write_only=True, allow_null=True)
photographer = NestedPersonSerializer(read_only=True)
photographer_pk = serializers.PrimaryKeyRelatedField(
queryset=models.person.objects.all(), source="photographer",
required=False, allow_null=True,
style={'base_template': 'input.html'})
feedbacks = FeedbackSerializer(many=True, read_only=True)
thumbs = PhotoFileListSerializer(
source="get_thumbs", read_only=True)
videos = PhotoFileListSerializer(
source="get_videos", read_only=True)
def update(self, instance, validated_attrs):
m2m_attrs = self._pop_m2m_attrs(validated_attrs)
for attr, value in validated_attrs.items():
setattr(instance, attr, value)
instance.save()
self._process_m2m(instance, m2m_attrs)
# we need to get new object to ensure m2m attributes not cached
instance = models.photo.objects.get(pk=instance.pk)
return instance
def _pop_m2m_attrs(self, validated_attrs):
return {
'albums': validated_attrs.pop("albums", None),
'add_albums': validated_attrs.pop("add_albums_pk", None),
'rem_albums': validated_attrs.pop("rem_albums_pk", None),
'categorys': validated_attrs.pop("categorys", None),
'add_categorys': validated_attrs.pop("add_categorys_pk", None),
'rem_categorys': validated_attrs.pop("rem_categorys_pk", None),
'persons': validated_attrs.pop("photo_person_set", None),
'add_persons': validated_attrs.pop("add_persons_pk", None),
'rem_persons': validated_attrs.pop("rem_persons_pk", None),
}
def _process_m2m(self, instance, m2m_attrs):
albums = m2m_attrs["albums"]
add_albums = m2m_attrs["add_albums"]
rem_albums = m2m_attrs["rem_albums"]
categorys = m2m_attrs["categorys"]
add_categorys = m2m_attrs["add_categorys"]
rem_categorys = m2m_attrs["rem_categorys"]
persons = m2m_attrs["persons"]
add_persons = m2m_attrs["add_persons"]
rem_persons = m2m_attrs["rem_persons"]
print("albums", albums, add_albums, rem_albums)
print("categorys", categorys, add_categorys, rem_categorys)
print("persons", persons, add_persons, rem_persons)
if albums is not None:
pa_list = list(instance.photo_album_set.all())
for pa in pa_list:
if pa.album in albums:
albums.remove(pa.album)
else:
pa.delete()
del pa
for value in albums:
models.photo_album.objects.create(
photo=instance, album=value)
del value
del pa_list
if rem_albums is not None:
for album in rem_albums:
models.photo_album.objects.filter(
photo=instance, album=album).delete()
if add_albums is not None:
for album in add_albums:
models.photo_album.objects.get_or_create(
photo=instance, album=album)
if categorys is not None:
pc_list = list(instance.photo_category_set.all())
for pc in pc_list:
if pc.category in categorys:
categorys.remove(pc.category)
else:
pc.delete()
del pc
for value in categorys:
models.photo_category.objects.create(
photo=instance, category=value)
del value
del pc_list
if rem_categorys is not None:
for category in rem_categorys:
models.photo_category.objects.filter(
photo=instance, category=category).delete()
if add_categorys is not None:
for category in add_categorys:
models.photo_category.objects.get_or_create(
photo=instance, category=category)
if persons is not None:
pp_list = list(instance.photo_person_set.all())
for pp in pp_list:
found = None
for index, person in enumerate(persons):
if pp.position == person['position'] and \
pp.person_id == person['person_id']:
found = index
if found is not None:
del persons[found]
else:
pp.delete()
for person in persons:
models.photo_person.objects.create(
photo=instance, **person)
del person
del pp_list
if rem_persons is not None:
for person in rem_persons:
person_id = person['person_id']
models.photo_person.objects.filter(
photo=instance, person_id=person_id).delete()
if add_persons is not None:
for person in add_persons:
result = models.photo_person.objects\
.filter(photo=instance)\
.aggregate(Max('position'))
position_max = result['position__max'] or 0
person_id = person['person_id']
position = position_max + 1
models.photo_person.objects.get_or_create(
photo=instance, person_id=person_id,
defaults={'position': position})
return instance
def set_request(self, request):
    """Attach the request, then hide staff-only fields from non-staff users."""
    super(PhotoSerializer, self).set_request(request)
    staff_user = request.user.is_staff
    if not staff_user:
        # 'orig_url' exposes the original file location; staff only.
        self.fields.pop('orig_url')
class Meta:
    # Serializer configuration for the photo model.
    model = models.photo
    extra_kwargs = {
        'name': {'read_only': True},       # filename is server-assigned
        'timestamp': {'read_only': True},  # set automatically on save
        'action': {'required': False},
    }
    list_serializer_class = PhotoListSerializer
    # NOTE(review): 'title' and 'photographer' were each listed twice in
    # this list; the duplicates are removed (same field set, no behavior
    # change intended).
    fields = [
        'id', 'orig_url', 'title',
        'albums', 'albums_pk', 'add_albums_pk', 'rem_albums_pk',
        'categorys',
        'categorys_pk', 'add_categorys_pk', 'rem_categorys_pk',
        'place', 'place_pk',
        'persons', 'persons_pk',
        'add_persons_pk', 'rem_persons_pk',
        'photographer', 'photographer_pk',
        'feedbacks', 'thumbs', 'videos',
        'name', 'view', 'rating',
        'description', 'utc_offset', 'datetime', 'camera_make',
        'camera_model', 'flash_used', 'focal_length', 'exposure',
        'compression', 'aperture', 'level', 'iso_equiv', 'metering_mode',
        'focus_dist', 'ccd_width', 'comment', 'action', 'timestamp',
        'relations'
    ]
| gpl-3.0 |
airbnb/airflow | tests/ti_deps/deps/test_task_concurrency.py | 7 | 2409 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from unittest.mock import Mock
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.deps.task_concurrency_dep import TaskConcurrencyDep
class TestTaskConcurrencyDep(unittest.TestCase):
    """Tests for TaskConcurrencyDep, which gates a task instance on its
    task's ``task_concurrency`` limit."""

    def _get_task(self, **kwargs):
        # Helper: build a minimal operator inside a throwaway DAG.
        return BaseOperator(task_id='test_task', dag=DAG('test_dag'), **kwargs)

    def test_not_task_concurrency(self):
        """With no task_concurrency configured, the dep is always met."""
        task = self._get_task(start_date=datetime(2016, 1, 1))
        dep_context = DepContext()
        ti = Mock(task=task, execution_date=datetime(2016, 1, 1))
        self.assertTrue(TaskConcurrencyDep().is_met(ti=ti, dep_context=dep_context))

    def test_not_reached_concurrency(self):
        """Dep is met while running task instances are below the limit."""
        task = self._get_task(start_date=datetime(2016, 1, 1), task_concurrency=1)
        dep_context = DepContext()
        ti = Mock(task=task, execution_date=datetime(2016, 1, 1))
        ti.get_num_running_task_instances = lambda x: 0
        self.assertTrue(TaskConcurrencyDep().is_met(ti=ti, dep_context=dep_context))

    def test_reached_concurrency(self):
        """Dep fails once running task instances reach the limit."""
        task = self._get_task(start_date=datetime(2016, 1, 1), task_concurrency=2)
        dep_context = DepContext()
        ti = Mock(task=task, execution_date=datetime(2016, 1, 1))
        ti.get_num_running_task_instances = lambda x: 1
        self.assertTrue(TaskConcurrencyDep().is_met(ti=ti, dep_context=dep_context))
        ti.get_num_running_task_instances = lambda x: 2
        self.assertFalse(TaskConcurrencyDep().is_met(ti=ti, dep_context=dep_context))
| apache-2.0 |
remh/dd-agent | checks.d/postgres.py | 12 | 25223 | """PostgreSQL check
Collects database-wide metrics and optionally per-relation metrics, custom metrics.
"""
# stdlib
import socket
# 3rd party
import pg8000 as pg
from pg8000 import InterfaceError, ProgrammingError
# project
from checks import AgentCheck, CheckException
from config import _is_affirmative
MAX_CUSTOM_RESULTS = 100
class ShouldRestartException(Exception):
    """Raised when the postgres connection is broken and must be re-opened."""
    pass
class PostgreSql(AgentCheck):
    """Collects per-database, and optionally per-relation metrics, custom metrics
    """
    SOURCE_TYPE_NAME = 'postgresql'
    # Aliases so the metric tables below can name the submission method
    # (stored as the second element of each metric tuple).
    RATE = AgentCheck.rate
    GAUGE = AgentCheck.gauge
    MONOTONIC = AgentCheck.monotonic_count
    SERVICE_CHECK_NAME = 'postgres.can_connect'
# turning columns into tags
DB_METRICS = {
'descriptors': [
('datname', 'db')
],
'metrics': {},
'query': """
SELECT datname,
%s
FROM pg_stat_database
WHERE datname not ilike 'template%%'
AND datname not ilike 'postgres'
AND datname not ilike 'rdsadmin'
""",
'relation': False,
}
COMMON_METRICS = {
'numbackends' : ('postgresql.connections', GAUGE),
'xact_commit' : ('postgresql.commits', RATE),
'xact_rollback' : ('postgresql.rollbacks', RATE),
'blks_read' : ('postgresql.disk_read', RATE),
'blks_hit' : ('postgresql.buffer_hit', RATE),
'tup_returned' : ('postgresql.rows_returned', RATE),
'tup_fetched' : ('postgresql.rows_fetched', RATE),
'tup_inserted' : ('postgresql.rows_inserted', RATE),
'tup_updated' : ('postgresql.rows_updated', RATE),
'tup_deleted' : ('postgresql.rows_deleted', RATE),
'pg_database_size(datname) as pg_database_size' : ('postgresql.database_size', GAUGE),
}
NEWER_92_METRICS = {
'deadlocks' : ('postgresql.deadlocks', RATE),
'temp_bytes' : ('postgresql.temp_bytes', RATE),
'temp_files' : ('postgresql.temp_files', RATE),
}
BGW_METRICS = {
'descriptors': [],
'metrics': {},
'query': "select %s FROM pg_stat_bgwriter",
'relation': False,
}
COMMON_BGW_METRICS = {
'checkpoints_timed' : ('postgresql.bgwriter.checkpoints_timed', MONOTONIC),
'checkpoints_req' : ('postgresql.bgwriter.checkpoints_requested', MONOTONIC),
'buffers_checkpoint' : ('postgresql.bgwriter.buffers_checkpoint', MONOTONIC),
'buffers_clean' : ('postgresql.bgwriter.buffers_clean', MONOTONIC),
'maxwritten_clean' : ('postgresql.bgwriter.maxwritten_clean', MONOTONIC),
'buffers_backend' : ('postgresql.bgwriter.buffers_backend', MONOTONIC),
'buffers_alloc' : ('postgresql.bgwriter.buffers_alloc', MONOTONIC),
}
NEWER_91_BGW_METRICS = {
'buffers_backend_fsync': ('postgresql.bgwriter.buffers_backend_fsync', MONOTONIC),
}
NEWER_92_BGW_METRICS = {
'checkpoint_write_time': ('postgresql.bgwriter.write_time', MONOTONIC),
'checkpoint_sync_time' : ('postgresql.bgwriter.sync_time', MONOTONIC),
}
LOCK_METRICS = {
'descriptors': [
('mode', 'lock_mode'),
('relname', 'table'),
],
'metrics': {
'lock_count' : ('postgresql.locks', GAUGE),
},
'query': """
SELECT mode,
pc.relname,
count(*) AS %s
FROM pg_locks l
JOIN pg_class pc ON (l.relation = pc.oid)
WHERE l.mode IS NOT NULL
AND pc.relname NOT LIKE 'pg_%%'
GROUP BY pc.relname, mode""",
'relation': False,
}
REL_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema'),
],
'metrics': {
'seq_scan' : ('postgresql.seq_scans', RATE),
'seq_tup_read' : ('postgresql.seq_rows_read', RATE),
'idx_scan' : ('postgresql.index_scans', RATE),
'idx_tup_fetch' : ('postgresql.index_rows_fetched', RATE),
'n_tup_ins' : ('postgresql.rows_inserted', RATE),
'n_tup_upd' : ('postgresql.rows_updated', RATE),
'n_tup_del' : ('postgresql.rows_deleted', RATE),
'n_tup_hot_upd' : ('postgresql.rows_hot_updated', RATE),
'n_live_tup' : ('postgresql.live_rows', GAUGE),
'n_dead_tup' : ('postgresql.dead_rows', GAUGE),
},
'query': """
SELECT relname,schemaname,%s
FROM pg_stat_user_tables
WHERE relname = ANY(%s)""",
'relation': True,
}
IDX_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema'),
('indexrelname', 'index')
],
'metrics': {
'idx_scan' : ('postgresql.index_scans', RATE),
'idx_tup_read' : ('postgresql.index_rows_read', RATE),
'idx_tup_fetch' : ('postgresql.index_rows_fetched', RATE),
},
'query': """
SELECT relname,
schemaname,
indexrelname,
%s
FROM pg_stat_user_indexes
WHERE relname = ANY(%s)""",
'relation': True,
}
SIZE_METRICS = {
'descriptors': [
('relname', 'table'),
],
'metrics': {
'pg_table_size(C.oid) as table_size' : ('postgresql.table_size', GAUGE),
'pg_indexes_size(C.oid) as index_size' : ('postgresql.index_size', GAUGE),
'pg_total_relation_size(C.oid) as total_size' : ('postgresql.total_size', GAUGE),
},
'relation': True,
'query': """
SELECT
relname,
%s
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema') AND
nspname !~ '^pg_toast' AND
relkind IN ('r') AND
relname = ANY(%s)"""
}
COUNT_METRICS = {
'descriptors': [
('schemaname', 'schema')
],
'metrics': {
'pg_stat_user_tables': ('postgresql.table.count', GAUGE),
},
'relation': False,
'query': """
SELECT schemaname, count(*)
FROM %s
GROUP BY schemaname
"""
}
REPLICATION_METRICS_9_1 = {
'CASE WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0 ELSE GREATEST (0, EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())) END': ('postgresql.replication_delay', GAUGE),
}
REPLICATION_METRICS_9_2 = {
'abs(pg_xlog_location_diff(pg_last_xlog_receive_location(), pg_last_xlog_replay_location())) AS replication_delay_bytes': ('postgres.replication_delay_bytes', GAUGE)
}
REPLICATION_METRICS = {
'descriptors': [],
'metrics': {},
'relation': False,
'query': """
SELECT %s
WHERE (SELECT pg_is_in_recovery())"""
}
CONNECTION_METRICS = {
'descriptors': [],
'metrics': {
'MAX(setting) AS max_connections': ('postgresql.max_connections', GAUGE),
'SUM(numbackends)/MAX(setting) AS pct_connections': ('postgresql.percent_usage_connections', GAUGE),
},
'relation': False,
'query': """
WITH max_con AS (SELECT setting::float FROM pg_settings WHERE name = 'max_connections')
SELECT %s
FROM pg_stat_database, max_con
"""
}
STATIO_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema')
],
'metrics': {
'heap_blks_read' : ('postgresql.heap_blocks_read', RATE),
'heap_blks_hit' : ('postgresql.heap_blocks_hit', RATE),
'idx_blks_read' : ('postgresql.index_blocks_read', RATE),
'idx_blks_hit' : ('postgresql.index_blocks_hit', RATE),
'toast_blks_read' : ('postgresql.toast_blocks_read', RATE),
'toast_blks_hit' : ('postgresql.toast_blocks_hit', RATE),
'tidx_blks_read' : ('postgresql.toast_index_blocks_read', RATE),
'tidx_blks_hit' : ('postgresql.toast_index_blocks_hit', RATE),
},
'query': """
SELECT relname,
schemaname,
%s
FROM pg_statio_user_tables
WHERE relname = ANY(%s)""",
'relation': True,
}
def __init__(self, name, init_config, agentConfig, instances=None):
    AgentCheck.__init__(self, name, init_config, agentConfig, instances)
    # Per-instance caches, keyed by (host, port, dbname):
    self.dbs = {}                  # memoized live connections
    self.versions = {}             # parsed server versions
    self.instance_metrics = {}     # pg_stat_database metric maps
    self.bgw_metrics = {}          # pg_stat_bgwriter metric maps
    self.db_instance_metrics = []  # (host, port) pairs already collected
    self.db_bgw_metrics = []       # (host, port) pairs already collected
    self.replication_metrics = {}  # replication metric maps
    self.custom_metrics = {}       # validated user-defined metrics
def _get_version(self, key, db):
    """Return the server version for this instance, querying it only once.

    The version is a list of ints (e.g. [9, 2, 4]) when it parses, or the
    raw version string otherwise; results are cached per instance key and
    reported as service metadata on first fetch.
    """
    try:
        return self.versions[key]
    except KeyError:
        pass
    cursor = db.cursor()
    cursor.execute('SHOW SERVER_VERSION;')
    raw = cursor.fetchone()[0]
    try:
        parsed = map(int, raw.split('.'))
    except Exception:
        # Non-numeric version (e.g. a beta tag): keep the raw string.
        parsed = raw
    self.versions[key] = parsed
    self.service_metadata('version', parsed)
    return parsed
def _is_above(self, key, db, version_to_compare):
    """True if the server version parsed as a list and is >= version_to_compare.

    Unparseable (string) versions compare as False, which is the
    conservative behavior callers rely on.
    """
    version = self._get_version(key, db)
    # isinstance instead of `type(version) == list`: idiomatic, and also
    # accepts list subclasses.
    if isinstance(version, list):
        return version >= version_to_compare
    return False
def _is_9_1_or_above(self, key, db):
    # Convenience wrapper: PostgreSQL >= 9.1.
    return self._is_above(key, db, [9,1,0])

def _is_9_2_or_above(self, key, db):
    # Convenience wrapper: PostgreSQL >= 9.2.
    return self._is_above(key, db, [9,2,0])
def _get_instance_metrics(self, key, db):
    """Use either COMMON_METRICS or COMMON_METRICS + NEWER_92_METRICS
    depending on the postgres version.
    Uses a dictionary to save the result for each instance.
    Returns None when another instance already covers this server.
    """
    # Extended 9.2+ metrics if needed
    metrics = self.instance_metrics.get(key)
    if metrics is None:
        # Hack to make sure that if we have multiple instances that connect to
        # the same host, port, we don't collect metrics twice
        # as it will result in https://github.com/DataDog/dd-agent/issues/1211
        sub_key = key[:2]
        if sub_key in self.db_instance_metrics:
            # Cache the None so the debug message is logged every run but
            # the sub_key list is not appended twice.
            self.instance_metrics[key] = None
            self.log.debug("Not collecting instance metrics for key: {0} as"
                           " they are already collected by another instance".format(key))
            return None
        self.db_instance_metrics.append(sub_key)
        if self._is_9_2_or_above(key, db):
            self.instance_metrics[key] = dict(self.COMMON_METRICS, **self.NEWER_92_METRICS)
        else:
            self.instance_metrics[key] = dict(self.COMMON_METRICS)
        metrics = self.instance_metrics.get(key)
    return metrics
def _get_bgw_metrics(self, key, db):
    """Use either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS
    depending on the postgres version.
    Uses a dictionary to save the result for each instance.
    Returns None when another instance already covers this server.
    """
    # Extended 9.2+ metrics if needed
    metrics = self.bgw_metrics.get(key)
    if metrics is None:
        # Hack to make sure that if we have multiple instances that connect to
        # the same host, port, we don't collect metrics twice
        # as it will result in https://github.com/DataDog/dd-agent/issues/1211
        sub_key = key[:2]
        if sub_key in self.db_bgw_metrics:
            self.bgw_metrics[key] = None
            self.log.debug("Not collecting bgw metrics for key: {0} as"
                           " they are already collected by another instance".format(key))
            return None
        self.db_bgw_metrics.append(sub_key)
        # Start from the always-available metrics and layer on the
        # version-gated ones.
        self.bgw_metrics[key] = dict(self.COMMON_BGW_METRICS)
        if self._is_9_1_or_above(key, db):
            self.bgw_metrics[key].update(self.NEWER_91_BGW_METRICS)
        if self._is_9_2_or_above(key, db):
            self.bgw_metrics[key].update(self.NEWER_92_BGW_METRICS)
        metrics = self.bgw_metrics.get(key)
    return metrics
def _get_replication_metrics(self, key, db):
    """ Use either REPLICATION_METRICS_9_1 or REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2
    depending on the postgres version.
    Uses a dictionary to save the result for each instance.
    Returns None for servers below 9.1 (no replication metrics there).
    """
    metrics = self.replication_metrics.get(key)
    if self._is_9_1_or_above(key, db) and metrics is None:
        self.replication_metrics[key] = dict(self.REPLICATION_METRICS_9_1)
        if self._is_9_2_or_above(key, db):
            self.replication_metrics[key].update(self.REPLICATION_METRICS_9_2)
        metrics = self.replication_metrics.get(key)
    return metrics
def _build_relations_config(self, yamlconfig):
    """Builds a dictionary from relations configuration while maintaining compatibility

    Accepts either bare relation names or dicts with 'relation_name' and
    'schemas' keys. Malformed entries are warned about and skipped.
    """
    config = {}
    for element in yamlconfig:
        try:
            # basestring (not str): PyYAML on Python 2 may yield unicode
            # strings, which the previous `str` check wrongly rejected.
            if isinstance(element, basestring):
                config[element] = {'relation_name': element, 'schemas': []}
            elif isinstance(element, dict):
                name = element['relation_name']
                # Build the entry atomically so a missing 'schemas' key
                # no longer leaves a partial (empty) entry behind.
                config[name] = {
                    'relation_name': name,
                    'schemas': element['schemas'],
                }
            else:
                self.log.warn('Unhandled relations config type: %s' % str(element))
        except KeyError:
            self.log.warn('Failed to parse config element=%s, check syntax' % str(element))
    return config
def _collect_stats(self, key, db, instance_tags, relations, custom_metrics):
    """Query pg_stat_* for various metrics
    If relations is not an empty list, gather per-relation metrics
    on top of that.
    If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml

    Raises ShouldRestartException on connection-level failures so the
    caller can reconnect and retry.
    """
    metric_scope = [
        self.CONNECTION_METRICS,
        self.LOCK_METRICS,
        self.COUNT_METRICS,
    ]
    # These are added only once per PG server, thus the test
    db_instance_metrics = self._get_instance_metrics(key, db)
    bgw_instance_metrics = self._get_bgw_metrics(key, db)
    if db_instance_metrics is not None:
        # FIXME: constants shouldn't be modified
        self.DB_METRICS['metrics'] = db_instance_metrics
        metric_scope.append(self.DB_METRICS)
    if bgw_instance_metrics is not None:
        # FIXME: constants shouldn't be modified
        self.BGW_METRICS['metrics'] = bgw_instance_metrics
        metric_scope.append(self.BGW_METRICS)
    # Do we need relation-specific metrics?
    if relations:
        metric_scope += [
            self.REL_METRICS,
            self.IDX_METRICS,
            self.SIZE_METRICS,
            self.STATIO_METRICS
        ]
        relations_config = self._build_relations_config(relations)
    replication_metrics = self._get_replication_metrics(key, db)
    if replication_metrics is not None:
        # FIXME: constants shouldn't be modified
        self.REPLICATION_METRICS['metrics'] = replication_metrics
        metric_scope.append(self.REPLICATION_METRICS)
    full_metric_scope = list(metric_scope) + custom_metrics
    try:
        cursor = db.cursor()
        for scope in full_metric_scope:
            # Replication queries (or anything on a pre-9.0 server) are
            # expected to fail sometimes; log those quietly.
            if scope == self.REPLICATION_METRICS or not self._is_above(key, db, [9,0,0]):
                log_func = self.log.debug
            else:
                log_func = self.log.warning
            # build query
            cols = scope['metrics'].keys()  # list of metrics to query, in some order
            # we must remember that order to parse results
            try:
                # if this is a relation-specific query, we need to list all relations last
                if scope['relation'] and len(relations) > 0:
                    relnames = relations_config.keys()
                    query = scope['query'] % (", ".join(cols), "%s")  # Keep the last %s intact
                    self.log.debug("Running query: %s with relations: %s" % (query, relnames))
                    cursor.execute(query, (relnames, ))
                else:
                    query = scope['query'] % (", ".join(cols))
                    self.log.debug("Running query: %s" % query)
                    cursor.execute(query.replace(r'%', r'%%'))
                results = cursor.fetchall()
            except ProgrammingError, e:
                log_func("Not all metrics may be available: %s" % str(e))
                continue
            if not results:
                continue
            # Cap user-defined queries so a runaway query cannot flood the
            # agent payload.
            if scope in custom_metrics and len(results) > MAX_CUSTOM_RESULTS:
                self.warning(
                    "Query: {0} returned more than {1} results ({2}). Truncating"
                    .format(query, MAX_CUSTOM_RESULTS, len(results))
                )
                results = results[:MAX_CUSTOM_RESULTS]
            # FIXME this cramps my style
            if scope == self.DB_METRICS:
                self.gauge("postgresql.db.count", len(results),
                           tags=[t for t in instance_tags if not t.startswith("db:")])
            desc = scope['descriptors']
            # parse & submit results
            # A row should look like this
            # (descriptor, descriptor, ..., value, value, value, value, ...)
            # with descriptor a PG relation or index name, which we use to create the tags
            for row in results:
                # Check that all columns will be processed
                assert len(row) == len(cols) + len(desc)
                # build a map of descriptors and their values
                desc_map = dict(zip([x[1] for x in desc], row[0:len(desc)]))
                if 'schema' in desc_map:
                    try:
                        relname = desc_map['table']
                        config_schemas = relations_config[relname]['schemas']
                        if config_schemas and desc_map['schema'] not in config_schemas:
                            continue
                    except KeyError:
                        pass
                # Build tags
                # descriptors are: (pg_name, dd_tag_name): value
                # Special-case the "db" tag, which overrides the one that is passed as instance_tag
                # The reason is that pg_stat_database returns all databases regardless of the
                # connection.
                if not scope['relation']:
                    tags = [t for t in instance_tags if not t.startswith("db:")]
                else:
                    tags = [t for t in instance_tags]
                tags += [("%s:%s" % (k,v)) for (k,v) in desc_map.iteritems()]
                # [(metric-map, value), (metric-map, value), ...]
                # metric-map is: (dd_name, "rate"|"gauge")
                # shift the results since the first columns will be the "descriptors"
                values = zip([scope['metrics'][c] for c in cols], row[len(desc):])
                # To submit simply call the function for each value v
                # v[0] == (metric_name, submit_function)
                # v[1] == the actual value
                # tags are
                [v[0][1](self, v[0][0], v[1], tags=tags) for v in values]
        cursor.close()
    except InterfaceError, e:
        self.log.error("Connection error: %s" % str(e))
        raise ShouldRestartException
    except socket.error, e:
        self.log.error("Connection error: %s" % str(e))
        raise ShouldRestartException
def _get_service_check_tags(self, host, port, dbname):
    """Return the tag list attached to the postgres.can_connect service check."""
    return [
        "host:%s" % host,
        "port:%s" % port,
        "db:%s" % dbname,
    ]
def get_connection(self, key, host, port, user, password, dbname, ssl, use_cached=True):
    "Get and memoize connections to instances"
    if key in self.dbs and use_cached:
        return self.dbs[key]
    elif host != "" and user != "":
        try:
            if host == 'localhost' and password == '':
                # Use ident method
                connection = pg.connect("user=%s dbname=%s" % (user, dbname))
            elif port != '':
                connection = pg.connect(host=host, port=port, user=user,
                                        password=password, database=dbname, ssl=ssl)
            else:
                # No port configured: let the driver use its default.
                connection = pg.connect(host=host, user=user, password=password,
                                        database=dbname, ssl=ssl)
        except Exception as e:
            # Emit a CRITICAL service check before re-raising so the
            # failure is visible even though the check aborts.
            message = u'Error establishing postgres connection: %s' % (str(e))
            service_check_tags = self._get_service_check_tags(host, port, dbname)
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               tags=service_check_tags, message=message)
            raise
    else:
        if not host:
            raise CheckException("Please specify a Postgres host to connect to.")
        elif not user:
            raise CheckException("Please specify a user to connect to Postgres as.")
    # Cache the fresh connection (also refreshes the cache when
    # use_cached is False).
    self.dbs[key] = connection
    return connection
def _get_custom_metrics(self, custom_metrics, key):
    """Validate and cache user-defined metric definitions for an instance.

    Raises CheckException on malformed definitions.
    """
    # Pre-processed cached custom_metrics
    if key in self.custom_metrics:
        return self.custom_metrics[key]
    # Otherwise pre-process custom metrics and verify definition
    required_parameters = ("descriptors", "metrics", "query", "relation")
    for m in custom_metrics:
        for param in required_parameters:
            if param not in m:
                raise CheckException("Missing {0} parameter in custom metric".format(param))
        self.log.debug("Metric: {0}".format(m))
        for ref, (_, mtype) in m['metrics'].iteritems():
            cap_mtype = mtype.upper()
            if cap_mtype not in ('RATE', 'GAUGE', 'MONOTONIC'):
                raise CheckException("Collector method {0} is not known."
                                     " Known methods are RATE, GAUGE, MONOTONIC".format(cap_mtype))
            # Replace the method name with the bound collector function
            # (in-place: assumes the metric spec value is a mutable list,
            # as produced by YAML — TODO confirm for programmatic configs).
            m['metrics'][ref][1] = getattr(PostgreSql, cap_mtype)
            self.log.debug("Method: %s" % (str(mtype)))
    self.custom_metrics[key] = custom_metrics
    return custom_metrics
def check(self, instance):
    """Entry point: connect, collect all metrics, emit the service check."""
    host = instance.get('host', '')
    port = instance.get('port', '')
    user = instance.get('username', '')
    password = instance.get('password', '')
    tags = instance.get('tags', [])
    dbname = instance.get('dbname', None)
    relations = instance.get('relations', [])
    ssl = _is_affirmative(instance.get('ssl', False))
    if relations and not dbname:
        self.warning('"dbname" parameter must be set when using the "relations" parameter.')
    if dbname is None:
        dbname = 'postgres'
    key = (host, port, dbname)
    custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key)
    # Clean up tags in case there was a None entry in the instance
    # e.g. if the yaml contains tags: but no actual tags
    if tags is None:
        tags = []
    else:
        tags = list(set(tags))
    # preset tags to the database name
    tags.extend(["db:%s" % dbname])
    self.log.debug("Custom metrics: %s" % custom_metrics)
    # preset tags to the database name
    db = None
    # Collect metrics
    try:
        # Check version
        db = self.get_connection(key, host, port, user, password, dbname, ssl)
        version = self._get_version(key, db)
        self.log.debug("Running check against version %s" % version)
        self._collect_stats(key, db, tags, relations, custom_metrics)
    except ShouldRestartException:
        # The connection went away mid-collection: reconnect once and retry.
        self.log.info("Resetting the connection")
        db = self.get_connection(key, host, port, user, password, dbname, ssl, use_cached=False)
        self._collect_stats(key, db, tags, relations, custom_metrics)
    if db is not None:
        service_check_tags = self._get_service_check_tags(host, port, dbname)
        message = u'Established connection to postgres://%s:%s/%s' % (host, port, dbname)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                           tags=service_check_tags, message=message)
        try:
            # commit to close the current query transaction
            db.commit()
        except Exception, e:
            self.log.warning("Unable to commit: {0}".format(e))
| bsd-3-clause |
davidcoallier/blaze | blaze/table.py | 2 | 15758 | """
The toplevel modules containing the core Blaze datastructures.
* Indexable
* NDArray
* NDTable
* Table
* Array
"""
import numpy as np
from operator import eq
from blaze.sources.descriptors.byteprovider import ByteProvider
from idx import Space, Subspace, Index
from datashape import DataShape, Fixed, dynamic, dshape as _dshape
from layouts.scalar import ChunkedL
from layouts.query import retrieve, write
from expr.graph import ArrayNode, injest_iterable
import metadata as md
from sources.chunked import CArraySource, CTableSource
from sources.canonical import ArraySource
from printer import generic_str, generic_repr
#------------------------------------------------------------------------
# Evaluation Class ( eclass )
#------------------------------------------------------------------------
MANIFEST = 1
DELAYED = 2
#------------------------------------------------------------------------
# Indexable
#------------------------------------------------------------------------
# TODO: Indexable seems to be historical design notes, none of it
# is used in live code
class Indexable(object):
"""
The top abstraction in the Blaze class hierarchy.
An index is a mapping from a domain specification to a collection of
byte or subtables. Indexable objects can be sliced/getitemed to
return some other object in the Blaze system.
"""
#------------------------------------------------------------------------
# Slice/stride/getitem interface
#
# Define explicit indexing operations, as distinguished from operator
# overloads, so that we can more easily guard, test, and validate
# indexing calls based on their semantic intent, and not merely based
# on the type of the operand. Such dispatch may be done in the overloaded
# operators, but that is a matter of syntactic sugar for end-user benefit.
#------------------------------------------------------------------------
def slice(self, slice_obj):
""" Extracts a subset of values from this object. If there is
no inner dimension, then this should return a scalar. Slicing
typically preserves the data parallelism of the slicee, and the
index-space transform is computable in constant time.
"""
raise NotImplementedError
def query(self, query_expr):
""" Queries this object and produces a view or an actual copy
of data (or a deferred eval object which can produce those). A
query is typically a value-dependent streaming operation and
produces an indeterminate number of return values.
"""
raise NotImplementedError
def take(self, indices, unique=None):
""" Returns a view or copy of the indicated data. **Indices**
can be another Indexable or a Python iterable. If **unique**
if True, then implies that no indices are duplicated; if False,
then implies that there are definitely duplicates. If None, then
no assumptions can be made about the indices.
take() differs from slice() in that indices may be duplicated.
"""
raise NotImplementedError
#------------------------------------------------------------------------
# Iteration protocol interface
#
# Defines the mechanisms by which other objects can determine the types
# of iteration supported by this object.
#------------------------------------------------------------------------
def returntype(self):
""" Returns the most efficient/general Data Descriptor this object can
return. Returns a value from the list the values defined in
DataDescriptor.desctype: "buflist", "buffer", "streamlist", or
"stream".
"""
raise NotImplementedError
def global_id(self):
"Get a unique global id for this source"
# TODO: make it global :)
return id(self)
#------------------------------------------------------------------------
# Immediate
#------------------------------------------------------------------------
class Array(Indexable):
"""
Manifest array, does not create a graph. Forces evaluation on every
call.
Parameters
----------
obj : A list of byte providers, other NDTables or a Python object.
Optional
--------
datashape : dshape
Manual datashape specification for the table, if None then
shape will be inferred if possible.
metadata :
Manual datashape specification for the table, if None then
shape will be inferred if possible.
Usage
-----
>>> Array([1,2,3])
>>> Array([1,2,3], dshape='3, int32')
>>> Array([1,2,3], dshape('3, int32'))
>>> Array([1,2,3], params=params(clevel=3, storage='file'))
"""
eclass = MANIFEST
_metaheader = [
md.manifest,
md.arraylike,
]
def __init__(self, obj, dshape=None, metadata=None, layout=None,
             params=None):
    """Build a manifest Array over *obj*.

    Parameters
    ----------
    obj : ByteProvider or raw data
        Used directly as the data source when it is already a
        ByteProvider, otherwise wrapped in a CArraySource.
    dshape : str or DataShape, optional
        Manual datashape; inferred from the data when omitted.
    metadata : list, optional
        Extra metadata entries appended to the standard header.
    layout : optional
        Explicit layout; defaults to the source's default layout.
    params : optional
        Backend parameters forwarded to CArraySource.
    """
    # Datashape: parse strings, infer when absent, validate when given.
    if isinstance(dshape, basestring):
        dshape = _dshape(dshape)
    if not dshape:
        # The user just passed in a raw data source: infer the layout
        # or fall back on dynamic types.
        self._datashape = dshape = CArraySource.infer_datashape(obj)
    else:
        # The user overlayed a custom dshape: check that it makes sense.
        CArraySource.check_datashape(obj, given_dshape=dshape)
        self._datashape = dshape
    # Values: mimic NumPy in accepting a variety of first arguments.
    if isinstance(obj, ByteProvider):
        self.data = obj
    else:
        self.data = CArraySource(obj, params=params)
    # children graph nodes
    self.children = []
    self.space = Space(self.data)
    # Layout: honor an explicit layout, else ask the source.
    if layout:
        self._layout = layout
    else:
        self._layout = self.data.default_layout()
    # Metadata: fixed header plus any user-supplied entries.
    # Fix: use Array's own header, not NDArray's (copy-paste slip; the
    # two headers currently hold the same values, so no behavior change).
    self._metadata = Array._metaheader + (metadata or [])
    # Parameters
    self.params = params
#------------------------------------------------------------------------
# Properties
#------------------------------------------------------------------------
@property
def datashape(self):
    """
    Type deconstructor
    """
    return self._datashape

@property
def size(self):
    """
    Size of the Array.
    """
    # TODO: need to generalize, not every Array will look
    # like Numpy
    # NOTE(review): this sums the fixed dimensions; an element count
    # would normally be their product — confirm intent before changing.
    return sum(i.val for i in self._datashape.parameters[:-1])

@property
def backends(self):
    """
    The storage backends that make up the space behind the
    Array.
    """
    return iter(self.space)
#------------------------------------------------------------------------
# Basic Slicing
#------------------------------------------------------------------------
# Immediete slicing
# Immediete slicing
def __getitem__(self, indexer):
    # Translate the indexer through the layout's coordinate transform.
    cc = self._layout.change_coordinates
    return retrieve(cc, indexer)

# Immediete slicing ( Side-effectful )
def __setitem__(self, indexer, value):
    cc = self._layout.change_coordinates
    write(cc, indexer, value)

def __iter__(self):
    raise NotImplementedError
def __eq__(self, other):
    # Equality is not defined for manifest arrays yet. Fix: accept the
    # standard (self, other) signature — the original took only `self`,
    # so any `a == b` raised TypeError on arity instead of reaching the
    # intended NotImplementedError.
    raise NotImplementedError
def __str__(self):
    # deferred=False: this is a manifest container.
    return generic_str(self, deferred=False)

def __repr__(self):
    return generic_repr('Array', self, deferred=False)
class NDArray(Indexable, ArrayNode):
    """
    Deferred array, operations on this array create a graph built
    around an ArrayNode.
    """
    eclass = DELAYED
    # NOTE(review): eclass is DELAYED but this header lists md.manifest,
    # while NDTable (also DELAYED) lists md.deferred — looks like a
    # copy-paste slip. Confirm before changing: other code in this file
    # reads NDArray._metaheader.
    _metaheader = [
        md.manifest,
        md.arraylike,
    ]
def __init__(self, obj, dshape=None, metadata=None, layout=None,
params=None):
# Datashape
# ---------
if isinstance(dshape, basestring):
dshape = _dshape(dshape)
if not dshape:
# The user just passed in a raw data source, try
# and infer how it should be layed out or fail
# back on dynamic types.
self._datashape = dshape = CArraySource.infer_datashape(obj)
else:
# The user overlayed their custom dshape on this
# data, check if it makes sense
CArraySource.check_datashape(obj, given_dshape=dshape)
self._datashape = dshape
# Values
# ------
# Mimic NumPy behavior in that we have a variety of
# possible arguments to the first argument which result
# in different behavior for the values.
if isinstance(obj, CArraySource):
self.data = obj
else:
self.data = CArraySource(obj, params)
# children graph nodes
self.children = []
self.space = Space(self.data)
# Layout
# ------
if layout:
self._layout = layout
elif not layout:
self._layout = ChunkedL(self.data, cdimension=0)
# Metadata
# --------
self._metadata = NDArray._metaheader + (metadata or [])
# Parameters
# ----------
self.params = params
def __str__(self):
return generic_str(self, deferred=True)
def __repr__(self):
return generic_repr('NDArray', self, deferred=True)
#------------------------------------------------------------------------
# Properties
#------------------------------------------------------------------------
@property
def name(self):
return repr(self)
@property
def datashape(self):
"""
Type deconstructor
"""
return self._datashape
@property
def size(self):
"""
Size of the NDArray.
"""
# TODO: need to generalize, not every Array will look
# like Numpy
return sum(i.val for i in self._datashape.parameters[:-1])
@property
def backends(self):
"""
The storage backends that make up the space behind the
Array.
"""
return iter(self.space)
#------------------------------------------------------------------------
# NDTable
#------------------------------------------------------------------------
# Here's how the multiple inheritance boils down. Going to remove
# the multiple inheritance at some point because it's not kind
# for other developers.
#
# Indexable
# =========
#
# index1d : function
# indexnd : function
# query : function
# returntype : function
# slice : function
# take : function
#
#
# ArrayNode
# =========
#
# children : attribute
# T : function
# dtype : function
# flags : function
# flat : function
# imag : function
# itemsize : function
# ndim : function
# real : function
# shape : function
# size : function
# strides : function
# tofile : function
# tolist : function
# tostring : function
# __len__ : function
# __getitem__ : function
class Table(Indexable):
    """
    Manifest (immediately evaluated) table container.

    Wraps a raw data source -- or an existing ByteProvider -- together
    with a datashape, a layout and a metadata list.

    :param obj: raw data source, or a ByteProvider used as-is
    :param dshape: optional datashape (string or datashape object);
        inferred from ``obj`` when omitted
    :param metadata: extra metadata appended after the class header
    :param layout: explicit layout; defaults to the source's default
    :param params: backend parameters, stored verbatim
    """
    eclass = MANIFEST

    # Metadata header prepended to every instance's metadata list.
    _metaheader = [
        md.manifest,
        md.tablelike,
    ]

    def __init__(self, obj, dshape=None, metadata=None, layout=None,
                 params=None):
        # Datashape
        # ---------
        if isinstance(dshape, basestring):
            dshape = _dshape(dshape)

        if not dshape:
            # The user just passed in a raw data source, try
            # and infer how it should be layed out or fail
            # back on dynamic types.
            self._datashape = dshape = CTableSource.infer_datashape(obj)
        else:
            # The user overlayed their custom dshape on this
            # data, check if it makes sense
            CTableSource.check_datashape(obj, given_dshape=dshape)
            self._datashape = dshape

        # Source
        # ------
        if isinstance(obj, ByteProvider):
            self.data = obj
        else:
            self.data = CTableSource(obj, dshape=dshape, params=params)

        # children graph nodes
        self.children = []

        self.space = Space(self.data)

        # Layout
        # ------
        if layout:
            self._layout = layout
        elif not layout:
            self._layout = self.data.default_layout()

        # Metadata
        # --------
        # BUG FIX: this previously read NDTable._metaheader (the
        # *deferred* header), tagging manifest tables with md.deferred.
        # Use this class's own header via self so subclasses also pick
        # up theirs.
        self._metadata = self._metaheader + (metadata or [])

        # Parameters
        # ----------
        self.params = params
class NDTable(Indexable, ArrayNode):
    """
    The base NDTable. Indexable contains the indexing logic for
    how to access elements, while ArrayNode contains the graph
    related logic for building expression trees with this table
    as an element.
    """
    eclass = DELAYED

    # Metadata header prepended to every instance's metadata list.
    _metaheader = [
        md.deferred,
        md.tablelike,
    ]

    #------------------------------------------------------------------------
    # Properties
    #------------------------------------------------------------------------

    def __init__(self, obj, dshape=None, metadata=None, layout=None,
                 params=None):
        """
        :param obj: raw data source, or a ByteProvider used as-is
        :param dshape: optional datashape (string or datashape object);
            inferred from ``obj`` when omitted
        :param metadata: extra metadata appended after the class header
        :param layout: explicit layout; defaults to the source's default
        :param params: backend parameters, stored verbatim
        """
        # Datashape
        # ---------
        if isinstance(dshape, basestring):
            dshape = _dshape(dshape)

        if not dshape:
            # The user just passed in a raw data source, try
            # and infer how it should be layed out or fail
            # back on dynamic types.
            self._datashape = dshape = CTableSource.infer_datashape(obj)
        else:
            # The user overlayed their custom dshape on this
            # data, check if it makes sense
            CTableSource.check_datashape(obj, given_dshape=dshape)
            self._datashape = dshape

        # Source
        # ------
        if isinstance(obj, ByteProvider):
            self.data = obj
        else:
            self.data = CTableSource(obj, dshape=dshape, params=params)

        # children graph nodes
        self.children = []

        self.space = Space(self.data)

        # Layout
        # ------
        if layout:
            self._layout = layout
        elif not layout:
            # No explicit layout given: ask the source for its default.
            self._layout = self.data.default_layout()

        # Metadata
        # --------
        self._metadata = NDTable._metaheader + (metadata or [])

        # Parameters
        # ----------
        self.params = params

    @property
    def datashape(self):
        """
        Type deconstructor: the datashape this table was constructed
        (or inferred) with.
        """
        return self._datashape

    @property
    def size(self):
        """
        Size of the NDTable.
        """
        # TODO: need to generalize, not every Array will look
        # like Numpy
        return sum(i.val for i in self._datashape.parameters[:-1])

    @property
    def backends(self):
        """
        The storage backends that make up the space behind the Array.
        """
        return iter(self.space)

    def __repr__(self):
        return generic_repr('NDTable', self, deferred=True)
def infer_eclass(a, b):
    """Combine two evaluation classes.

    The result is DELAYED only when both operands are DELAYED; any mix
    involving MANIFEST yields MANIFEST.  Pairs outside these four
    combinations fall through (returning None), matching the original
    case-by-case dispatch.
    """
    pair = (a, b)
    if pair == (DELAYED, DELAYED):
        return DELAYED
    if pair in ((MANIFEST, MANIFEST), (MANIFEST, DELAYED), (DELAYED, MANIFEST)):
        return MANIFEST
| bsd-2-clause |
littlecodersh/itchatmp | itchatmp/controllers/mpapi/mp/utils.py | 1 | 3776 | import logging, io
from tornado import gen
from ..requests import requests
from itchatmp.utils import retry, encode_send_dict
from itchatmp.config import SERVER_URL, COROUTINE
from itchatmp.returnvalues import ReturnValue
logger = logging.getLogger('itchatmp')
def create_qrcode(sceneData, expire=2592000, accessToken=None):
    ''' create qrcode with specific data

     * qrcode can be permanent, if so you need to set expire to False
     * sceneData can be string or integer if it's permanent
     * but it can only be integer if it's not

    Returns the request object with a _wrap_result hook attached; the
    wrapped result is a ReturnValue with errcode 0 when a 'ticket' is
    present in the server response.
    '''
    data = {'action_info': {'scene': {}}}
    try:
        expire = int(expire)
    except ValueError:
        return ReturnValue({'errcode': -10003, 'errmsg': 'expire should be int'})
    if not (isinstance(sceneData, int) or hasattr(sceneData, 'capitalize')):
        # hasattr(..., 'capitalize') accepts both str and unicode
        return ReturnValue({'errcode': -10003, 'errmsg':
            'sceneData should be int or string'})
    if expire:
        # Temporary qrcode: only integer scene data is accepted and the
        # lifetime is clamped to at most 30 days (2592000 seconds).
        if not isinstance(sceneData, int):
            return ReturnValue({'errcode': -10003, 'errmsg':
                'sceneData for tmp qrcode can only be int'})
        if not 0 < expire < 2592000:
            expire = 2592000
        data['expire_seconds'] = expire
        data['action_name'] = 'QR_SCENE'
        data['action_info']['scene']['scene_id'] = sceneData
    else:
        # Permanent qrcode: action name depends on scene data type.
        if isinstance(sceneData, int):
            data['action_name'] = 'QR_LIMIT_SCENE'
            data['action_info']['scene']['scene_id'] = sceneData
        else:
            data['action_name'] = 'QR_LIMIT_STR_SCENE'
            data['action_info']['scene']['scene_str'] = sceneData
    data = encode_send_dict(data)
    if data is None: return ReturnValue({'errcode': -10001})
    r = requests.post('%s/cgi-bin/qrcode/create?access_token=%s'
        % (SERVER_URL, accessToken), data=data)
    def _wrap_result(result):
        # A 'ticket' key in the json payload signals success.
        result = ReturnValue(result.json())
        if 'ticket' in result:
            result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
def download_qrcode(ticket):
    ''' download the qrcode image for a ticket returned by create_qrcode

    Returns a ReturnValue: on success it holds an in-memory io.BytesIO
    under the 'File' key with errcode 0; if the server answers with a
    json payload (an error) that payload is returned instead.  When
    COROUTINE is set the inner worker is a tornado coroutine.
    '''
    if COROUTINE:
        @gen.coroutine
        def _download_qrcode(ticket):
            params = {'ticket': ticket}
            r = yield requests.get('https://mp.weixin.qq.com/cgi-bin/showqrcode',
                params=params, stream=True)
            if 'application/json' in r.headers['Content-Type']:
                # Server returned an error payload instead of an image.
                r = ReturnValue(r.json())
            else:
                tempStorage = io.BytesIO()
                for block in r.iter_content(1024):
                    tempStorage.write(block)
                r = ReturnValue({'File': tempStorage, 'errcode': 0})
            raise gen.Return(r)
    else:
        def _download_qrcode(ticket):
            params = {'ticket': ticket}
            r = requests.get('https://mp.weixin.qq.com/cgi-bin/showqrcode',
                params=params, stream=True)
            if 'application/json' in r.headers['Content-Type']:
                # Server returned an error payload instead of an image.
                r = ReturnValue(r.json())
            else:
                tempStorage = io.BytesIO()
                for block in r.iter_content(1024):
                    tempStorage.write(block)
                r = ReturnValue({'File': tempStorage, 'errcode': 0})
            return r
    # BUG FIX: the ticket was not forwarded — `_download_qrcode()`
    # raised TypeError for the missing positional argument on every call.
    return _download_qrcode(ticket)
def long_url_to_short(url, accessToken=None):
    ''' shorten a long url via the mp shorturl endpoint

    Returns the request object with a _wrap_result hook that decodes
    the json body into a ReturnValue.
    '''
    payload = encode_send_dict({'action': 'long2short', 'long_url': url})
    if payload is None:
        return ReturnValue({'errcode': -10001})
    endpoint = '%s/cgi-bin/shorturl?access_token=%s' % (SERVER_URL, accessToken)
    r = requests.post(endpoint, data=payload)
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
| mit |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/django/conf/locale/ko/formats.py | 107 | 2105 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Korean dates read year-month-day with localized suffixes.
DATE_FORMAT = 'Y년 n월 j일'
TIME_FORMAT = 'A g:i:s'
DATETIME_FORMAT = 'Y년 n월 j일 g:i:s A'
YEAR_MONTH_FORMAT = 'Y년 F월'
MONTH_DAY_FORMAT = 'F월 j일'
SHORT_DATE_FORMAT = 'Y-n-j.'
SHORT_DATETIME_FORMAT = 'Y-n-j H:i'
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
    '%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix.
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
    '%H시 %M분 %S초', # '14시 30분 59초'
    '%H시 %M분', # '14시 30분'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
    '%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초'
    '%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분'
)

# Number formatting: 1,234,567.89
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
the-blue-alliance/the-blue-alliance | helpers/validation_helper.py | 4 | 5159 | from consts.district_type import DistrictType
from models.district import District
from models.event import Event
from models.match import Match
from models.team import Team
import tba_config
class ValidationHelper(object):
    """
    A collection of methods to validate model ids and return standard
    error messages if they are invalid.

    Every validator returns an error dict when validation fails and
    (implicitly) None when the input is valid.
    """
    @classmethod
    def validate(cls, validators):
        """
        Takes a list of tuples that defines a call to a validator
        (ie team_id_validator) and its corresponding value to validate.
        Returns a dictionary of error messages if invalid, None if all
        values pass.

        Example: ValidationHelper.validate([('team_id_validator', 'frc101')])
        """
        error_dict = { "Errors": list() }
        valid = True
        for v in validators:
            # v is (validator_method_name, value_to_validate)
            results = getattr(ValidationHelper, v[0])(v[1])
            if results:
                error_dict["Errors"].append(results)
                valid = False

        if valid is False:
            return error_dict

    @classmethod
    def validate_request(cls, handler):
        """
        Validate the route kwargs of a request handler (team_key,
        event_key, match_key, district_key, year).

        Format checks run first; existence checks are kicked off as
        parallel async datastore gets and resolved afterwards.  Returns
        an error dict if anything is invalid, otherwise None.
        """
        kwargs = handler.request.route_kwargs
        error_dict = {'Errors': []}
        valid = True
        team_future = None
        event_future = None
        match_future = None
        district_future = None

        # Check key formats
        if 'team_key' in kwargs:
            team_key = kwargs['team_key']
            results = cls.team_id_validator(team_key)
            if results:
                error_dict['Errors'].append(results)
                valid = False
            else:
                team_future = Team.get_by_id_async(team_key)
        if 'event_key' in kwargs:
            event_key = kwargs['event_key']
            results = cls.event_id_validator(event_key)
            if results:
                error_dict['Errors'].append(results)
                valid = False
            else:
                event_future = Event.get_by_id_async(event_key)
        if 'match_key' in kwargs:
            match_key = kwargs['match_key']
            results = cls.match_id_validator(match_key)
            if results:
                error_dict['Errors'].append(results)
                valid = False
            else:
                match_future = Match.get_by_id_async(match_key)
        if 'district_key' in kwargs:
            district_key = kwargs['district_key']
            results = cls.district_id_validator(district_key)
            if results:
                error_dict['Errors'].append(results)
                valid = False
            else:
                district_future = District.get_by_id_async(district_key)
        if 'year' in kwargs:
            # Year must fall within the configured supported range.
            year = int(kwargs['year'])
            if year > tba_config.MAX_YEAR or year < tba_config.MIN_YEAR:
                error_dict['Errors'].append({'year': 'Invalid year: {}. Must be between {} and {} inclusive.'.format(year, tba_config.MIN_YEAR, tba_config.MAX_YEAR)})
                valid = False

        # Check if keys exist (resolve the async gets started above)
        if team_future and team_future.get_result() is None:
            error_dict['Errors'].append({'team_id': 'team id {} does not exist'.format(team_key)})
            valid = False
        if event_future and event_future.get_result() is None:
            error_dict['Errors'].append({'event_id': 'event id {} does not exist'.format(event_key)})
            valid = False
        if match_future and match_future.get_result() is None:
            error_dict['Errors'].append({'match_id': 'match id {} does not exist'.format(match_key)})
            valid = False
        if district_future and district_future.get_result() is None:
            error_dict['Errors'].append({'district_id': 'district id {} does not exist'.format(district_key)})
            valid = False

        if not valid:
            return error_dict

    @classmethod
    def is_valid_model_key(cls, key):
        """True if key is a valid key for any supported model type."""
        return (Team.validate_key_name(key) or
                Event.validate_key_name(key) or
                Match.validate_key_name(key) or
                District.validate_key_name(key))

    @classmethod
    def team_id_validator(cls, value):
        """Return an error dict if value is not a valid team key."""
        error_message = "{} is not a valid team id".format(value)
        team_key_error = { "team_id": error_message}
        if Team.validate_key_name(value) is False:
            return team_key_error

    @classmethod
    def event_id_validator(cls, value):
        """Return an error dict if value is not a valid event key."""
        error_message = "{} is not a valid event id".format(value)
        event_key_error = { "event_id": error_message}
        if Event.validate_key_name(value) is False:
            return event_key_error

    @classmethod
    def match_id_validator(cls, value):
        """Return an error dict if value is not a valid match key."""
        error_message = "{} is not a valid match id".format(value)
        match_key_error = { "match_id": error_message}
        if Match.validate_key_name(value) is False:
            return match_key_error

    @classmethod
    def district_id_validator(cls, value):
        """Return an error dict if value is not a valid district key."""
        error_message = "{} is not a valid district abbreviation".format(value)
        district_key_error = {"district_abbrev": error_message}
        if District.validate_key_name(value) is False:
            return district_key_error
| mit |
coursemdetw/2014c2 | exts/w2/static/Brython2.0.0-20140209-164925/Lib/unittest/test/test_case.py | 738 | 51689 | import difflib
import pprint
import pickle
import re
import sys
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
    "Keep these TestCase classes out of the main namespace"

    class Foo(unittest.TestCase):
        # Minimal TestCase exposing both a default runTest and a named
        # test method.
        def runTest(self): pass
        def test1(self): pass

    class Bar(Foo):
        # Subclass adding a second named test method.
        def test2(self): pass

    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""

        def __init__(self, events):
            # Always runs the 'test' method; lifecycle calls are
            # appended to the shared *events* list for later assertions.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events

        def setUp(self):
            self.events.append('setUp')

        def test(self):
            self.events.append('test')

        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
    """_truncateMessage: truncating, disabled (None), and short-enough cases."""
    # Diff longer than maxDiff: replaced by the DIFF_OMITTED marker.
    self.maxDiff = 1
    message = self._truncateMessage('foo', 'bar')
    omitted = unittest.case.DIFF_OMITTED % len('bar')
    self.assertEqual(message, 'foo' + omitted)
    # maxDiff=None disables truncation.
    self.maxDiff = None
    message = self._truncateMessage('foo', 'bar')
    self.assertEqual(message, 'foobar')
    # Diff within the limit: appended unchanged.
    self.maxDiff = 4
    message = self._truncateMessage('foo', 'bar')
    self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
    """assertDictEqual routes its failure message through _truncateMessage."""
    test = unittest.TestCase('assertEqual')

    def truncate(msg, diff):
        return 'foo'
    test._truncateMessage = truncate
    try:
        test.assertDictEqual({}, {1: 0})
    except self.failureException as e:
        # The stubbed truncator's output becomes the whole failure message.
        self.assertEqual(str(e), 'foo')
    else:
        self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
    """assertMultiLineEqual routes its failure message through _truncateMessage."""
    test = unittest.TestCase('assertEqual')

    def truncate(msg, diff):
        return 'foo'
    test._truncateMessage = truncate
    try:
        test.assertMultiLineEqual('foo', 'bar')
    except self.failureException as e:
        # The stubbed truncator's output becomes the whole failure message.
        self.assertEqual(str(e), 'foo')
    else:
        self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
    """Strings above _diffThreshold skip difflib and use a plain '%r != %r' message."""
    # check threshold value
    self.assertEqual(self._diffThreshold, 2**16)
    # disable maxDiff to get diff markers
    self.maxDiff = None

    # set a lower threshold value and add a cleanup to restore it
    old_threshold = self._diffThreshold
    self._diffThreshold = 2**8
    self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))

    # under the threshold: diff marker (^) in error message
    s = 'x' * (2**7)
    with self.assertRaises(self.failureException) as cm:
        self.assertEqual(s + 'a', s + 'b')
    self.assertIn('^', str(cm.exception))
    self.assertEqual(s + 'a', s + 'a')

    # over the threshold: diff not used and marker (^) not in error message
    s = 'x' * (2**9)
    # if the path that uses difflib is taken, _truncateMessage will be
    # called -- replace it with explodingTruncation to verify that this
    # doesn't happen
    def explodingTruncation(message, diff):
        raise SystemError('this should not be raised')
    old_truncate = self._truncateMessage
    self._truncateMessage = explodingTruncation
    self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))

    s1, s2 = s + 'a', s + 'b'
    with self.assertRaises(self.failureException) as cm:
        self.assertEqual(s1, s2)
    self.assertNotIn('^', str(cm.exception))
    self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
    self.assertEqual(s + 'a', s + 'a')
def testAssertCountEqual(self):
    """assertCountEqual: same elements in any order, hashable or not."""
    a = object()
    self.assertCountEqual([1, 2, 3], [3, 2, 1])
    self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
    self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
    # 1 == True, so these multisets compare equal element-for-element.
    self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [1, 2] + [3] * 100, [1] * 100 + [2, 3])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [1, "2", "a", "a"], ["a", "2", True, 1])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [10], [10, 11])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [10, 11], [10])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [10, 11, 10], [10, 11])

    # Test that sequences of unhashable objects can be tested for sameness:
    self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
    # Test that iterator of unhashable objects can be tested for sameness:
    self.assertCountEqual(iter([1, 2, [], 3, 4]),
                          iter([1, 2, [], 3, 4]))

    # hashable types, but not orderable
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [], [divmod, 'x', 1, 5j, 2j, frozenset()])
    # comparing dicts
    self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
    # comparing heterogeneous non-hashable sequences
    self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [], [divmod, [], 'x', 1, 5j, 2j, set()])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [[1]], [[2]])

    # Same elements, but not same sequence length
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [1, 1, 2], [2, 1])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [1, 1, "2", "a", "a"], ["2", "2", True, "a"])
    self.assertRaises(self.failureException, self.assertCountEqual,
                      [1, {'b': 2}, None, True], [{'b': 2}, True, None])

    # Same elements which don't reliably compare, in
    # different order, see issue 10242
    a = [{2,4}, {1,2}]
    b = a[::-1]
    self.assertCountEqual(a, b)

    # test utility functions supporting assertCountEqual()
    diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
    expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
    self.assertEqual(diffs, expected)

    diffs = unittest.util._count_diff_all_purpose([[]], [])
    self.assertEqual(diffs, [(1, 0, [])])

    diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
    expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
    self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
    """assertSetEqual compares sets/frozensets by contents; non-sets must fail."""
    set1 = set()
    set2 = set()
    self.assertSetEqual(set1, set2)

    # Non-set operands (None, list, str) are rejected on either side.
    self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
    self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
    self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
    self.assertRaises(self.failureException, self.assertSetEqual, set1, [])

    set1 = set(['a'])
    set2 = set()
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

    set1 = set(['a'])
    set2 = set(['a'])
    self.assertSetEqual(set1, set2)

    set1 = set(['a'])
    set2 = set(['a', 'b'])
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

    # set vs frozenset: equality is by contents, not type.
    set1 = set(['a'])
    set2 = frozenset(['a', 'b'])
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

    set1 = set(['a', 'b'])
    set2 = frozenset(['a', 'b'])
    self.assertSetEqual(set1, set2)

    set1 = set()
    set2 = "foo"
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)

    # make sure any string formatting is tuple-safe
    set1 = set([(0, 1), (2, 3)])
    set2 = set([(4, 5)])
    self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
    """assertMultiLineEqual failure message carries a full ndiff of the inputs."""
    sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
    revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
    # NOTE(review): the alignment spaces inside the '?' lines below look
    # collapsed (difflib pads them to column position) -- verify this
    # literal against actual difflib.ndiff output.
    sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
    self.maxDiff = None
    try:
        self.assertMultiLineEqual(sample_text, revised_sample_text)
    except self.failureException as e:
        # need to remove the first line of the error message
        error = str(e).split('\n', 1)[1]

        # no fair testing ourself with ourself, and assertEqual is used for strings
        # so can't use assertEqual either. Just use assertTrue.
        self.assertTrue(sample_text_error == error)
def testAsertEqualSingleLine(self):
    """Single-line assertEqual failure shows an inline caret diff."""
    # NOTE(review): the method name misspells "Assert"; renaming would
    # change test discovery, so it is only flagged here.
    sample_text = "laden swallows fly slowly"
    revised_sample_text = "unladen swallows fly quickly"
    # NOTE(review): the alignment spaces inside the '?' lines look
    # collapsed -- verify this literal against actual difflib output.
    sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
    try:
        self.assertEqual(sample_text, revised_sample_text)
    except self.failureException as e:
        # need to remove the first line of the error message
        error = str(e).split('\n', 1)[1]
        self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
def testAssertNotRaisesRegex(self):
    """assertRaisesRegex itself fails when the callable raises nothing."""
    # NOTE(review): the expected message text ('... by <lambda>') is tied to
    # a specific unittest version -- confirm against the running interpreter.
    self.assertRaisesRegex(
            self.failureException, '^Exception not raised by <lambda>$',
            # compiled-pattern form
            self.assertRaisesRegex, Exception, re.compile('x'),
            lambda: None)
    self.assertRaisesRegex(
            self.failureException, '^Exception not raised by <lambda>$',
            # plain-string-pattern form
            self.assertRaisesRegex, Exception, 'x',
            lambda: None)
def testAssertRaisesRegexMismatch(self):
    """On a non-matching message, the failure quotes pattern and actual text."""
    def Stub():
        raise Exception('Unexpected')
    # String-pattern form.
    self.assertRaisesRegex(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegex, Exception, '^Expected$',
            Stub)
    # Compiled-pattern form produces the same failure text.
    self.assertRaisesRegex(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegex, Exception,
            re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertWarnsCallable(self):
    """Callable form of assertWarns: matching, tuple filters, failure modes."""
    def _runtime_warn():
        warnings.warn("foo", RuntimeWarning)
    # Success when the right warning is triggered, even several times
    self.assertWarns(RuntimeWarning, _runtime_warn)
    self.assertWarns(RuntimeWarning, _runtime_warn)
    # A tuple of warning classes is accepted
    self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
    # *args and **kwargs also work
    self.assertWarns(RuntimeWarning,
                     warnings.warn, "foo", category=RuntimeWarning)
    # Failure when no warning is triggered
    with self.assertRaises(self.failureException):
        self.assertWarns(RuntimeWarning, lambda: 0)
    # Failure when another warning is triggered
    with warnings.catch_warnings():
        # Force default filter (in case tests are run with -We)
        warnings.simplefilter("default", RuntimeWarning)
        with self.assertRaises(self.failureException):
            self.assertWarns(DeprecationWarning, _runtime_warn)
    # Filters for other warnings are not modified
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        with self.assertRaises(RuntimeWarning):
            self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
    """Context-manager form of assertWarns, including the exposed metadata."""
    # Believe it or not, it is preferable to duplicate all tests above,
    # to make sure the __warningregistry__ $@ is circumvented correctly.
    def _runtime_warn():
        warnings.warn("foo", RuntimeWarning)
    _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
    with self.assertWarns(RuntimeWarning) as cm:
        _runtime_warn()
    # A tuple of warning classes is accepted
    with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
        _runtime_warn()
    # The context manager exposes various useful attributes
    self.assertIsInstance(cm.warning, RuntimeWarning)
    self.assertEqual(cm.warning.args[0], "foo")
    self.assertIn("test_case.py", cm.filename)
    self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
    # Same with several warnings
    with self.assertWarns(RuntimeWarning):
        _runtime_warn()
        _runtime_warn()
    with self.assertWarns(RuntimeWarning):
        warnings.warn("foo", category=RuntimeWarning)
    # Failure when no warning is triggered
    with self.assertRaises(self.failureException):
        with self.assertWarns(RuntimeWarning):
            pass
    # Failure when another warning is triggered
    with warnings.catch_warnings():
        # Force default filter (in case tests are run with -We)
        warnings.simplefilter("default", RuntimeWarning)
        with self.assertRaises(self.failureException):
            with self.assertWarns(DeprecationWarning):
                _runtime_warn()
    # Filters for other warnings are not modified
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        with self.assertRaises(RuntimeWarning):
            with self.assertWarns(DeprecationWarning):
                _runtime_warn()
def testAssertWarnsRegexCallable(self):
    """Callable form of assertWarnsRegex: the message must match the pattern."""
    def _runtime_warn(msg):
        warnings.warn(msg, RuntimeWarning)
    self.assertWarnsRegex(RuntimeWarning, "o+",
                          _runtime_warn, "foox")
    # Failure when no warning is triggered
    with self.assertRaises(self.failureException):
        self.assertWarnsRegex(RuntimeWarning, "o+",
                              lambda: 0)
    # Failure when another warning is triggered
    with warnings.catch_warnings():
        # Force default filter (in case tests are run with -We)
        warnings.simplefilter("default", RuntimeWarning)
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(DeprecationWarning, "o+",
                                  _runtime_warn, "foox")
    # Failure when message doesn't match
    with self.assertRaises(self.failureException):
        self.assertWarnsRegex(RuntimeWarning, "o+",
                              _runtime_warn, "barz")
    # A little trickier: we ask RuntimeWarnings to be raised, and then
    # check for some of them. It is implementation-defined whether
    # non-matching RuntimeWarnings are simply re-raised, or produce a
    # failureException.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        with self.assertRaises((RuntimeWarning, self.failureException)):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  _runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
    """Same as above, but with assertWarnsRegex as a context manager."""
    def _runtime_warn(msg):
        warnings.warn(msg, RuntimeWarning)
    _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
    with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
        _runtime_warn("foox")
    # The context manager exposes the caught warning and its origin.
    self.assertIsInstance(cm.warning, RuntimeWarning)
    self.assertEqual(cm.warning.args[0], "foox")
    self.assertIn("test_case.py", cm.filename)
    self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
    # Failure when no warning is triggered
    with self.assertRaises(self.failureException):
        with self.assertWarnsRegex(RuntimeWarning, "o+"):
            pass
    # Failure when another warning is triggered
    with warnings.catch_warnings():
        # Force default filter (in case tests are run with -We)
        warnings.simplefilter("default", RuntimeWarning)
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(DeprecationWarning, "o+"):
                _runtime_warn("foox")
    # Failure when message doesn't match
    with self.assertRaises(self.failureException):
        with self.assertWarnsRegex(RuntimeWarning, "o+"):
            _runtime_warn("barz")
    # A little trickier: we ask RuntimeWarnings to be raised, and then
    # check for some of them. It is implementation-defined whether
    # non-matching RuntimeWarnings are simply re-raised, or produce a
    # failureException.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        with self.assertRaises((RuntimeWarning, self.failureException)):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                _runtime_warn("barz")
def testDeprecatedMethodNames(self):
    """
    Test that the deprecated methods raise a DeprecationWarning. See #9424.
    """
    # NOTE(review): these aliases are slated for removal in later Python
    # versions -- confirm they still exist on the running interpreter.
    old = (
        (self.failIfEqual, (3, 5)),
        (self.assertNotEquals, (3, 5)),
        (self.failUnlessEqual, (3, 3)),
        (self.assertEquals, (3, 3)),
        (self.failUnlessAlmostEqual, (2.0, 2.0)),
        (self.assertAlmostEquals, (2.0, 2.0)),
        (self.failIfAlmostEqual, (3.0, 5.0)),
        (self.assertNotAlmostEquals, (3.0, 5.0)),
        (self.failUnless, (True,)),
        (self.assert_, (True,)),
        (self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
        (self.failIf, (False,)),
        (self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
        (self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
        (self.assertRegexpMatches, ('bar', 'bar')),
    )
    for meth, args in old:
        with self.assertWarns(DeprecationWarning):
            meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
    """Test that the deprecated fail* methods get removed in 3.x"""
    # Leading underscore keeps this out of test discovery until re-enabled.
    if sys.version_info[:2] < (3, 3):
        return
    deprecated_names = [
        'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
        'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
        'assertDictContainsSubset',
    ]
    for deprecated_name in deprecated_names:
        with self.assertRaises(AttributeError):
            getattr(self, deprecated_name)  # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
    """KeyboardInterrupt must propagate out of run() wherever it is raised."""
    def _raise(self=None):
        raise KeyboardInterrupt
    def nothing(self):
        pass

    # One variant per place the interrupt can occur.
    class Test1(unittest.TestCase):
        test_something = _raise          # in the test body

    class Test2(unittest.TestCase):
        setUp = _raise                   # in setUp
        test_something = nothing

    class Test3(unittest.TestCase):
        test_something = nothing
        tearDown = _raise                # in tearDown

    class Test4(unittest.TestCase):
        def test_something(self):
            self.addCleanup(_raise)      # in a cleanup function

    for klass in (Test1, Test2, Test3, Test4):
        with self.assertRaises(KeyboardInterrupt):
            klass('test_something').run()
def testSkippingEverywhere(self):
    """SkipTest is honoured from the test body, setUp, tearDown and cleanups."""
    def _skip(self=None):
        raise unittest.SkipTest('some reason')
    def nothing(self):
        pass

    # One variant per place the skip can occur.
    class Test1(unittest.TestCase):
        test_something = _skip           # in the test body

    class Test2(unittest.TestCase):
        setUp = _skip                    # in setUp
        test_something = nothing

    class Test3(unittest.TestCase):
        test_something = nothing
        tearDown = _skip                 # in tearDown

    class Test4(unittest.TestCase):
        def test_something(self):
            self.addCleanup(_skip)       # in a cleanup function

    for klass in (Test1, Test2, Test3, Test4):
        result = unittest.TestResult()
        klass('test_something').run(result)
        # Each variant records exactly one skip for its single test.
        self.assertEqual(len(result.skipped), 1)
        self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
    """SystemExit is recorded as an error, not propagated, wherever raised."""
    def _raise(self=None):
        raise SystemExit
    def nothing(self):
        pass

    # One variant per place the exit can occur.
    class Test1(unittest.TestCase):
        test_something = _raise          # in the test body

    class Test2(unittest.TestCase):
        setUp = _raise                   # in setUp
        test_something = nothing

    class Test3(unittest.TestCase):
        test_something = nothing
        tearDown = _raise                # in tearDown

    class Test4(unittest.TestCase):
        def test_something(self):
            self.addCleanup(_raise)      # in a cleanup function

    for klass in (Test1, Test2, Test3, Test4):
        result = unittest.TestResult()
        klass('test_something').run(result)
        # Each variant records exactly one error for its single test.
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
    """A TestCase instance must be freed by refcounting alone (no GC cycle)."""
    case = unittest.TestCase()
    wr = weakref.ref(case)
    # With the cyclic GC disabled, only refcounting can reclaim the object;
    # the weakref going dead proves there was no reference cycle.
    with support.disable_gc():
        del case
        self.assertFalse(wr())
| gpl-2.0 |
Passtechsoft/TPEAlpGen | blender/release/scripts/startup/bl_ui/space_view3d.py | 1 | 118093 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Header, Menu, Panel
from bl_ui.properties_grease_pencil_common import GreasePencilDataPanel
from bl_ui.properties_paint_common import UnifiedPaintPanel
from bpy.app.translations import contexts as i18n_contexts
class VIEW3D_HT_header(Header):
    """Header bar of the 3D View editor.

    Builds the mode-dependent controls: proportional editing, snapping,
    auto-merge, OpenGL render shortcuts, pose copy/paste and grease-pencil
    tools.
    """
    bl_space_type = 'VIEW_3D'

    def draw(self, context):
        layout = self.layout

        view = context.space_data
        # mode_string = context.mode
        obj = context.active_object
        toolsettings = context.tool_settings

        row = layout.row(align=True)
        row.template_header()

        sub = row.row(align=True)

        VIEW3D_MT_editor_menus.draw_collapsible(context, layout)

        # Contains buttons like Mode, Pivot, Manipulator, Layer, Mesh Select Mode...
        row = layout
        layout.template_header_3D()

        if obj:
            mode = obj.mode
            # Particle edit
            if mode == 'PARTICLE_EDIT':
                row.prop(toolsettings.particle_edit, "select_mode", text="", expand=True)

            # Occlude geometry
            if ((view.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'} and (mode == 'PARTICLE_EDIT' or (mode == 'EDIT' and obj.type == 'MESH'))) or
                    (mode == 'WEIGHT_PAINT')):
                row.prop(view, "use_occlude_geometry", text="")

            # Proportional editing
            if context.gpencil_data and context.gpencil_data.use_stroke_edit_mode:
                row = layout.row(align=True)
                row.prop(toolsettings, "proportional_edit", icon_only=True)
                if toolsettings.proportional_edit != 'DISABLED':
                    row.prop(toolsettings, "proportional_edit_falloff", icon_only=True)
            elif mode in {'EDIT', 'PARTICLE_EDIT'}:
                row = layout.row(align=True)
                row.prop(toolsettings, "proportional_edit", icon_only=True)
                if toolsettings.proportional_edit != 'DISABLED':
                    row.prop(toolsettings, "proportional_edit_falloff", icon_only=True)
            elif mode == 'OBJECT':
                row = layout.row(align=True)
                row.prop(toolsettings, "use_proportional_edit_objects", icon_only=True)
                if toolsettings.use_proportional_edit_objects:
                    row.prop(toolsettings, "proportional_edit_falloff", icon_only=True)
        else:
            # Proportional editing (no active object: grease-pencil only)
            if context.gpencil_data and context.gpencil_data.use_stroke_edit_mode:
                row = layout.row(align=True)
                row.prop(toolsettings, "proportional_edit", icon_only=True)
                if toolsettings.proportional_edit != 'DISABLED':
                    row.prop(toolsettings, "proportional_edit_falloff", icon_only=True)

        # Snap (hidden in the paint/sculpt modes; short-circuit keeps
        # `mode` from being read when there is no active object)
        if not obj or mode not in {'SCULPT', 'VERTEX_PAINT', 'WEIGHT_PAINT', 'TEXTURE_PAINT'}:
            snap_element = toolsettings.snap_element
            row = layout.row(align=True)
            row.prop(toolsettings, "use_snap", text="")
            row.prop(toolsettings, "snap_element", icon_only=True)
            if snap_element == 'INCREMENT':
                row.prop(toolsettings, "use_snap_grid_absolute", text="")
            else:
                row.prop(toolsettings, "snap_target", text="")
                if obj:
                    if mode in {'OBJECT', 'POSE'} and snap_element != 'VOLUME':
                        row.prop(toolsettings, "use_snap_align_rotation", text="")
                    elif mode == 'EDIT':
                        row.prop(toolsettings, "use_snap_self", text="")

            if snap_element == 'VOLUME':
                row.prop(toolsettings, "use_snap_peel_object", text="")
            elif snap_element == 'FACE':
                row.prop(toolsettings, "use_snap_project", text="")

        # AutoMerge editing
        if obj:
            if (mode == 'EDIT' and obj.type == 'MESH'):
                layout.prop(toolsettings, "use_mesh_automerge", text="", icon='AUTOMERGE_ON')

        # OpenGL render
        row = layout.row(align=True)
        row.operator("render.opengl", text="", icon='RENDER_STILL')
        row.operator("render.opengl", text="", icon='RENDER_ANIMATION').animation = True

        # Pose
        if obj and mode == 'POSE':
            row = layout.row(align=True)
            row.operator("pose.copy", text="", icon='COPYDOWN')
            row.operator("pose.paste", text="", icon='PASTEDOWN').flipped = False
            row.operator("pose.paste", text="", icon='PASTEFLIPDOWN').flipped = True

        # GPencil
        if context.gpencil_data and context.gpencil_data.use_stroke_edit_mode:
            row = layout.row(align=True)
            row.operator("gpencil.copy", text="", icon='COPYDOWN')
            row.operator("gpencil.paste", text="", icon='PASTEDOWN')

            layout.prop(context.gpencil_data, "use_onion_skinning", text="Onion Skins", icon='PARTICLE_PATH')  # XXX: icon
class VIEW3D_MT_editor_menus(Menu):
    """Top-level menu row of the 3D View (View / Select / Add / mode menu).

    Which sub-menus appear depends on the active object, its mode and
    whether grease-pencil stroke edit mode is active.
    """
    bl_space_type = 'VIEW3D_MT_editor_menus'
    bl_label = ""

    def draw(self, context):
        self.draw_menus(self.layout, context)

    @staticmethod
    def draw_menus(layout, context):
        obj = context.active_object
        mode_string = context.mode
        edit_object = context.edit_object
        gp_edit = context.gpencil_data and context.gpencil_data.use_stroke_edit_mode

        layout.menu("VIEW3D_MT_view")

        # Select Menu
        if gp_edit:
            layout.menu("VIEW3D_MT_select_gpencil")
        elif mode_string in {'PAINT_WEIGHT', 'PAINT_VERTEX', 'PAINT_TEXTURE'}:
            # Paint modes only expose selection when a paint mask is active.
            mesh = obj.data
            if mesh.use_paint_mask:
                layout.menu("VIEW3D_MT_select_paint_mask")
            elif mesh.use_paint_mask_vertex and mode_string == 'PAINT_WEIGHT':
                layout.menu("VIEW3D_MT_select_paint_mask_vertex")
        elif mode_string != 'SCULPT':
            # Menu class name is derived from the mode string.
            layout.menu("VIEW3D_MT_select_%s" % mode_string.lower())

        # Add menu, per edit type (none while editing grease pencil).
        if gp_edit:
            pass
        elif mode_string == 'OBJECT':
            layout.menu("INFO_MT_add", text="Add")
        elif mode_string == 'EDIT_MESH':
            layout.menu("INFO_MT_mesh_add", text="Add")
        elif mode_string == 'EDIT_CURVE':
            layout.menu("INFO_MT_curve_add", text="Add")
        elif mode_string == 'EDIT_SURFACE':
            layout.menu("INFO_MT_surface_add", text="Add")
        elif mode_string == 'EDIT_METABALL':
            layout.menu("INFO_MT_metaball_add", text="Add")
        elif mode_string == 'EDIT_ARMATURE':
            layout.menu("INFO_MT_edit_armature_add", text="Add")

        # Mode-specific menu.
        if gp_edit:
            layout.menu("VIEW3D_MT_edit_gpencil")
        elif edit_object:
            layout.menu("VIEW3D_MT_edit_%s" % edit_object.type.lower())
        elif obj:
            if mode_string != 'PAINT_TEXTURE':
                layout.menu("VIEW3D_MT_%s" % mode_string.lower())
            if mode_string in {'SCULPT', 'PAINT_VERTEX', 'PAINT_WEIGHT', 'PAINT_TEXTURE'}:
                layout.menu("VIEW3D_MT_brush")
            if mode_string == 'SCULPT':
                layout.menu("VIEW3D_MT_hide_mask")
        else:
            layout.menu("VIEW3D_MT_object")
# ********** Menu **********
# ********** Utilities **********
class ShowHideMenu:
    """Mix-in that builds a standard Show/Hide menu for one operator namespace.

    Subclasses set ``_operator_name`` (e.g. "mesh") and the menu calls that
    namespace's ``reveal`` and ``hide`` operators.
    """
    bl_label = "Show/Hide"
    _operator_name = ""

    def draw(self, context):
        layout = self.layout
        prefix = self._operator_name
        layout.operator("%s.reveal" % prefix, text="Show Hidden")
        # Same hide operator twice, toggled by its "unselected" property.
        hide_id = "%s.hide" % prefix
        layout.operator(hide_id, text="Hide Selected").unselected = False
        layout.operator(hide_id, text="Hide Unselected").unselected = True
# Standard transforms which apply to all cases
# NOTE: this doesn't seem to be able to be used directly
class VIEW3D_MT_transform_base(Menu):
    """Shared Transform menu entries; mode-specific subclasses extend draw()."""
    bl_label = "Transform"

    # TODO: get rid of the custom text strings?
    def draw(self, context):
        layout = self.layout

        layout.operator("transform.translate", text="Grab/Move")
        # TODO: sub-menu for grab per axis
        layout.operator("transform.rotate", text="Rotate")
        # TODO: sub-menu for rot per axis
        layout.operator("transform.resize", text="Scale")
        # TODO: sub-menu for scale per axis
        layout.separator()

        layout.operator("transform.tosphere", text="To Sphere")
        layout.operator("transform.shear", text="Shear")
        layout.operator("transform.bend", text="Bend")
        layout.operator("transform.push_pull", text="Push/Pull")

        # Vertex-level tools make no sense in object mode.
        if context.mode != 'OBJECT':
            layout.operator("transform.vertex_warp", text="Warp")
            layout.operator("transform.vertex_random", text="Randomize")
# Generic transform menu - geometry types
class VIEW3D_MT_transform(VIEW3D_MT_transform_base):
    """Generic transform menu for geometry edit modes."""
    def draw(self, context):
        # base menu
        VIEW3D_MT_transform_base.draw(self, context)

        # generic...
        layout = self.layout
        layout.operator("transform.shrink_fatten", text="Shrink Fatten")

        layout.separator()

        layout.operator("transform.translate", text="Move Texture Space").texture_space = True
        layout.operator("transform.resize", text="Scale Texture Space").texture_space = True
# Object-specific extensions to Transform menu
class VIEW3D_MT_transform_object(VIEW3D_MT_transform_base):
    """Object-mode transform menu: texture space, align, origin and misc tools."""
    def draw(self, context):
        layout = self.layout

        # base menu
        VIEW3D_MT_transform_base.draw(self, context)

        # object-specific option follow...
        layout.separator()

        layout.operator("transform.translate", text="Move Texture Space").texture_space = True
        layout.operator("transform.resize", text="Scale Texture Space").texture_space = True

        layout.separator()

        layout.operator_context = 'EXEC_REGION_WIN'
        layout.operator("transform.transform", text="Align to Transform Orientation").mode = 'ALIGN'  # XXX see alignmenu() in edit.c of b2.4x to get this working

        layout.separator()

        layout.operator_context = 'EXEC_AREA'

        layout.operator("object.origin_set", text="Geometry to Origin").type = 'GEOMETRY_ORIGIN'
        layout.operator("object.origin_set", text="Origin to Geometry").type = 'ORIGIN_GEOMETRY'
        layout.operator("object.origin_set", text="Origin to 3D Cursor").type = 'ORIGIN_CURSOR'
        layout.operator("object.origin_set", text="Origin to Center of Mass").type = 'ORIGIN_CENTER_OF_MASS'
        layout.separator()

        layout.operator("object.randomize_transform")
        layout.operator("object.align")

        layout.separator()

        layout.operator("object.anim_transforms_to_deltas")
# Armature EditMode extensions to Transform menu
class VIEW3D_MT_transform_armature(VIEW3D_MT_transform_base):
    """Armature edit/pose-mode transform menu with bone-scaling extras."""
    def draw(self, context):
        layout = self.layout

        # base menu
        VIEW3D_MT_transform_base.draw(self, context)

        # armature specific extensions follow...
        layout.separator()

        obj = context.object
        if obj.type == 'ARMATURE' and obj.mode in {'EDIT', 'POSE'}:
            # Extra scale tools depend on the armature's bone display type.
            if obj.data.draw_type == 'BBONE':
                layout.operator("transform.transform", text="Scale BBone").mode = 'BONE_SIZE'
            elif obj.data.draw_type == 'ENVELOPE':
                layout.operator("transform.transform", text="Scale Envelope Distance").mode = 'BONE_SIZE'
                layout.operator("transform.transform", text="Scale Radius").mode = 'BONE_ENVELOPE'

        if context.edit_object and context.edit_object.type == 'ARMATURE':
            layout.operator("armature.align")
class VIEW3D_MT_mirror(Menu):
    """Mirror menu: interactive mirror plus per-axis global/local presets."""
    bl_label = "Mirror"

    def draw(self, context):
        layout = self.layout

        layout.operator("transform.mirror", text="Interactive Mirror")

        layout.separator()

        layout.operator_context = 'INVOKE_REGION_WIN'

        # Global-axis presets.
        props = layout.operator("transform.mirror", text="X Global")
        props.constraint_axis = (True, False, False)
        props.constraint_orientation = 'GLOBAL'
        props = layout.operator("transform.mirror", text="Y Global")
        props.constraint_axis = (False, True, False)
        props.constraint_orientation = 'GLOBAL'
        props = layout.operator("transform.mirror", text="Z Global")
        props.constraint_axis = (False, False, True)
        props.constraint_orientation = 'GLOBAL'

        # Local-axis presets only make sense while editing an object.
        if context.edit_object:
            layout.separator()

            props = layout.operator("transform.mirror", text="X Local")
            props.constraint_axis = (True, False, False)
            props.constraint_orientation = 'LOCAL'
            props = layout.operator("transform.mirror", text="Y Local")
            props.constraint_axis = (False, True, False)
            props.constraint_orientation = 'LOCAL'
            props = layout.operator("transform.mirror", text="Z Local")
            props.constraint_axis = (False, False, True)
            props.constraint_orientation = 'LOCAL'

            layout.operator("object.vertex_group_mirror")
class VIEW3D_MT_snap(Menu):
    """Snap menu: move the selection or the 3D cursor to snap targets."""
    bl_label = "Snap"

    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.snap_selected_to_grid", text="Selection to Grid")
        props = layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor")
        props.use_offset = False
        props = layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor (Offset)")
        props.use_offset = True
        layout.separator()
        # Cursor-snapping entries all share the same call shape.
        for op, label in (("view3d.snap_cursor_to_selected", "Cursor to Selected"),
                          ("view3d.snap_cursor_to_center", "Cursor to Center"),
                          ("view3d.snap_cursor_to_grid", "Cursor to Grid"),
                          ("view3d.snap_cursor_to_active", "Cursor to Active")):
            layout.operator(op, text=label)
class VIEW3D_MT_uv_map(Menu):
    """UV Mapping menu: unwrap and projection operators."""
    bl_label = "UV Mapping"
    def draw(self, context):
        layout = self.layout
        layout.operator("uv.unwrap")
        # these want a popup/dialog, so switch the invocation context
        layout.operator_context = 'INVOKE_DEFAULT'
        layout.operator("uv.smart_project")
        layout.operator("uv.lightmap_pack")
        layout.operator("uv.follow_active_quads")
        layout.separator()
        # primitive projections execute directly in the region
        layout.operator_context = 'EXEC_REGION_WIN'
        layout.operator("uv.cube_project")
        layout.operator("uv.cylinder_project")
        layout.operator("uv.sphere_project")
        layout.separator()
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator("uv.project_from_view").scale_to_bounds = False
        layout.operator("uv.project_from_view", text="Project from View (Bounds)").scale_to_bounds = True
        layout.separator()
        layout.operator("uv.reset")
# ********** View menus **********
class VIEW3D_MT_view(Menu):
    """Main View menu of the 3D viewport header."""
    bl_label = "View"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.properties", icon='MENU_PANEL')
        layout.operator("view3d.toolshelf", icon='MENU_PANEL')
        layout.separator()
        # standard numpad views, one entry per direction
        layout.operator("view3d.viewnumpad", text="Camera").type = 'CAMERA'
        layout.operator("view3d.viewnumpad", text="Top").type = 'TOP'
        layout.operator("view3d.viewnumpad", text="Bottom").type = 'BOTTOM'
        layout.operator("view3d.viewnumpad", text="Front").type = 'FRONT'
        layout.operator("view3d.viewnumpad", text="Back").type = 'BACK'
        layout.operator("view3d.viewnumpad", text="Right").type = 'RIGHT'
        layout.operator("view3d.viewnumpad", text="Left").type = 'LEFT'
        layout.menu("VIEW3D_MT_view_cameras", text="Cameras")
        layout.separator()
        layout.operator("view3d.view_persportho")
        layout.separator()
        layout.menu("VIEW3D_MT_view_navigation")
        layout.menu("VIEW3D_MT_view_align")
        layout.separator()
        # border operators need interactive invocation in the region
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator("view3d.clip_border", text="Clipping Border...")
        layout.operator("view3d.zoom_border", text="Zoom Border...")
        layout.operator("view3d.render_border", text="Render Border...").camera_only = False
        layout.separator()
        # nr = 0 means all layers
        layout.operator("view3d.layers", text="Show All Layers").nr = 0
        layout.separator()
        layout.operator("view3d.localview", text="View Global/Local")
        layout.operator("view3d.view_selected").use_all_regions = False
        layout.operator("view3d.view_all").center = False
        layout.separator()
        layout.operator("screen.animation_play", text="Playback Animation")
        layout.separator()
        layout.operator("screen.area_dupli")
        layout.operator("screen.region_quadview")
        layout.operator("screen.screen_full_area", text="Toggle Maximize Area")
        layout.operator("screen.screen_full_area").use_hide_panels = True
class VIEW3D_MT_view_navigation(Menu):
    """Navigation submenu: orbit, roll, pan, zoom and fly/walk modes."""
    bl_label = "Navigation"
    def draw(self, context):
        from math import pi
        layout = self.layout
        layout.operator_enum("view3d.view_orbit", "type")
        # second positional argument of operator() is the display text
        props = layout.operator("view3d.view_orbit", "Orbit Opposite")
        props.type = 'ORBITRIGHT'
        props.angle = pi
        layout.separator()
        # 15 degree roll steps (pi/12)
        layout.operator("view3d.view_roll", text="Roll Left").angle = pi / -12.0
        layout.operator("view3d.view_roll", text="Roll Right").angle = pi / 12.0
        layout.separator()
        layout.operator_enum("view3d.view_pan", "type")
        layout.separator()
        layout.operator("view3d.zoom", text="Zoom In").delta = 1
        layout.operator("view3d.zoom", text="Zoom Out").delta = -1
        layout.operator("view3d.zoom_camera_1_to_1", text="Zoom Camera 1:1")
        layout.separator()
        layout.operator("view3d.fly")
        layout.operator("view3d.walk")
class VIEW3D_MT_view_align(Menu):
    """Align View submenu: center/align the view or the active camera."""
    bl_label = "Align View"
    def draw(self, context):
        layout = self.layout
        layout.menu("VIEW3D_MT_view_align_selected")
        layout.separator()
        layout.operator("view3d.view_all", text="Center Cursor and View All").center = True
        layout.operator("view3d.camera_to_view", text="Align Active Camera to View")
        layout.operator("view3d.camera_to_view_selected", text="Align Active Camera to Selected")
        layout.operator("view3d.view_selected")
        layout.operator("view3d.view_center_cursor")
        layout.separator()
        layout.operator("view3d.view_lock_to_active")
        layout.operator("view3d.view_lock_clear")
class VIEW3D_MT_view_align_selected(Menu):
    """Align-to-active submenu: one viewnumpad entry per direction with align_active set."""
    bl_label = "Align View to Active"

    def draw(self, context):
        layout = self.layout
        # All entries differ only in label and view direction.
        for label, view_type in (("Top", 'TOP'),
                                 ("Bottom", 'BOTTOM'),
                                 ("Front", 'FRONT'),
                                 ("Back", 'BACK'),
                                 ("Right", 'RIGHT'),
                                 ("Left", 'LEFT')):
            props = layout.operator("view3d.viewnumpad", text=label)
            props.align_active = True
            props.type = view_type
class VIEW3D_MT_view_cameras(Menu):
    """Cameras submenu: set active camera and jump to the camera view."""
    bl_label = "Cameras"

    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.object_as_camera")
        props = layout.operator("view3d.viewnumpad", text="Active Camera")
        props.type = 'CAMERA'
# ********** Select menus, suffix from context.mode **********
class VIEW3D_MT_select_object(Menu):
    """Select menu for Object mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("object.select_all").action = 'TOGGLE'
        layout.operator("object.select_all", text="Inverse").action = 'INVERT'
        layout.operator("object.select_random", text="Random")
        layout.operator("object.select_mirror", text="Mirror")
        layout.operator("object.select_by_layer", text="Select All by Layer")
        layout.operator_menu_enum("object.select_by_type", "type", text="Select All by Type...")
        layout.operator("object.select_camera", text="Select Camera")
        layout.separator()
        layout.operator_menu_enum("object.select_grouped", "type", text="Grouped")
        layout.operator_menu_enum("object.select_linked", "type", text="Linked")
        layout.operator("object.select_pattern", text="Select Pattern...")
class VIEW3D_MT_select_pose(Menu):
    """Select menu for Pose mode."""
    bl_label = "Select"

    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("pose.select_all").action = 'TOGGLE'
        layout.operator("pose.select_all", text="Inverse").action = 'INVERT'
        layout.operator("pose.select_mirror", text="Flip Active")
        layout.operator("pose.select_constraint_target", text="Constraint Target")
        layout.operator("pose.select_linked", text="Linked")
        layout.separator()
        # Hierarchy walk that replaces the current selection...
        for label, direction in (("Parent", 'PARENT'), ("Child", 'CHILD')):
            props = layout.operator("pose.select_hierarchy", text=label)
            props.extend = False
            props.direction = direction
        layout.separator()
        # ...and the variant that extends it.
        for label, direction in (("Extend Parent", 'PARENT'), ("Extend Child", 'CHILD')):
            props = layout.operator("pose.select_hierarchy", text=label)
            props.extend = True
            props.direction = direction
        layout.separator()
        layout.operator_menu_enum("pose.select_grouped", "type", text="Grouped")
        layout.operator("object.select_pattern", text="Select Pattern...")
class VIEW3D_MT_select_particle(Menu):
    """Select menu for particle edit mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.separator()
        layout.operator("particle.select_all").action = 'TOGGLE'
        layout.operator("particle.select_linked")
        layout.operator("particle.select_all", text="Inverse").action = 'INVERT'
        layout.separator()
        layout.operator("particle.select_more")
        layout.operator("particle.select_less")
        layout.separator()
        layout.operator("particle.select_random")
        layout.separator()
        layout.operator("particle.select_roots", text="Roots")
        layout.operator("particle.select_tips", text="Tips")
class VIEW3D_MT_edit_mesh_select_similar(Menu):
    """Select Similar submenu for mesh edit mode."""
    bl_label = "Select Similar"
    def draw(self, context):
        layout = self.layout
        layout.operator_enum("mesh.select_similar", "type")
        layout.separator()
        layout.operator("mesh.select_similar_region", text="Face Regions")
class VIEW3D_MT_select_edit_mesh(Menu):
    """Select menu for mesh Edit mode.

    Entries are grouped by the kind of criterion they select on
    (primitive, numeric, geometric, topology, ...).
    """
    bl_label = "Select"

    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        # primitive
        layout.operator("mesh.select_all").action = 'TOGGLE'
        layout.operator("mesh.select_all", text="Inverse").action = 'INVERT'
        layout.separator()
        # numeric
        layout.operator("mesh.select_random", text="Random")
        layout.operator("mesh.select_nth")
        layout.separator()
        # geometric
        layout.operator("mesh.edges_select_sharp", text="Sharp Edges")
        layout.operator("mesh.faces_select_linked_flat", text="Linked Flat Faces")
        layout.separator()
        # topology
        layout.operator("mesh.select_loose", text="Loose Geometry")
        # mesh_select_mode is (vert, edge, face); hide the non-manifold entry
        # in face select mode. (Was `... is False`, a non-idiomatic singleton
        # comparison — `not` is equivalent for the boolean this returns.)
        if not context.scene.tool_settings.mesh_select_mode[2]:
            layout.operator("mesh.select_non_manifold", text="Non Manifold")
        layout.operator("mesh.select_interior_faces", text="Interior Faces")
        layout.operator("mesh.select_face_by_sides")
        layout.separator()
        # other ...
        layout.menu("VIEW3D_MT_edit_mesh_select_similar")
        layout.operator("mesh.select_ungrouped", text="Ungrouped Verts")
        layout.separator()
        layout.operator("mesh.select_more", text="More")
        layout.operator("mesh.select_less", text="Less")
        layout.separator()
        layout.operator("mesh.select_mirror", text="Mirror")
        layout.operator("mesh.select_axis", text="Side of Active")
        layout.operator("mesh.select_linked", text="Linked")
        layout.operator("mesh.shortest_path_select", text="Shortest Path")
        layout.operator("mesh.loop_multi_select", text="Edge Loops").ring = False
        layout.operator("mesh.loop_multi_select", text="Edge Rings").ring = True
        layout.separator()
        layout.operator("mesh.loop_to_region")
        layout.operator("mesh.region_to_loop")
class VIEW3D_MT_select_edit_curve(Menu):
    """Select menu for curve Edit mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("curve.select_all").action = 'TOGGLE'
        layout.operator("curve.select_all", text="Inverse").action = 'INVERT'
        layout.operator("curve.select_random")
        layout.operator("curve.select_nth")
        layout.operator("curve.select_linked", text="Select Linked")
        layout.operator("curve.select_similar", text="Select Similar")
        layout.separator()
        layout.operator("curve.de_select_first")
        layout.operator("curve.de_select_last")
        layout.operator("curve.select_next")
        layout.operator("curve.select_previous")
        layout.separator()
        layout.operator("curve.select_more")
        layout.operator("curve.select_less")
class VIEW3D_MT_select_edit_surface(Menu):
    """Select menu for surface Edit mode (like the curve menu, plus row select)."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("curve.select_all").action = 'TOGGLE'
        layout.operator("curve.select_all", text="Inverse").action = 'INVERT'
        layout.operator("curve.select_random")
        layout.operator("curve.select_nth")
        layout.operator("curve.select_linked", text="Select Linked")
        layout.operator("curve.select_similar", text="Select Similar")
        layout.separator()
        layout.operator("curve.select_row")
        layout.separator()
        layout.operator("curve.select_more")
        layout.operator("curve.select_less")
class VIEW3D_MT_select_edit_text(Menu):
    """Edit menu for 3D-text edit mode (clipboard and select-all operations)."""
    # intentional name mis-match
    # select menu for 3d-text doesn't make sense
    bl_label = "Edit"
    def draw(self, context):
        layout = self.layout
        layout.operator("font.text_copy", text="Copy")
        layout.operator("font.text_cut", text="Cut")
        layout.operator("font.text_paste", text="Paste")
        layout.separator()
        layout.operator("font.text_paste_from_file")
        layout.operator("font.text_paste_from_clipboard")
        layout.separator()
        layout.operator("font.select_all")
class VIEW3D_MT_select_edit_metaball(Menu):
    """Select menu for metaball Edit mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("mball.select_all").action = 'TOGGLE'
        layout.operator("mball.select_all", text="Inverse").action = 'INVERT'
        layout.separator()
        layout.operator("mball.select_random_metaelems")
        layout.separator()
        layout.operator_menu_enum("mball.select_similar", "type", text="Similar")
class VIEW3D_MT_select_edit_lattice(Menu):
    """Select menu for lattice Edit mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("lattice.select_mirror")
        layout.operator("lattice.select_random")
        layout.operator("lattice.select_all").action = 'TOGGLE'
        layout.operator("lattice.select_all", text="Inverse").action = 'INVERT'
        layout.separator()
        layout.operator("lattice.select_ungrouped", text="Ungrouped Verts")
class VIEW3D_MT_select_edit_armature(Menu):
    """Select menu for armature Edit mode."""
    bl_label = "Select"

    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("armature.select_all").action = 'TOGGLE'
        layout.operator("armature.select_all", text="Inverse").action = 'INVERT'
        layout.operator("armature.select_mirror", text="Mirror").extend = False
        layout.separator()
        layout.operator("armature.select_more", text="More")
        layout.operator("armature.select_less", text="Less")
        layout.separator()
        # Hierarchy walk that replaces the current selection...
        for label, direction in (("Parent", 'PARENT'), ("Child", 'CHILD')):
            props = layout.operator("armature.select_hierarchy", text=label)
            props.extend = False
            props.direction = direction
        layout.separator()
        # ...and the variant that extends it.
        for label, direction in (("Extend Parent", 'PARENT'), ("Extend Child", 'CHILD')):
            props = layout.operator("armature.select_hierarchy", text=label)
            props.extend = True
            props.direction = direction
        layout.operator_menu_enum("armature.select_similar", "type", text="Similar")
        layout.operator("object.select_pattern", text="Select Pattern...")
class VIEW3D_MT_select_gpencil(Menu):
    """Select menu for grease-pencil edit mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("gpencil.select_border")
        layout.operator("gpencil.select_circle")
        layout.separator()
        layout.operator("gpencil.select_all", text="(De)select All").action = 'TOGGLE'
        layout.operator("gpencil.select_all", text="Inverse").action = 'INVERT'
        layout.operator("gpencil.select_linked", text="Linked")
        layout.separator()
        layout.operator("gpencil.select_more")
        layout.operator("gpencil.select_less")
class VIEW3D_MT_select_paint_mask(Menu):
    """Select menu for face-mask painting mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("paint.face_select_all").action = 'TOGGLE'
        layout.operator("paint.face_select_all", text="Inverse").action = 'INVERT'
        layout.separator()
        layout.operator("paint.face_select_linked", text="Linked")
class VIEW3D_MT_select_paint_mask_vertex(Menu):
    """Select menu for vertex-mask painting mode."""
    bl_label = "Select"
    def draw(self, context):
        layout = self.layout
        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()
        layout.operator("paint.vert_select_all").action = 'TOGGLE'
        layout.operator("paint.vert_select_all", text="Inverse").action = 'INVERT'
        layout.separator()
        layout.operator("paint.vert_select_ungrouped", text="Ungrouped Verts")
class VIEW3D_MT_angle_control(Menu):
    """Brush texture angle-control menu (rake / random angle toggles)."""
    bl_label = "Angle Control"
    @classmethod
    def poll(cls, context):
        # Only show when the active brush texture slot supports angle control.
        settings = UnifiedPaintPanel.paint_settings(context)
        if not settings:
            return False
        brush = settings.brush
        tex_slot = brush.texture_slot
        return tex_slot.has_texture_angle and tex_slot.has_texture_angle_source
    def draw(self, context):
        layout = self.layout
        settings = UnifiedPaintPanel.paint_settings(context)
        brush = settings.brush
        sculpt = (context.sculpt_object is not None)
        tex_slot = brush.texture_slot
        layout.prop(tex_slot, "use_rake", text="Rake")
        if brush.brush_capabilities.has_random_texture_angle and tex_slot.has_random_texture_angle:
            if sculpt:
                # sculpt brushes additionally gate on the sculpt capability set
                if brush.sculpt_capabilities.has_random_texture_angle:
                    layout.prop(tex_slot, "use_random", text="Random")
            else:
                layout.prop(tex_slot, "use_random", text="Random")
# ********** Add menu **********
# XXX: INFO_MT_ names used to keep backwards compatibility (Addons etc that hook into the menu)
class INFO_MT_mesh_add(Menu):
    """Add > Mesh menu; delegates drawing to the toolbar's add-object panel."""
    bl_idname = "INFO_MT_mesh_add"
    bl_label = "Mesh"
    def draw(self, context):
        from .space_view3d_toolbar import VIEW3D_PT_tools_add_object
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        VIEW3D_PT_tools_add_object.draw_add_mesh(layout)
class INFO_MT_curve_add(Menu):
    """Add > Curve menu; delegates drawing to the toolbar's add-object panel."""
    bl_idname = "INFO_MT_curve_add"
    bl_label = "Curve"
    def draw(self, context):
        from .space_view3d_toolbar import VIEW3D_PT_tools_add_object
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        VIEW3D_PT_tools_add_object.draw_add_curve(layout)
class INFO_MT_surface_add(Menu):
    """Add > Surface menu; delegates drawing to the toolbar's add-object panel."""
    bl_idname = "INFO_MT_surface_add"
    bl_label = "Surface"
    def draw(self, context):
        from .space_view3d_toolbar import VIEW3D_PT_tools_add_object
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        VIEW3D_PT_tools_add_object.draw_add_surface(layout)
class INFO_MT_metaball_add(Menu):
    """Add > Metaball menu: one entry per metaball primitive type."""
    bl_idname = "INFO_MT_metaball_add"
    bl_label = "Metaball"
    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator_enum("object.metaball_add", "type")
class INFO_MT_edit_curve_add(Menu):
    """Add menu while editing a curve/surface: reuses the matching add menu."""
    bl_idname = "INFO_MT_edit_curve_add"
    bl_label = "Add"
    def draw(self, context):
        is_surf = context.active_object.type == 'SURFACE'
        layout = self.layout
        layout.operator_context = 'EXEC_REGION_WIN'
        # delegate to the surface/curve add menu matching the edited object
        if is_surf:
            INFO_MT_surface_add.draw(self, context)
        else:
            INFO_MT_curve_add.draw(self, context)
class INFO_MT_edit_armature_add(Menu):
    """Add menu while editing an armature: add a single bone."""
    bl_idname = "INFO_MT_edit_armature_add"
    bl_label = "Armature"
    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'EXEC_REGION_WIN'
        layout.operator("armature.bone_primitive_add", text="Single Bone", icon='BONE_DATA')
class INFO_MT_armature_add(Menu):
    """Add > Armature menu (object mode): add a new armature object."""
    bl_idname = "INFO_MT_armature_add"
    bl_label = "Armature"
    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'EXEC_REGION_WIN'
        layout.operator("object.armature_add", text="Single Bone", icon='BONE_DATA')
class INFO_MT_lamp_add(Menu):
    """Add > Lamp menu: one entry per lamp type."""
    bl_idname = "INFO_MT_lamp_add"
    bl_label = "Lamp"
    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator_enum("object.lamp_add", "type")
class INFO_MT_add(Menu):
    """Top-level Add menu of the 3D viewport."""
    bl_label = "Add"
    def draw(self, context):
        layout = self.layout
        # note, don't use 'EXEC_SCREEN' or operators wont get the 'v3d' context.
        # Note: was EXEC_AREA, but this context does not have the 'rv3d', which prevents
        #       "align_view" to work on first call (see [#32719]).
        layout.operator_context = 'EXEC_REGION_WIN'
        #layout.operator_menu_enum("object.mesh_add", "type", text="Mesh", icon='OUTLINER_OB_MESH')
        layout.menu("INFO_MT_mesh_add", icon='OUTLINER_OB_MESH')
        #layout.operator_menu_enum("object.curve_add", "type", text="Curve", icon='OUTLINER_OB_CURVE')
        layout.menu("INFO_MT_curve_add", icon='OUTLINER_OB_CURVE')
        #layout.operator_menu_enum("object.surface_add", "type", text="Surface", icon='OUTLINER_OB_SURFACE')
        layout.menu("INFO_MT_surface_add", icon='OUTLINER_OB_SURFACE')
        layout.menu("INFO_MT_metaball_add", text="Metaball", icon='OUTLINER_OB_META')
        layout.operator("object.text_add", text="Text", icon='OUTLINER_OB_FONT')
        layout.separator()
        layout.menu("INFO_MT_armature_add", icon='OUTLINER_OB_ARMATURE')
        layout.operator("object.add", text="Lattice", icon='OUTLINER_OB_LATTICE').type = 'LATTICE'
        layout.operator_menu_enum("object.empty_add", "type", text="Empty", icon='OUTLINER_OB_EMPTY')
        layout.separator()
        layout.operator("object.speaker_add", text="Speaker", icon='OUTLINER_OB_SPEAKER')
        layout.separator()
        layout.operator("object.camera_add", text="Camera", icon='OUTLINER_OB_CAMERA')
        layout.menu("INFO_MT_lamp_add", icon='OUTLINER_OB_LAMP')
        layout.separator()
        layout.operator_menu_enum("object.effector_add", "type", text="Force Field", icon='OUTLINER_OB_EMPTY')
        layout.separator()
        # with many groups an enum submenu gets unwieldy; use a search popup instead
        if len(bpy.data.groups) > 10:
            layout.operator_context = 'INVOKE_REGION_WIN'
            layout.operator("object.group_instance_add", text="Group Instance...", icon='OUTLINER_OB_EMPTY')
        else:
            layout.operator_menu_enum("object.group_instance_add", "group", text="Group Instance", icon='OUTLINER_OB_EMPTY')
# ********** Object menu **********
class VIEW3D_MT_object(Menu):
    """Top-level Object menu shown in Object mode."""
    bl_context = "objectmode"
    bl_label = "Object"
    def draw(self, context):
        layout = self.layout
        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")
        layout.separator()
        layout.menu("VIEW3D_MT_transform_object")
        layout.menu("VIEW3D_MT_mirror")
        layout.menu("VIEW3D_MT_object_clear")
        layout.menu("VIEW3D_MT_object_apply")
        layout.menu("VIEW3D_MT_snap")
        layout.separator()
        layout.menu("VIEW3D_MT_object_animation")
        layout.separator()
        layout.operator("object.duplicate_move")
        layout.operator("object.duplicate_move_linked")
        layout.operator("object.delete", text="Delete...").use_global = False
        layout.operator("object.proxy_make", text="Make Proxy...")
        layout.menu("VIEW3D_MT_make_links", text="Make Links...")
        layout.operator("object.make_dupli_face")
        layout.operator_menu_enum("object.make_local", "type", text="Make Local...")
        layout.menu("VIEW3D_MT_make_single_user")
        layout.separator()
        layout.menu("VIEW3D_MT_object_parent")
        layout.menu("VIEW3D_MT_object_track")
        layout.menu("VIEW3D_MT_object_group")
        layout.menu("VIEW3D_MT_object_constraints")
        layout.separator()
        layout.menu("VIEW3D_MT_object_quick_effects")
        layout.separator()
        layout.menu("VIEW3D_MT_object_game")
        layout.separator()
        layout.operator("object.join")
        layout.operator("object.data_transfer")
        layout.operator("object.datalayout_transfer")
        layout.separator()
        layout.operator("object.move_to_layer", text="Move to Layer...")
        layout.menu("VIEW3D_MT_object_showhide")
        layout.operator_menu_enum("object.convert", "target")
class VIEW3D_MT_object_animation(Menu):
    """Animation submenu: keyframe management and action baking."""
    bl_label = "Animation"
    def draw(self, context):
        layout = self.layout
        layout.operator("anim.keyframe_insert_menu", text="Insert Keyframe...")
        layout.operator("anim.keyframe_delete_v3d", text="Delete Keyframes...")
        layout.operator("anim.keyframe_clear_v3d", text="Clear Keyframes...")
        layout.operator("anim.keying_set_active_set", text="Change Keying Set...")
        layout.separator()
        layout.operator("nla.bake", text="Bake Action...")
class VIEW3D_MT_object_clear(Menu):
    """Clear submenu: reset individual object transform channels."""
    bl_label = "Clear"

    def draw(self, context):
        layout = self.layout
        # One entry per transform channel, in the usual loc/rot/scale order.
        for op, label in (("object.location_clear", "Location"),
                          ("object.rotation_clear", "Rotation"),
                          ("object.scale_clear", "Scale"),
                          ("object.origin_clear", "Origin")):
            layout.operator(op, text=label)
class VIEW3D_MT_object_specials(Menu):
    """Specials (W) menu in Object mode.

    Builds a per-object-type set of modal-mouse property tweaks
    (camera lens, curve extrude, empty size, lamp settings, ...).
    """
    bl_label = "Specials"
    @classmethod
    def poll(cls, context):
        # add more special types
        return context.object
    def draw(self, context):
        layout = self.layout
        scene = context.scene
        obj = context.object
        if obj.type == 'CAMERA':
            layout.operator_context = 'INVOKE_REGION_WIN'
            if obj.data.type == 'PERSP':
                props = layout.operator("wm.context_modal_mouse", text="Camera Lens Angle")
                props.data_path_iter = "selected_editable_objects"
                props.data_path_item = "data.lens"
                props.input_scale = 0.1
                # header label depends on the camera's lens unit
                if obj.data.lens_unit == 'MILLIMETERS':
                    props.header_text = "Camera Lens Angle: %.1fmm"
                else:
                    props.header_text = "Camera Lens Angle: %.1f\u00B0"
            else:
                props = layout.operator("wm.context_modal_mouse", text="Camera Lens Scale")
                props.data_path_iter = "selected_editable_objects"
                props.data_path_item = "data.ortho_scale"
                props.input_scale = 0.01
                props.header_text = "Camera Lens Scale: %.3f"
            if not obj.data.dof_object:
                view = context.space_data
                # when looking through this camera, the depth can be picked directly
                if view and view.camera == obj and view.region_3d.view_perspective == 'CAMERA':
                    props = layout.operator("ui.eyedropper_depth", text="DOF Distance (Pick)")
                else:
                    props = layout.operator("wm.context_modal_mouse", text="DOF Distance")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.dof_distance"
                    props.input_scale = 0.02
                    props.header_text = "DOF Distance: %.3f"
                del view
        if obj.type in {'CURVE', 'FONT'}:
            layout.operator_context = 'INVOKE_REGION_WIN'
            props = layout.operator("wm.context_modal_mouse", text="Extrude Size")
            props.data_path_iter = "selected_editable_objects"
            props.data_path_item = "data.extrude"
            props.input_scale = 0.01
            props.header_text = "Extrude Size: %.3f"
            props = layout.operator("wm.context_modal_mouse", text="Width Size")
            props.data_path_iter = "selected_editable_objects"
            props.data_path_item = "data.offset"
            props.input_scale = 0.01
            props.header_text = "Width Size: %.3f"
        if obj.type == 'EMPTY':
            layout.operator_context = 'INVOKE_REGION_WIN'
            props = layout.operator("wm.context_modal_mouse", text="Empty Draw Size")
            props.data_path_iter = "selected_editable_objects"
            props.data_path_item = "empty_draw_size"
            props.input_scale = 0.01
            props.header_text = "Empty Draw Size: %.3f"
        if obj.type == 'LAMP':
            lamp = obj.data
            layout.operator_context = 'INVOKE_REGION_WIN'
            if scene.render.use_shading_nodes:
                # node-based shading (e.g. Cycles): tweak the Emission strength, if present
                try:
                    value = lamp.node_tree.nodes["Emission"].inputs["Strength"].default_value
                except AttributeError:
                    value = None
                if value is not None:
                    props = layout.operator("wm.context_modal_mouse", text="Strength")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.node_tree.nodes[\"Emission\"].inputs[\"Strength\"].default_value"
                    props.header_text = "Lamp Strength: %.3f"
                    props.input_scale = 0.1
                del value
                if lamp.type == 'AREA':
                    props = layout.operator("wm.context_modal_mouse", text="Size X")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.size"
                    props.header_text = "Lamp Size X: %.3f"
                    if lamp.shape == 'RECTANGLE':
                        props = layout.operator("wm.context_modal_mouse", text="Size Y")
                        props.data_path_iter = "selected_editable_objects"
                        props.data_path_item = "data.size_y"
                        props.header_text = "Lamp Size Y: %.3f"
                elif lamp.type in {'SPOT', 'POINT', 'SUN'}:
                    props = layout.operator("wm.context_modal_mouse", text="Size")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.shadow_soft_size"
                    props.header_text = "Lamp Size: %.3f"
            else:
                # legacy (Blender Internal) shading: tweak energy directly
                props = layout.operator("wm.context_modal_mouse", text="Energy")
                props.data_path_iter = "selected_editable_objects"
                props.data_path_item = "data.energy"
                props.header_text = "Lamp Energy: %.3f"
                if lamp.type in {'SPOT', 'AREA', 'POINT'}:
                    props = layout.operator("wm.context_modal_mouse", text="Falloff Distance")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.distance"
                    props.input_scale = 0.1
                    props.header_text = "Lamp Falloff Distance: %.1f"
            if lamp.type == 'SPOT':
                layout.separator()
                props = layout.operator("wm.context_modal_mouse", text="Spot Size")
                props.data_path_iter = "selected_editable_objects"
                props.data_path_item = "data.spot_size"
                props.input_scale = 0.01
                props.header_text = "Spot Size: %.2f"
                props = layout.operator("wm.context_modal_mouse", text="Spot Blend")
                props.data_path_iter = "selected_editable_objects"
                props.data_path_item = "data.spot_blend"
                props.input_scale = -0.01
                props.header_text = "Spot Blend: %.2f"
                # buffer-shadow clipping only exists without node shading
                if not scene.render.use_shading_nodes:
                    props = layout.operator("wm.context_modal_mouse", text="Clip Start")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.shadow_buffer_clip_start"
                    props.input_scale = 0.05
                    props.header_text = "Clip Start: %.2f"
                    props = layout.operator("wm.context_modal_mouse", text="Clip End")
                    props.data_path_iter = "selected_editable_objects"
                    props.data_path_item = "data.shadow_buffer_clip_end"
                    props.input_scale = 0.05
                    props.header_text = "Clip End: %.2f"
        layout.separator()
        props = layout.operator("object.isolate_type_render")
        props = layout.operator("object.hide_render_clear_all")
class VIEW3D_MT_object_apply(Menu):
    """Apply submenu: bake object transforms into the object data."""
    bl_label = "Apply"

    def draw(self, context):
        layout = self.layout
        # (label, (location, rotation, scale)) flags for each transform_apply entry.
        for label, (loc, rot, scale) in (("Location", (True, False, False)),
                                         ("Rotation", (False, True, False)),
                                         ("Scale", (False, False, True)),
                                         ("Rotation & Scale", (False, True, True))):
            props = layout.operator("object.transform_apply", text=label, text_ctxt=i18n_contexts.default)
            props.location, props.rotation, props.scale = loc, rot, scale
        layout.separator()
        layout.operator("object.visual_transform_apply", text="Visual Transform", text_ctxt=i18n_contexts.default)
        layout.operator("object.duplicates_make_real")
class VIEW3D_MT_object_parent(Menu):
    """Parent submenu: set and clear parent relations."""
    bl_label = "Parent"
    def draw(self, context):
        layout = self.layout
        layout.operator_enum("object.parent_set", "type")
        layout.separator()
        layout.operator_enum("object.parent_clear", "type")
class VIEW3D_MT_object_track(Menu):
    """Track submenu: set and clear tracking constraints."""
    bl_label = "Track"
    def draw(self, context):
        layout = self.layout
        layout.operator_enum("object.track_set", "type")
        layout.separator()
        layout.operator_enum("object.track_clear", "type")
class VIEW3D_MT_object_group(Menu):
    """Group submenu: create groups and manage group membership."""
    bl_label = "Group"
    def draw(self, context):
        layout = self.layout
        layout.operator("group.create")
        # layout.operator_menu_enum("group.objects_remove", "group")  # BUGGY
        layout.operator("group.objects_remove")
        layout.operator("group.objects_remove_all")
        layout.separator()
        layout.operator("group.objects_add_active")
        layout.operator("group.objects_remove_active")
class VIEW3D_MT_object_constraints(Menu):
    """Constraints submenu: add, copy and clear object constraints."""
    bl_label = "Constraints"
    def draw(self, context):
        layout = self.layout
        layout.operator("object.constraint_add_with_targets")
        layout.operator("object.constraints_copy")
        layout.operator("object.constraints_clear")
class VIEW3D_MT_object_quick_effects(Menu):
    """Quick Effects submenu: one-click effect setups."""
    bl_label = "Quick Effects"

    def draw(self, context):
        layout = self.layout
        # All four entries are plain operator calls with default labels.
        for op in ("object.quick_fur",
                   "object.quick_explode",
                   "object.quick_smoke",
                   "object.quick_fluid"):
            layout.operator(op)
class VIEW3D_MT_object_showhide(Menu):
    """Show/Hide submenu for objects."""
    bl_label = "Show/Hide"
    def draw(self, context):
        layout = self.layout
        layout.operator("object.hide_view_clear", text="Show Hidden")
        layout.operator("object.hide_view_set", text="Hide Selected").unselected = False
        layout.operator("object.hide_view_set", text="Hide Unselected").unselected = True
class VIEW3D_MT_make_single_user(Menu):
    """Make Single User menu: each entry toggles a different combination of
    the object/obdata/material/texture/animation flags on the same operator."""
    bl_label = "Make Single User"

    def draw(self, context):
        layout = self.layout

        props = layout.operator("object.make_single_user", text="Object")
        props.object = True
        props.obdata = props.material = props.texture = props.animation = False

        props = layout.operator("object.make_single_user", text="Object & Data")
        props.object = props.obdata = True
        props.material = props.texture = props.animation = False

        props = layout.operator("object.make_single_user", text="Object & Data & Materials+Tex")
        props.object = props.obdata = props.material = props.texture = True
        props.animation = False

        props = layout.operator("object.make_single_user", text="Materials+Tex")
        props.material = props.texture = True
        props.object = props.obdata = props.animation = False

        props = layout.operator("object.make_single_user", text="Object Animation")
        props.animation = True
        props.object = props.obdata = props.material = props.texture = False
class VIEW3D_MT_make_links(Menu):
    """Make Links menu: link object data/scenes between objects."""
    bl_label = "Make Links"

    def draw(self, context):
        layout = self.layout
        operator_context_default = layout.operator_context

        # With many scenes an enum menu gets unwieldy, so fall back to the
        # interactive scene-picker popup instead of the inline enum.
        if len(bpy.data.scenes) > 10:
            layout.operator_context = 'INVOKE_REGION_WIN'
            layout.operator("object.make_links_scene", text="Objects to Scene...", icon='OUTLINER_OB_EMPTY')
        else:
            layout.operator_context = 'EXEC_REGION_WIN'
            layout.operator_menu_enum("object.make_links_scene", "scene", text="Objects to Scene...")

        # restore the caller's operator context before the remaining entries
        layout.operator_context = operator_context_default

        layout.operator_enum("object.make_links_data", "type")  # inline

        layout.operator("object.join_uvs")  # stupid place to add this!
class VIEW3D_MT_object_game(Menu):
    """Object > Game menu: copy logic bricks, physics and game properties."""
    bl_label = "Game"

    def draw(self, context):
        layout = self.layout

        layout.operator("object.logic_bricks_copy", text="Copy Logic Bricks")
        layout.operator("object.game_physics_copy", text="Copy Physics Properties")

        layout.separator()

        layout.operator("object.game_property_copy", text="Replace Properties").operation = 'REPLACE'
        layout.operator("object.game_property_copy", text="Merge Properties").operation = 'MERGE'
        layout.operator_menu_enum("object.game_property_copy", "property", text="Copy Properties...")

        layout.separator()

        layout.operator("object.game_property_clear")
# ********** Brush menu **********
class VIEW3D_MT_brush(Menu):
    """Brush menu for paint/sculpt modes: unified paint settings, enabled
    paint modes and the per-mode tool options of the active brush."""
    bl_label = "Brush"

    def draw(self, context):
        layout = self.layout

        settings = UnifiedPaintPanel.paint_settings(context)
        brush = settings.brush

        ups = context.tool_settings.unified_paint_settings
        layout.prop(ups, "use_unified_size", text="Unified Size")
        layout.prop(ups, "use_unified_strength", text="Unified Strength")
        if context.image_paint_object or context.vertex_paint_object:
            layout.prop(ups, "use_unified_color", text="Unified Color")
        layout.separator()

        # brush paint modes
        layout.menu("VIEW3D_MT_brush_paint_modes")

        # Skip the brush-specific sections when there is no active brush.
        # NOTE: this guard previously sat *after* the prop_menu_enum(brush, ...)
        # calls below, which raised AttributeError on a None brush.
        if not brush:
            return

        # brush tool
        if context.sculpt_object:
            layout.operator("brush.reset")
            layout.prop_menu_enum(brush, "sculpt_tool")
        elif context.image_paint_object:
            layout.prop_menu_enum(brush, "image_tool")
        elif context.vertex_paint_object or context.weight_paint_object:
            layout.prop_menu_enum(brush, "vertex_tool")

        # TODO: still missing a lot of brush options here

        # sculpt options
        if context.sculpt_object:
            sculpt_tool = brush.sculpt_tool

            layout.separator()
            layout.operator_menu_enum("brush.curve_preset", "shape", text="Curve Preset")
            layout.separator()

            if sculpt_tool != 'GRAB':
                layout.prop_menu_enum(brush, "stroke_method")

                if sculpt_tool in {'DRAW', 'PINCH', 'INFLATE', 'LAYER', 'CLAY'}:
                    layout.prop_menu_enum(brush, "direction")

                if sculpt_tool == 'LAYER':
                    layout.prop(brush, "use_persistent")
                    layout.operator("sculpt.set_persistent_base")
class VIEW3D_MT_brush_paint_modes(Menu):
    """Checkbox menu toggling which paint modes the active brush is shown in."""
    bl_label = "Enabled Modes"

    def draw(self, context):
        layout = self.layout

        settings = UnifiedPaintPanel.paint_settings(context)
        # NOTE(review): assumes an active brush exists; settings.brush may be
        # None in edge cases — confirm against callers.
        brush = settings.brush

        layout.prop(brush, "use_paint_sculpt", text="Sculpt")
        layout.prop(brush, "use_paint_vertex", text="Vertex Paint")
        layout.prop(brush, "use_paint_weight", text="Weight Paint")
        layout.prop(brush, "use_paint_image", text="Texture Paint")
# ********** Vertex paint menu **********
class VIEW3D_MT_paint_vertex(Menu):
    """Paint menu for vertex-paint mode."""
    bl_label = "Paint"

    def draw(self, context):
        layout = self.layout

        layout.operator("ed.undo")
        layout.operator("ed.redo")

        layout.separator()

        layout.operator("paint.vertex_color_set")
        layout.operator("paint.vertex_color_smooth")
        layout.operator("paint.vertex_color_dirt")
class VIEW3D_MT_hook(Menu):
    """Hooks menu: add hook modifiers and manage the existing ones."""
    bl_label = "Hooks"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'EXEC_AREA'
        layout.operator("object.hook_add_newob")
        layout.operator("object.hook_add_selob").use_bone = False
        layout.operator("object.hook_add_selob", text="Hook to Selected Object Bone").use_bone = True

        # Only show the management entries when the object really has a hook
        # modifier.  The original test built a list comprehension, which is
        # truthy whenever the object has *any* modifier at all; any() performs
        # the intended existence check.
        if any(mod.type == 'HOOK' for mod in context.active_object.modifiers):
            layout.separator()
            layout.operator_menu_enum("object.hook_assign", "modifier")
            layout.operator_menu_enum("object.hook_remove", "modifier")
            layout.separator()
            layout.operator_menu_enum("object.hook_select", "modifier")
            layout.operator_menu_enum("object.hook_reset", "modifier")
            layout.operator_menu_enum("object.hook_recenter", "modifier")
class VIEW3D_MT_vertex_group(Menu):
    """Vertex Groups menu: assign/remove vertices and manage the active group."""
    bl_label = "Vertex Groups"

    def draw(self, context):
        layout = self.layout

        layout.operator_context = 'EXEC_AREA'
        layout.operator("object.vertex_group_assign_new")

        ob = context.active_object
        # Assign/remove entries only make sense in edit mode, or in weight
        # paint with vertex-selection masking enabled on a mesh.
        if ob.mode == 'EDIT' or (ob.mode == 'WEIGHT_PAINT' and ob.type == 'MESH' and ob.data.use_paint_mask_vertex):
            if ob.vertex_groups.active:
                layout.separator()

                layout.operator("object.vertex_group_assign", text="Assign to Active Group")
                layout.operator("object.vertex_group_remove_from", text="Remove from Active Group").use_all_groups = False
                layout.operator("object.vertex_group_remove_from", text="Remove from All").use_all_groups = True

        layout.separator()

        if ob.vertex_groups.active:
            layout.operator_menu_enum("object.vertex_group_set_active", "group", text="Set Active Group")
            layout.operator("object.vertex_group_remove", text="Remove Active Group").all = False
            layout.operator("object.vertex_group_remove", text="Remove All Groups").all = True
# ********** Weight paint menu **********
class VIEW3D_MT_paint_weight(Menu):
    """Weights menu for weight-paint mode."""
    bl_label = "Weights"

    def draw(self, context):
        layout = self.layout

        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")

        layout.separator()

        layout.operator("paint.weight_from_bones", text="Assign Automatic From Bones").type = 'AUTOMATIC'
        layout.operator("paint.weight_from_bones", text="Assign From Bone Envelopes").type = 'ENVELOPES'

        layout.separator()

        layout.operator("object.vertex_group_normalize_all", text="Normalize All")
        layout.operator("object.vertex_group_normalize", text="Normalize")
        layout.operator("object.vertex_group_mirror", text="Mirror")
        layout.operator("object.vertex_group_invert", text="Invert")
        layout.operator("object.vertex_group_clean", text="Clean")
        layout.operator("object.vertex_group_quantize", text="Quantize")
        layout.operator("object.vertex_group_levels", text="Levels")
        layout.operator("object.vertex_group_smooth", text="Smooth")

        # transfer runs in reverse: from the active object onto the selection
        props = layout.operator("object.data_transfer", text="Transfer Weights")
        props.use_reverse_transfer = True
        props.data_type = 'VGROUP_WEIGHTS'

        layout.operator("object.vertex_group_limit_total", text="Limit Total")
        layout.operator("object.vertex_group_fix", text="Fix Deforms")

        layout.separator()

        layout.operator("paint.weight_set")
# ********** Sculpt menu **********
class VIEW3D_MT_sculpt(Menu):
    """Sculpt menu: symmetry, axis locks and sculpt display/performance options."""
    bl_label = "Sculpt"

    def draw(self, context):
        layout = self.layout

        toolsettings = context.tool_settings
        sculpt = toolsettings.sculpt

        layout.operator("ed.undo")
        layout.operator("ed.redo")

        layout.separator()

        layout.prop(sculpt, "use_symmetry_x")
        layout.prop(sculpt, "use_symmetry_y")
        layout.prop(sculpt, "use_symmetry_z")

        layout.separator()

        layout.prop(sculpt, "lock_x")
        layout.prop(sculpt, "lock_y")
        layout.prop(sculpt, "lock_z")

        layout.separator()

        layout.prop(sculpt, "use_threaded", text="Threaded Sculpt")
        layout.prop(sculpt, "show_low_resolution")
        layout.prop(sculpt, "show_brush")
        layout.prop(sculpt, "use_deform_only")
        layout.prop(sculpt, "show_diffuse_color")
class VIEW3D_MT_hide_mask(Menu):
    """Hide/Mask menu for sculpt mode: show/hide by area and flood-fill masks."""
    bl_label = "Hide/Mask"

    def draw(self, context):
        layout = self.layout

        props = layout.operator("paint.hide_show", text="Show All")
        props.action = 'SHOW'
        props.area = 'ALL'

        props = layout.operator("paint.hide_show", text="Hide Bounding Box")
        props.action = 'HIDE'
        props.area = 'INSIDE'

        props = layout.operator("paint.hide_show", text="Show Bounding Box")
        props.action = 'SHOW'
        props.area = 'INSIDE'

        props = layout.operator("paint.hide_show", text="Hide Masked")
        props.area = 'MASKED'
        props.action = 'HIDE'

        layout.separator()

        props = layout.operator("paint.mask_flood_fill", text="Invert Mask")
        props.mode = 'INVERT'

        props = layout.operator("paint.mask_flood_fill", text="Fill Mask")
        props.mode = 'VALUE'
        props.value = 1

        props = layout.operator("paint.mask_flood_fill", text="Clear Mask")
        props.mode = 'VALUE'
        props.value = 0

        # no properties to set on these two, so the operator return value is
        # not kept (the originals assigned it to an unused 'props' local)
        layout.operator("view3d.select_border", text="Box Mask")
        layout.operator("paint.mask_lasso_gesture", text="Lasso Mask")
# ********** Particle menu **********
class VIEW3D_MT_particle(Menu):
    """Particle menu for particle edit mode."""
    bl_label = "Particle"

    def draw(self, context):
        layout = self.layout

        particle_edit = context.tool_settings.particle_edit

        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")

        layout.separator()

        layout.operator("particle.mirror")

        layout.separator()

        layout.operator("particle.remove_doubles")
        layout.operator("particle.delete")

        # point-level operators only apply in point selection mode
        if particle_edit.select_mode == 'POINT':
            layout.operator("particle.subdivide")

        layout.operator("particle.rekey")
        layout.operator("particle.weight_set")

        layout.separator()

        layout.menu("VIEW3D_MT_particle_showhide")
class VIEW3D_MT_particle_specials(Menu):
    """Specials (W-key) menu for particle edit mode."""
    bl_label = "Specials"

    def draw(self, context):
        layout = self.layout

        particle_edit = context.tool_settings.particle_edit

        layout.operator("particle.rekey")
        layout.operator("particle.delete")
        layout.operator("particle.remove_doubles")

        if particle_edit.select_mode == 'POINT':
            layout.operator("particle.subdivide")

        layout.operator("particle.weight_set")
        layout.separator()

        layout.operator("particle.mirror")

        # selection helpers; roots/tips only exist per-point
        if particle_edit.select_mode == 'POINT':
            layout.separator()
            layout.operator("particle.select_roots")
            layout.operator("particle.select_tips")

            layout.separator()

            layout.operator("particle.select_random")

            layout.separator()

            layout.operator("particle.select_more")
            layout.operator("particle.select_less")

            layout.separator()

            layout.operator("particle.select_all").action = 'TOGGLE'
            layout.operator("particle.select_linked")
            layout.operator("particle.select_all", text="Inverse").action = 'INVERT'
class VIEW3D_MT_particle_showhide(ShowHideMenu, Menu):
    """Show/Hide menu for particle edit (entries provided by ShowHideMenu)."""
    _operator_name = "particle"
# ********** Pose Menu **********
class VIEW3D_MT_pose(Menu):
    """Top-level Pose menu for pose mode."""
    bl_label = "Pose"

    def draw(self, context):
        layout = self.layout

        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")

        layout.separator()

        layout.menu("VIEW3D_MT_transform_armature")
        layout.menu("VIEW3D_MT_pose_transform")
        layout.menu("VIEW3D_MT_pose_apply")
        layout.menu("VIEW3D_MT_snap")

        layout.separator()

        layout.menu("VIEW3D_MT_object_animation")

        layout.separator()

        layout.menu("VIEW3D_MT_pose_slide")
        layout.menu("VIEW3D_MT_pose_propagate")

        layout.separator()

        layout.operator("pose.copy")
        layout.operator("pose.paste").flipped = False
        layout.operator("pose.paste", text="Paste X-Flipped Pose").flipped = True

        layout.separator()

        layout.menu("VIEW3D_MT_pose_library")
        layout.menu("VIEW3D_MT_pose_motion")
        layout.menu("VIEW3D_MT_pose_group")

        layout.separator()

        layout.menu("VIEW3D_MT_object_parent")
        layout.menu("VIEW3D_MT_pose_ik")
        layout.menu("VIEW3D_MT_pose_constraints")

        layout.separator()

        # naming helpers run immediately in the area context
        layout.operator_context = 'EXEC_AREA'
        layout.operator("pose.autoside_names", text="AutoName Left/Right").axis = 'XAXIS'
        layout.operator("pose.autoside_names", text="AutoName Front/Back").axis = 'YAXIS'
        layout.operator("pose.autoside_names", text="AutoName Top/Bottom").axis = 'ZAXIS'

        layout.operator("pose.flip_names")

        layout.operator("pose.quaternions_flip")

        layout.separator()

        # layer dialogs need to be invoked interactively
        layout.operator_context = 'INVOKE_AREA'
        layout.operator("armature.armature_layers", text="Change Armature Layers...")
        layout.operator("pose.bone_layers", text="Change Bone Layers...")

        layout.separator()

        layout.menu("VIEW3D_MT_pose_showhide")
        layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")
class VIEW3D_MT_pose_transform(Menu):
    """Pose > Clear Transform menu."""
    bl_label = "Clear Transform"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.transforms_clear", text="All")

        layout.separator()

        layout.operator("pose.loc_clear", text="Location")
        layout.operator("pose.rot_clear", text="Rotation")
        layout.operator("pose.scale_clear", text="Scale")

        layout.separator()

        layout.operator("pose.user_transforms_clear", text="Reset unkeyed")
class VIEW3D_MT_pose_slide(Menu):
    """Pose > In-Betweens menu: push/relax/breakdown pose sliding."""
    bl_label = "In-Betweens"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.push")
        layout.operator("pose.relax")
        layout.operator("pose.breakdown")
class VIEW3D_MT_pose_propagate(Menu):
    """Pose > Propagate menu: copy the current pose to other keyframes."""
    bl_label = "Propagate"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.propagate").mode = 'WHILE_HELD'

        layout.separator()

        layout.operator("pose.propagate", text="To Next Keyframe").mode = 'NEXT_KEY'
        layout.operator("pose.propagate", text="To Last Keyframe (Make Cyclic)").mode = 'LAST_KEY'

        layout.separator()

        layout.operator("pose.propagate", text="On Selected Keyframes").mode = 'SELECTED_KEYS'

        layout.separator()

        layout.operator("pose.propagate", text="On Selected Markers").mode = 'SELECTED_MARKERS'
class VIEW3D_MT_pose_library(Menu):
    """Pose > Pose Library menu: browse, add, rename and remove poses."""
    bl_label = "Pose Library"

    def draw(self, context):
        layout = self.layout

        layout.operator("poselib.browse_interactive", text="Browse Poses...")

        layout.separator()

        layout.operator("poselib.pose_add", text="Add Pose...")
        layout.operator("poselib.pose_rename", text="Rename Pose...")
        layout.operator("poselib.pose_remove", text="Remove Pose...")
class VIEW3D_MT_pose_motion(Menu):
    """Pose > Motion Paths menu: calculate or clear bone motion paths."""
    bl_label = "Motion Paths"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.paths_calculate", text="Calculate")
        layout.operator("pose.paths_clear", text="Clear")
class VIEW3D_MT_pose_group(Menu):
    """Pose > Bone Groups menu: assign bones to new or existing groups."""
    bl_label = "Bone Groups"

    def draw(self, context):
        layout = self.layout

        pose = context.active_object.pose

        layout.operator_context = 'EXEC_AREA'
        # type == 0 means "create a new group and assign to it"
        layout.operator("pose.group_assign", text="Assign to New Group").type = 0
        if pose.bone_groups:
            # operator 'type' is 1-based, active_index is 0-based
            active_group = pose.bone_groups.active_index + 1
            layout.operator("pose.group_assign", text="Assign to Group").type = active_group

            layout.separator()

            # layout.operator_context = 'INVOKE_AREA'
            layout.operator("pose.group_unassign")
            layout.operator("pose.group_remove")
class VIEW3D_MT_pose_ik(Menu):
    """Pose > Inverse Kinematics menu: add or clear IK constraints."""
    bl_label = "Inverse Kinematics"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.ik_add")
        layout.operator("pose.ik_clear")
class VIEW3D_MT_pose_constraints(Menu):
    """Pose > Constraints menu: add, copy and clear bone constraints."""
    bl_label = "Constraints"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.constraint_add_with_targets", text="Add (With Targets)...")
        layout.operator("pose.constraints_copy")
        layout.operator("pose.constraints_clear")
class VIEW3D_MT_pose_showhide(ShowHideMenu, Menu):
    """Show/Hide menu for pose mode (entries provided by ShowHideMenu)."""
    _operator_name = "pose"
class VIEW3D_MT_pose_apply(Menu):
    """Pose > Apply menu: bake the pose into the rest pose or visual transforms."""
    bl_label = "Apply"

    def draw(self, context):
        layout = self.layout

        layout.operator("pose.armature_apply")
        layout.operator("pose.visual_transform_apply")
class VIEW3D_MT_pose_specials(Menu):
    """Specials (W-key) menu for pose mode."""
    bl_label = "Specials"

    def draw(self, context):
        layout = self.layout

        layout.operator("paint.weight_from_bones", text="Assign Automatic from Bones").type = 'AUTOMATIC'
        layout.operator("paint.weight_from_bones", text="Assign from Bone Envelopes").type = 'ENVELOPES'

        layout.separator()

        layout.operator("pose.select_constraint_target")
        layout.operator("pose.flip_names")
        layout.operator("pose.paths_calculate")
        layout.operator("pose.paths_clear")
        layout.operator("pose.user_transforms_clear")
        layout.operator("pose.user_transforms_clear", text="Clear User Transforms (All)").only_selected = False
        layout.operator("pose.relax")

        layout.separator()

        layout.operator_menu_enum("pose.autoside_names", "axis")
class BoneOptions:
    """Mixin shared by the bone-option toggle/enable/disable menus.

    Subclasses set ``type`` ('TOGGLE'/'ENABLE'/'DISABLE'), which is forwarded
    to the wm.context_collection_boolean_set operator for each bone option.
    """
    def draw(self, context):
        layout = self.layout

        options = [
            "show_wire",
            "use_deform",
            "use_envelope_multiply",
            "use_inherit_rotation",
            "use_inherit_scale",
        ]

        if context.mode == 'EDIT_ARMATURE':
            bone_props = bpy.types.EditBone.bl_rna.properties
            data_path_iter = "selected_bones"
            opt_suffix = ""
            options.append("lock")  # locking only exists on edit bones
        else:  # pose-mode
            bone_props = bpy.types.Bone.bl_rna.properties
            data_path_iter = "selected_pose_bones"
            opt_suffix = "bone."  # pose bones reach the option via .bone

        for opt in options:
            props = layout.operator("wm.context_collection_boolean_set", text=bone_props[opt].name,
                                    text_ctxt=i18n_contexts.default)
            props.data_path_iter = data_path_iter
            props.data_path_item = opt_suffix + opt
            props.type = self.type
class VIEW3D_MT_bone_options_toggle(Menu, BoneOptions):
    """Toggle bone options on the selected bones (see BoneOptions)."""
    bl_label = "Toggle Bone Options"
    type = 'TOGGLE'
class VIEW3D_MT_bone_options_enable(Menu, BoneOptions):
    """Enable bone options on the selected bones (see BoneOptions)."""
    bl_label = "Enable Bone Options"
    type = 'ENABLE'
class VIEW3D_MT_bone_options_disable(Menu, BoneOptions):
    """Disable bone options on the selected bones (see BoneOptions)."""
    bl_label = "Disable Bone Options"
    type = 'DISABLE'
# ********** Edit Menus, suffix from ob.type **********
class VIEW3D_MT_edit_mesh(Menu):
    """Top-level Mesh menu for mesh edit mode."""
    bl_label = "Mesh"

    def draw(self, context):
        layout = self.layout

        toolsettings = context.tool_settings

        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")

        layout.separator()

        layout.menu("VIEW3D_MT_transform")
        layout.menu("VIEW3D_MT_mirror")
        layout.menu("VIEW3D_MT_snap")

        layout.separator()

        layout.menu("VIEW3D_MT_uv_map", text="UV Unwrap...")

        layout.separator()

        layout.operator("mesh.duplicate_move")
        layout.menu("VIEW3D_MT_edit_mesh_extrude")
        layout.menu("VIEW3D_MT_edit_mesh_delete")

        layout.separator()

        layout.menu("VIEW3D_MT_edit_mesh_vertices")
        layout.menu("VIEW3D_MT_edit_mesh_edges")
        layout.menu("VIEW3D_MT_edit_mesh_faces")
        layout.menu("VIEW3D_MT_edit_mesh_normals")
        layout.menu("VIEW3D_MT_edit_mesh_clean")

        layout.separator()

        layout.operator("mesh.symmetrize")
        layout.operator("mesh.symmetry_snap")
        layout.operator("mesh.bisect")
        layout.operator_menu_enum("mesh.sort_elements", "type", text="Sort Elements...")

        layout.separator()

        layout.prop(toolsettings, "use_mesh_automerge")
        layout.prop_menu_enum(toolsettings, "proportional_edit")
        layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")

        layout.separator()

        layout.menu("VIEW3D_MT_edit_mesh_showhide")
class VIEW3D_MT_edit_mesh_specials(Menu):
    """Specials (W-key) menu for mesh edit mode."""
    bl_label = "Specials"

    def draw(self, context):
        layout = self.layout

        layout.operator_context = 'INVOKE_REGION_WIN'

        # smoothness 0 gives linear subdivision, 1 gives smooth subdivision
        layout.operator("mesh.subdivide", text="Subdivide").smoothness = 0.0
        layout.operator("mesh.subdivide", text="Subdivide Smooth").smoothness = 1.0

        layout.separator()

        layout.operator("mesh.merge", text="Merge...")
        layout.operator("mesh.remove_doubles")

        layout.separator()

        layout.operator("mesh.hide", text="Hide").unselected = False
        layout.operator("mesh.reveal", text="Reveal")
        layout.operator("mesh.select_all", text="Select Inverse").action = 'INVERT'

        layout.separator()

        layout.operator("mesh.flip_normals")
        layout.operator("mesh.vertices_smooth", text="Smooth")
        layout.operator("mesh.vertices_smooth_laplacian", text="Laplacian Smooth")

        layout.separator()

        layout.operator("mesh.inset")
        layout.operator("mesh.bevel", text="Bevel")
        layout.operator("mesh.bridge_edge_loops")

        layout.separator()

        layout.operator("mesh.faces_shade_smooth")
        layout.operator("mesh.faces_shade_flat")

        layout.separator()

        layout.operator("mesh.blend_from_shape")
        layout.operator("mesh.shape_propagate_to_all")
        layout.operator("mesh.shortest_path_select")
        layout.operator("mesh.sort_elements")
        layout.operator("mesh.symmetrize")
        layout.operator("mesh.symmetry_snap")
class VIEW3D_MT_edit_mesh_select_mode(Menu):
    """Mesh select mode switcher: vertex, edge or face selection."""
    bl_label = "Mesh Select Mode"

    def draw(self, context):
        layout = self.layout

        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator("mesh.select_mode", text="Vertex", icon='VERTEXSEL').type = 'VERT'
        layout.operator("mesh.select_mode", text="Edge", icon='EDGESEL').type = 'EDGE'
        layout.operator("mesh.select_mode", text="Face", icon='FACESEL').type = 'FACE'
class VIEW3D_MT_edit_mesh_extrude(Menu):
    """Extrude menu: entries depend on what element types are selected."""
    bl_label = "Extrude"

    # maps a menu id to a callable that draws its entry into the layout
    _extrude_funcs = {
        'VERT': lambda layout:
            layout.operator("mesh.extrude_vertices_move", text="Vertices Only"),
        'EDGE': lambda layout:
            layout.operator("mesh.extrude_edges_move", text="Edges Only"),
        'FACE': lambda layout:
            layout.operator("mesh.extrude_faces_move", text="Individual Faces"),
        'REGION': lambda layout:
            layout.operator("view3d.edit_mesh_extrude_move_normal", text="Region"),
        'REGION_VERT_NORMAL': lambda layout:
            layout.operator("view3d.edit_mesh_extrude_move_shrink_fatten", text="Region (Vertex Normals)"),
    }

    @staticmethod
    def extrude_options(context):
        """Return the list of _extrude_funcs keys applicable to the current
        selection and mesh select mode (may be empty if nothing is selected)."""
        mesh = context.object.data
        select_mode = context.tool_settings.mesh_select_mode

        menu = []
        if mesh.total_face_sel:
            menu += ['REGION', 'REGION_VERT_NORMAL', 'FACE']
        if mesh.total_edge_sel and (select_mode[0] or select_mode[1]):
            menu += ['EDGE']
        if mesh.total_vert_sel and select_mode[0]:
            menu += ['VERT']

        # empty when there is no selection at all
        return menu

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'

        for menu_id in self.extrude_options(context):
            self._extrude_funcs[menu_id](layout)
class VIEW3D_MT_edit_mesh_vertices(Menu):
    """Mesh > Vertices menu."""
    bl_label = "Vertices"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'

        layout.operator("mesh.merge")
        layout.operator("mesh.rip_move")
        layout.operator("mesh.rip_move_fill")
        layout.operator("mesh.rip_edge_move")
        layout.operator("mesh.split")
        layout.operator_menu_enum("mesh.separate", "type")
        layout.operator("mesh.vert_connect_path", text="Connect Vertex Path")
        layout.operator("mesh.vert_connect", text="Connect Vertices")
        layout.operator("transform.vert_slide", text="Slide")

        layout.separator()

        # use_verts makes mark_sharp operate on edges adjacent to the
        # selected vertices
        layout.operator("mesh.mark_sharp", text="Mark Sharp Edges").use_verts = True
        props = layout.operator("mesh.mark_sharp", text="Clear Sharp Edges")
        props.use_verts = True
        props.clear = True

        layout.separator()

        layout.operator("mesh.bevel").vertex_only = True
        layout.operator("mesh.convex_hull")
        layout.operator("mesh.vertices_smooth")
        layout.operator("mesh.remove_doubles")

        layout.operator("mesh.blend_from_shape")

        layout.operator("object.vertex_group_smooth")
        layout.operator("mesh.shape_propagate_to_all")

        layout.separator()

        layout.menu("VIEW3D_MT_vertex_group")
        layout.menu("VIEW3D_MT_hook")
class VIEW3D_MT_edit_mesh_edges(Menu):
    """Mesh > Edges menu."""
    bl_label = "Edges"

    def draw(self, context):
        layout = self.layout

        # Freestyle entries are shown only in builds compiled with Freestyle
        with_freestyle = bpy.app.build_options.freestyle

        layout.operator_context = 'INVOKE_REGION_WIN'

        layout.operator("mesh.edge_face_add")
        layout.operator("mesh.subdivide")
        layout.operator("mesh.unsubdivide")

        layout.separator()

        layout.operator("transform.edge_crease")
        layout.operator("transform.edge_bevelweight")

        layout.separator()

        layout.operator("mesh.mark_seam").clear = False
        layout.operator("mesh.mark_seam", text="Clear Seam").clear = True

        layout.separator()

        layout.operator("mesh.mark_sharp")
        layout.operator("mesh.mark_sharp", text="Clear Sharp").clear = True

        layout.separator()

        if with_freestyle:
            layout.operator("mesh.mark_freestyle_edge").clear = False
            layout.operator("mesh.mark_freestyle_edge", text="Clear Freestyle Edge").clear = True
            layout.separator()

        layout.operator("mesh.edge_rotate", text="Rotate Edge CW").use_ccw = False
        layout.operator("mesh.edge_rotate", text="Rotate Edge CCW").use_ccw = True

        layout.separator()

        layout.operator("mesh.bevel").vertex_only = False
        layout.operator("mesh.edge_split")
        layout.operator("mesh.bridge_edge_loops")

        layout.separator()

        layout.operator("transform.edge_slide")
        layout.operator("mesh.loop_multi_select", text="Edge Loops").ring = False
        layout.operator("mesh.loop_multi_select", text="Edge Rings").ring = True
        layout.operator("mesh.loop_to_region")
        layout.operator("mesh.region_to_loop")
class VIEW3D_MT_edit_mesh_faces(Menu):
    """Mesh > Faces menu."""
    bl_label = "Faces"
    bl_idname = "VIEW3D_MT_edit_mesh_faces"

    def draw(self, context):
        layout = self.layout

        # Freestyle entries are shown only in builds compiled with Freestyle
        with_freestyle = bpy.app.build_options.freestyle

        layout.operator_context = 'INVOKE_REGION_WIN'

        layout.operator("mesh.flip_normals")
        layout.operator("mesh.edge_face_add")
        layout.operator("mesh.fill")
        layout.operator("mesh.fill_grid")
        layout.operator("mesh.beautify_fill")
        layout.operator("mesh.inset")
        layout.operator("mesh.bevel").vertex_only = False
        layout.operator("mesh.solidify")
        layout.operator("mesh.intersect")
        layout.operator("mesh.intersect_boolean")
        layout.operator("mesh.wireframe")

        layout.separator()

        if with_freestyle:
            layout.operator("mesh.mark_freestyle_face").clear = False
            layout.operator("mesh.mark_freestyle_face", text="Clear Freestyle Face").clear = True
            layout.separator()

        layout.operator("mesh.poke")
        props = layout.operator("mesh.quads_convert_to_tris")
        props.quad_method = props.ngon_method = 'BEAUTY'
        layout.operator("mesh.tris_convert_to_quads")
        layout.operator("mesh.face_split_by_edges")

        layout.separator()

        layout.operator("mesh.faces_shade_smooth")
        layout.operator("mesh.faces_shade_flat")
        layout.operator("mesh.normals_make_consistent", text="Recalculate Normals").inside = False

        layout.separator()

        layout.operator("mesh.edge_rotate", text="Rotate Edge CW").use_ccw = False

        layout.separator()

        layout.operator("mesh.uvs_rotate")
        layout.operator("mesh.uvs_reverse")
        layout.operator("mesh.colors_rotate")
        layout.operator("mesh.colors_reverse")
class VIEW3D_MT_edit_mesh_normals(Menu):
    """Mesh > Normals menu: recalculate or flip face normals."""
    bl_label = "Normals"

    def draw(self, context):
        layout = self.layout

        layout.operator("mesh.normals_make_consistent", text="Recalculate Outside").inside = False
        layout.operator("mesh.normals_make_consistent", text="Recalculate Inside").inside = True

        layout.separator()

        layout.operator("mesh.flip_normals")
class VIEW3D_MT_edit_mesh_clean(Menu):
    """Mesh > Clean up menu: remove/dissolve degenerate geometry."""
    bl_label = "Clean up"

    def draw(self, context):
        layout = self.layout

        layout.operator("mesh.delete_loose")

        layout.separator()

        layout.operator("mesh.dissolve_degenerate")
        layout.operator("mesh.dissolve_limited")
        layout.operator("mesh.face_make_planar")
        layout.operator("mesh.vert_connect_nonplanar")
        layout.operator("mesh.vert_connect_concave")
        layout.operator("mesh.fill_holes")
class VIEW3D_MT_edit_mesh_delete(Menu):
    """Mesh > Delete menu: delete, dissolve and collapse operators."""
    bl_label = "Delete"

    def draw(self, context):
        layout = self.layout

        layout.operator_enum("mesh.delete", "type")

        layout.separator()

        layout.operator("mesh.dissolve_verts")
        layout.operator("mesh.dissolve_edges")
        layout.operator("mesh.dissolve_faces")

        layout.separator()

        layout.operator("mesh.dissolve_limited")

        layout.separator()

        layout.operator("mesh.edge_collapse")
        layout.operator("mesh.delete_edgeloop", text="Edge Loops")
class VIEW3D_MT_edit_mesh_showhide(ShowHideMenu, Menu):
    """Show/Hide menu for mesh edit mode (entries provided by ShowHideMenu)."""
    _operator_name = "mesh"
class VIEW3D_MT_edit_gpencil_delete(Menu):
    """Delete menu for grease-pencil edit mode."""
    bl_label = "Delete"

    def draw(self, context):
        layout = self.layout

        layout.operator_enum("gpencil.delete", "type")

        layout.separator()

        layout.operator("gpencil.dissolve")
# Edit Curve
# draw_curve is used by VIEW3D_MT_edit_curve and VIEW3D_MT_edit_surface
def draw_curve(self, context):
    """Shared draw() implementation assigned to both VIEW3D_MT_edit_curve
    and VIEW3D_MT_edit_surface (curves and surfaces share one menu)."""
    layout = self.layout

    toolsettings = context.tool_settings

    layout.menu("VIEW3D_MT_transform")
    layout.menu("VIEW3D_MT_mirror")
    layout.menu("VIEW3D_MT_snap")

    layout.separator()

    layout.operator("curve.extrude_move")
    layout.operator("curve.spin")
    layout.operator("curve.duplicate_move")
    layout.operator("curve.split")
    layout.operator("curve.separate")
    layout.operator("curve.make_segment")
    layout.operator("curve.cyclic_toggle")
    layout.operator("curve.delete", text="Delete...")

    layout.separator()

    layout.menu("VIEW3D_MT_edit_curve_ctrlpoints")
    layout.menu("VIEW3D_MT_edit_curve_segments")

    layout.separator()

    layout.prop_menu_enum(toolsettings, "proportional_edit")
    layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")

    layout.separator()

    layout.menu("VIEW3D_MT_edit_curve_showhide")
class VIEW3D_MT_edit_curve(Menu):
    """Curve menu for curve edit mode; drawing is shared via draw_curve."""
    bl_label = "Curve"

    draw = draw_curve
class VIEW3D_MT_edit_curve_ctrlpoints(Menu):
    """Curve > Control Points menu."""
    bl_label = "Control Points"

    def draw(self, context):
        layout = self.layout

        edit_object = context.edit_object

        # tilt and handle operations apply to curves only, not surfaces
        if edit_object.type == 'CURVE':
            layout.operator("transform.tilt")
            layout.operator("curve.tilt_clear")

            layout.separator()

            layout.operator_menu_enum("curve.handle_type_set", "type")
            layout.operator("curve.normals_make_consistent")

            layout.separator()

        layout.menu("VIEW3D_MT_hook")
class VIEW3D_MT_edit_curve_segments(Menu):
    """Curve > Segments menu: subdivide and switch direction."""
    bl_label = "Segments"

    def draw(self, context):
        layout = self.layout

        layout.operator("curve.subdivide")
        layout.operator("curve.switch_direction")
class VIEW3D_MT_edit_curve_specials(Menu):
    """Specials (W-key) menu for curve edit mode."""
    bl_label = "Specials"

    def draw(self, context):
        layout = self.layout

        layout.operator("curve.subdivide")
        layout.operator("curve.switch_direction")
        layout.operator("curve.spline_weight_set")
        layout.operator("curve.radius_set")
        layout.operator("curve.smooth")
        layout.operator("curve.smooth_weight")
        layout.operator("curve.smooth_radius")
        layout.operator("curve.smooth_tilt")
class VIEW3D_MT_edit_curve_showhide(ShowHideMenu, Menu):
    """Show/Hide menu for curve edit mode (entries provided by ShowHideMenu)."""
    _operator_name = "curve"
class VIEW3D_MT_edit_surface(Menu):
    """Surface menu for surface edit mode; drawing is shared via draw_curve."""
    bl_label = "Surface"

    draw = draw_curve
class VIEW3D_MT_edit_font(Menu):
    """Text menu for font edit mode: special characters and style toggles."""
    bl_label = "Text"

    def draw(self, context):
        layout = self.layout

        layout.menu("VIEW3D_MT_edit_text_chars")

        layout.separator()

        layout.operator("font.style_toggle", text="Toggle Bold").style = 'BOLD'
        layout.operator("font.style_toggle", text="Toggle Italic").style = 'ITALIC'
        layout.operator("font.style_toggle", text="Toggle Underline").style = 'UNDERLINE'
        layout.operator("font.style_toggle", text="Toggle Small Caps").style = 'SMALL_CAPS'
class VIEW3D_MT_edit_text_chars(Menu):
    """Insert-special-character menu for font edit mode.

    Each entry inserts a fixed Unicode code point via font.text_insert.
    """
    bl_label = "Special Characters"

    def draw(self, context):
        layout = self.layout

        layout.operator("font.text_insert", text="Copyright").text = "\u00A9"
        layout.operator("font.text_insert", text="Registered Trademark").text = "\u00AE"

        layout.separator()

        layout.operator("font.text_insert", text="Degree Sign").text = "\u00B0"
        layout.operator("font.text_insert", text="Multiplication Sign").text = "\u00D7"
        # NOTE(review): U+008A is a C1 control character, not an obvious
        # "circle" glyph — presumably relies on the font's mapping; verify.
        layout.operator("font.text_insert", text="Circle").text = "\u008A"
        layout.operator("font.text_insert", text="Superscript 1").text = "\u00B9"
        layout.operator("font.text_insert", text="Superscript 2").text = "\u00B2"
        layout.operator("font.text_insert", text="Superscript 3").text = "\u00B3"
        layout.operator("font.text_insert", text="Double >>").text = "\u00BB"
        layout.operator("font.text_insert", text="Double <<").text = "\u00AB"
        layout.operator("font.text_insert", text="Promillage").text = "\u2030"

        layout.separator()

        layout.operator("font.text_insert", text="Dutch Florin").text = "\u00A4"
        layout.operator("font.text_insert", text="British Pound").text = "\u00A3"
        layout.operator("font.text_insert", text="Japanese Yen").text = "\u00A5"

        layout.separator()

        layout.operator("font.text_insert", text="German S").text = "\u00DF"
        layout.operator("font.text_insert", text="Spanish Question Mark").text = "\u00BF"
        layout.operator("font.text_insert", text="Spanish Exclamation Mark").text = "\u00A1"
class VIEW3D_MT_edit_meta(Menu):
    """Metaball menu for metaball edit mode."""
    bl_label = "Metaball"

    def draw(self, context):
        layout = self.layout

        toolsettings = context.tool_settings

        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")

        layout.separator()

        layout.menu("VIEW3D_MT_transform")
        layout.menu("VIEW3D_MT_mirror")
        layout.menu("VIEW3D_MT_snap")

        layout.separator()

        layout.operator("mball.delete_metaelems", text="Delete...")
        layout.operator("mball.duplicate_metaelems")

        layout.separator()

        layout.prop_menu_enum(toolsettings, "proportional_edit")
        layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")

        layout.separator()

        layout.menu("VIEW3D_MT_edit_meta_showhide")
class VIEW3D_MT_edit_meta_showhide(Menu):
    """Show/Hide menu for metaball edit mode."""
    bl_label = "Show/Hide"

    def draw(self, context):
        layout = self.layout

        layout.operator("mball.reveal_metaelems", text="Show Hidden")
        layout.operator("mball.hide_metaelems", text="Hide Selected").unselected = False
        layout.operator("mball.hide_metaelems", text="Hide Unselected").unselected = True
class VIEW3D_MT_edit_lattice(Menu):
    """Header "Lattice" menu shown while editing a lattice object."""
    bl_label = "Lattice"
    def draw(self, context):
        """Transform submenus, lattice-specific ops, proportional edit."""
        layout = self.layout
        toolsettings = context.tool_settings
        layout.menu("VIEW3D_MT_transform")
        layout.menu("VIEW3D_MT_mirror")
        layout.menu("VIEW3D_MT_snap")
        # Flip is an enum operator; expands into one entry per axis.
        layout.operator_menu_enum("lattice.flip", "axis")
        layout.separator()
        layout.operator("lattice.make_regular")
        layout.separator()
        layout.prop_menu_enum(toolsettings, "proportional_edit")
        layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")
class VIEW3D_MT_edit_armature(Menu):
    """Header "Armature" menu shown while editing an armature."""
    bl_label = "Armature"
    def draw(self, context):
        """Transforms, bone editing ops, naming helpers and layer/parent
        submenus for armature edit mode."""
        layout = self.layout
        edit_object = context.edit_object
        arm = edit_object.data
        layout.menu("VIEW3D_MT_transform_armature")
        layout.menu("VIEW3D_MT_mirror")
        layout.menu("VIEW3D_MT_snap")
        layout.menu("VIEW3D_MT_edit_armature_roll")
        layout.separator()
        layout.operator("armature.extrude_move")
        # Forked extrude only makes sense with X-axis mirror editing on.
        if arm.use_mirror_x:
            layout.operator("armature.extrude_forked")
        layout.operator("armature.duplicate_move")
        layout.operator("armature.merge")
        layout.operator("armature.fill")
        layout.operator("armature.delete")
        layout.operator("armature.split")
        layout.operator("armature.separate")
        layout.separator()
        layout.operator("armature.subdivide", text="Subdivide")
        layout.operator("armature.switch_direction", text="Switch Direction")
        layout.separator()
        # Run the naming ops immediately (no popup) in this area.
        layout.operator_context = 'EXEC_AREA'
        layout.operator("armature.symmetrize")
        layout.operator("armature.autoside_names", text="AutoName Left/Right").type = 'XAXIS'
        layout.operator("armature.autoside_names", text="AutoName Front/Back").type = 'YAXIS'
        layout.operator("armature.autoside_names", text="AutoName Top/Bottom").type = 'ZAXIS'
        layout.operator("armature.flip_names")
        layout.separator()
        # Back to the default invoke context for the layer popups.
        layout.operator_context = 'INVOKE_DEFAULT'
        layout.operator("armature.armature_layers")
        layout.operator("armature.bone_layers")
        layout.separator()
        layout.menu("VIEW3D_MT_edit_armature_parent")
        layout.separator()
        layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")
class VIEW3D_MT_armature_specials(Menu):
    """W-key "Specials" menu for armature edit mode."""
    bl_label = "Specials"
    def draw(self, context):
        """Quick access to subdivide/switch-direction and auto-naming."""
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator("armature.subdivide", text="Subdivide")
        layout.operator("armature.switch_direction", text="Switch Direction")
        layout.separator()
        # Naming ops execute immediately in this region (no popup).
        layout.operator_context = 'EXEC_REGION_WIN'
        layout.operator("armature.autoside_names", text="AutoName Left/Right").type = 'XAXIS'
        layout.operator("armature.autoside_names", text="AutoName Front/Back").type = 'YAXIS'
        layout.operator("armature.autoside_names", text="AutoName Top/Bottom").type = 'ZAXIS'
        layout.operator("armature.flip_names", text="Flip Names")
        layout.operator("armature.symmetrize")
class VIEW3D_MT_edit_armature_parent(Menu):
    """Parent submenu for armature edit mode."""
    bl_label = "Parent"
    def draw(self, context):
        """List the make-parent and clear-parent operators."""
        lay = self.layout
        for op_id, label in (("armature.parent_set", "Make"),
                             ("armature.parent_clear", "Clear")):
            lay.operator(op_id, text=label)
class VIEW3D_MT_edit_armature_roll(Menu):
    """Bone Roll submenu: recalculate or interactively set bone roll."""
    bl_label = "Bone Roll"
    def draw(self, context):
        layout = self.layout
        # One entry per roll-calculation method.
        layout.operator_menu_enum("armature.calculate_roll", "type")
        layout.separator()
        layout.operator("transform.transform", text="Set Roll").mode = 'BONE_ROLL'
class VIEW3D_MT_edit_armature_delete(Menu):
    """Delete submenu for armature edit mode."""
    bl_label = "Delete"
    def draw(self, context):
        layout = self.layout
        layout.operator("armature.delete", text="Delete Bones")
        layout.separator()
        # Dissolve removes bones while keeping the chain connected.
        layout.operator("armature.dissolve", text="Dissolve")
# ********** GPencil Stroke Edit menu **********
class VIEW3D_MT_edit_gpencil(Menu):
    """Header "GPencil" menu for grease-pencil stroke edit mode."""
    bl_label = "GPencil"
    def draw(self, context):
        """Undo tools, sculpt access, transforms, stroke ops and layers."""
        toolsettings = context.tool_settings
        layout = self.layout
        layout.operator("ed.undo")
        layout.operator("ed.redo")
        layout.operator("ed.undo_history")
        layout.separator()
        # wait_for_input lets the user pick where to start sculpting.
        layout.operator("gpencil.brush_paint", text="Sculpt Strokes").wait_for_input = True
        layout.prop_menu_enum(toolsettings.gpencil_sculpt, "tool", text="Sculpt Brush")
        layout.separator()
        layout.menu("VIEW3D_MT_edit_gpencil_transform")
        layout.operator("transform.mirror", text="Mirror")
        layout.menu("GPENCIL_MT_snap")
        layout.separator()
        layout.menu("VIEW3D_MT_object_animation")   # NOTE: provides keyingset access...
        layout.separator()
        layout.menu("VIEW3D_MT_edit_gpencil_delete")
        layout.operator("gpencil.duplicate_move", text="Duplicate")
        layout.separator()
        layout.operator("gpencil.copy", text="Copy")
        layout.operator("gpencil.paste", text="Paste")
        layout.separator()
        layout.prop_menu_enum(toolsettings, "proportional_edit")
        layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")
        layout.separator()
        layout.operator("gpencil.reveal")
        # Same hide operator; 'unselected' toggles which layers get hidden.
        layout.operator("gpencil.hide", text="Show Active Layer Only").unselected = True
        layout.operator("gpencil.hide", text="Hide Active Layer").unselected = False
        layout.separator()
        layout.operator_menu_enum("gpencil.move_to_layer", "layer", text="Move to Layer")
        layout.operator_menu_enum("gpencil.convert", "type", text="Convert to Geometry...")
class VIEW3D_MT_edit_gpencil_transform(Menu):
    """Transform submenu for grease-pencil stroke edit mode."""
    bl_label = "Transform"
    def draw(self, context):
        layout = self.layout
        layout.operator("transform.translate")
        layout.operator("transform.rotate")
        layout.operator("transform.resize", text="Scale")
        layout.separator()
        layout.operator("transform.bend", text="Bend")
        layout.operator("transform.shear", text="Shear")
        layout.operator("transform.tosphere", text="To Sphere")
        # Generic transform operator specialized for GP stroke thickness.
        layout.operator("transform.transform", text="Shrink Fatten").mode = 'GPENCIL_SHRINKFATTEN'
# ********** Panel **********
class VIEW3D_PT_grease_pencil(GreasePencilDataPanel, Panel):
    """Grease-pencil data panel for the 3D view sidebar; all drawing comes
    from the shared GreasePencilDataPanel mixin."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    # NOTE: this is just a wrapper around the generic GP Panel
class VIEW3D_PT_view3d_properties(Panel):
    """Sidebar "View" panel: lens, view lock, clipping and local camera."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "View"
    @classmethod
    def poll(cls, context):
        view = context.space_data
        return (view)
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        col = layout.column()
        # Lens only applies outside camera view (or with quad views active).
        col.active = bool(view.region_3d.view_perspective != 'CAMERA' or view.region_quadviews)
        col.prop(view, "lens")
        col.label(text="Lock to Object:")
        col.prop(view, "lock_object", text="")
        lock_object = view.lock_object
        if lock_object:
            if lock_object.type == 'ARMATURE':
                # Search edit bones while in edit mode, pose bones otherwise.
                col.prop_search(view, "lock_bone", lock_object.data,
                                "edit_bones" if lock_object.mode == 'EDIT'
                                else "bones",
                                text="")
        else:
            col.prop(view, "lock_cursor", text="Lock to Cursor")
        col = layout.column()
        col.prop(view, "lock_camera")
        col = layout.column(align=True)
        col.label(text="Clip:")
        col.prop(view, "clip_start", text="Start")
        col.prop(view, "clip_end", text="End")
        subcol = col.column(align=True)
        # Local camera selection is meaningless when layers/camera are locked
        # to the scene.
        subcol.enabled = not view.lock_camera_and_layers
        subcol.label(text="Local Camera:")
        subcol.prop(view, "camera", text="")
        col = layout.column(align=True)
        col.prop(view, "use_render_border")
        col.active = view.region_3d.view_perspective != 'CAMERA'
class VIEW3D_PT_view3d_cursor(Panel):
    """Sidebar panel exposing the 3D cursor location."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "3D Cursor"
    @classmethod
    def poll(cls, context):
        view = context.space_data
        return (view is not None)
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        layout.column().prop(view, "cursor_location", text="Location")
class VIEW3D_PT_view3d_name(Panel):
    """Sidebar "Item" panel: rename the active object (and active bone)."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Item"
    @classmethod
    def poll(cls, context):
        return (context.space_data and context.active_object)
    def draw(self, context):
        layout = self.layout
        ob = context.active_object
        row = layout.row()
        row.label(text="", icon='OBJECT_DATA')
        row.prop(ob, "name", text="")
        # Also show the active bone's name in armature edit/pose mode.
        if ob.type == 'ARMATURE' and ob.mode in {'EDIT', 'POSE'}:
            bone = context.active_bone
            if bone:
                row = layout.row()
                row.label(text="", icon='BONE_DATA')
                row.prop(bone, "name", text="")
class VIEW3D_PT_view3d_display(Panel):
    """Sidebar "Display" panel: overlays, grid/axes and quad-view options."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Display"
    bl_options = {'DEFAULT_CLOSED'}
    @classmethod
    def poll(cls, context):
        view = context.space_data
        return (view)
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        scene = context.scene
        col = layout.column()
        col.prop(view, "show_only_render")
        col.prop(view, "show_world")
        col = layout.column()
        # Most overlay toggles are pointless in render-only display mode.
        display_all = not view.show_only_render
        col.active = display_all
        col.prop(view, "show_outline_selected")
        col.prop(view, "show_all_objects_origin")
        col.prop(view, "show_relationship_lines")
        col = layout.column()
        col.active = display_all
        split = col.split(percentage=0.55)
        split.prop(view, "show_floor", text="Grid Floor")
        row = split.row(align=True)
        row.prop(view, "show_axis_x", text="X", toggle=True)
        row.prop(view, "show_axis_y", text="Y", toggle=True)
        row.prop(view, "show_axis_z", text="Z", toggle=True)
        sub = col.column(align=True)
        sub.active = (display_all and view.show_floor)
        sub.prop(view, "grid_lines", text="Lines")
        sub.prop(view, "grid_scale", text="Scale")
        subsub = sub.column(align=True)
        # Subdivisions only apply when no unit system overrides the grid.
        subsub.active = scene.unit_settings.system == 'NONE'
        subsub.prop(view, "grid_subdivisions", text="Subdivisions")
        layout.separator()
        layout.operator("screen.region_quadview", text="Toggle Quad View")
        if view.region_quadviews:
            # Quad-view options are stored on the third (lower-left) region.
            region = view.region_quadviews[2]
            col = layout.column()
            col.prop(region, "lock_rotation")
            row = col.row()
            row.enabled = region.lock_rotation
            row.prop(region, "show_sync_view")
            row = col.row()
            row.enabled = region.lock_rotation and region.show_sync_view
            row.prop(region, "use_box_clip")
class VIEW3D_PT_view3d_stereo(Panel):
    """Sidebar "Stereoscopy" panel; only visible with multiview rendering."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Stereoscopy"
    bl_options = {'DEFAULT_CLOSED'}
    @classmethod
    def poll(cls, context):
        scene = context.scene
        multiview = scene.render.use_multiview
        return context.space_data and multiview
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        # Several toggles only make sense for the plain stereo-3D setup.
        basic_stereo = context.scene.render.views_format == 'STEREO_3D'
        col = layout.column()
        col.row().prop(view, "stereo_3d_camera", expand=True)
        col.label(text="Display:")
        row = col.row()
        row.active = basic_stereo
        row.prop(view, "show_stereo_3d_cameras")
        row = col.row()
        row.active = basic_stereo
        split = row.split()
        split.prop(view, "show_stereo_3d_convergence_plane")
        split = row.split()
        split.prop(view, "stereo_3d_convergence_plane_alpha", text="Alpha")
        split.active = view.show_stereo_3d_convergence_plane
        row = col.row()
        split = row.split()
        split.prop(view, "show_stereo_3d_volume")
        split = row.split()
        split.prop(view, "stereo_3d_volume_alpha", text="Alpha")
class VIEW3D_PT_view3d_shading(Panel):
    """Sidebar "Shading" panel: viewport shading and screen-space effects."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Shading"
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        scene = context.scene
        gs = scene.game_settings
        obj = context.object
        col = layout.column()
        # Material mode selector only applies to the old (non-node) pipeline.
        if not scene.render.use_shading_nodes:
            col.prop(gs, "material_mode", text="")
        if view.viewport_shade == 'SOLID':
            col.prop(view, "show_textured_solid")
            col.prop(view, "use_matcap")
            if view.use_matcap:
                col.template_icon_view(view, "matcap_icon")
        if view.viewport_shade == 'TEXTURED' or context.mode == 'PAINT_TEXTURE':
            if scene.render.use_shading_nodes or gs.material_mode != 'GLSL':
                col.prop(view, "show_textured_shadeless")
        col.prop(view, "show_backface_culling")
        if view.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'}:
            if obj and obj.mode == 'EDIT':
                col.prop(view, "show_occlude_wire")
        fx_settings = view.fx_settings
        if view.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'}:
            sub = col.column()
            # Depth of field only works when looking through the camera.
            sub.active = view.region_3d.view_perspective == 'CAMERA'
            sub.prop(fx_settings, "use_dof")
            col.prop(fx_settings, "use_ssao", text="Ambient Occlusion")
            if fx_settings.use_ssao:
                ssao_settings = fx_settings.ssao
                subcol = col.column(align=True)
                subcol.prop(ssao_settings, "factor")
                subcol.prop(ssao_settings, "distance_max")
                subcol.prop(ssao_settings, "attenuation")
                subcol.prop(ssao_settings, "samples")
                subcol.prop(ssao_settings, "color")
class VIEW3D_PT_view3d_motion_tracking(Panel):
    """Sidebar panel for motion-tracking reconstruction display options."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Motion Tracking"
    bl_options = {'DEFAULT_CLOSED'}
    @classmethod
    def poll(cls, context):
        view = context.space_data
        return (view)
    def draw_header(self, context):
        # Checkbox in the panel header toggles the whole feature.
        view = context.space_data
        self.layout.prop(view, "show_reconstruction", text="")
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        col = layout.column()
        col.active = view.show_reconstruction
        col.prop(view, "show_camera_path", text="Camera Path")
        col.prop(view, "show_bundle_names", text="3D Marker Names")
        col.label(text="Track Type and Size:")
        row = col.row(align=True)
        row.prop(view, "tracks_draw_type", text="")
        row.prop(view, "tracks_draw_size", text="")
class VIEW3D_PT_view3d_meshdisplay(Panel):
    """Sidebar "Mesh Display" panel: edit-mode overlays, normals and extra
    edge/face measurements."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Mesh Display"
    @classmethod
    def poll(cls, context):
        # The active object check is needed because of local-mode
        return (context.active_object and (context.mode == 'EDIT_MESH'))
    def draw(self, context):
        layout = self.layout
        # Freestyle moves 'Seams' to the left column and adds mark toggles.
        with_freestyle = bpy.app.build_options.freestyle
        mesh = context.active_object.data
        scene = context.scene
        split = layout.split()
        col = split.column()
        col.label(text="Overlays:")
        col.prop(mesh, "show_faces", text="Faces")
        col.prop(mesh, "show_edges", text="Edges")
        col.prop(mesh, "show_edge_crease", text="Creases")
        if with_freestyle:
            col.prop(mesh, "show_edge_seams", text="Seams")
        layout.prop(mesh, "show_weight")
        col = split.column()
        col.label()
        if not with_freestyle:
            col.prop(mesh, "show_edge_seams", text="Seams")
        col.prop(mesh, "show_edge_sharp", text="Sharp", text_ctxt=i18n_contexts.plural)
        col.prop(mesh, "show_edge_bevel_weight", text="Bevel")
        if with_freestyle:
            col.prop(mesh, "show_freestyle_edge_marks", text="Edge Marks")
            col.prop(mesh, "show_freestyle_face_marks", text="Face Marks")
        col = layout.column()
        col.separator()
        col.label(text="Normals:")
        row = col.row(align=True)
        row.prop(mesh, "show_normal_vertex", text="", icon='VERTEXSEL')
        row.prop(mesh, "show_normal_loop", text="", icon='LOOPSEL')
        row.prop(mesh, "show_normal_face", text="", icon='FACESEL')
        sub = row.row(align=True)
        # Size slider is only meaningful when some normal display is on.
        sub.active = mesh.show_normal_vertex or mesh.show_normal_face or mesh.show_normal_loop
        sub.prop(scene.tool_settings, "normal_size", text="Size")
        col.separator()
        split = layout.split()
        col = split.column()
        col.label(text="Edge Info:")
        col.prop(mesh, "show_extra_edge_length", text="Length")
        col.prop(mesh, "show_extra_edge_angle", text="Angle")
        col = split.column()
        col.label(text="Face Info:")
        col.prop(mesh, "show_extra_face_area", text="Area")
        col.prop(mesh, "show_extra_face_angle", text="Angle")
        if bpy.app.debug:
            layout.prop(mesh, "show_extra_indices")
class VIEW3D_PT_view3d_meshstatvis(Panel):
    """Sidebar "Mesh Analysis" panel: statistics visualization settings."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Mesh Analysis"
    @classmethod
    def poll(cls, context):
        # The active object check is needed because of local-mode
        return (context.active_object and (context.mode == 'EDIT_MESH'))
    def draw_header(self, context):
        # Header checkbox toggles the visualization as a whole.
        mesh = context.active_object.data
        self.layout.prop(mesh, "show_statvis", text="")
    def draw(self, context):
        layout = self.layout
        mesh = context.active_object.data
        statvis = context.tool_settings.statvis
        layout.active = mesh.show_statvis
        layout.prop(statvis, "type")
        # Each analysis type exposes its own min/max (or sample) settings.
        statvis_type = statvis.type
        if statvis_type == 'OVERHANG':
            row = layout.row(align=True)
            row.prop(statvis, "overhang_min", text="")
            row.prop(statvis, "overhang_max", text="")
            layout.prop(statvis, "overhang_axis", expand=True)
        elif statvis_type == 'THICKNESS':
            row = layout.row(align=True)
            row.prop(statvis, "thickness_min", text="")
            row.prop(statvis, "thickness_max", text="")
            layout.prop(statvis, "thickness_samples")
        elif statvis_type == 'INTERSECT':
            pass
        elif statvis_type == 'DISTORT':
            row = layout.row(align=True)
            row.prop(statvis, "distort_min", text="")
            row.prop(statvis, "distort_max", text="")
        elif statvis_type == 'SHARP':
            row = layout.row(align=True)
            row.prop(statvis, "sharp_min", text="")
            row.prop(statvis, "sharp_max", text="")
class VIEW3D_PT_view3d_curvedisplay(Panel):
    """Sidebar "Curve Display" panel for curve edit mode."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Curve Display"
    @classmethod
    def poll(cls, context):
        editmesh = context.mode == 'EDIT_CURVE'
        return (editmesh)
    def draw(self, context):
        layout = self.layout
        curve = context.active_object.data
        col = layout.column()
        row = col.row()
        row.prop(curve, "show_handles", text="Handles")
        row.prop(curve, "show_normal_face", text="Normals")
        col.prop(context.scene.tool_settings, "normal_size", text="Normal Size")
class VIEW3D_PT_background_image(Panel):
    """Sidebar "Background Images" panel: one expandable box per background
    image/movie-clip slot with per-slot display settings."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Background Images"
    bl_options = {'DEFAULT_CLOSED'}
    def draw_header(self, context):
        # Header checkbox globally enables/disables background images.
        view = context.space_data
        self.layout.prop(view, "show_background_images", text="")
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        use_multiview = context.scene.render.use_multiview
        col = layout.column()
        col.operator("view3d.background_image_add", text="Add Image")
        for i, bg in enumerate(view.background_images):
            layout.active = view.show_background_images
            box = layout.box()
            row = box.row(align=True)
            row.prop(bg, "show_expanded", text="", emboss=False)
            # Title: image/clip name when set, placeholder otherwise.
            if bg.source == 'IMAGE' and bg.image:
                row.prop(bg.image, "name", text="", emboss=False)
            elif bg.source == 'MOVIE_CLIP' and bg.clip:
                row.prop(bg.clip, "name", text="", emboss=False)
            else:
                row.label(text="Not Set")
            # Eye icon reflects the per-slot visibility state.
            if bg.show_background_image:
                row.prop(bg, "show_background_image", text="", emboss=False, icon='RESTRICT_VIEW_OFF')
            else:
                row.prop(bg, "show_background_image", text="", emboss=False, icon='RESTRICT_VIEW_ON')
            row.operator("view3d.background_image_remove", text="", emboss=False, icon='X').index = i
            box.prop(bg, "view_axis", text="Axis")
            if bg.show_expanded:
                row = box.row()
                row.prop(bg, "source", expand=True)
                # has_bg: whether this slot actually has displayable content.
                has_bg = False
                if bg.source == 'IMAGE':
                    row = box.row()
                    row.template_ID(bg, "image", open="image.open")
                    if bg.image is not None:
                        box.template_image(bg, "image", bg.image_user, compact=True)
                        has_bg = True
                        # Stereo/multiview settings only for camera-tied axes.
                        if use_multiview and bg.view_axis in {'CAMERA', 'ALL'}:
                            box.prop(bg.image, "use_multiview")
                            column = box.column()
                            column.active = bg.image.use_multiview
                            column.label(text="Views Format:")
                            column.row().prop(bg.image, "views_format", expand=True)
                            sub = column.box()
                            sub.active = bg.image.views_format == 'STEREO_3D'
                            sub.template_image_stereo_3d(bg.image.stereo_3d_format)
                elif bg.source == 'MOVIE_CLIP':
                    box.prop(bg, "use_camera_clip")
                    column = box.column()
                    column.active = not bg.use_camera_clip
                    column.template_ID(bg, "clip", open="clip.open")
                    if bg.clip:
                        column.template_movieclip(bg, "clip", compact=True)
                    if bg.use_camera_clip or bg.clip:
                        has_bg = True
                    column = box.column()
                    column.active = has_bg
                    column.prop(bg.clip_user, "proxy_render_size", text="")
                    column.prop(bg.clip_user, "use_render_undistorted")
                if has_bg:
                    col = box.column()
                    col.prop(bg, "opacity", slider=True)
                    col.row().prop(bg, "draw_depth", expand=True)
                    if bg.view_axis in {'CAMERA', 'ALL'}:
                        col.row().prop(bg, "frame_method", expand=True)
                    box = col.box()
                    row = box.row()
                    row.prop(bg, "offset_x", text="X")
                    row.prop(bg, "offset_y", text="Y")
                    row = box.row()
                    row.prop(bg, "use_flip_x")
                    row.prop(bg, "use_flip_y")
                    row = box.row()
                    # Rotation/size are fixed in camera view (it defines them).
                    if bg.view_axis != 'CAMERA':
                        row.prop(bg, "rotation")
                        row.prop(bg, "size")
class VIEW3D_PT_transform_orientations(Panel):
    """Sidebar panel for selecting and managing custom transform
    orientations."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Transform Orientations"
    bl_options = {'DEFAULT_CLOSED'}
    @classmethod
    def poll(cls, context):
        view = context.space_data
        return (view)
    def draw(self, context):
        layout = self.layout
        view = context.space_data
        orientation = view.current_orientation
        row = layout.row(align=True)
        row.prop(view, "transform_orientation", text="")
        row.operator("transform.create_orientation", text="", icon='ZOOMIN')
        # Rename/delete only when a custom orientation is selected.
        if orientation:
            row = layout.row(align=True)
            row.prop(orientation, "name", text="")
            row.operator("transform.delete_orientation", text="", icon='X')
class VIEW3D_PT_etch_a_ton(Panel):
    """Sidebar "Skeleton Sketching" panel (etch-a-ton bone sketching)."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Skeleton Sketching"
    bl_options = {'DEFAULT_CLOSED'}
    @classmethod
    def poll(cls, context):
        # NOTE(review): 'scene' actually holds context.space_data here, not
        # the scene -- the truthiness test still works, but the name misleads.
        scene = context.space_data
        ob = context.active_object
        return scene and ob and ob.type == 'ARMATURE' and ob.mode == 'EDIT'
    def draw_header(self, context):
        # Header checkbox enables bone sketching as a whole.
        layout = self.layout
        toolsettings = context.scene.tool_settings
        layout.prop(toolsettings, "use_bone_sketching", text="")
    def draw(self, context):
        layout = self.layout
        toolsettings = context.scene.tool_settings
        col = layout.column()
        col.prop(toolsettings, "use_etch_quick")
        col.prop(toolsettings, "use_etch_overdraw")
        col.separator()
        # Each conversion mode exposes its own tuning parameter(s).
        col.prop(toolsettings, "etch_convert_mode")
        if toolsettings.etch_convert_mode == 'LENGTH':
            col.prop(toolsettings, "etch_length_limit")
        elif toolsettings.etch_convert_mode == 'ADAPTIVE':
            col.prop(toolsettings, "etch_adaptive_limit")
        elif toolsettings.etch_convert_mode == 'FIXED':
            col.prop(toolsettings, "etch_subdivision_number")
        elif toolsettings.etch_convert_mode == 'RETARGET':
            col.prop(toolsettings, "etch_template")
            col.prop(toolsettings, "etch_roll_mode")
            col.separator()
            colsub = col.column(align=True)
            colsub.prop(toolsettings, "use_etch_autoname")
            sub = colsub.column(align=True)
            # Manual number/side naming only when auto-naming is off.
            sub.enabled = not toolsettings.use_etch_autoname
            sub.prop(toolsettings, "etch_number")
            sub.prop(toolsettings, "etch_side")
        col.separator()
        col.operator("sketch.convert", text="Convert to Bones")
        col.operator("sketch.delete", text="Delete Strokes")
class VIEW3D_PT_context_properties(Panel):
    """Sidebar "Properties" panel exposing the custom (ID) properties of the
    active object, pose bone or edit bone."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Properties"
    bl_options = {'DEFAULT_CLOSED'}
    # Declared as a staticmethod explicitly: the original relied on plain
    # class-attribute access of an un-decorated function, which only works
    # because of Python 3 function-descriptor semantics.
    @staticmethod
    def _active_context_member(context):
        """Return the context attribute name whose custom properties should
        be shown ('active_pose_bone', 'active_bone' or 'object'), or ''
        when there is no active object."""
        obj = context.object
        if obj:
            mode = obj.mode
            if mode == 'POSE':
                return "active_pose_bone"
            elif mode == 'EDIT' and obj.type == 'ARMATURE':
                return "active_bone"
            else:
                return "object"
        return ""
    @classmethod
    def poll(cls, context):
        """Only show the panel when the resolved member has properties."""
        import rna_prop_ui
        member = cls._active_context_member(context)
        if member:
            context_member, member = rna_prop_ui.rna_idprop_context_value(context, member, object)
            return context_member and rna_prop_ui.rna_idprop_has_properties(context_member)
        return False
    def draw(self, context):
        """Draw the ID-property UI for the resolved context member."""
        import rna_prop_ui
        member = self._active_context_member(context)
        if member:
            # Draw with no edit button
            rna_prop_ui.draw(self.layout, context, member, object, False)
def register():
    """Register every UI class in this module with Blender."""
    bpy.utils.register_module(__name__)
def unregister():
    """Unregister every UI class in this module from Blender."""
    bpy.utils.unregister_module(__name__)
# Allow running the script directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| gpl-3.0 |
paran0ids0ul/httpie | httpie/output/formatters/xml.py | 51 | 1959 | from __future__ import absolute_import
import re
from xml.etree import ElementTree
from httpie.plugins import FormatterPlugin
# Raw strings: '\?' in a plain string literal is an invalid escape sequence
# (DeprecationWarning today, a SyntaxError in future Python versions).
# Matches an <?xml ...?> declaration / <!DOCTYPE ...> at the start of a body.
DECLARATION_RE = re.compile(r'<\?xml[^\n]+?\?>', flags=re.I)
DOCTYPE_RE = re.compile(r'<!DOCTYPE[^\n]+?>', flags=re.I)
# Number of spaces used per nesting level when pretty-printing.
DEFAULT_INDENT = 4
def indent(elem, indent_text=' ' * DEFAULT_INDENT):
    """
    In-place prettyprint formatter for an ElementTree element.
    C.f. http://effbot.org/zone/element-lib.htm#prettyprint
    Mutates text/tail whitespace of *elem* and all descendants; returns None.
    """
    def _indent(elem, level=0):
        # Newline plus the indentation for this nesting level.
        i = "\n" + level * indent_text
        if len(elem):
            # Element has children: indent its text and each child's tail.
            if not elem.text or not elem.text.strip():
                elem.text = i + indent_text
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            # NOTE: 'elem' is deliberately rebound by the loop; after it the
            # name refers to the LAST child, whose tail is dedented below.
            for elem in elem:
                _indent(elem, level + 1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            # Leaf element: only fix its tail (root level gets no tail).
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
    return _indent(elem)
class XMLFormatter(FormatterPlugin):
    """httpie formatter plugin that pretty-prints XML response bodies."""
    # TODO: tests
    def format_body(self, body, mime):
        """Return *body* pretty-printed when *mime* looks like XML;
        unparsable bodies are returned unchanged."""
        if 'xml' in mime:
            # FIXME: orig NS names get forgotten during the conversion, etc.
            try:
                root = ElementTree.fromstring(body.encode('utf8'))
            except ElementTree.ParseError:
                # Ignore invalid XML errors (skips attempting to pretty print)
                pass
            else:
                indent(root)
                # Use the original declaration
                # (ElementTree drops the declaration/doctype on re-serialize,
                # so re-attach whatever was at the start of the raw body).
                declaration = DECLARATION_RE.match(body)
                doctype = DOCTYPE_RE.match(body)
                body = ElementTree.tostring(root, encoding='utf-8')\
                                  .decode('utf8')
                if doctype:
                    body = '%s\n%s' % (doctype.group(0), body)
                if declaration:
                    body = '%s\n%s' % (declaration.group(0), body)
        return body
| bsd-3-clause |
rgom/Pydev | plugins/org.python.pydev/pysrc/_pydev_filesystem_encoding.py | 54 | 1104 | def __getfilesystemencoding():
    '''
    Best-effort filesystem encoding lookup with Jython and old-CPython
    fallbacks; always returns a non-empty encoding name.
    Note: there's a copy of this method in interpreterInfo.py
    '''
    import sys
    try:
        ret = sys.getfilesystemencoding()
        if not ret:
            raise RuntimeError('Unable to get encoding.')
        return ret
    except:
        # Deliberately broad: any failure falls through to the guesses below.
        try:
            #Handle Jython
            from java.lang import System
            env = System.getProperty("os.name").lower()
            if env.find('win') != -1:
                return 'ISO-8859-1' #mbcs does not work on Jython, so, use a (hopefully) suitable replacement
            return 'utf-8'
        except:
            pass
        #Only available from 2.3 onwards.
        if sys.platform == 'win32':
            return 'mbcs'
        return 'utf-8'
def getfilesystemencoding():
    """Public wrapper: return a filesystem encoding that is verified to be
    usable for encode/decode, falling back to 'utf-8' on any failure."""
    try:
        ret = __getfilesystemencoding()
        #Check if the encoding is actually there to be used!
        # (''.decode only exists on Python 2; the hasattr guards keep this
        # working on both 2 and 3.)
        if hasattr('', 'encode'):
            ''.encode(ret)
        if hasattr('', 'decode'):
            ''.decode(ret)
        return ret
    except:
        return 'utf-8'
| epl-1.0 |
pfschwartz/openelisglobal-core | liquibase/OE2.9/testCatalogHT_Clinical/scripts/testEntry.py | 18 | 2934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Parallel lists; row i across all of them describes one test entry.
test_names = []
test_sections = []
sample_types = []
descriptions = []
print_names = []
old_sort = []
uom = []
# Names that need manual post-processing (not used below; kept for reference).
hand_edit = ['PaCO2','HCO3']
# Input files (one value per line, same row order) and the SQL output file.
name_file = open('testName.txt','r')
test_section_file = open("testSections.txt",'r')
sample_type_file = open("sampleType.txt")
uom_file = open("newUOM.txt", 'r')
print_name_file = open("printName.txt")
results = open("output/testResults.txt", 'w')
def convert_to_existing_name( name ):
    """Map French/variant lab-section names onto the canonical names already
    present in the database; unknown names pass through unchanged."""
    # NOTE: 'Hemto-Immunology' spelling matches the existing DB row.
    canonical = {
        'Hemato-immunologie': 'Hemto-Immunology',
        'Hémato-immunologie': 'Hemto-Immunology',
        'Biochimie': 'Biochemistry',
        'Hématologie': 'Hematology',
        'Hematologie': 'Hematology',
        'Immunologie': 'Immunology',
        'Immunologie-Serologie': 'Serology-Immunology',
        'Serology': 'Serology-Immunology',
    }
    return canonical.get(name, name)
def esc_char(name):
    """Quote *name* for SQL: dollar-quote when it contains an apostrophe,
    otherwise use ordinary single quotes."""
    quote = "$$" if "'" in name else "'"
    return quote + name + quote
def use_uom( uom ):
    """A unit of measure is usable when it is non-empty and not 'n/a'."""
    return bool(uom) and uom != 'n/a'
# Read each input file into its parallel list (one stripped value per line).
for line in name_file:
    test_names.append(line.strip())
for line in print_name_file:
    print_names.append(line.strip())
print_name_file.close()
for line in test_section_file:
    test_sections.append(line.strip())
for line in sample_type_file:
    sample_types.append(line.strip())
name_file.close()
test_section_file.close()
for line in uom_file:
    uom.append(line.strip())
uom_file.close()
sql_head = "INSERT INTO test( id, uom_id, description, reporting_description, is_active, is_reportable, lastupdated, test_section_id, local_abbrev, sort_order, name )\n\t"
results.write("The following should go in Tests.sql Note\n")
# Sort order increments by 10 so rows can be inserted between later.
sort_count = 10
for row in range(0, len(test_names)):
    if len(test_names[row]) > 1:
        # Description = name + sample type; used to dedupe entries.
        description = esc_char(test_names[row] + "(" + sample_types[row] + ")")
        if description not in descriptions:
            descriptions.append(description)
            results.write( sql_head)
            results.write("VALUES ( nextval( 'test_seq' ) ," )
            # uom_id is looked up by name, or null when no usable UOM.
            if use_uom(uom[row]):
                results.write(" ( select id from clinlims.unit_of_measure where name='" + uom[row] + "') , ")
            else:
                results.write(" null , ")
            results.write( description + " , " + esc_char(print_names[row]) + " , 'Y' , 'N' , now() , ")
            results.write("(select id from clinlims.test_section where name = '" + convert_to_existing_name(test_sections[row]) + "' ) ,")
            # local_abbrev is truncated to the column's 20-char limit.
            results.write( esc_char(test_names[row][:20]) + " ," + str(sort_count) + " , " + esc_char(test_names[row]) + " );\n")
            sort_count += 10
results.close()
print "Done look for results in testResults.txt" | mpl-2.0 |
emin63/eyap | eyap/core/github_comments.py | 1 | 23057 | """Module for working comments from GitHub backend.
"""
import datetime
import doctest
import time
import collections
import re
import json
import logging
import zipfile
import base64
import requests
from eyap.core import comments, yap_exceptions
def fake_markdown(text, *args, **kw):
    """Identity stand-in for markdown(): returns *text* unchanged."""
    del args, kw  # accepted only for signature compatibility
    return text
# Prefer the real markdown renderer; fall back to the plain-text identity
# function defined above instead of redefining an identical one inline.
try:
    from markdown import markdown
except ImportError:
    logging.warning('\n'.join([
        'Could not import markdown package. Will render as plain.',
        'Install the markdown package if you want comments rendered as',
        'markdown.']))
    markdown = fake_markdown
# Addressing/credentials bundle for the GitHub API: repository owner, repo
# name ("realm"), and the user/token pair used for authenticated requests
# (user may be None for unauthenticated access).
GitHubInfo = collections.namedtuple('GitHubInfo', [
    'owner', 'realm', 'user', 'token'])
class GitHubAngry(Exception):
    """Exception to indicate something wrong with github API.

    The inherited Exception initializer already stores the message and any
    extra positional args, so the former pass-through __init__ was redundant
    and has been removed.
    """
class GitHubCommentGroup(object):
    """Class to represent a group of github comments.

    A "group" is the set of issues in one repo whose titles match a regexp;
    the class can list them (get_thread_info) and export them to a zip file.
    """
    def __init__(self, topic_re, gh_info, max_threads=None, params=None):
        """Initializer.
        :arg topic_re:         Regexp string used to filter issue titles.
        :arg gh_info:          Instance of GitHubInfo describing how to
                               access github.
        :arg max_threads=None: Optional cap on how many threads to collect.
        :arg params=None:      Optional dict of params to pass to github
                               request.
        """
        self.topic_re = topic_re
        self.gh_info = gh_info
        self.max_threads = max_threads
        self.base_url = 'https://api.github.com/repos/%s/%s' % (
            self.gh_info.owner, self.gh_info.realm)
        # Copy so later mutation by the caller cannot affect our requests.
        self.params = dict(params) if params else {}
    @staticmethod
    def parse_date(my_date):
        """Parse a date into canonical format of datetime.datetime.
        :param my_date:   Either datetime.datetime or string in
                          '%Y-%m-%dT%H:%M:%SZ' format.
        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
        :return:   A datetime.datetime.
        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
        PURPOSE:   Parse a date and make sure it has no time zone.
        """
        if isinstance(my_date, datetime.datetime):
            result = my_date
        elif isinstance(my_date, str):
            result = datetime.datetime.strptime(my_date, '%Y-%m-%dT%H:%M:%SZ')
        else:
            raise ValueError('Unexpected date format for "%s" of type "%s"' % (
                str(my_date), type(my_date)))
        # GitHub timestamps are 'Z' (UTC) strings, so naive datetimes are
        # expected; anything timezone-aware indicates a caller bug.
        assert result.tzinfo is None, 'Unexpected tzinfo for date %s' % (
            result)
        return result
    def get_thread_info(self, enforce_re=True, latest_date=None):
        """Return a json list with information about threads in the group.
        :param enforce_re=True:   Whether to require titles to match
                                  regexp in self.topic_re.
        :param latest_date=None:  Optional datetime.datetime for latest
                                  date to consider.  Things past this
                                  are ignored.
        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
        :return:   List of github items found.
        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
        PURPOSE:   Return a json list with information about threads
                   in the group.  Along with latest_date, this can be used
                   to show issues.
        """
        result = []
        my_re = re.compile(self.topic_re)
        url = '%s/issues?sort=updated' % (self.base_url)
        latest_date = self.parse_date(latest_date) if latest_date else None
        # Follow GitHub's pagination: loop until no rel="next" link remains.
        while url:
            # Authenticate only when a user was provided.
            kwargs = {} if not self.gh_info.user else {'auth': (
                self.gh_info.user, self.gh_info.token)}
            my_req = requests.get(url, params=self.params, **kwargs)
            my_json = my_req.json()
            for item in my_json:
                if (not enforce_re) or my_re.search(item['title']):
                    idate = self.parse_date(item['updated_at'])
                    if (latest_date is not None and idate > latest_date):
                        logging.debug('Skip %s since updated at %s > %s',
                                      item['title'], idate, latest_date)
                        continue
                    result.append(item)
                    if self.max_threads is not None and len(
                            result) >= self.max_threads:
                        logging.debug('Stopping after max_threads=%i threads.',
                                      len(result))
                        return result
            # Extract the next page URL from the Link header, if any.
            url = None
            if 'link' in my_req.headers:
                link = my_req.headers['link'].split(',')
                for thing in link:
                    potential_url, part = thing.split('; ')
                    if part == 'rel="next"':
                        url = potential_url.lstrip(' <').rstrip('> ')
        return result
    def export(self, out_filename):
        """Export desired threads as a zipfile to out_filename.

        Each matching issue becomes one archive member named
        '<number>__<title>' containing the thread's comments as JSON.
        """
        with zipfile.ZipFile(out_filename, 'w', zipfile.ZIP_DEFLATED) as arc:
            id_list = list(self.get_thread_info())
            for num, my_info in enumerate(id_list):
                logging.info('Working on item %i : %s', num, my_info['number'])
                my_thread = GitHubCommentThread(
                    self.gh_info.owner, self.gh_info.realm, my_info['title'],
                    self.gh_info.user, self.gh_info.token,
                    thread_id=my_info['number'])
                csec = my_thread.get_comment_section()
                cdict = [item.to_dict() for item in csec.comments]
                my_json = json.dumps(cdict)
                arc.writestr('%i__%s' % (my_info['number'], my_info['title']),
                             my_json)
    @staticmethod
    def _test_export():
        """Simple regression test to make sure export works.
        NOTE: this test will hit the github web site unauthenticated. There are
              pretty tight rate limits for that so if you are re-running this
              test repeatedly, it will fail. To manually verify you can set
              user and token and re-run.
        >>> user, token = None, None
        >>> import tempfile, shlex, os, zipfile
        >>> from eyap.core import github_comments
        >>> info = github_comments.GitHubInfo('octocat', 'Hello-World', user, token)
        >>> group = github_comments.GitHubCommentGroup('.', info, max_threads=3)
        >>> fn = tempfile.mktemp(suffix='.zip')
        >>> group.export(fn)
        >>> zdata = zipfile.ZipFile(fn)
        >>> len(zdata.filelist)
        3
        >>> data = zdata.read(zdata.infolist()[0].filename)
        >>> len(data) > 10
        True
        >>> del zdata
        >>> os.remove(fn)
        >>> os.path.exists(fn)
        False
        """
class GitHubCommentThread(comments.CommentThread):
    """Sub-class of CommentThread using GitHub as a back-end.

    Each comment "thread" maps onto a single GitHub issue: the issue body
    acts as the first comment and the issue's comments make up the rest.
    """

    # Class-level cache mapping (owner, realm, topic) -> issue number so we
    # do not have to hit the GitHub search API on every lookup.
    __thread_id_cache = {}

    # Base url to use in searching for issues.
    search_url = 'https://api.github.com/search/issues'

    url_extras = '' # useful in testing to add things to URL

    def __init__(self, *args, attachment_location='files', **kw):
        """Initializer.

        :arg *args, **kw:  As for CommentThread.__init__.

        :arg attachment_location='files':  Repository path under which
            uploaded attachments are committed (see upload_attachment).
        """
        comments.CommentThread.__init__(self, *args, **kw)
        self.base_url = 'https://api.github.com/repos/%s/%s' % (
            self.owner, self.realm)
        self.attachment_location = attachment_location

    @classmethod
    def sleep_if_necessary(cls, user, token, endpoint='search', msg=''):
        """Sleep a little if hit github recently to honor rate limit.

        :arg user, token:  Credentials; auth is only sent when user is truthy.

        :arg endpoint='search':  Which rate-limit bucket to inspect.

        :arg msg='':  Extra text appended to the warning log line.

        :returns:  True if we slept, False otherwise.
        """
        my_kw = {'auth': (user, token)} if user else {}
        info = requests.get('https://api.github.com/rate_limit', **my_kw)
        info_dict = info.json()
        try:
            remaining = info_dict['resources'][endpoint]['remaining']
        except Exception as problem:  # pylint: disable=broad-except
            # Log the raw payload before re-raising so that changes in the
            # rate-limit API response shape are easy to diagnose.
            logging.error('Unable to get resources from github; got %s',
                          str(info_dict))
            raise
        logging.debug('Search remaining on github is at %s', remaining)
        if remaining <= 5:  # nearly out of requests; back off for 2 minutes
            sleep_time = 120
        else:
            sleep_time = 0
        if sleep_time:
            logging.warning('Sleep %i since github requests remaining = %i%s',
                            sleep_time, remaining, msg)
            time.sleep(sleep_time)
            return True
        return False

    @classmethod
    def update_cache_key(cls, cache_key, item=None):
        """Get item in cache for cache_key and add item if item is not None.

        Returns the *previously* cached value (or None) so callers can see
        what was there before any update.
        """
        contents = cls.__thread_id_cache.get(cache_key, None)
        if item is not None:
            cls.__thread_id_cache[cache_key] = item
        return contents

    @classmethod
    def lookup_cache_key(cls, cache_key):
        "Syntactic sugar for update_cache_key(cache_key)"
        return cls.update_cache_key(cache_key)

    def lookup_thread_id(self):
        """Lookup thread id as required by CommentThread.lookup_thread_id.

        This implementation will query GitHub with the required parameters
        to try and find the topic for the owner, realm, topic, etc., specified
        in init.  A class-level cache is consulted first and validated with
        a direct pull before being trusted.
        """
        query_string = 'in:title "%s" repo:%s/%s' % (
            self.topic, self.owner, self.realm)
        cache_key = (self.owner, self.realm, self.topic)
        result = self.lookup_cache_key(cache_key)
        if result is not None:
            # Validate the cached issue number: it may be stale or renamed.
            my_req = self.raw_pull(result)
            if my_req.status_code != 200:
                result = None  # Cached item was no good
            elif my_req.json()['title'] != self.topic:
                logging.debug('Title must have changed; ignore cache')
                result = None
            else:
                logging.debug('Using cached thread id %s for %s', str(result),
                              str(cache_key))
                return result
        data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
        if data['total_count'] == 1:  # unique match
            if data['items'][0]['title'] == self.topic:
                result = data['items'][0]['number']
            else:
                result = None
        elif data['total_count'] > 1:  # multiple matches since github doesn't
            searched_data = [           # have unique search we must filter
                item for item in data['items'] if item['title'] == self.topic]
            if not searched_data:  # no matches
                return None
            elif len(searched_data) > 1:
                raise yap_exceptions.UnableToFindUniqueTopic(
                    self.topic, data['total_count'], '')
            else:
                assert len(searched_data) == 1, (
                    'Confused searching for topic "%s"' % str(self.topic))
                result = searched_data[0]['number']
        else:
            result = None
        self.update_cache_key(cache_key, result)
        return result

    @classmethod
    def raw_search(cls, user, token, query, page=0):
        """Do a raw search for github issues.

        :arg user:  Username to use in accessing github.

        :arg token:  Token to use in accessing github.

        :arg query:  String query to use in searching github.

        :arg page=0:  Number of pages to automatically paginate.

        :returns:  The pair (result, header) representing the result from
                   github along with the header.

        PURPOSE: Search for issues on github. If page > 0 then we will pull
                 out up to page more pages via automatic pagination. The
                 best way to check if you got the full results is to check
                 if results['total_count'] matches len(results['items']).
        """
        page = int(page)
        kwargs = {} if not user else {'auth': (user, token)}
        my_url = cls.search_url
        data = {'items': []}
        while my_url:
            # Honor the (tight) search rate limit before each request.
            cls.sleep_if_necessary(
                user, token, msg='\nquery="%s"' % str(query))
            my_req = requests.get(my_url, params={'q': query}, **kwargs)
            if my_req.status_code != 200:
                raise GitHubAngry(
                    'Bad status code %s finding query %s because %s' % (
                        my_req.status_code, query, my_req.reason))
            my_json = my_req.json()
            assert isinstance(my_json['items'], list)
            # Accumulate items across pages; other keys just get overwritten.
            data['items'].extend(my_json.pop('items'))
            data.update(my_json)
            my_url = None
            if page and my_req.links.get('next', False):
                my_url = my_req.links['next']['url']
                if my_url:
                    page = page - 1
                    logging.debug(
                        'Paginating %s in raw_search (%i more pages allowed)',
                        my_req.links, page)
        return data, my_req.headers

    def raw_pull(self, topic):
        """Do a raw pull of data for given topic down from github.

        :arg topic:  Issue identifier passed straight into the URL.  Despite
            the name, callers in this class pass the issue *number*
            (thread id), which the GitHub issues endpoint expects.

        :returns:  Result of request data from github API (the raw
                   requests.Response; status is NOT checked here).
        """
        assert topic is not None, 'A topic of None is not allowed'
        kwargs = {} if not self.user else {'auth': (self.user, self.token)}
        my_req = requests.get('%s/issues/%s' % (
            self.base_url, topic), **kwargs)
        return my_req

    def lookup_comment_list(self):
        """Lookup list of comments for an issue.

        :returns:  The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
                   main issue and COMMENTS is a list of comments on the
                   issue.  Returns (None, None) when no thread id is set.

        PURPOSE: Do the work of getting data from github, handling paging,
                 and so on.
        """
        if self.thread_id is None:
            return None, None
        # Just pulling a single issue here so pagination shouldn't be problem
        my_req = self.raw_pull(self.thread_id)
        if my_req.status_code != 200:
            raise GitHubAngry('Bad status code %s because %s' % (
                my_req.status_code, my_req.reason))
        issue_json = my_req.json()
        comments_url = issue_json['comments_url'] + self.url_extras
        kwargs = {} if not self.user else {'auth': (self.user, self.token)}
        comments_json = []
        while comments_url:
            logging.debug('Pulling comments URL: %s', comments_url)
            c_req = requests.get(comments_url, **kwargs)
            my_json = c_req.json()
            assert isinstance(my_json, list)
            comments_json.extend(my_json)
            comments_url = None
            if 'link' in c_req.headers:  # need to handle pagination.
                logging.debug('Paginating in lookup_comment_list')
                link = c_req.headers['link'].split(',')
                for thing in link:
                    potential_url, part = thing.split('; ')
                    if part == 'rel="next"':
                        comments_url = potential_url.lstrip(' <').rstrip('> ')
        return issue_json, comments_json

    def lookup_comments(self, reverse=False):
        """Return a CommentSection for this thread.

        The issue body becomes the first SingleComment and each GitHub
        comment follows; bodies are rendered to HTML markup via markdown.
        With reverse=True the resulting list is reversed.  An empty
        CommentSection is returned when the thread cannot be found.
        """
        if self.thread_id is None:
            self.thread_id = self.lookup_thread_id()
        issue_json, comment_json = self.lookup_comment_list()
        if issue_json is None and comment_json is None:
            return comments.CommentSection([])
        cthread_list = [comments.SingleComment(
            issue_json['user']['login'], issue_json['created_at'],
            issue_json['body'], issue_json['html_url'],
            markup=markdown(issue_json['body'], extensions=[
                'fenced_code', 'tables', 'markdown.extensions.nl2br']))]
        for item in comment_json:
            comment = comments.SingleComment(
                item['user']['login'], item['updated_at'], item['body'],
                item['html_url'], markup=markdown(
                    item['body'], extensions=[
                        'fenced_code', 'tables', 'markdown.extensions.nl2br']))
            cthread_list.append(comment)
        if reverse:
            cthread_list = list(reversed(cthread_list))
        return comments.CommentSection(cthread_list)

    def add_comment(self, body, allow_create=False, allow_hashes=True,
                    summary=None, hash_create=False):
        """Implement as required by CommentThread.add_comment.

        :arg body:  String/text of comment to add.

        :arg allow_create=False:  Whether to automatically create a new
            thread if a thread does not exist (usually by calling
            self.create_thread).

        :arg allow_hashes=True:  Whether to support hashtag mentions of
            other topics and automatically insert comment in body into
            those topics as well.  *IMPORTANT*: if you recursively call
            add_comment to insert the hashes, you should make sure to set
            this to False to prevent infinite hash processing loops.

        :arg summary=None:  Optional summary. If not given, we will extract
            one from body automatically if necessary.

        :arg hash_create=False:  Whether to allow creating new threads via
            hash mentions.

        :returns:  Response object indicating whether added succesfully.

        PURPOSE: This uses the GitHub API to try to add the given comment
                 to the desired thread.
        """
        if self.thread_id is None:
            self.thread_id = self.lookup_thread_id()
        data = json.dumps({'body': body})
        if self.thread_id is None:
            if allow_create:
                return self.create_thread(body)
            else:
                # NOTE(review): message reads "comment existing comment";
                # probably meant "existing comment".  Left unchanged here
                # since it is runtime behavior.
                raise ValueError(
                    'Cannot find comment existing comment for %s' % self.topic)
        result = requests.post('%s/issues/%s/comments' % (
            self.base_url, self.thread_id), data, auth=(self.user, self.token))
        if result.status_code != 201:
            if result.reason == 'Not Found' and allow_create:
                return self.create_thread(body)
            else:
                raise GitHubAngry(
                    'Bad status %s add_comment on %s because %s' % (
                        result.status_code, self.topic, result.reason))
        if allow_hashes:
            self.process_hashes(body, allow_create=hash_create)
        return result

    def process_hashes(self, body, allow_create=False):
        """Process any hashes mentioned and push them to related topics.

        :arg body:  Body of the comment to check for hashes and push out.

        :arg allow_create=False:  Whether to allow creating new topics
            from hash tag mentions.

        PURPOSE: Look for hashtags matching self.hashtag_re and when found,
                 add comment from body to those topics.
        """
        hash_re = re.compile(self.hashtag_re)
        hashes = hash_re.findall(body)
        # Seed with our own topic so a thread never re-posts to itself.
        done = {self.topic.lower(): True}
        for mention in hashes:
            mention = mention.strip('#')
            if mention.lower() in done:
                continue  # Do not duplicate hash mentions
            new_thread = self.__class__(
                owner=self.owner, realm=self.realm, topic=mention,
                user=self.user, token=self.token)
            my_comment = '# Hashtag copy from %s:\n%s' % (self.topic, body)
            new_thread.add_comment(
                my_comment, allow_create=allow_create,
                allow_hashes=False)  # allow_hashes=False to prevent inf loop
            done[mention.lower()] = True

    def create_thread(self, body):
        """Create a new GitHub issue titled self.topic with the given body.

        :returns:  The requests.Response from GitHub; raises GitHubAngry
                   when the status code is not 201 (created).
        """
        data = json.dumps({'body': body, 'title': self.topic})
        result = requests.post('%s/issues' % (self.base_url),
                               data, auth=(self.user, self.token))
        if result.status_code != 201:
            raise GitHubAngry(
                'Bad status %s in create_thread on %s because %s' % (
                    result.status_code, self.topic, result.reason))
        return result

    def upload_attachment(self, location, data):
        """Upload attachment as required by CommentThread class.

        See CommentThread.upload_attachment for details.  The file is
        committed to the repo under self.attachment_location via the
        GitHub contents API and a markdown link to it is returned.
        """
        self.validate_attachment_location(location)
        content = data.read() if hasattr(data, 'read') else data
        orig_content = content
        if isinstance(content, bytes):
            # The contents API requires base64-encoded ASCII payloads.
            content = base64.b64encode(orig_content).decode('ascii')
        else:
            pass  # Should be base64 encoded already
        apath = '%s/%s' % (self.attachment_location, location)
        url = '%s/contents/%s' % (self.base_url, apath)
        result = requests.put(
            url, auth=(self.user, self.token), data=json.dumps({
                'message': 'file attachment %s' % location,
                'content': content}))
        if result.status_code != 201:
            raise ValueError(
                "Can't upload attachment %s due to error %s." % (
                    location, result.reason))
        return '[%s](https://github.com/%s/%s/blob/master/%s)' % (
            location, self.owner, self.realm, apath)

    # Doctest-only regression helper; runs when doctest.testmod() executes.
    @staticmethod
    def _regr_test_lookup():
        """
        NOTE: this test will hit the github web site unauthenticated. There are
              pretty tight rate limits for that so if you are re-running this
              test repeatedly, it will fail. To manually verify you can set
              user and token and re-run.

        >>> user, token = None, None
        >>> import tempfile, shlex, os, zipfile
        >>> from eyap.core import github_comments
        >>> t = github_comments.GitHubCommentThread(
        ...     'emin63', 'eyap', user, token, thread_id='1')
        >>> i, c = t.lookup_comment_list()
        >>> t.url_extras = '?per_page=1'
        >>> more_i, more_c = t.lookup_comment_list()
        >>> i == more_i and c == more_c
        True
        >>> t.url_extras = ''
        """
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    doctest.testmod()
    print('Finished tests')
| bsd-3-clause |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/ctypes/test/test_struct_fields.py | 68 | 1507 | import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
    # Structure/Union classes must get 'finalized' sooner or
    # later, when one of these things happen:
    #
    # 1. _fields_ is set.
    # 2. An instance is created.
    # 3. The type is used as field of another Structure/Union.
    # 4. The type is subclassed
    #
    # When they are finalized, assigning _fields_ is no longer allowed.

    def test_1_A(self):
        """Setting _fields_ finalizes the type; a second assignment fails."""
        class X(Structure):
            pass
        # assertEqual replaces the deprecated failUnlessEqual alias, which
        # was removed from unittest in Python 3.12.
        self.assertEqual(sizeof(X), 0)  # not finalized
        X._fields_ = []  # finalized
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_1_B(self):
        """_fields_ in the class body also finalizes the type."""
        class X(Structure):
            _fields_ = []  # finalized
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_2(self):
        """Creating an instance finalizes the type."""
        class X(Structure):
            pass
        X()
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_3(self):
        """Using the type as a field of another Structure finalizes it."""
        class X(Structure):
            pass
        class Y(Structure):
            _fields_ = [("x", X)]  # finalizes X
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_4(self):
        """Subclassing finalizes the base; the subclass stays settable once."""
        class X(Structure):
            pass
        class Y(X):
            pass
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])
        Y._fields_ = []
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])
# Allow running this test file directly via the unittest CLI.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
640Labs/lightblue-0.4 | src/linux/_lightbluecommon.py | 179 | 10831 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Defines attributes with common implementations across the different
# platforms.
# public attributes
__all__ = ("L2CAP", "RFCOMM", "OBEX", "BluetoothError", "splitclass")

# Protocol/service class types, used for sockets and advertising services.
# These appear to be module-internal enumeration values rather than official
# Bluetooth protocol identifiers.
L2CAP, RFCOMM, OBEX = (10, 11, 12)
class BluetoothError(IOError):
    """Generic exception raised for Bluetooth errors.

    Socket-related problems are *not* reported through this class: socket
    objects raise the socket.error and socket.timeout exceptions from the
    standard library socket module instead.

    The error codes carried by this exception vary with the platform; for
    example, the Mac OS X implementation reports IOReturn error values from
    the IOKit framework and OBEXError codes from <IOBluetooth/OBEX.h> for
    OBEX operations.
    """
def splitclass(classofdevice):
    """Split a class-of-device value into its three component fields.

    Returns a 3-item tuple (major service class, major device class,
    minor device class).  These values indicate the device's major
    services and the type of the device (e.g. mobile phone, laptop,
    etc.).  If you google for "assigned numbers bluetooth baseband" you
    might find some documents that discuss how to extract this
    information from the class of device.

    Example:

    >>> splitclass(1057036)
    (129, 1, 3)
    >>>
    """
    if not isinstance(classofdevice, int):
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" %
                            str(classofdevice))
    bits = classofdevice >> 2      # drop the 2 "format" bits
    minorclass = bits & 0x3F       # low 6 bits
    majorclass = (bits >> 6) & 0x1F  # next 5 bits
    serviceclass = bits >> 11      # remaining high bits
    return (serviceclass, majorclass, minorclass)
_validbtaddr = None


def _isbtaddr(address):
    """Return whether the given address is a valid bluetooth address.

    A valid address is six hex-digit pairs separated by ":" or "-"
    (case-insensitive; separators may be mixed), for example
    "00:0e:6d:7b:a2:0a".

    Returns False if the argument is None or is not a string.  The whole
    string must be an address: the original implementation used an
    unanchored match() and therefore accepted strings with trailing junk
    such as "00:0e:6d:7b:a2:0a:ff"; the pattern is now anchored with \\Z.
    """
    # Lazily build and cache the validity regex on first use.
    global _validbtaddr
    if _validbtaddr is None:
        import re
        _validbtaddr = re.compile(r"((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}\Z",
                                  re.IGNORECASE)
    # The original py2 code checked types.StringTypes, which does not exist
    # on Python 3 and raised AttributeError for every call; isinstance(str)
    # is the Python 3 equivalent.
    if not isinstance(address, str):
        return False
    return _validbtaddr.match(address) is not None
# --------- other attributes ---------
def _joinclass(codtuple):
    """The opposite of splitclass().

    Packs a (service, major, minor) class-of-device tuple back into a
    single class-of-device integer.
    """
    if not isinstance(codtuple, tuple):
        raise TypeError("argument must be tuple, was %s" % type(codtuple))
    if len(codtuple) != 3:
        raise ValueError("tuple must have 3 items, has %d" % len(codtuple))
    service, major, minor = codtuple
    # Shift each field back into position (2 format bits at the bottom).
    return (service << 13) | (major << 8) | (minor << 2)
# Docstrings for socket objects.
# Based on std lib socket docs.
_socketdocs = {
"accept":
"""
accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket representing the
connection, and the address of the client. For RFCOMM sockets, the address
info is a pair (hostaddr, channel).
The socket must be bound and listening before calling this method.
""",
"bind":
"""
bind(address)
Bind the socket to a local address. For RFCOMM sockets, the address is a
pair (host, channel); the host must refer to the local host.
A port value of 0 binds the socket to a dynamically assigned port.
(Note that on Mac OS X, the port value must always be 0.)
The socket must not already be bound.
""",
"close":
"""
close()
Close the socket. It cannot be used after this call.
""",
"connect":
"""
connect(address)
Connect the socket to a remote address. The address should be a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
The socket must not be already connected.
""",
"connect_ex":
"""
connect_ex(address) -> errno
This is like connect(address), but returns an error code instead of raising
an exception when an error occurs.
""",
"dup":
"""
dup() -> socket object
Returns a new socket object connected to the same system resource.
""",
"fileno":
"""
fileno() -> integer
Return the integer file descriptor of the socket.
Raises NotImplementedError on Mac OS X and Python For Series 60.
""",
"getpeername":
"""
getpeername() -> address info
Return the address of the remote endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected, socket.error will be raised.
""",
"getsockname":
"""
getsockname() -> address info
Return the address of the local endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected nor bound, this returns the tuple
("00:00:00:00:00:00", 0).
""",
"getsockopt":
"""
getsockopt(level, option[, bufsize]) -> value
Get a socket option. See the Unix manual for level and option.
If a nonzero buffersize argument is given, the return value is a
string of that length; otherwise it is an integer.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raises socket.error.
""",
"gettimeout":
"""
gettimeout() -> timeout
Returns the timeout in floating seconds associated with socket
operations. A timeout of None indicates that timeouts on socket
operations are disabled.
Currently not supported on Python For Series 60 implementation, which
will always return None.
""",
"listen":
"""
listen(backlog)
Enable a server to accept connections. The backlog argument must be at
least 1; it specifies the number of unaccepted connection that the system
will allow before refusing new connections.
The socket must not be already listening.
Currently not implemented on Mac OS X.
""",
"makefile":
"""
makefile([mode[, bufsize]]) -> file object
Returns a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function.
""",
"recv":
"""
recv(bufsize[, flags]) -> data
Receive up to bufsize bytes from the socket. For the optional flags
argument, see the Unix manual. When no data is available, block until
at least one byte is available or until the remote end is closed. When
the remote end is closed and all data is read, return the empty string.
Currently the flags argument has no effect on Mac OS X.
""",
"recvfrom":
"""
recvfrom(bufsize[, flags]) -> (data, address info)
Like recv(buffersize, flags) but also return the sender's address info.
""",
"send":
"""
send(data[, flags]) -> count
Send a data string to the socket. For the optional flags
argument, see the Unix manual. Return the number of bytes
sent.
The socket must be connected to a remote socket.
Currently the flags argument has no effect on Mac OS X.
""",
"sendall":
"""
sendall(data[, flags])
Send a data string to the socket. For the optional flags
argument, see the Unix manual. This calls send() repeatedly
until all data is sent. If an error occurs, it's impossible
to tell how much data has been sent.
""",
"sendto":
"""
sendto(data[, flags], address) -> count
Like send(data, flags) but allows specifying the destination address.
For RFCOMM sockets, the address is a pair (hostaddr, channel).
""",
"setblocking":
"""
setblocking(flag)
Set the socket to blocking (flag is true) or non-blocking (false).
setblocking(True) is equivalent to settimeout(None);
setblocking(False) is equivalent to settimeout(0.0).
Initially a socket is in blocking mode. In non-blocking mode, if a
socket operation cannot be performed immediately, socket.error is raised.
The underlying implementation on Python for Series 60 only supports
non-blocking mode for send() and recv(), and ignores it for connect() and
accept().
""",
"setsockopt":
"""
setsockopt(level, option, value)
Set a socket option. See the Unix manual for level and option.
The value argument can either be an integer or a string.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raise socket.error.
""",
"settimeout":
"""
settimeout(timeout)
Set a timeout on socket operations. 'timeout' can be a float,
giving in seconds, or None. Setting a timeout of None disables
the timeout feature and is equivalent to setblocking(1).
Setting a timeout of zero is the same as setblocking(0).
If a timeout is set, the connect, accept, send and receive operations will
raise socket.timeout if a timeout occurs.
Raises NotImplementedError on Python For Series 60.
""",
"shutdown":
"""
shutdown(how)
Shut down the reading side of the socket (flag == socket.SHUT_RD), the
writing side of the socket (flag == socket.SHUT_WR), or both ends
(flag == socket.SHUT_RDWR).
"""
}
| gpl-3.0 |
seguijoaquin/taller2-appserver | Appserver/Test/ApiUnitTesting/AppServerApiUnitTests.py | 1 | 4429 | import json
import requests
import unittest
import Utilities
from testLogin import *
from testChat import *
from testBusquedaCandidatos import *
from testPerfil import *
# Precondiciones:
# - La base de datos del servidor no debe contener usuarios.
# - El servidor debe cumplir las especificaciones de la api.#
# - La base de datos tiene un usuario registrado con el nombre "usuario" y password "password"
# Login:
# - La base de datos NO tiene un usuario registrado con el nombre "usuarioFalso" y password "passwordFalso"
# Registro:
# - La base de datos NO tiene un usuario registrado con el nombre "usuarioNuevo" y password "passwordNuevo"
# Base address of the app server under test.
Address = "http://localhost:8000"
# TODO: maybe move the URIs into their respective test classes
URILogin = "/login"
URIResgistro = "/registro"  # NOTE(review): name looks misspelled ("Resgistro") but is referenced as-is below
usuarioCorrecto = 'usuario'
passwordCorrecto = 'password'
# Fixed GCM registration token sent with every test request.
_TOKEN_GCM = "APA91bFundy4qQCiRnhUbMOcsZEwUBpbuPjBm-wnyBv600MNetW5rp-5Cg32_UA0rY_gmqqQ8pf0Cn-nyqoYrAl6BQTPT3dXNYFuHeWYEIdLz0RwAhN2lGqdoiYnCM2V_O8MonYn3rL6hAtYaIz_b0Jl2xojcKIOqQ"


def crearHeadersDeUsuarioYPassword(usuario, password):
    """Build the authentication headers (user, password, GCM token)."""
    headers = {
        'Usuario': usuario,
        'Password': password,
        'TokenGCM': _TOKEN_GCM,
    }
    return headers
class TestRegistro(unittest.TestCase):
    # Registration endpoint tests.  Assumes the server already has the
    # user "usuario" registered (see the module-level preconditions).
    headUsuarioYaRegistrado = crearHeadersDeUsuarioYPassword(usuarioCorrecto, 'cualquierPassword')
    msgUsuarioYaRegistrado = "Usuario existente"
    msgUsuarioNuevoRegistrado = "Se pudo registrar el usuario"
    usuarioNuevo = "usuarioNuevo@p.com"
    passwordNuevo = "passwordNuevo"
    headUsuarioNuevo = crearHeadersDeUsuarioYPassword(usuarioNuevo, passwordNuevo)

    def crearBodyUsuario(self, nombre):
        """Build a registration body from the JSON template, overriding the email."""
        body = Utilities.abrirJson("./usuarioCompleto.json")
        body["user"]["email"] = nombre
        return body

    # NOTE(review): this looks broken/dead.  The unittest load_tests
    # protocol expects a *module-level* function load_tests(loader, tests,
    # pattern); defined here as a method, `loader` is actually bound to
    # self, and both TestSuite and test_cases are undefined names, so this
    # would raise NameError if it ever ran.  Confirm intent and either move
    # it to module level (with proper imports) or delete it.
    def load_tests(loader, tests, pattern):
        suite = TestSuite()
        for test_class in test_cases:
            tests = loader.loadTestsFromTestCase(test_class)
            suite.addTests(tests)
        return suite

    def test_RegistroDeUnUsuarioYaRegistrado(self):
        """Registering an already-registered user must fail with HTTP 400."""
        bodyUsuario = self.crearBodyUsuario(usuarioCorrecto)
        request = requests.put(Address + URIResgistro, headers=self.headUsuarioYaRegistrado, data=json.dumps(bodyUsuario) )
        self.assertEqual(request.reason,self.msgUsuarioYaRegistrado)
        self.assertEqual(request.status_code,400)

    def test_RegistroDeUnUsuarioNuevo(self):
        """Registering a brand-new user must succeed with HTTP 201."""
        bodyUsuario = self.crearBodyUsuario(self.usuarioNuevo)
        request = requests.put(Address + URIResgistro, headers=self.headUsuarioNuevo, data=json.dumps(bodyUsuario))
        self.assertEqual(request.reason,self.msgUsuarioNuevoRegistrado)
        self.assertEqual(request.status_code,201)
class TestRegistroYLogin(unittest.TestCase):
    # End-to-end flow: failed login for an unknown user, then registration,
    # then a successful login.
    headUsuarioYaRegistrado = crearHeadersDeUsuarioYPassword(usuarioCorrecto, 'cualquierPassword')
    msgLoginCorrecto = "Se logueo correctamente"
    msgLoginIncorrecto = "No coinciden los datos"
    msgUsuarioYaRegistrado = "Usuario existente"
    msgUsuarioNuevoRegistrado = "Se pudo registrar el usuario"

    def crearBodyUsuario(self, nombre):
        """Build a registration body from the JSON template, overriding the email."""
        body = Utilities.abrirJson("./usuarioCompleto.json")
        body["user"]["email"] = nombre
        return body

    def test_IntentoDeLoginDeUnUsuarioNoRegistrado_SeLoRegistra_SeDebePoderLoguearCorrectamente(self):
        # Try to log in with an unregistered user; it must fail with 400.
        #headUsuario = crearHeadersDeUsuarioYPassword( "IntentoDeLoginDeUnUsuarioNoRegistrado_SeLoRegistra_SeDebePoderLoguearCorrectamente", "12345")
        nombreUsuario = Utilities.transformarEnMail("IntentoDeLoginDeUnUsuarioNoRegistrado")
        headUsuario = crearHeadersDeUsuarioYPassword( nombreUsuario, "12345")
        request = requests.get(Address + URILogin,headers=headUsuario)
        self.assertEqual(request.status_code,400)
        self.assertEqual(request.reason,self.msgLoginIncorrecto)
        # Register the user; the server should answer 201.
        bodyUsuario = self.crearBodyUsuario(nombreUsuario)
        request = requests.put(Address + URIResgistro, headers=headUsuario, data=json.dumps(bodyUsuario))
        self.assertEqual(request.status_code,201)
        self.assertEqual(request.reason,self.msgUsuarioNuevoRegistrado)
        # Now login must succeed and return a session token header.
        request = requests.get(Address + URILogin,headers=headUsuario)
        self.assertEqual(request.status_code,200)
        self.assertEqual(request.reason,self.msgLoginCorrecto)
        self.assertIsNotNone(request.headers["Token"])
# Allow running this test file directly via the unittest CLI.
if __name__ == '__main__':
    unittest.main()
| mit |
jambolo/bitcoin | test/functional/example_test.py | 7 | 8891 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
P2PInterface,
msg_block,
msg_getdata,
p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
    def __init__(self):
        """Initialize the P2PInterface

        Used to initialize custom properties for the Node that aren't
        included by default in the base class. Be aware that the P2PInterface
        base class already stores a counter for each P2P message type and the
        last received message of each type, which should be sufficient for the
        needs of most tests.

        Call super().__init__() first for standard initialization and then
        initialize custom properties."""
        super().__init__()
        # Stores a dictionary of all blocks received, keyed by block sha256
        # with the number of times each block was received as the value.
        self.block_receive_map = defaultdict(int)

    def on_block(self, message):
        """Override the standard on_block callback

        Store the hash of a received block in the dictionary."""
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1

    def on_inv(self, message):
        """Override the standard on_inv callback

        Deliberately a no-op: this suppresses the base class's automatic
        getdata response to inventory announcements."""
        pass
def custom_function():
    """Placeholder for free-standing custom test behaviour.

    If this logic turns out to be generally useful for other tests,
    promote it to a module under test_framework instead of keeping it
    here."""
    # self.log only exists on the BitcoinTestFramework instance, so a free
    # function like this one cannot log through it.
    return None
class ExampleTest(BitcoinTestFramework):
# Each functional test is a subclass of the BitcoinTestFramework class.
# Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
    def set_test_params(self):
        """Override test parameters for your individual test.

        This method must be overridden and num_nodes must be explicitly set."""
        # Start every node from an empty datadir instead of the cached chain.
        self.setup_clean_chain = True
        self.num_nodes = 3
        # Use self.extra_args to change command-line arguments for the nodes
        self.extra_args = [[], ["-logips"], []]
        # self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present.
# This test uses generate which requires wallet to be compiled
    def skip_test_if_missing_module(self):
        """Skip the whole test when the node was built without wallet support."""
        self.skip_if_no_wallet()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
self.connect_nodes(0, 1)
self.sync_all(self.nodes[0:2])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
BitcoinTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
# Create P2P connections will wait for a verack to make sure the connection is fully up
peer_messaging = self.nodes[0].add_p2p_connection(BaseNode())
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all(self.nodes[0:2])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
height = self.nodes[0].getblockcount()
for _ in range(10):
# Use the blocktools functionality to manually build a block.
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height+1), self.block_time)
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our P2PInterface
peer_messaging.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
self.connect_nodes(1, 2)
self.log.info("Wait for node2 to receive all the blocks from node1")
self.sync_all()
self.log.info("Add P2P connection to node2")
self.nodes[0].disconnect_p2ps()
peer_receiving = self.nodes[2].add_p2p_connection(BaseNode())
self.log.info("Test that node2 propagates all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(MSG_BLOCK, block))
peer_receiving.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
peer_receiving.wait_until(lambda: sorted(blocks) == sorted(list(peer_receiving.block_receive_map.keys())), timeout=5)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note p2p.wait_until() acquires this global lock internally when testing the predicate.
with p2p_lock:
for block in peer_receiving.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
    # Script entry point: instantiate the test and hand control to the
    # framework's main(), which parses arguments, sets up nodes, runs
    # run_test() and tears everything down.
    ExampleTest().main()
| mit |
jeasoft/odoo | addons/website_crm/controllers/main.py | 250 | 5499 | # -*- coding: utf-8 -*-
import base64
import werkzeug
import werkzeug.urls
from openerp import http, SUPERUSER_ID
from openerp.http import request
from openerp.tools.translate import _
class contactus(http.Controller):
    """Public website controllers for the "Contact us" page.

    Renders the contact form and turns form submissions into ``crm.lead``
    records, running as SUPERUSER_ID because website visitors are anonymous.
    NOTE(review): this is Python 2 / old-API Odoo code (``openerp``,
    ``request.registry``) -- do not port idioms without re-testing.
    """
    def generate_google_map_url(self, street, city, city_zip, country_name):
        # Static Google Maps thumbnail (298x298) centered on the given address.
        url = "http://maps.googleapis.com/maps/api/staticmap?center=%s&sensor=false&zoom=8&size=298x298" % werkzeug.url_quote_plus(
            '%s, %s %s, %s' % (street, city, city_zip, country_name)
        )
        return url
    @http.route(['/page/website.contactus', '/page/contactus'], type='http', auth="public", website=True)
    def contact(self, **kwargs):
        """Render the contact page, pre-filling known lead fields from the query."""
        values = {}
        for field in ['description', 'partner_name', 'phone', 'contact_name', 'email_from', 'name']:
            if kwargs.get(field):
                values[field] = kwargs.pop(field)
        # remaining query params are exposed to the template as a list of pairs
        values.update(kwargs=kwargs.items())
        return request.website.render("website.contactus", values)
    def create_lead(self, request, values, kwargs):
        """Create the crm.lead record. Hook meant to be overridden."""
        cr, context = request.cr, request.context
        return request.registry['crm.lead'].create(cr, SUPERUSER_ID, values, context=dict(context, mail_create_nosubscribe=True))
    def preRenderThanks(self, values, kwargs):
        """Build rendering values for the thank-you page. Hook meant to be overridden."""
        company = request.website.company_id
        return {
            'google_map_url': self.generate_google_map_url(company.street, company.city, company.zip, company.country_id and company.country_id.name_get()[0][1] or ''),
            '_values': values,
            '_kwargs': kwargs,
        }
    def get_contactus_response(self, values, kwargs):
        """Render the thank-you page (or the view named in ``view_callback``)."""
        values = self.preRenderThanks(values, kwargs)
        return request.website.render(kwargs.get("view_callback", "website_crm.contactus_thanks"), values)
    @http.route(['/crm/contactus'], type='http', auth="public", website=True)
    def contactus(self, **kwargs):
        """Handle a form submission: validate, create the lead, attach files."""
        def dict_to_str(title, dictvar):
            # render a list of "key: value" strings as a titled text section
            ret = "\n\n%s" % title
            for field in dictvar:
                ret += "\n%s" % field
            return ret
        _TECHNICAL = ['show_info', 'view_from', 'view_callback'] # Only use for behavior, don't stock it
        _BLACKLIST = ['id', 'create_uid', 'create_date', 'write_uid', 'write_date', 'user_id', 'active'] # Allow in description
        _REQUIRED = ['name', 'contact_name', 'email_from', 'description'] # Could be improved including required from model
        post_file = [] # List of file to add to ir_attachment once we have the ID
        post_description = [] # Info to add after the message
        values = {}
        values['medium_id'] = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, SUPERUSER_ID, 'crm.crm_medium_website')
        values['section_id'] = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, SUPERUSER_ID, 'website.salesteam_website_sales')
        for field_name, field_value in kwargs.items():
            if hasattr(field_value, 'filename'):
                post_file.append(field_value)
            elif field_name in request.registry['crm.lead']._fields and field_name not in _BLACKLIST:
                values[field_name] = field_value
            elif field_name not in _TECHNICAL: # allow to add some free fields or blacklisted field like ID
                post_description.append("%s: %s" % (field_name, field_value))
        if "name" not in kwargs and values.get("contact_name"): # if kwarg.name is empty, it's an error, we cannot copy the contact_name
            values["name"] = values.get("contact_name")
        # fields validation : Check that required field from model crm_lead exists
        error = set(field for field in _REQUIRED if not values.get(field))
        if error:
            # re-render the form with the error set so the user can fix it
            values = dict(values, error=error, kwargs=kwargs.items())
            return request.website.render(kwargs.get("view_from", "website.contactus"), values)
        # description is required, so it is always already initialized
        if post_description:
            values['description'] += dict_to_str(_("Custom Fields: "), post_description)
        if kwargs.get("show_info"):
            post_description = []
            environ = request.httprequest.headers.environ
            post_description.append("%s: %s" % ("IP", environ.get("REMOTE_ADDR")))
            post_description.append("%s: %s" % ("USER_AGENT", environ.get("HTTP_USER_AGENT")))
            post_description.append("%s: %s" % ("ACCEPT_LANGUAGE", environ.get("HTTP_ACCEPT_LANGUAGE")))
            post_description.append("%s: %s" % ("REFERER", environ.get("HTTP_REFERER")))
            values['description'] += dict_to_str(_("Environ Fields: "), post_description)
        lead_id = self.create_lead(request, dict(values, user_id=False), kwargs)
        values.update(lead_id=lead_id)
        if lead_id:
            # now that the lead exists, link any uploaded files to it
            for field_value in post_file:
                attachment_value = {
                    'name': field_value.filename,
                    'res_name': field_value.filename,
                    'res_model': 'crm.lead',
                    'res_id': lead_id,
                    'datas': base64.encodestring(field_value.read()),
                    'datas_fname': field_value.filename,
                }
                request.registry['ir.attachment'].create(request.cr, SUPERUSER_ID, attachment_value, context=request.context)
        return self.get_contactus_response(values, kwargs)
| agpl-3.0 |
Noviat/odoo | addons/edi/models/edi.py | 277 | 31944 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2014 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import simplejson as json
import logging
import re
import time
import urllib2
import openerp
import openerp.release as release
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
# Fully-qualified EDI external IDs have the form ``module[:db_uuid].identifier``
# (see split_external_id below for the group meanings).
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
# URL template for viewing an EDI document: (server_url, database, token).
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
# Stamped into every exported document (see edi_metadata) so importers can
# identify the producing software and check compatibility.
EDI_GENERATOR = 'Odoo' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
    """Break a fully-qualified external ID into its components.

    :param str ext_id: ID of the form ``module[:db_uuid].identifier``
    :return: dict with keys ``module``, ``db_uuid`` (None when absent),
             ``id`` and ``full`` (the complete matched string)
    :raise AssertionError: when ``ext_id`` does not match the expected form
    """
    m = EXTERNAL_ID_PATTERN.match(ext_id)
    assert m, _("'%s' is an invalid external ID") % (ext_id)
    parts = dict(zip(('module', 'db_uuid', 'id'), m.group(1, 2, 3)))
    parts['full'] = m.group(0)
    return parts
def safe_unique_id(database_id, model, record_id):
    """Generate a unique string to represent a (database_uuid,model,record_id) pair
    without being too long, and with a very low probability of collisions.
    """
    # NOTE(review): Python 2 only -- hashlib.sha1() is fed a str and the digest
    # is XOR-folded byte-by-byte with chr(ord(...)); porting to Python 3
    # requires encoding msg and operating on ints/bytes instead.
    # time.time() is mixed in so two calls for the same record yield different IDs.
    msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
    digest = hashlib.sha1(msg).digest()
    # fold the sha1 20 bytes digest to 9 bytes
    digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
    # b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
    digest = base64.urlsafe_b64encode(digest)
    # final form: "<model_with_underscores>-<12-char digest>"
    return '%s-%s' % (model.replace('.','_'), digest)
def last_update_for(record):
    """Return the last modification timestamp of ``record``.

    Prefers the write date, falls back to the create date, and returns
    False when neither is available or when the model does not track
    log access at all.
    """
    if not record._log_access:
        return False
    meta = record.get_metadata()[0]
    return meta.get('write_date') or meta.get('create_date') or False
class edi(osv.AbstractModel):
    """Entry points of the EDI subsystem: token generation, (de)serialization
    and import/export orchestration for EDI documents (JSON lists of dicts).
    """
    _name = 'edi.edi'
    _description = 'EDI Subsystem'
    def new_edi_token(self, cr, uid, record):
        """Return a new, random unique token to identify this model record,
        and to be used as token when exporting it as an EDI document.

        :param browse_record record: model record for which a token is needed
        """
        # mix time, db uuid, model and id so tokens are unique across databases
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
        return edi_token
    def serialize(self, edi_documents):
        """Serialize the given EDI document structures (Python dicts holding EDI data),
        using JSON serialization.

        :param [dict] edi_documents: list of EDI document structures to serialize
        :return: UTF-8 encoded string containing the serialized document
        """
        serialized_list = json.dumps(edi_documents)
        return serialized_list
    def generate_edi(self, cr, uid, records, context=None):
        """Generates a final EDI document containing the EDI serialization
        of the given records, which should all be instances of a Model
        that has the :meth:`~.edi` mixin. The document is not saved in the
        database.

        :param list(browse_record) records: records to export as EDI
        :return: UTF-8 encoded string containing the serialized records
        """
        edi_list = []
        for record in records:
            record_model = record._model
            edi_list += record_model.edi_export(cr, uid, [record], context=context)
        return self.serialize(edi_list)
    def load_edi(self, cr, uid, edi_documents, context=None):
        """Import the given EDI document structures into the system, using
        :meth:`~.import_edi`.

        :param edi_documents: list of Python dicts containing the deserialized
                              version of EDI documents
        :return: list of (model, id, action) tuple containing the model and database ID
                 of all records that were imported in the system, plus a suggested
                 action definition dict for displaying each document.
        """
        ir_module = self.pool.get('ir.module.module')
        res = []
        for edi_document in edi_documents:
            # the "__import_*" keys take precedence over the "__*" ones,
            # allowing a document to target a different local model/module
            module = edi_document.get('__import_module') or edi_document.get('__module')
            assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.'
            if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
                raise osv.except_osv(_('Missing Application.'),
                                     _("The document you are trying to import requires the Odoo `%s` application. "
                                       "You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
            model = edi_document.get('__import_model') or edi_document.get('__model')
            assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.'
            assert model in self.pool, 'model `%s` cannot be found, despite module `%s` being available - '\
                                      'this EDI document seems invalid or unsupported.' % (model,module)
            model_obj = self.pool[model]
            record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
            record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
            res.append((model, record_id, record_action))
        return res
    def deserialize(self, edi_documents_string):
        """Return deserialized version of the given EDI Document string.

        :param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
                                                 JSON-serialized EDI document(s)
        :return: Python object representing the EDI document(s) (usually a list of dicts)
        """
        return json.loads(edi_documents_string)
    def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
        """Import a JSON serialized EDI Document string into the system, first retrieving it
        from the given ``edi_url`` if provided.

        :param str|unicode edi_document: UTF-8 string or unicode containing JSON-serialized
                                         EDI Document to import. Must not be provided if
                                         ``edi_url`` is given.
        :param str|unicode edi_url: URL where the EDI document (same format as ``edi``)
                                    may be retrieved, without authentication.
        """
        if edi_url:
            assert not edi_document, 'edi must not be provided if edi_url is given.'
            # NOTE(review): unauthenticated fetch of a caller-supplied URL
            edi_document = urllib2.urlopen(edi_url).read()
        assert edi_document, 'EDI Document is empty!'
        edi_documents = self.deserialize(edi_document)
        return self.load_edi(cr, uid, edi_documents, context=context)
class EDIMixin(object):
"""Mixin class for Model objects that want be exposed as EDI documents.
Classes that inherit from this mixin class should override the
``edi_import()`` and ``edi_export()`` methods to implement their
specific behavior, based on the primitives provided by this mixin."""
def _edi_requires_attributes(self, attributes, edi):
model_name = edi.get('__imported_model') or edi.get('__model') or self._name
for attribute in attributes:
assert edi.get(attribute),\
'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name)
# private method, not RPC-exposed as it creates ir.model.data entries as
# SUPERUSER based on its parameters
    def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
                         context=None):
        """Generate/Retrieve unique external ID for ``record``.
        Each EDI record and each relationship attribute in it is identified by a
        unique external ID, which includes the database's UUID, as a way to
        refer to any record within any Odoo instance, without conflict.

        For Odoo records that have an existing "External ID" (i.e. an entry in
        ir.model.data), the EDI unique identifier for this record will be made of
        "%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
        UUID MUST NOT contain a colon characters (this is guaranteed by the
        UUID algorithm).

        For records that have no existing ir.model.data entry, a new one will be
        created during the EDI export. It is recommended that the generated external ID
        contains a readable reference to the record model, plus a unique value that
        hides the database ID. If ``existing_id`` is provided (because it came from
        an import), it will be used instead of generating a new one.
        If ``existing_module`` is provided (because it came from
        an import), it will be used instead of using local values.

        :param browse_record record: any browse_record needing an EDI external ID
        :param string existing_id: optional existing external ID value, usually coming
                                   from a just-imported EDI record, to be used instead
                                   of generating a new one
        :param string existing_module: optional existing module name, usually in the
                                       format ``module:db_uuid`` and coming from a
                                       just-imported EDI record, to be used instead
                                       of local values
        :return: the full unique External ID to use for record
        """
        ir_model_data = self.pool.get('ir.model.data')
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        ext_id = record.get_external_id()[record.id]
        if not ext_id:
            # no ir.model.data entry yet: mint one (as superuser, see note above)
            ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
            # ID is unique cross-db thanks to db_uuid (already included in existing_module)
            module = existing_module or "%s:%s" % (record._original_module, db_uuid)
            _logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name,
                          module, ext_id, record)
            ir_model_data.create(cr, openerp.SUPERUSER_ID,
                                 {'name': ext_id,
                                  'model': record._name,
                                  'module': module,
                                  'res_id': record.id})
        else:
            module, ext_id = ext_id.split('.')
            if not ':' in module:
                # this record was not previously EDI-imported
                if not module == record._original_module:
                    # this could happen for data records defined in a module that depends
                    # on the module that owns the model, e.g. purchase defines
                    # product.pricelist records.
                    _logger.debug('Mismatching module: expected %s, got %s, for %s.',
                                  module, record._original_module, record)
                # ID is unique cross-db thanks to db_uuid
                module = "%s:%s" % (module, db_uuid)
        return '%s.%s' % (module, ext_id)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
the record with ID ``rec_id``.
:param int id: database ID of record to display
:return: action definition dict
"""
return {'type': 'ir.actions.act_window',
'view_mode': 'form,tree',
'view_type': 'form',
'res_model': self._name,
'res_id': id}
    def edi_metadata(self, cr, uid, records, context=None):
        """Return a list containing the boilerplate EDI structures for
        exporting ``records`` as EDI, including the metadata fields.

        The metadata fields always include::

            {
               '__model': 'some.model',                  # record model
               '__module': 'module',                     # required module
               '__id': 'module:db-uuid:model.id',        # unique global external ID for the record
               '__last_update': '2011-01-01 10:00:00',   # last update date in UTC!
               '__version': 1,                           # EDI spec version
               '__generator' : 'Odoo',                   # EDI generator
               '__generator_version' : [6,1,0],          # server version, to check compatibility.
               '__attachments': [...]                    # only when the record has attachments
            }

        :param list(browse_record) records: records to export
        :return: list of dicts containing boilerplate EDI metadata for each record,
                 at the corresponding index from ``records``.
        """
        ir_attachment = self.pool.get('ir.attachment')
        results = []
        for record in records:
            ext_id = self._edi_external_id(cr, uid, record, context=context)
            edi_dict = {
                '__id': ext_id,
                '__last_update': last_update_for(record),
                '__model' : record._name,
                '__module' : record._original_module,
                '__version': EDI_PROTOCOL_VERSION,
                '__generator': EDI_GENERATOR,
                '__generator_version': EDI_GENERATOR_VERSION,
            }
            # bundle any attachments linked to this record into the document
            attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
            if attachment_ids:
                attachments = []
                for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
                    attachments.append({
                            'name' : attachment.name,
                            'content': attachment.datas, # already base64 encoded!
                            'file_name': attachment.datas_fname,
                    })
                edi_dict.update(__attachments=attachments)
            results.append(edi_dict)
        return results
def edi_m2o(self, cr, uid, record, context=None):
"""Return a m2o EDI representation for the given record.
The EDI format for a many2one is::
['unique_external_id', 'Document Name']
"""
edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
relation_model = record._model
name = relation_model.name_get(cr, uid, [record.id], context=context)
name = name and name[0][1] or False
return [edi_ext_id, name]
def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
"""Return a list representing a O2M EDI relationship containing
all the given records, according to the given ``edi_struct``.
This is basically the same as exporting all the record using
:meth:`~.edi_export` with the given ``edi_struct``, and wrapping
the results in a list.
Example::
[ # O2M fields would be a list of dicts, with their
{ '__id': 'module:db-uuid.id', # own __id.
'__last_update': 'iso date', # update date
'name': 'some name',
#...
},
# ...
],
"""
result = []
for record in records:
result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
return result
def edi_m2m(self, cr, uid, records, context=None):
"""Return a list representing a M2M EDI relationship directed towards
all the given records.
This is basically the same as exporting all the record using
:meth:`~.edi_m2o` and wrapping the results in a list.
Example::
# M2M fields are exported as a list of pairs, like a list of M2O values
[
['module:db-uuid.id1', 'Task 01: bla bla'],
['module:db-uuid.id2', 'Task 02: bla bla']
]
"""
return [self.edi_m2o(cr, uid, r, context=context) for r in records]
    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Returns a list of dicts representing EDI documents containing the
        records, and matching the given ``edi_struct``, if provided.

        :param edi_struct: if provided, edi_struct should be a dictionary
                           with a skeleton of the fields to export.
                           Basic fields can have any key as value, but o2m
                           values should have a sample skeleton dict as value,
                           to act like a recursive export.
                           For example, for a res.partner record::

                               edi_struct: {
                                    'name': True,
                                    'company_id': True,
                                    'address': {
                                        'name': True,
                                        'street': True,
                                    }
                               }

                           Any field not specified in the edi_struct will not
                           be included in the exported data. Fields with no
                           value (False) will be omitted in the EDI struct.
                           If edi_struct is omitted, no fields will be exported
        """
        if edi_struct is None:
            edi_struct = {}
        fields_to_export = edi_struct.keys()
        results = []
        for record in records:
            # start from the metadata skeleton, then add the requested fields
            edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
            for field_name in fields_to_export:
                field = self._fields[field_name]
                value = getattr(record, field_name)
                # skip falsy values, but keep '' and 0 which are significant
                if not value and value not in ('', 0):
                    continue
                elif field.type == 'many2one':
                    value = self.edi_m2o(cr, uid, value, context=context)
                elif field.type == 'many2many':
                    value = self.edi_m2m(cr, uid, value, context=context)
                elif field.type == 'one2many':
                    # recursive export using the sub-skeleton for this field
                    value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field_name, {}), context=context)
                edi_dict[field_name] = value
            results.append(edi_dict)
        return results
def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
model = self.pool[model_name]
search_results = model.name_search(cr, uid, name, operator='=', context=context)
if len(search_results) == 1:
return model.browse(cr, uid, search_results[0][0], context=context)
return False
    def _edi_generate_report_attachment(self, cr, uid, record, context=None):
        """Utility method to generate the first PDF-type report declared for the
        current model with ``usage`` attribute set to ``default``.
        This must be called explicitly by models that need it, usually
        at the beginning of ``edi_export``, before the call to ``super()``."""
        ir_actions_report = self.pool.get('ir.actions.report.xml')
        matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
                                                              ('report_type','=','pdf'),
                                                              ('usage','=','default')])
        if matching_reports:
            report = ir_actions_report.browse(cr, uid, matching_reports[0])
            result, format = openerp.report.render_report(cr, uid, [record.id], report.report_name, {'model': self._name}, context=context)
            # `eval` here is openerp's safe_eval (aliased at module import);
            # report.attachment is an expression deciding whether the report
            # engine already saved the PDF as an attachment by itself.
            eval_context = {'time': time, 'object': record}
            if not report.attachment or not eval(report.attachment, eval_context):
                # no auto-saving of report as attachment, need to do it manually
                result = base64.b64encode(result)
                # build a filesystem-safe file name from the record's display name
                file_name = record.name_get()[0][1]
                file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
                file_name += ".pdf"
                self.pool.get('ir.attachment').create(cr, uid,
                                                      {
                                                       'name': file_name,
                                                       'datas': result,
                                                       'datas_fname': file_name,
                                                       'res_model': self._name,
                                                       'res_id': record.id,
                                                       'type': 'binary'
                                                      },
                                                      context=context)
def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
ir_attachment = self.pool.get('ir.attachment')
for attachment in edi.get('__attachments', []):
# check attachment data is non-empty and valid
file_data = None
try:
file_data = base64.b64decode(attachment.get('content'))
except TypeError:
pass
assert file_data, 'Incorrect/Missing attachment file content.'
assert attachment.get('name'), 'Incorrect/Missing attachment name.'
assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
ir_attachment.create(cr, uid, {'name': attachment['name'],
'datas_fname': attachment['file_name'],
'res_model': self._name,
'res_id': record_id,
# should be pure 7bit ASCII
'datas': str(attachment['content']),
}, context=context)
    def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
        """Returns browse_record representing object identified by the model and external_id,
        or None if no record was found with this external id.

        :param external_id: fully qualified external id, in the EDI form
                            ``module:db_uuid:identifier``.
        :param model: model name the record belongs to.
        """
        ir_model_data = self.pool.get('ir.model.data')
        # external_id is expected to have the form: ``module:db_uuid:model.random_name``
        ext_id_members = split_external_id(external_id)
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        module = ext_id_members['module']
        ext_id = ext_id_members['id']
        modules = []
        ext_db_uuid = ext_id_members['db_uuid']
        if ext_db_uuid:
            modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
        if ext_db_uuid is None or ext_db_uuid == db_uuid:
            # local records may also be registered without the db_uuid
            modules.append(module)
        data_ids = ir_model_data.search(cr, uid, [('model','=',model),
                                                  ('name','=',ext_id),
                                                  ('module','in',modules)])
        if data_ids:
            model = self.pool[model]
            data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
            if model.exists(cr, uid, [data.res_id]):
                return model.browse(cr, uid, data.res_id, context=context)
            # stale external-id, cleanup to allow re-import, as the corresponding record is gone
            # (unlink runs as uid 1 / superuser so the cleanup cannot be blocked by ACLs)
            ir_model_data.unlink(cr, 1, [data_ids[0]])
    def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
        """Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
        given model, returning the corresponding database ID:

        * First, checks if the ``external_id`` is already known, in which case the corresponding
          database ID is directly returned, without doing anything else;
        * If the ``external_id`` is unknown, attempts to locate an existing record
          with the same ``value`` via name_search(). If found, the given external_id will
          be assigned to this local record (in addition to any existing one)
        * If previous steps gave no result, create a new record with the given
          value in the target model, assign it the given external_id, and return
          the new database ID

        :param str value: display name of the record to import
        :param str external_id: fully-qualified external ID of the record
        :return: database id of newly-imported or pre-existing record
        """
        _logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
        target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
        need_new_ext_id = False
        if not target:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
                          self._name, external_id, value)
            target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
            need_new_ext_id = True
        if not target:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
                          self._name, external_id, value)
            # also need_new_ext_id here, but already been set above
            model = self.pool[model]
            res_id, _ = model.name_create(cr, uid, value, context=context)
            target = model.browse(cr, uid, res_id, context=context)
        else:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
                          self._name, external_id, value, target.id)
        if need_new_ext_id:
            ext_id_members = split_external_id(external_id)
            # module name is never used bare when creating ir.model.data entries, in order
            # to avoid being taken as part of the module's data, and cleanup up at next update
            module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
            # create a new ir.model.data entry for this value
            self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
        return target.id
def edi_import(self, cr, uid, edi, context=None):
    """Imports a dict representing an EDI document into the system.

    Scalar fields are written directly; many2one/many2many values (given as
    (external_id, name) pairs) are resolved via edi_import_relation();
    one2many lines are deferred until the parent record exists, then imported
    recursively with their parent link injected.

    :param dict edi: EDI document to import
    :return: the database ID of the imported (or pre-existing) record
    """
    # The document must be addressed to this model, either explicitly via
    # __import_model or implicitly via __model.
    assert self._name == edi.get('__import_model') or \
            ('__import_model' not in edi and self._name == edi.get('__model')), \
            "EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
            (edi.get('__model'), self._name)

    # First check whether the record is already known in the database, in
    # which case the import is skipped entirely (idempotent behavior).
    ext_id_members = split_external_id(edi['__id'])
    existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
    if existing:
        _logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
        return existing.id

    record_values = {}
    o2m_todo = {}  # o2m values are processed after their parent already exists
    for field_name, field_value in edi.iteritems():
        # skip metadata (keys starting with '__') and empty fields
        if field_name.startswith('__') or field_value is None or field_value is False:
            continue
        field = self._fields.get(field_name)
        if not field:
            _logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.', field_name, self._name)
            continue
        # skip function/related fields (non-stored values cannot be written)
        if not field.store:
            _logger.warning("Unexpected function field value is found in '%s' EDI document: '%s'." % (self._name, field_name))
            continue
        relation_model = field.comodel_name
        if field.type == 'many2one':
            # field_value is an (external_id, name) pair
            record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
                                                                 field_value[1], field_value[0],
                                                                 context=context)
        elif field.type == 'many2many':
            record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
                                                                  m2m_value[0], context=context)
                                         for m2m_value in field_value]
        elif field.type == 'one2many':
            # must wait until parent record is imported, as the parent relationship
            # is often required in o2m child records
            o2m_todo[field_name] = field_value
        else:
            record_values[field_name] = field_value

    # Create or update the record, keyed by its (namespaced) external ID.
    module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
    record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
                                                       xml_id=ext_id_members['id'], context=context)
    record_display, = self.name_get(cr, uid, [record_id], context=context)

    # process o2m values, connecting them to their parent on-the-fly
    for o2m_field, o2m_value in o2m_todo.iteritems():
        field = self._fields[o2m_field]
        dest_model = self.pool[field.comodel_name]
        dest_field = field.inverse_name
        for o2m_line in o2m_value:
            # link to parent record: expects an (ext_id, name) pair
            o2m_line[dest_field] = (ext_id_members['full'], record_display[1])
            dest_model.edi_import(cr, uid, o2m_line, context=context)

    # process the attachments, if any
    self._edi_import_attachments(cr, uid, record_id, edi, context=context)

    return record_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Salat-Cx65/python-for-android | python-build/python-libs/gdata/build/lib/gdata/Crypto/PublicKey/qNEW.py | 228 | 5545 | #
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
    """Generic exception raised by the qNEW module."""
    pass

HASHBITS = 160   # Size of SHA digests, in bits
# NOTE: Python 2 only code (long literals such as 2L, classic integer division).
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)

    Generate a qNEW key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=qNEWobj()

    # Generate prime numbers p and q.  q is a 160-bit prime
    # number.  p is another prime number (the modulus) whose bit
    # size is chosen by the caller, and is generated so that p-1
    # is a multiple of q.
    #
    # Note that only a single seed is used to
    # generate p and q; if someone generates a key for you, you can
    # use the seed to duplicate the key generation.  This can
    # protect you from someone generating values of p,q that have
    # some special form that's easy to break.
    if progress_func:
        progress_func('p,q\n')
    while (1):
        obj.q = getPrime(160, randfunc)
        # assert pow(2, 159L)<obj.q<pow(2, 160L)
        # The seed is kept so key generation can be reproduced/audited.
        obj.seed = S = long_to_bytes(obj.q)
        C, N, V = 0, 2, {}
        # Compute b and n such that bits-1 = b + n*HASHBITS
        n= (bits-1) / HASHBITS
        b= (bits-1) % HASHBITS ; powb=2L << b
        powL1=pow(long(2), bits-1)
        while C<4096:
            # The V array will contain (bits-1) bits of random
            # data, that are assembled to produce a candidate
            # value for p.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            p = V[n] % powb
            for k in range(n-1, -1, -1):
                p= (p << long(HASHBITS) )+V[k]
            p = p+powL1          # Ensure the high bit is set

            # Ensure that p-1 is a multiple of q
            p = p - (p % (2*obj.q)-1)

            # If p is still the right size, and it's prime, we're done!
            if powL1<=p and isPrime(p):
                break

            # Otherwise, increment the counter and try again
            C, N = C+1, N+n+1
        if C<4096:
            break   # Ended early, so exit the while loop
        if progress_func:
            progress_func('4096 values of p tried\n')

    obj.p = p
    power=(p-1)/obj.q

    # Next parameter: g = h**((p-1)/q) mod p, such that h is any
    # number <p-1, and g>1.  g is kept; h can be discarded.
    if progress_func:
        progress_func('h,g\n')
    while (1):
        # NOTE(review): randfunc is called with `bits` (not bits/8) here,
        # i.e. it requests `bits` *bytes* of randomness — matches upstream
        # PyCrypto, but worth confirming if randfunc is byte-counted.
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g

    # x is the private key information, and is
    # just a random number between 0 and q.
    # y=g**x mod p, and is part of the public information.
    if progress_func:
        progress_func('x,y\n')
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y=x, pow(g, x, p)

    return obj
# Construct a qNEW object
# Construct a qNEW object from previously generated key material.
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)
    Construct a qNEW object from a 4- or 5-tuple of numbers.
    """
    key = qNEWobj()
    if len(tuple) not in (4, 5):
        raise error('argument for construct() wrong length')
    # Pair each supplied number with the matching key component name:
    # 'p', 'q', 'g', 'y' and, for private keys, 'x'.
    for name, value in zip(key.keydata, tuple):
        setattr(key, name, value)
    return key
# NOTE: Python 2 only code (`raise cls, msg` syntax, long literals).
class qNEWobj(pubkey.pubkey):
    """Key object for the qNEW discrete-log signature scheme."""

    # Key components, in the order expected by construct(); 'x' is the
    # private component and is absent from public-only keys.
    keydata=['p', 'q', 'g', 'y', 'x']

    def _sign(self, M, K=''):
        # K is the per-signature secret nonce; it must lie below q.
        if (self.q<=K):
            raise error, 'K is greater than q'
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M>=pow(2,161L):
            raise error, 'Illegal value of M (too large)'
        r=pow(self.g, K, self.p) % self.q
        s=(K- (r*M*self.x % self.q)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        # Returns 1 on a valid signature, 0 otherwise (never raises for a
        # merely-invalid signature, only for an illegal message value).
        r, s = sig
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M<=0 or M>=pow(2,161L):
            return 0
        v1 = pow(self.g, s, self.p)
        v2 = pow(self.y, M*r, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0

    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return 160

    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        return hasattr(self, 'x')

    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1

    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0

    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.q, self.g, self.y))

# Module-level alias expected by the Crypto.PublicKey API; deliberately
# shadows the `object` builtin at module scope.
object = qNEWobj
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/Django/tests/regressiontests/generic_views/list.py | 41 | 9756 | from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.views.generic.base import View
from django.utils.encoding import force_str
from .models import Author, Artist
class ListViewTests(TestCase):
    """Integration tests for the generic ListView: pagination, context
    variable naming, template selection and empty-list handling, exercised
    through the test project's URLconf."""
    fixtures = ['generic-views-test-data.json']
    urls = 'regressiontests.generic_views.urls'

    def test_items(self):
        # A plain list of dicts (not a queryset) is exposed as object_list.
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')

    def test_queryset(self):
        # Unpaginated queryset view: paginator/page_obj are None.
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertTrue(isinstance(res.context['view'], View))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_queryset(self):
        # 100 authors at 30 per page -> 4 pages.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')

    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view: the
        # paginator machinery runs (single page), but is_paginated is False.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_get_page_by_query_string(self):
        # ?page=2 selects the second page.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_get_last_page_by_query_string(self):
        # The special value ?page=last jumps to the final (short) page.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)

    def test_paginated_get_page_by_urlvar(self):
        # The page number may come from the URL pattern instead of GET.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)

    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)

    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)

    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_custom_page_kwarg(self):
        # The GET parameter name for the page is configurable (page_kwarg).
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)

    def test_verbose_name(self):
        # Context variable name is derived from the model's verbose name.
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_allow_empty_false(self):
        # With allow_empty=False an empty list becomes a 404.
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)

    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')

    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')

    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_duplicate_context_object_name(self):
        # When context_object_name collides with the default, neither alias
        # is exposed, only object_list.
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_missing_items(self):
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')

    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')

    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")

    def _make_authors(self, n):
        # Helper: replace all fixture authors with n deterministic ones.
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| agpl-3.0 |
Jeebeevee/DouweBot_JJ15 | plugins_org/oblique.py | 23 | 1509 | import time
from util import hook, http
commands_modtime = 0
commands = {}
def update_commands(force=False):
    """Refresh the cached oblique command table from the project wiki.

    The table is re-fetched at most once per hour, unless *force* is set.
    Updates the module-level ``commands`` dict and its timestamp.
    """
    global commands_modtime, commands

    stale = time.time() - commands_modtime > 60 * 60  # update hourly
    if not (force or stale):
        return

    page = http.get_html('http://wiki.github.com/nslater/oblique/')
    commands = {}
    for entry in page.xpath('//li/text()'):
        entry = entry.strip()
        # usable entries look like "<name> <url>"; skip blanks and
        # single-word lines
        if not entry or " " not in entry:
            continue
        name, url = entry.split(None, 1)
        commands[name] = url
    commands_modtime = time.time()
@hook.command('o')
@hook.command
def oblique(inp, nick='', chan=''):
    # FIX: the two adjacent string literals were separate *statements*, so
    # only the first line became the docstring and the bot's help text was
    # truncated. Parenthesizing makes them one concatenated docstring.
    ('.o/.oblique <command> <args> -- runs <command> using oblique web'
     ' services. see http://wiki.github.com/nslater/oblique/')
    update_commands()

    # split "<command> [<args>]"
    if ' ' in inp:
        command, args = inp.split(None, 1)
    else:
        command = inp
        args = ''
    command = command.lower()

    if command == 'refresh':
        update_commands(True)
        return '%d commands loaded.' % len(commands)

    if command not in commands:
        return 'no such service'

    # Substitute the caller/channel/arguments into the service URL template.
    url = commands[command]
    url = url.replace('${nick}', nick)
    url = url.replace('${sender}', chan)
    url = url.replace('${args}', http.quote(args.encode('utf8')))
    try:
        return http.get(url)
    except http.HTTPError as e:  # `as` form: identical behavior, Py2.6+/Py3
        return "http error %d" % e.code
| unlicense |
johnw424/airflow | airflow/example_dags/example_http_operator.py | 31 | 2157 | """
### Example HTTP operator and sensor
"""
from airflow import DAG
from airflow.operators import SimpleHttpOperator, HttpSensor
from datetime import datetime, timedelta
import json
# Start date: midnight seven days ago, so the scheduler backfills a week
# of daily runs.
seven_days_ago = datetime.combine(datetime.today() - timedelta(7),
                                  datetime.min.time())

# Defaults applied to every task in the DAG unless overridden per task.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': seven_days_ago,
    'email': ['airflow@airflow.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}

dag = DAG('example_http_operator', default_args=default_args)
dag.doc_md = __doc__  # show the module docstring in the Airflow UI

# t1, t2 and t3 are examples of tasks created by instantiating operators.

# POST a JSON payload; response_check fails the task unless the response
# body is an empty JSON list.
t1 = SimpleHttpOperator(
    task_id='post_op',
    endpoint='api/v1.0/nodes',
    data=json.dumps({"priority": 5}),
    headers={"Content-Type": "application/json"},
    response_check=lambda response: True if len(response.json()) == 0 else False,
    dag=dag)

# POST with form-encoded body.
t5 = SimpleHttpOperator(
    task_id='post_op_formenc',
    endpoint='nodes/url',
    data="name=Joe",
    headers={"Content-Type": "application/x-www-form-urlencoded"},
    dag=dag)

# GET with query parameters passed via `data`.
t2 = SimpleHttpOperator(
    task_id='get_op',
    method='GET',
    endpoint='api/v1.0/nodes',
    data={"param1": "value1", "param2": "value2"},
    headers={},
    dag=dag)

t3 = SimpleHttpOperator(
    task_id='put_op',
    method='PUT',
    endpoint='api/v1.0/nodes',
    data=json.dumps({"priority": 5}),
    headers={"Content-Type": "application/json"},
    dag=dag)

t4 = SimpleHttpOperator(
    task_id='del_op',
    method='DELETE',
    endpoint='api/v1.0/nodes',
    data="some=data",
    headers={"Content-Type": "application/x-www-form-urlencoded"},
    dag=dag)

# Sensor gating the whole chain: polls every 5s until the response body
# contains the string "collation".
sensor = HttpSensor(
    task_id='http_sensor_check',
    conn_id='http_default',
    endpoint='api/v1.0/apps',
    params={},
    headers={"Content-Type": "application/json"},
    response_check=lambda response: True if "collation" in response.content else False,
    poke_interval=5,
    dag=dag)

# Linear dependency chain: sensor -> t1 -> t2 -> t3 -> t4 -> t5.
t1.set_upstream(sensor)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
t5.set_upstream(t4)
| apache-2.0 |
RandallDW/Aruba_plugin | plugins/org.python.pydev/pysrc/pydevd_attach_to_process/winappdbg/textio.py | 102 | 62691 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Functions for text input, logging or text output.
@group Helpers:
HexDump,
HexInput,
HexOutput,
Color,
Table,
Logger
DebugLog
CrashDump
"""
__revision__ = "$Id$"
__all__ = [
'HexDump',
'HexInput',
'HexOutput',
'Color',
'Table',
'CrashDump',
'DebugLog',
'Logger',
]
import sys
from winappdbg import win32
from winappdbg import compat
from winappdbg.util import StaticClass
import re
import time
import struct
import traceback
#------------------------------------------------------------------------------
class HexInput (StaticClass):
    """
    Static functions for user input parsing.
    The counterparts for each method are in the L{HexOutput} class.
    """

    @staticmethod
    def integer(token):
        """
        Convert numeric strings into integers.

        Accepts decimal, and "0x"/"0b"/"0o"-prefixed (or bare) hexadecimal,
        binary and octal forms, with an optional leading minus sign.

        @type  token: str
        @param token: String to parse.

        @rtype:  int
        @return: Parsed integer value.
        """
        token = token.strip()
        neg = False
        if token.startswith(compat.b('-')):
            token = token[1:]
            neg = True
        if token.startswith(compat.b('0x')):
            result = int(token, 16)     # hexadecimal
        elif token.startswith(compat.b('0b')):
            result = int(token[2:], 2)  # binary
        elif token.startswith(compat.b('0o')):
            result = int(token, 8)      # octal
        else:
            try:
                result = int(token)     # decimal
            except ValueError:
                result = int(token, 16) # hexadecimal (no "0x" prefix)
        if neg:
            result = -result
        return result

    @staticmethod
    def address(token):
        """
        Convert numeric strings into memory addresses.

        @type  token: str
        @param token: String to parse (always interpreted as hexadecimal).

        @rtype:  int
        @return: Parsed integer value.
        """
        return int(token, 16)

    @staticmethod
    def hexadecimal(token):
        """
        Convert a strip of hexadecimal numbers into binary data.

        @type  token: str
        @param token: String to parse.

        @rtype:  str
        @return: Parsed string value.

        @raise ValueError: The string has an odd number of hex digits.
        """
        token = ''.join([ c for c in token if c.isalnum() ])
        if len(token) % 2 != 0:
            raise ValueError("Missing characters in hex data")
        data = ''
        for i in compat.xrange(0, len(token), 2):
            x = token[i:i+2]
            d = int(x, 16)
            s = struct.pack('<B', d)
            data += s
        return data

    @staticmethod
    def pattern(token):
        """
        Convert an hexadecimal search pattern into a POSIX regular expression.

        For example, the following pattern::

            "B8 0? ?0 ?? ??"

        Would match the following data::

            "B8 0D F0 AD BA"    # mov eax, 0xBAADF00D

        @type  token: str
        @param token: String to parse.

        @rtype:  str
        @return: Parsed string value.

        @raise ValueError: The pattern has an odd number of digit positions.
        """
        token = ''.join([ c for c in token if c == '?' or c.isalnum() ])
        if len(token) % 2 != 0:
            raise ValueError("Missing characters in hex data")
        regexp = ''
        for i in compat.xrange(0, len(token), 2):
            x = token[i:i+2]
            if x == '??':
                regexp += '.'           # any byte
            elif x[0] == '?':
                # low nibble fixed, high nibble wild: character class of
                # the 16 possible byte values
                f = '\\x%%.1x%s' % x[1]
                x = ''.join([ f % c for c in compat.xrange(0, 0x10) ])
                regexp = '%s[%s]' % (regexp, x)
            elif x[1] == '?':
                # high nibble fixed, low nibble wild
                f = '\\x%s%%.1x' % x[0]
                x = ''.join([ f % c for c in compat.xrange(0, 0x10) ])
                regexp = '%s[%s]' % (regexp, x)
            else:
                regexp = '%s\\x%s' % (regexp, x)
        return regexp

    @staticmethod
    def is_pattern(token):
        """
        Determine if the given argument is a valid hexadecimal pattern to be
        used with L{pattern}.

        @type  token: str
        @param token: String to parse.

        @rtype:  bool
        @return:
            C{True} if it's a valid hexadecimal pattern, C{False} otherwise.
        """
        # FIX: coerce the match object to the documented bool return type
        # instead of leaking an SRE match / None to callers.
        return bool(re.match(r"^(?:[\?A-Fa-f0-9][\?A-Fa-f0-9]\s*)+$", token))

    @classmethod
    def integer_list_file(cls, filename):
        """
        Read a list of integers from a file.

        The file format is:

         - # anywhere in the line begins a comment
         - leading and trailing spaces are ignored
         - empty lines are ignored
         - integers can be specified as:
            - decimal numbers ("100" is 100)
            - hexadecimal numbers ("0x100" is 256)
            - binary numbers ("0b100" is 4)
            - octal numbers ("0100" is 64)

        @type  filename: str
        @param filename: Name of the file to read.

        @rtype:  list( int )
        @return: List of integers read from the file.

        @raise ValueError: A line could not be parsed as an integer; the
            error message includes the file name and line number.
        """
        count  = 0
        result = list()
        # FIX: use a context manager so the file handle is always closed,
        # even when a parse error is raised mid-file (was leaked before).
        with open(filename, 'r') as fd:
            for line in fd:
                count = count + 1
                if '#' in line:
                    line = line[ : line.find('#') ]
                line = line.strip()
                if line:
                    try:
                        value = cls.integer(line)
                    except ValueError:
                        e = sys.exc_info()[1]
                        msg = "Error in line %d of %s: %s"
                        msg = msg % (count, filename, str(e))
                        raise ValueError(msg)
                    result.append(value)
        return result

    @classmethod
    def string_list_file(cls, filename):
        """
        Read a list of string values from a file.

        The file format is:

         - # anywhere in the line begins a comment
         - leading and trailing spaces are ignored
         - empty lines are ignored
         - strings cannot span over a single line

        @type  filename: str
        @param filename: Name of the file to read.

        @rtype:  list
        @return: List of strings read from the file.
        """
        result = list()
        # FIX: context manager guarantees the handle is closed (was leaked).
        with open(filename, 'r') as fd:
            for line in fd:
                if '#' in line:
                    line = line[ : line.find('#') ]
                line = line.strip()
                if line:
                    result.append(line)
        return result

    @classmethod
    def mixed_list_file(cls, filename):
        """
        Read a list of mixed values from a file.

        The file format is:

         - # anywhere in the line begins a comment
         - leading and trailing spaces are ignored
         - empty lines are ignored
         - strings cannot span over a single line
         - integers can be specified as:
            - decimal numbers ("100" is 100)
            - hexadecimal numbers ("0x100" is 256)
            - binary numbers ("0b100" is 4)
            - octal numbers ("0100" is 64)

        @type  filename: str
        @param filename: Name of the file to read.

        @rtype:  list
        @return: List of integers and strings read from the file.
        """
        result = list()
        # FIX: context manager guarantees the handle is closed (was leaked).
        with open(filename, 'r') as fd:
            for line in fd:
                if '#' in line:
                    line = line[ : line.find('#') ]
                line = line.strip()
                if line:
                    try:
                        value = cls.integer(line)
                    except ValueError:
                        # Not an integer: keep the raw string value.
                        value = line
                    result.append(value)
        return result
#------------------------------------------------------------------------------
# NOTE: Python 2 only code (`print >> fd` statements).
class HexOutput (StaticClass):
    """
    Static functions for user output parsing.
    The counterparts for each method are in the L{HexInput} class.

    @type integer_size: int
    @cvar integer_size: Default size in characters of an outputted integer.
        This value is platform dependent.

    @type address_size: int
    @cvar address_size: Default Number of bits of the target architecture.
        This value is platform dependent.
    """

    # "0x" prefix plus two hex digits per byte.
    integer_size = (win32.SIZEOF(win32.DWORD)  * 2) + 2
    address_size = (win32.SIZEOF(win32.SIZE_T) * 2) + 2

    @classmethod
    def integer(cls, integer, bits = None):
        """
        @type  integer: int
        @param integer: Integer.

        @type  bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexOutput.integer_size}

        @rtype:  str
        @return: Text output ("0x..." form, sign-prefixed for negatives).
        """
        if bits is None:
            integer_size = cls.integer_size
        else:
            integer_size = (bits / 4) + 2
        if integer >= 0:
            return ('0x%%.%dx' % (integer_size - 2)) % integer
        return ('-0x%%.%dx' % (integer_size - 2)) % -integer

    @classmethod
    def address(cls, address, bits = None):
        """
        @type  address: int
        @param address: Memory address.

        @type  bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexOutput.address_size}

        @rtype:  str
        @return: Text output (zero-padded "0x..." form).
        """
        if bits is None:
            address_size = cls.address_size
            bits = win32.bits
        else:
            address_size = (bits / 4) + 2
        if address < 0:
            # Reinterpret negative values as their two's-complement
            # unsigned equivalent for the given word size.
            address = ((2 ** bits) - 1) ^ ~address
        return ('0x%%.%dx' % (address_size - 2)) % address

    @staticmethod
    def hexadecimal(data):
        """
        Convert binary data to a string of hexadecimal numbers.

        @type  data: str
        @param data: Binary data.

        @rtype:  str
        @return: Hexadecimal representation.
        """
        return HexDump.hexadecimal(data, separator = '')

    @classmethod
    def integer_list_file(cls, filename, values, bits = None):
        """
        Write a list of integers to a file.
        If a file of the same name exists, it's contents are replaced.

        See L{HexInput.integer_list_file} for a description of the file format.

        @type  filename: str
        @param filename: Name of the file to write.

        @type  values: list( int )
        @param values: List of integers to write to the file.

        @type  bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexOutput.integer_size}
        """
        # FIX: use a context manager so the file is closed even if
        # formatting a value raises (fd.close() was skipped on error).
        with open(filename, 'w') as fd:
            for integer in values:
                print >> fd, cls.integer(integer, bits)

    @classmethod
    def string_list_file(cls, filename, values):
        """
        Write a list of strings to a file.
        If a file of the same name exists, it's contents are replaced.

        See L{HexInput.string_list_file} for a description of the file format.

        @type  filename: str
        @param filename: Name of the file to write.

        @type  values: list( int )
        @param values: List of strings to write to the file.
        """
        # FIX: context manager guarantees the file is closed on all paths.
        with open(filename, 'w') as fd:
            for string in values:
                print >> fd, string

    @classmethod
    def mixed_list_file(cls, filename, values, bits):
        """
        Write a list of mixed values to a file.
        If a file of the same name exists, it's contents are replaced.

        See L{HexInput.mixed_list_file} for a description of the file format.

        @type  filename: str
        @param filename: Name of the file to write.

        @type  values: list( int )
        @param values: List of mixed values to write to the file.

        @type  bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexOutput.integer_size}
        """
        # FIX: context manager guarantees the file is closed on all paths.
        with open(filename, 'w') as fd:
            for original in values:
                try:
                    parsed = cls.integer(original, bits)
                except TypeError:
                    # Non-numeric value: store its repr as a string line.
                    parsed = repr(original)
                print >> fd, parsed
#------------------------------------------------------------------------------
class HexDump (StaticClass):
    """
    Static functions for hexadecimal dumps.
    @type integer_size: int
    @cvar integer_size: Size in characters of an outputted integer.
        This value is platform dependent.
    @type address_size: int
    @cvar address_size: Size in characters of an outputted address.
        This value is platform dependent.
    """
    # Two hexadecimal digits per byte.
    integer_size = (win32.SIZEOF(win32.DWORD) * 2)
    address_size = (win32.SIZEOF(win32.SIZE_T) * 2)
    @classmethod
    def integer(cls, integer, bits = None):
        """
        @type integer: int
        @param integer: Integer.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.integer_size}
        @rtype: str
        @return: Text output.
        """
        if bits is None:
            integer_size = cls.integer_size
        else:
            # Floor division keeps the width an int under Python 3,
            # where a plain / would produce a float.
            integer_size = bits // 4
        return ('%%.%dX' % integer_size) % integer
    @classmethod
    def address(cls, address, bits = None):
        """
        @type address: int
        @param address: Memory address.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text output.
        """
        if bits is None:
            address_size = cls.address_size
            bits = win32.bits
        else:
            address_size = bits // 4
        if address < 0:
            # Convert to the unsigned two's complement representation
            # for the given word size.
            address = ((2 ** bits) - 1) ^ ~address
        return ('%%.%dX' % address_size) % address
    @staticmethod
    def printable(data):
        """
        Replace unprintable characters with dots.
        @type data: str
        @param data: Binary data.
        @rtype: str
        @return: Printable text.
        """
        # Join once instead of concatenating in a loop (avoids O(n^2) growth).
        return ''.join(
            [ c if 32 < ord(c) < 128 else '.' for c in data ] )
    @staticmethod
    def hexadecimal(data, separator = ''):
        """
        Convert binary data to a string of hexadecimal numbers.
        @type data: str
        @param data: Binary data.
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each character.
        @rtype: str
        @return: Hexadecimal representation.
        """
        return separator.join( [ '%.2x' % ord(c) for c in data ] )
    @staticmethod
    def hexa_word(data, separator = ' '):
        """
        Convert binary data to a string of hexadecimal WORDs.
        @type data: str
        @param data: Binary data.
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each WORD.
        @rtype: str
        @return: Hexadecimal representation.
        """
        # Pad to an even length so struct.unpack always gets 2 bytes.
        if len(data) & 1 != 0:
            data += '\0'
        return separator.join( [ '%.4x' % struct.unpack('<H', data[i:i+2])[0] \
                                 for i in compat.xrange(0, len(data), 2) ] )
    @staticmethod
    def hexa_dword(data, separator = ' '):
        """
        Convert binary data to a string of hexadecimal DWORDs.
        @type data: str
        @param data: Binary data.
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each DWORD.
        @rtype: str
        @return: Hexadecimal representation.
        """
        # Pad to a multiple of 4 bytes so struct.unpack never underflows.
        if len(data) & 3 != 0:
            data += '\0' * (4 - (len(data) & 3))
        return separator.join( [ '%.8x' % struct.unpack('<L', data[i:i+4])[0] \
                                 for i in compat.xrange(0, len(data), 4) ] )
    @staticmethod
    def hexa_qword(data, separator = ' '):
        """
        Convert binary data to a string of hexadecimal QWORDs.
        @type data: str
        @param data: Binary data.
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each QWORD.
        @rtype: str
        @return: Hexadecimal representation.
        """
        # Pad to a multiple of 8 bytes so struct.unpack never underflows.
        if len(data) & 7 != 0:
            data += '\0' * (8 - (len(data) & 7))
        return separator.join( [ '%.16x' % struct.unpack('<Q', data[i:i+8])[0]\
                                 for i in compat.xrange(0, len(data), 8) ] )
    @classmethod
    def hexline(cls, data, separator = ' ', width = None):
        """
        Dump a line of hexadecimal numbers from binary data.
        @type data: str
        @param data: Binary data.
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each character.
        @type width: int
        @param width:
            (Optional) Maximum number of characters to convert per text line.
            This value is also used for padding.
        @rtype: str
        @return: Multiline output text.
        """
        if width is None:
            fmt = '%s %s'
        else:
            # Pad the hex dump column so the printable column lines up
            # even on short (final) lines.
            fmt = '%%-%ds %%-%ds' % ((len(separator)+2)*width-1, width)
        return fmt % (cls.hexadecimal(data, separator), cls.printable(data))
    @classmethod
    def hexblock(cls, data, address = None,
                 bits = None,
                 separator = ' ',
                 width = 8):
        """
        Dump a block of hexadecimal numbers from binary data.
        Also show a printable text version of the data.
        @type data: str
        @param data: Binary data.
        @type address: str
        @param address: Memory address where the data was read from.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each character.
        @type width: int
        @param width:
            (Optional) Maximum number of characters to convert per text line.
        @rtype: str
        @return: Multiline output text.
        """
        return cls.hexblock_cb(cls.hexline, data, address, bits, width,
                               cb_kwargs = {'width' : width, 'separator' : separator})
    @classmethod
    def hexblock_cb(cls, callback, data, address = None,
                    bits = None,
                    width = 16,
                    cb_args = (),
                    cb_kwargs = None):
        """
        Dump a block of binary data using a callback function to convert each
        line of text.
        @type callback: function
        @param callback: Callback function to convert each line of data.
        @type data: str
        @param data: Binary data.
        @type address: str
        @param address:
            (Optional) Memory address where the data was read from.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @type cb_args: str
        @param cb_args:
            (Optional) Arguments to pass to the callback function.
        @type cb_kwargs: str
        @param cb_kwargs:
            (Optional) Keyword arguments to pass to the callback function.
        @type width: int
        @param width:
            (Optional) Maximum number of bytes to convert per text line.
        @rtype: str
        @return: Multiline output text.
        """
        # A mutable default argument ({}) is shared across calls in Python;
        # use None as the default and normalize it here instead.
        if cb_kwargs is None:
            cb_kwargs = {}
        # Collect the lines in a list and join once, instead of repeatedly
        # re-building the whole result string (which is quadratic).
        lines = []
        if address is None:
            for i in compat.xrange(0, len(data), width):
                lines.append(
                    callback(data[i:i+width], *cb_args, **cb_kwargs) )
        else:
            for i in compat.xrange(0, len(data), width):
                lines.append( '%s: %s' % (
                    cls.address(address, bits),
                    callback(data[i:i+width], *cb_args, **cb_kwargs) ) )
                address += width
        if not lines:
            return ''
        return '\n'.join(lines) + '\n'
    @classmethod
    def hexblock_byte(cls, data, address = None,
                      bits = None,
                      separator = ' ',
                      width = 16):
        """
        Dump a block of hexadecimal BYTEs from binary data.
        @type data: str
        @param data: Binary data.
        @type address: str
        @param address: Memory address where the data was read from.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each BYTE.
        @type width: int
        @param width:
            (Optional) Maximum number of BYTEs to convert per text line.
        @rtype: str
        @return: Multiline output text.
        """
        return cls.hexblock_cb(cls.hexadecimal, data,
                               address, bits, width,
                               cb_kwargs = {'separator': separator})
    @classmethod
    def hexblock_word(cls, data, address = None,
                      bits = None,
                      separator = ' ',
                      width = 8):
        """
        Dump a block of hexadecimal WORDs from binary data.
        @type data: str
        @param data: Binary data.
        @type address: str
        @param address: Memory address where the data was read from.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each WORD.
        @type width: int
        @param width:
            (Optional) Maximum number of WORDs to convert per text line.
        @rtype: str
        @return: Multiline output text.
        """
        # width is in WORDs; hexblock_cb expects bytes, hence * 2.
        return cls.hexblock_cb(cls.hexa_word, data,
                               address, bits, width * 2,
                               cb_kwargs = {'separator': separator})
    @classmethod
    def hexblock_dword(cls, data, address = None,
                       bits = None,
                       separator = ' ',
                       width = 4):
        """
        Dump a block of hexadecimal DWORDs from binary data.
        @type data: str
        @param data: Binary data.
        @type address: str
        @param address: Memory address where the data was read from.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each DWORD.
        @type width: int
        @param width:
            (Optional) Maximum number of DWORDs to convert per text line.
        @rtype: str
        @return: Multiline output text.
        """
        # width is in DWORDs; hexblock_cb expects bytes, hence * 4.
        return cls.hexblock_cb(cls.hexa_dword, data,
                               address, bits, width * 4,
                               cb_kwargs = {'separator': separator})
    @classmethod
    def hexblock_qword(cls, data, address = None,
                       bits = None,
                       separator = ' ',
                       width = 2):
        """
        Dump a block of hexadecimal QWORDs from binary data.
        @type data: str
        @param data: Binary data.
        @type address: str
        @param address: Memory address where the data was read from.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each QWORD.
        @type width: int
        @param width:
            (Optional) Maximum number of QWORDs to convert per text line.
        @rtype: str
        @return: Multiline output text.
        """
        # width is in QWORDs; hexblock_cb expects bytes, hence * 8.
        return cls.hexblock_cb(cls.hexa_qword, data,
                               address, bits, width * 8,
                               cb_kwargs = {'separator': separator})
#------------------------------------------------------------------------------
# TODO: implement an ANSI parser to simplify using colors
class Color (object):
    """
    Colored console output.
    """
    @staticmethod
    def _get_text_attributes():
        return win32.GetConsoleScreenBufferInfo().wAttributes
    @staticmethod
    def _set_text_attributes(wAttributes):
        win32.SetConsoleTextAttribute(wAttributes = wAttributes)
    @classmethod
    def _replace_bits(cls, mask, flags):
        # Single read-modify-write of the console attributes: clear every
        # bit in `mask`, then switch on every bit in `flags`.
        attributes = cls._get_text_attributes()
        attributes &= ~mask
        attributes |= flags
        cls._set_text_attributes(attributes)
    @classmethod
    def _enable_bits(cls, flags):
        # Switch on the given attribute bits, leaving the rest untouched.
        cls._set_text_attributes( cls._get_text_attributes() | flags )
    @classmethod
    def _disable_bits(cls, flags):
        # Switch off the given attribute bits, leaving the rest untouched.
        cls._set_text_attributes( cls._get_text_attributes() & ~flags )
    #--------------------------------------------------------------------------
    @classmethod
    def can_use_colors(cls):
        """
        Determine if we can use colors.
        Colored output only works when the output is a real console, and fails
        when redirected to a file or pipe. Call this method before issuing a
        call to any other method of this class to make sure it's actually
        possible to use colors.
        @rtype: bool
        @return: C{True} if it's possible to output text with color,
            C{False} otherwise.
        """
        try:
            cls._get_text_attributes()
        except Exception:
            return False
        return True
    @classmethod
    def reset(cls):
        "Reset the colors to the default values."
        cls._set_text_attributes(win32.FOREGROUND_GREY)
    #--------------------------------------------------------------------------
    @classmethod
    def default(cls):
        "Make the current foreground color the default."
        attributes = cls._get_text_attributes()
        attributes &= ~win32.FOREGROUND_MASK
        attributes |= win32.FOREGROUND_GREY
        attributes &= ~win32.FOREGROUND_INTENSITY
        cls._set_text_attributes(attributes)
    @classmethod
    def light(cls):
        "Make the current foreground color light."
        cls._enable_bits(win32.FOREGROUND_INTENSITY)
    @classmethod
    def dark(cls):
        "Make the current foreground color dark."
        cls._disable_bits(win32.FOREGROUND_INTENSITY)
    @classmethod
    def black(cls):
        "Make the text foreground color black."
        # Black is the absence of all foreground color bits.
        cls._replace_bits(win32.FOREGROUND_MASK, 0)
    @classmethod
    def white(cls):
        "Make the text foreground color white."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_GREY)
    @classmethod
    def red(cls):
        "Make the text foreground color red."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_RED)
    @classmethod
    def green(cls):
        "Make the text foreground color green."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_GREEN)
    @classmethod
    def blue(cls):
        "Make the text foreground color blue."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_BLUE)
    @classmethod
    def cyan(cls):
        "Make the text foreground color cyan."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_CYAN)
    @classmethod
    def magenta(cls):
        "Make the text foreground color magenta."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_MAGENTA)
    @classmethod
    def yellow(cls):
        "Make the text foreground color yellow."
        cls._replace_bits(win32.FOREGROUND_MASK, win32.FOREGROUND_YELLOW)
    #--------------------------------------------------------------------------
    @classmethod
    def bk_default(cls):
        "Make the current background color the default."
        attributes = cls._get_text_attributes()
        attributes &= ~win32.BACKGROUND_MASK
        attributes &= ~win32.BACKGROUND_INTENSITY
        cls._set_text_attributes(attributes)
    @classmethod
    def bk_light(cls):
        "Make the current background color light."
        cls._enable_bits(win32.BACKGROUND_INTENSITY)
    @classmethod
    def bk_dark(cls):
        "Make the current background color dark."
        cls._disable_bits(win32.BACKGROUND_INTENSITY)
    @classmethod
    def bk_black(cls):
        "Make the text background color black."
        # Black is the absence of all background color bits.
        cls._replace_bits(win32.BACKGROUND_MASK, 0)
    @classmethod
    def bk_white(cls):
        "Make the text background color white."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_GREY)
    @classmethod
    def bk_red(cls):
        "Make the text background color red."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_RED)
    @classmethod
    def bk_green(cls):
        "Make the text background color green."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_GREEN)
    @classmethod
    def bk_blue(cls):
        "Make the text background color blue."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_BLUE)
    @classmethod
    def bk_cyan(cls):
        "Make the text background color cyan."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_CYAN)
    @classmethod
    def bk_magenta(cls):
        "Make the text background color magenta."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_MAGENTA)
    @classmethod
    def bk_yellow(cls):
        "Make the text background color yellow."
        cls._replace_bits(win32.BACKGROUND_MASK, win32.BACKGROUND_YELLOW)
#------------------------------------------------------------------------------
# TODO: another class for ASCII boxes
class Table (object):
    """
    Text based table. The number of columns and the width of each column
    is automatically calculated.
    """
    def __init__(self, sep = ' '):
        """
        @type sep: str
        @param sep: Separator between cells in each row.
        """
        self.__rows = list()
        self.__widths = list()
        self.__separator = sep
    def addRow(self, *row):
        """
        Add a row to the table. All items are converted to strings.
        @type row: tuple
        @keyword row: Each argument is a cell in the table.
        """
        cells = [ str(cell) for cell in row ]
        sizes = [ len(cell) for cell in cells ]
        widths = self.__widths
        # Grow whichever side is shorter so both lists line up.
        extra = len(cells) - len(widths)
        if extra > 0:
            widths.extend( sizes[-extra:] )
        elif extra < 0:
            sizes.extend( [0] * (-extra) )
        # Keep the widest cell seen so far for every column.
        self.__widths = [ max(old, new) for (old, new) in zip(widths, sizes) ]
        self.__rows.append(cells)
    def justify(self, column, direction):
        """
        Make the text in a column left or right justified.
        @type column: int
        @param column: Index of the column.
        @type direction: int
        @param direction:
            C{-1} to justify left,
            C{1} to justify right.
        @raise IndexError: Bad column index.
        @raise ValueError: Bad direction value.
        """
        # The sign of the stored width encodes the justification:
        # positive means left justified, negative means right justified.
        if direction not in (-1, 1):
            raise ValueError("Bad direction value.")
        size = abs(self.__widths[column])
        self.__widths[column] = size if direction == -1 else -size
    def getWidth(self):
        """
        Get the width of the text output for the table.
        @rtype: int
        @return: Width in characters for the text output,
            including the newline character.
        """
        if not self.__widths:
            return 0
        total = sum( abs(w) for w in self.__widths )
        return total + len(self.__widths) * len(self.__separator) + 1
    def getOutput(self):
        """
        Get the text output for the table.
        @rtype: str
        @return: Text output.
        """
        return '%s\n' % '\n'.join( self.yieldOutput() )
    def yieldOutput(self):
        """
        Generate the text output for the table.
        @rtype: generator of str
        @return: Text output.
        """
        widths = self.__widths
        if widths:
            num_cols = len(widths)
            # Negating the width flips %-Ns (left) into %Ns (right).
            pieces = [ '%%%ds' % -w for w in widths ]
            if widths[-1] > 0:
                # Avoid trailing padding on a left justified last column.
                pieces[-1] = '%s'
            line_fmt = self.__separator.join(pieces)
            for cells in self.__rows:
                cells.extend( [''] * (num_cols - len(cells)) )
                yield line_fmt % tuple(cells)
    def show(self):
        """
        Print the text output for the table.
        """
        print(self.getOutput())
#------------------------------------------------------------------------------
class CrashDump (StaticClass):
    """
    Static functions for crash dumps.
    @type reg_template: str
    @cvar reg_template: Template for the L{dump_registers} method.
    """
    # Templates for the dump_registers method.
    # Keyed by architecture; the %(Name)s placeholders are filled from the
    # register dictionary, plus the synthetic 'efl_dump' key (see dump_flags).
    reg_template = {
        win32.ARCH_I386 : (
            'eax=%(Eax).8x ebx=%(Ebx).8x ecx=%(Ecx).8x edx=%(Edx).8x esi=%(Esi).8x edi=%(Edi).8x\n'
            'eip=%(Eip).8x esp=%(Esp).8x ebp=%(Ebp).8x %(efl_dump)s\n'
            'cs=%(SegCs).4x ss=%(SegSs).4x ds=%(SegDs).4x es=%(SegEs).4x fs=%(SegFs).4x gs=%(SegGs).4x efl=%(EFlags).8x\n'
            ),
        win32.ARCH_AMD64 : (
            'rax=%(Rax).16x rbx=%(Rbx).16x rcx=%(Rcx).16x\n'
            'rdx=%(Rdx).16x rsi=%(Rsi).16x rdi=%(Rdi).16x\n'
            'rip=%(Rip).16x rsp=%(Rsp).16x rbp=%(Rbp).16x\n'
            ' r8=%(R8).16x r9=%(R9).16x r10=%(R10).16x\n'
            'r11=%(R11).16x r12=%(R12).16x r13=%(R13).16x\n'
            'r14=%(R14).16x r15=%(R15).16x\n'
            '%(efl_dump)s\n'
            'cs=%(SegCs).4x ss=%(SegSs).4x ds=%(SegDs).4x es=%(SegEs).4x fs=%(SegFs).4x gs=%(SegGs).4x efl=%(EFlags).8x\n'
            ),
        }
    @staticmethod
    def dump_flags(efl):
        """
        Dump the x86 processor flags.
        The output mimics that of the WinDBG debugger.
        Used by L{dump_registers}.
        @type efl: int
        @param efl: Value of the eFlags register.
        @rtype: str
        @return: Text suitable for logging.
        """
        if efl is None:
            return ''
        # Each bit test below appends a fixed-width mnemonic so the output
        # columns always line up, matching WinDBG's flag dump.
        efl_dump = 'iopl=%1d' % ((efl & 0x3000) >> 12)
        if efl & 0x100000:
            efl_dump += ' vip'
        else:
            efl_dump += '    '
        if efl & 0x80000:
            efl_dump += ' vif'
        else:
            efl_dump += '    '
        # 0x20000 ???
        if efl & 0x800:
            efl_dump += ' ov' # Overflow
        else:
            efl_dump += ' no' # No overflow
        if efl & 0x400:
            efl_dump += ' dn' # Downwards
        else:
            efl_dump += ' up' # Upwards
        if efl & 0x200:
            efl_dump += ' ei' # Enable interrupts
        else:
            efl_dump += ' di' # Disable interrupts
        # 0x100 trap flag
        if efl & 0x80:
            efl_dump += ' ng' # Negative
        else:
            efl_dump += ' pl' # Positive
        if efl & 0x40:
            efl_dump += ' zr' # Zero
        else:
            efl_dump += ' nz' # Nonzero
        if efl & 0x10:
            efl_dump += ' ac' # Auxiliary carry
        else:
            efl_dump += ' na' # No auxiliary carry
        # 0x8 ???
        if efl & 0x4:
            efl_dump += ' pe' # Parity odd
        else:
            efl_dump += ' po' # Parity even
        # 0x2 ???
        if efl & 0x1:
            efl_dump += ' cy' # Carry
        else:
            efl_dump += ' nc' # No carry
        return efl_dump
    @classmethod
    def dump_registers(cls, registers, arch = None):
        """
        Dump the x86/x64 processor register values.
        The output mimics that of the WinDBG debugger.
        @type registers: dict( str S{->} int )
        @param registers: Dictionary mapping register names to their values.
        @type arch: str
        @param arch: Architecture of the machine whose registers were dumped.
            Defaults to the current architecture.
            Currently only the following architectures are supported:
             - L{win32.ARCH_I386}
             - L{win32.ARCH_AMD64}
        @rtype: str
        @return: Text suitable for logging.
        """
        if registers is None:
            return ''
        if arch is None:
            # Guess the architecture from which registers are present.
            if 'Eax' in registers:
                arch = win32.ARCH_I386
            elif 'Rax' in registers:
                arch = win32.ARCH_AMD64
            else:
                arch = 'Unknown'
        if arch not in cls.reg_template:
            msg = "Don't know how to dump the registers for architecture: %s"
            raise NotImplementedError(msg % arch)
        # Copy before adding the synthetic key, to not mutate the caller's dict.
        registers = registers.copy()
        registers['efl_dump'] = cls.dump_flags( registers['EFlags'] )
        return cls.reg_template[arch] % registers
    @staticmethod
    def dump_registers_peek(registers, data, separator = ' ', width = 16):
        """
        Dump data pointed to by the given registers, if any.
        @type registers: dict( str S{->} int )
        @param registers: Dictionary mapping register names to their values.
            This value is returned by L{Thread.get_context}.
        @type data: dict( str S{->} str )
        @param data: Dictionary mapping register names to the data they point to.
            This value is returned by L{Thread.peek_pointers_in_registers}.
        @rtype: str
        @return: Text suitable for logging.
        """
        if None in (registers, data):
            return ''
        # Sort register names for a deterministic output order.
        names = compat.keys(data)
        names.sort()
        result = ''
        for reg_name in names:
            tag = reg_name.lower()
            dumped = HexDump.hexline(data[reg_name], separator, width)
            result += '%s -> %s\n' % (tag, dumped)
        return result
    @staticmethod
    def dump_data_peek(data, base = 0,
                       separator = ' ',
                       width = 16,
                       bits = None):
        """
        Dump data from pointers guessed within the given binary data.
        @type data: str
        @param data: Dictionary mapping offsets to the data they point to.
        @type base: int
        @param base: Base offset.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text suitable for logging.
        """
        if data is None:
            return ''
        # Sort offsets so the dump is in ascending address order.
        pointers = compat.keys(data)
        pointers.sort()
        result = ''
        for offset in pointers:
            dumped = HexDump.hexline(data[offset], separator, width)
            address = HexDump.address(base + offset, bits)
            result += '%s -> %s\n' % (address, dumped)
        return result
    @staticmethod
    def dump_stack_peek(data, separator = ' ', width = 16, arch = None):
        """
        Dump data from pointers guessed within the given stack dump.
        @type data: str
        @param data: Dictionary mapping stack offsets to the data they point to.
        @type separator: str
        @param separator:
            Separator between the hexadecimal representation of each character.
        @type width: int
        @param width:
            (Optional) Maximum number of characters to convert per text line.
            This value is also used for padding.
        @type arch: str
        @param arch: Architecture of the machine whose registers were dumped.
            Defaults to the current architecture.
        @rtype: str
        @return: Text suitable for logging.
        """
        if data is None:
            return ''
        if arch is None:
            arch = win32.arch
        pointers = compat.keys(data)
        pointers.sort()
        result = ''
        if pointers:
            # Pick the stack pointer mnemonic for the architecture.
            if arch == win32.ARCH_I386:
                spreg = 'esp'
            elif arch == win32.ARCH_AMD64:
                spreg = 'rsp'
            else:
                spreg = 'STACK' # just a generic tag
            # Pad offsets to the width of the largest offset.
            tag_fmt = '[%s+0x%%.%dx]' % (spreg, len( '%x' % pointers[-1] ) )
            for offset in pointers:
                dumped = HexDump.hexline(data[offset], separator, width)
                tag = tag_fmt % offset
                result += '%s -> %s\n' % (tag, dumped)
        return result
    @staticmethod
    def dump_stack_trace(stack_trace, bits = None):
        """
        Dump a stack trace, as returned by L{Thread.get_stack_trace} with the
        C{bUseLabels} parameter set to C{False}.
        @type stack_trace: list( int, int, str )
        @param stack_trace: Stack trace as a list of tuples of
            ( return address, frame pointer, module filename )
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text suitable for logging.
        """
        if not stack_trace:
            return ''
        table = Table()
        table.addRow('Frame', 'Origin', 'Module')
        # NOTE(review): the docstring says tuples are (return address, frame
        # pointer, module) but the unpacking below assumes (fp, ra, mod);
        # one of the two is inaccurate — verify against Thread.get_stack_trace.
        for (fp, ra, mod) in stack_trace:
            fp_d = HexDump.address(fp, bits)
            ra_d = HexDump.address(ra, bits)
            table.addRow(fp_d, ra_d, mod)
        return table.getOutput()
    @staticmethod
    def dump_stack_trace_with_labels(stack_trace, bits = None):
        """
        Dump a stack trace,
        as returned by L{Thread.get_stack_trace_with_labels}.
        @type stack_trace: list( int, int, str )
        @param stack_trace: Stack trace as a list of tuples of
            ( return address, frame pointer, module filename )
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text suitable for logging.
        """
        if not stack_trace:
            return ''
        table = Table()
        table.addRow('Frame', 'Origin')
        for (fp, label) in stack_trace:
            table.addRow( HexDump.address(fp, bits), label )
        return table.getOutput()
    # TODO
    # + Instead of a star when EIP points to, it would be better to show
    # any register value (or other values like the exception address) that
    # points to a location in the dissassembled code.
    # + It'd be very useful to show some labels here.
    # + It'd be very useful to show register contents for code at EIP
    @staticmethod
    def dump_code(disassembly, pc = None,
                  bLowercase = True,
                  bits = None):
        """
        Dump a disassembly. Optionally mark where the program counter is.
        @type disassembly: list of tuple( int, int, str, str )
        @param disassembly: Disassembly dump as returned by
            L{Process.disassemble} or L{Thread.disassemble_around_pc}.
        @type pc: int
        @param pc: (Optional) Program counter.
        @type bLowercase: bool
        @param bLowercase: (Optional) If C{True} convert the code to lowercase.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text suitable for logging.
        """
        if not disassembly:
            return ''
        table = Table(sep = ' | ')
        for (addr, size, code, dump) in disassembly:
            if bLowercase:
                code = code.lower()
            # Mark the program counter line with a star.
            if addr == pc:
                addr = ' * %s' % HexDump.address(addr, bits)
            else:
                addr = '   %s' % HexDump.address(addr, bits)
            table.addRow(addr, dump, code)
        # Right-justify the hex dump column.
        table.justify(1, 1)
        return table.getOutput()
    @staticmethod
    def dump_code_line(disassembly_line, bShowAddress = True,
                       bShowDump = True,
                       bLowercase = True,
                       dwDumpWidth = None,
                       dwCodeWidth = None,
                       bits = None):
        """
        Dump a single line of code. To dump a block of code use L{dump_code}.
        @type disassembly_line: tuple( int, int, str, str )
        @param disassembly_line: Single item of the list returned by
            L{Process.disassemble} or L{Thread.disassemble_around_pc}.
        @type bShowAddress: bool
        @param bShowAddress: (Optional) If C{True} show the memory address.
        @type bShowDump: bool
        @param bShowDump: (Optional) If C{True} show the hexadecimal dump.
        @type bLowercase: bool
        @param bLowercase: (Optional) If C{True} convert the code to lowercase.
        @type dwDumpWidth: int or None
        @param dwDumpWidth: (Optional) Width in characters of the hex dump.
        @type dwCodeWidth: int or None
        @param dwCodeWidth: (Optional) Width in characters of the code.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text suitable for logging.
        """
        if bits is None:
            address_size = HexDump.address_size
        else:
            # NOTE(review): under Python 3 this division yields a float;
            # the %d conversion below truncates it, so the output is the same.
            address_size = bits / 4
        (addr, size, code, dump) = disassembly_line
        dump = dump.replace(' ', '')
        # Build the format string and the argument list in lockstep.
        result = list()
        fmt = ''
        if bShowAddress:
            result.append( HexDump.address(addr, bits) )
            fmt += '%%%ds:' % address_size
        if bShowDump:
            result.append(dump)
            if dwDumpWidth:
                fmt += ' %%-%ds' % dwDumpWidth
            else:
                fmt += ' %s'
        if bLowercase:
            code = code.lower()
        result.append(code)
        if dwCodeWidth:
            fmt += ' %%-%ds' % dwCodeWidth
        else:
            fmt += ' %s'
        return fmt % tuple(result)
    @staticmethod
    def dump_memory_map(memoryMap, mappedFilenames = None, bits = None):
        """
        Dump the memory map of a process. Optionally show the filenames for
        memory mapped files as well.
        @type memoryMap: list( L{win32.MemoryBasicInformation} )
        @param memoryMap: Memory map returned by L{Process.get_memory_map}.
        @type mappedFilenames: dict( int S{->} str )
        @param mappedFilenames: (Optional) Memory mapped filenames
            returned by L{Process.get_mapped_filenames}.
        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexDump.address_size}
        @rtype: str
        @return: Text suitable for logging.
        """
        if not memoryMap:
            return ''
        table = Table()
        if mappedFilenames:
            table.addRow("Address", "Size", "State", "Access", "Type", "File")
        else:
            table.addRow("Address", "Size", "State", "Access", "Type")
        # For each memory block in the map...
        for mbi in memoryMap:
            # Address and size of memory block.
            BaseAddress = HexDump.address(mbi.BaseAddress, bits)
            RegionSize = HexDump.address(mbi.RegionSize, bits)
            # State (free or allocated).
            mbiState = mbi.State
            if mbiState == win32.MEM_RESERVE:
                State = "Reserved"
            elif mbiState == win32.MEM_COMMIT:
                # NOTE(review): "Commited" is misspelled (should be
                # "Committed") but is kept as-is since callers may parse
                # this output.
                State = "Commited"
            elif mbiState == win32.MEM_FREE:
                State = "Free"
            else:
                State = "Unknown"
            # Page protection bits (R/W/X/G).
            # Protection flags only make sense for committed pages.
            if mbiState != win32.MEM_COMMIT:
                Protect = ""
            else:
                mbiProtect = mbi.Protect
                if mbiProtect & win32.PAGE_NOACCESS:
                    Protect = "--- "
                elif mbiProtect & win32.PAGE_READONLY:
                    Protect = "R-- "
                elif mbiProtect & win32.PAGE_READWRITE:
                    Protect = "RW- "
                elif mbiProtect & win32.PAGE_WRITECOPY:
                    Protect = "RC- "
                elif mbiProtect & win32.PAGE_EXECUTE:
                    Protect = "--X "
                elif mbiProtect & win32.PAGE_EXECUTE_READ:
                    Protect = "R-X "
                elif mbiProtect & win32.PAGE_EXECUTE_READWRITE:
                    Protect = "RWX "
                elif mbiProtect & win32.PAGE_EXECUTE_WRITECOPY:
                    Protect = "RCX "
                else:
                    Protect = "??? "
                if mbiProtect & win32.PAGE_GUARD:
                    Protect += "G"
                else:
                    Protect += "-"
                if mbiProtect & win32.PAGE_NOCACHE:
                    Protect += "N"
                else:
                    Protect += "-"
                if mbiProtect & win32.PAGE_WRITECOMBINE:
                    Protect += "W"
                else:
                    Protect += "-"
            # Type (file mapping, executable image, or private memory).
            mbiType = mbi.Type
            if mbiType == win32.MEM_IMAGE:
                Type = "Image"
            elif mbiType == win32.MEM_MAPPED:
                Type = "Mapped"
            elif mbiType == win32.MEM_PRIVATE:
                Type = "Private"
            elif mbiType == 0:
                Type = ""
            else:
                Type = "Unknown"
            # Output a row in the table.
            if mappedFilenames:
                FileName = mappedFilenames.get(mbi.BaseAddress, '')
                table.addRow( BaseAddress, RegionSize, State, Protect, Type, FileName )
            else:
                table.addRow( BaseAddress, RegionSize, State, Protect, Type )
        # Return the table output.
        return table.getOutput()
#------------------------------------------------------------------------------
class DebugLog (StaticClass):
    'Static functions for debug logging.'
    @staticmethod
    def log_text(text):
        """
        Log lines of text, inserting a timestamp.
        @type text: str
        @param text: Text to log.
        @rtype: str
        @return: Log line.
        """
        # Drop a single trailing newline, if present.
        if text.endswith('\n'):
            text = text[:-1]
        #text = text.replace('\n', '\n\t\t') # text CSV
        stamp = time.strftime("%X")
        fraction = (time.time() % 1) * 1000
        return '[%s.%04d] %s' % (stamp, fraction, text)
        #return '[%s.%04d]\t%s' % (stamp, fraction, text) # text CSV
    @classmethod
    def log_event(cls, event, text = None):
        """
        Log lines of text associated with a debug event.
        @type event: L{Event}
        @param event: Event object.
        @type text: str
        @param text: (Optional) Text to log. If no text is provided the default
            is to show a description of the event itself.
        @rtype: str
        @return: Log line.
        """
        if not text:
            # Build a default description from the event itself.
            if event.get_event_code() == win32.EXCEPTION_DEBUG_EVENT:
                chance = 'first' if event.is_first_chance() else 'second'
                what = '%s (%s chance)' % (
                    event.get_exception_description(), chance )
                try:
                    address = event.get_fault_address()
                except NotImplementedError:
                    address = event.get_exception_address()
            else:
                what = event.get_event_name()
                address = event.get_thread().get_pc()
            process = event.get_process()
            label = process.get_label_at_address(address)
            address = HexDump.address(address, process.get_bits())
            where = '%s (%s)' % (address, label) if label else address
            text = '%s at %s' % (what, where)
        text = 'pid %d tid %d: %s' % (event.get_pid(), event.get_tid(), text)
        #text = 'pid %d tid %d:\t%s' % (event.get_pid(), event.get_tid(), text) # text CSV
        return cls.log_text(text)
#------------------------------------------------------------------------------
class Logger(object):
    """
    Logs text to standard output and/or a text file.
    @type logfile: str or None
    @ivar logfile: Append messages to this text file.
    @type verbose: bool
    @ivar verbose: C{True} to print messages to standard output.
    @type fd: file
    @ivar fd: File object where log messages are printed to.
        C{None} if no log file is used.
    """
    def __init__(self, logfile = None, verbose = True):
        """
        @type logfile: str or None
        @param logfile: Append messages to this text file.
        @type verbose: bool
        @param verbose: C{True} to print messages to standard output.
        """
        self.verbose = verbose
        self.logfile = logfile
        if self.logfile:
            self.fd = open(self.logfile, 'a+')
        else:
            # The class docstring promises C{fd} is C{None} when no log file
            # is used, but the attribute was previously left unset, causing
            # an AttributeError on access. Initialize it explicitly.
            self.fd = None
    def __logfile_error(self, e):
        """
        Shows an error message to standard error
        if the log file can't be written to.
        Used internally.
        @type e: Exception
        @param e: Exception raised when trying to write to the log file.
        """
        from sys import stderr
        msg = "Warning, error writing log file %s: %s\n"
        msg = msg % (self.logfile, str(e))
        stderr.write(DebugLog.log_text(msg))
        # Stop trying to use the log file from now on.
        self.logfile = None
        self.fd = None
    def __do_log(self, text):
        """
        Writes the given text verbatim into the log file (if any)
        and/or standard output (if the verbose flag is turned on).
        Used internally.
        @type text: str
        @param text: Text to print.
        """
        if isinstance(text, compat.unicode):
            # Log files and consoles are assumed to use this codepage.
            text = text.encode('cp1252')
        if self.verbose:
            print(text)
        if self.logfile:
            try:
                self.fd.writelines('%s\n' % text)
            except IOError:
                # sys.exc_info() instead of "except ... as e" keeps
                # compatibility with older Python versions.
                e = sys.exc_info()[1]
                self.__logfile_error(e)
    def log_text(self, text):
        """
        Log lines of text, inserting a timestamp.
        @type text: str
        @param text: Text to log.
        """
        self.__do_log( DebugLog.log_text(text) )
    def log_event(self, event, text = None):
        """
        Log lines of text associated with a debug event.
        @type event: L{Event}
        @param event: Event object.
        @type text: str
        @param text: (Optional) Text to log. If no text is provided the default
            is to show a description of the event itself.
        """
        self.__do_log( DebugLog.log_event(event, text) )
    def log_exc(self):
        """
        Log lines of text associated with the last Python exception.
        """
        self.__do_log( 'Exception raised: %s' % traceback.format_exc() )
    def is_enabled(self):
        """
        Determines if the logger will actually print anything when the log_*
        methods are called.
        This may save some processing if the log text requires a lengthy
        calculation to prepare. If no log file is set and stdout logging
        is disabled, there's no point in preparing a log text that won't
        be shown to anyone.
        @rtype: bool
        @return: C{True} if a log file was set and/or standard output logging
            is enabled, or C{False} otherwise.
        """
        return self.verbose or self.logfile
| epl-1.0 |
frankyrumple/ope | libs/paramiko/transport.py | 31 | 97327 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Core protocol implementation
"""
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
from paramiko.common import xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, \
cMSG_GLOBAL_REQUEST, DEBUG, MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, \
MSG_DEBUG, ERROR, WARNING, cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, \
cMSG_NEWKEYS, MSG_NEWKEYS, cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, \
CONNECTION_FAILED_CODE, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, \
OPEN_SUCCEEDED, cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, \
MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, \
MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, \
MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, \
MSG_CHANNEL_EXTENDED_DATA, MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, \
MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MIN_WINDOW_SIZE, MIN_PACKET_SIZE, \
MAX_WINDOW_SIZE, DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.kex_gex import KexGex
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14, NullHostKey
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (SSHException, BadAuthenticationType,
ChannelException, ProxyCommandFailure)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
from Crypto.Cipher import Blowfish, AES, DES3, ARC4
try:
from Crypto.Util import Counter
except ImportError:
from paramiko.util import Counter
# for thread cleanup
_active_threads = []

def _join_lingering_threads():
    """Ask every still-registered transport thread to shut down."""
    for lingering_thread in _active_threads:
        lingering_thread.stop_thread()

import atexit
atexit.register(_join_lingering_threads)
class Transport (threading.Thread, ClosingContextManager):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
`channels <.Channel>`, across the session. Multiple channels can be
multiplexed across a single session (and often are, in the case of port
forwardings).
Instances of this class may be used as context managers.
"""
_PROTO_ID = '2.0'
_CLIENT_ID = 'paramiko_%s' % paramiko.__version__
_preferred_ciphers = ('aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc',
'aes256-cbc', '3des-cbc', 'arcfour128', 'arcfour256')
_preferred_macs = ('hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96')
_preferred_keys = ('ssh-rsa', 'ssh-dss', 'ecdsa-sha2-nistp256')
_preferred_kex = ( 'diffie-hellman-group14-sha1', 'diffie-hellman-group-exchange-sha1' , 'diffie-hellman-group1-sha1')
_preferred_compression = ('none',)
_cipher_info = {
'aes128-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16},
'aes256-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32},
'blowfish-cbc': {'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16},
'aes128-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16},
'aes256-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32},
'3des-cbc': {'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24},
'arcfour128': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16},
'arcfour256': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32},
}
_mac_info = {
'hmac-sha1': {'class': sha1, 'size': 20},
'hmac-sha1-96': {'class': sha1, 'size': 12},
'hmac-md5': {'class': md5, 'size': 16},
'hmac-md5-96': {'class': md5, 'size': 12},
}
_key_info = {
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
'ecdsa-sha2-nistp256': ECDSAKey,
}
_kex_info = {
'diffie-hellman-group1-sha1': KexGroup1,
'diffie-hellman-group14-sha1': KexGroup14,
'diffie-hellman-group-exchange-sha1': KexGex,
'gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGroup1,
'gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGroup14,
'gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGex
}
_compression_info = {
# zlib@openssh.com is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
'zlib@openssh.com': (ZlibCompressor, ZlibDecompressor),
'zlib': (ZlibCompressor, ZlibDecompressor),
'none': (None, None),
}
_modulus_pack = None
def __init__(self,
sock,
default_window_size=DEFAULT_WINDOW_SIZE,
default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
gss_kex=False,
gss_deleg_creds=True):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the `.Transport` object; it doesn't begin the
SSH session yet. Use `connect` or `start_client` to begin a client
session, or `start_server` to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- ``send(str)``: Writes from 1 to ``len(str)`` bytes, and returns an
int representing the number of bytes written. Returns
0 or raises ``EOFError`` if the stream has been closed.
- ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
string. Returns 0 or raises ``EOFError`` if the stream has been
closed.
- ``close()``: Closes the socket.
- ``settimeout(n)``: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the ``sock`` argument. (A host string is a hostname with an
optional port (separated by ``":"``) which will be converted into a
tuple of ``(hostname, port)``.) A socket will be connected to this
address and used for communication. Exceptions from the ``socket``
call may be thrown in this case.
.. note::
Modifying the the window and packet sizes might have adverse
effects on your channels created from this transport. The default
values are the same as in the OpenSSH code base and have been
battle tested.
:param socket sock:
a socket or socket-like object to create the session over.
:param int default_window_size:
sets the default window size on the transport. (defaults to
2097152)
:param int default_max_packet_size:
sets the default max packet size on the transport. (defaults to
32768)
.. versionchanged:: 1.15
Added the ``default_window_size`` and ``default_max_packet_size``
arguments.
"""
self.active = False
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(':', 1)
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
'Unable to connect to %s: %s' % (hostname, reason))
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# Python < 2.3 doesn't have the settimeout method - RogerB
try:
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
self.sock.settimeout(0.1)
except AttributeError:
pass
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
self.remote_version = ''
self.local_cipher = self.remote_cipher = ''
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# GSS-API / SSPI Key Exchange
self.use_gss_kex = gss_kex
# This will be set to True if GSS-API Key Exchange was performed
self.gss_kex_used = False
self.kexgss_ctxt = None
self.gss_host = None
if self.use_gss_kex:
self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
self._preferred_kex = ('gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==',
'gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==',
'gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==',
'diffie-hellman-group-exchange-sha1',
'diffie-hellman-group14-sha1',
'diffie-hellman-group1-sha1')
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
self.lock = threading.Lock() # synchronization (always higher level than write_lock)
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 0
self.default_max_packet_size = default_max_packet_size
self.default_window_size = default_window_size
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = 'paramiko.transport'
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
self.global_response = None # response Message from an arbitrary global request
self.completion_event = None # user-defined event callbacks
self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
out = '<paramiko.Transport at %s' % hex(long(id(self)) & xffffffff)
if not self.active:
out += ' (unconnected)'
else:
if self.local_cipher != '':
out += ' (cipher %s, %d bits)' % (self.local_cipher,
self._cipher_info[self.local_cipher]['key-size'] * 8)
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
out += ' (connected; awaiting auth)'
else:
out += ' (connecting)'
out += '>'
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
.. versionadded:: 1.5.3
"""
self.sock.close()
self.close()
def get_security_options(self):
"""
Return a `.SecurityOptions` object which can be used to tweak the
encryption algorithms this transport will permit (for encryption,
digest/hash operations, public keys, and key exchanges) and the order
of preference for them.
"""
return SecurityOptions(self)
def set_gss_host(self, gss_host):
"""
Setter for C{gss_host} if GSS-API Key Exchange is performed.
:param str gss_host: The targets name in the kerberos database
Default: The name of the host to connect to
:rtype: Void
"""
# We need the FQDN to get this working with SSPI
self.gss_host = socket.getfqdn(gss_host)
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new `.Transport`. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
negotation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling `auth_password <Transport.auth_password>` or
`auth_publickey <Transport.auth_publickey>`.
.. note:: `connect` is a simpler method for connecting as a client.
.. note::
After calling this method (or `start_server` or `connect`), you
should no longer directly read from or write to the original socket
object.
:param .threading.Event event:
an event to trigger when negotiation is complete (optional)
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.is_set():
break
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new `.Transport` and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
negotation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods `get_allowed_auths
<.ServerInterface.get_allowed_auths>`, `check_auth_none
<.ServerInterface.check_auth_none>`, `check_auth_password
<.ServerInterface.check_auth_password>`, and `check_auth_publickey
<.ServerInterface.check_auth_publickey>` in the given ``server`` object
to control the authentication process.
After a successful authentication, the client should request to open a
channel. Override `check_channel_request
<.ServerInterface.check_channel_request>` in the given ``server``
object to allow channels to be opened.
.. note::
After calling this method (or `start_client` or `connect`), you
should no longer directly read from or write to the original socket
object.
:param .threading.Event event:
an event to trigger when negotiation is complete.
:param .ServerInterface server:
an object used to perform authentication and create `channels
<.Channel>`
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.is_set():
break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
:param .PKey key:
the host key to add, usually an `.RSAKey` or `.DSSKey`.
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with `add_server_key`, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, ``None`` is returned. In client mode, the behavior is undefined.
:return:
host key (`.PKey`) of the type negotiated by the client, or
``None``.
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
@staticmethod
def load_server_moduli(filename=None):
"""
(optional)
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like ``/etc/ssh/moduli``).
If you call `load_server_moduli` and it returns ``True``, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
:param str filename:
optional path to the moduli file, if you happen to know that it's
not in a standard location.
:return:
True if a moduli file was successfully loaded; False otherwise.
.. note:: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ['/etc/ssh/moduli', '/usr/local/etc/moduli']
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
.. note::
Previously this call returned a tuple of ``(key type, key
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
:raises SSHException: if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
:return:
True if the session is still active (open); False if the session is
closed
"""
return self.active
def open_session(self, window_size=None, max_packet_size=None):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
``"session"``.
.. note:: Modifying the the window and packet sizes might have adverse
effects on the session created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
return self.open_channel('session',
window_size=window_size,
max_packet_size=max_packet_size)
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type ``"x11"``. This
is just an alias for ``open_channel('x11', src_addr=src_addr)``.
:param tuple src_addr:
the source address (``(str, int)``) of the x11 server (port is the
x11 port, ie. 6010)
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
``"auth-agent@openssh.com"``.
This is just an alias for ``open_channel('auth-agent@openssh.com')``.
:return: a new `.Channel`
:raises SSHException:
if the request is rejected or the session ends prematurely
"""
return self.open_channel('auth-agent@openssh.com')
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
Request a new channel back to the client, of type ``"forwarded-tcpip"``.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
:param src_addr: originator's address
:param dest_addr: local (server) connected address
"""
return self.open_channel('forwarded-tcpip', dest_addr, src_addr)
def open_channel(self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
.. note:: Modifying the the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected or the session ends
prematurely
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
if not self.active:
raise SSHException('SSH session not active')
self.lock.acquire()
try:
window_size = self._sanitize_window_size(window_size)
max_packet_size = self._sanitize_packet_size(max_packet_size)
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(window_size)
m.add_int(max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(window_size, max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.is_set():
break
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
#src_addr, src_port = src_addr_port
#dest_addr, dest_port = dest_addr_port
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
:param str address: the address to stop forwarding
:param int port: the port to stop forwarding
"""
if not self.active:
return
self._tcp_handler = None
self.global_request('cancel-tcpip-forward', (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success, an
SFTP session will be opened with the remote host, and a new
`.SFTPClient` object will be returned.
:return:
a new `.SFTPClient` referring to an sftp session (channel) across
this transport
"""
return SFTPClient.from_transport(self)
def send_ignore(self, byte_count=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
:param int byte_count:
the number of random bytes to send in the payload of the ignored
packet -- defaults to a random number from 10 to 41.
"""
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
:raises SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.is_set():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
``interval`` seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
:param int interval:
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied.
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(cMSG_GLOBAL_REQUEST)
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.is_set():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout, ``None``
is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
    def connect(self, hostkey=None, username='', password=None, pkey=None,
                gss_host=None, gss_auth=False, gss_kex=False, gss_deleg_creds=True):
        """
        Negotiate an SSH2 session, and optionally verify the server's host key
        and authenticate using a password or private key.  This is a shortcut
        for `start_client`, `get_remote_server_key`, and
        `Transport.auth_password` or `Transport.auth_publickey`.  Use those
        methods if you want more control.

        You can use this method immediately after creating a Transport to
        negotiate encryption with a server.  If it fails, an exception will be
        thrown.  On success, the method will return cleanly, and an encrypted
        session exists.  You may immediately call `open_channel` or
        `open_session` to get a `.Channel` object, which is used for data
        transfer.

        .. note::
            If you fail to supply a password or private key, this method may
            succeed, but a subsequent `open_channel` or `open_session` call may
            fail because you haven't authenticated yet.

        :param .PKey hostkey:
            the host key expected from the server, or ``None`` if you don't
            want to do host key verification.
        :param str username: the username to authenticate as.
        :param str password:
            a password to use for authentication, if you want to use password
            authentication; otherwise ``None``.
        :param .PKey pkey:
            a private key to use for authentication, if you want to use private
            key authentication; otherwise ``None``.
        :param str gss_host:
            The target's name in the kerberos database. Default: hostname
        :param bool gss_auth:
            ``True`` if you want to use GSS-API authentication.
        :param bool gss_kex:
            Perform GSS-API Key Exchange and user authentication.
        :param bool gss_deleg_creds:
            Whether to delegate GSS-API client credentials.

        :raises SSHException: if the SSH2 negotiation fails, the host key
            supplied by the server is incorrect, or authentication fails.
        """
        if hostkey is not None:
            # restrict kex to the expected key type so the server presents it
            self._preferred_keys = [hostkey.get_name()]
        self.start_client()
        # check host key if we were given one
        # If GSS-API Key Exchange was performed, we are not required to check
        # the host key.
        if (hostkey is not None) and not gss_kex:
            key = self.get_remote_server_key()
            if (key.get_name() != hostkey.get_name()) or (key.asbytes() != hostkey.asbytes()):
                self._log(DEBUG, 'Bad host key from server')
                self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(hostkey.asbytes())))
                self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(key.asbytes())))
                raise SSHException('Bad host key from server')
            self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
        # pick exactly one auth method, in priority order:
        # gssapi-with-mic > gssapi-keyex > public key > password
        if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
            if gss_auth:
                self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-with-mic)')
                self.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds)
            elif gss_kex:
                self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-keyex)')
                self.auth_gssapi_keyex(username)
            elif pkey is not None:
                self._log(DEBUG, 'Attempting public-key auth...')
                self.auth_publickey(username, pkey)
            else:
                self._log(DEBUG, 'Attempting password auth...')
                self.auth_password(username, password)
        return
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like `start_client`. The exception (if any) is cleared after
this call.
:return:
an exception, or ``None`` if there is no stored exception.
.. versionadded:: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see `.SubsystemHandler` for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
:param class handler:
subclass of `.SubsystemHandler` that handles this subsystem.
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
:return:
True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
"""
return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns ``None``.
:return: username that was authenticated (a `str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
"""
Return the banner supplied by the server upon connect. If no banner is
supplied, this method returns ``None``.
:returns: server supplied banner (`str`), or ``None``.
.. versionadded:: 1.13
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
`.BadAuthenticationType` exception raised.
:param str username: the username to authenticate as
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
:raises SSHException: if the authentication failed due to a network
error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
    def auth_password(self, username, password, event=None, fallback=True):
        """
        Authenticate to the server using a password.  The username and
        password are sent over an encrypted link.

        If an ``event`` is passed in, this method will return immediately, and
        the event will be triggered once authentication succeeds or fails.  On
        success, `is_authenticated` will return ``True``.  On failure, you may
        use `get_exception` to get more detailed error information.

        Since 1.1, if no event is passed, this method will block until the
        authentication succeeds or fails.  On failure, an exception is raised.
        Otherwise, the method simply returns.

        Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
        default), if the server doesn't support plain password authentication
        but does support so-called "keyboard-interactive" mode, an attempt
        will be made to authenticate using this interactive mode.  If it
        fails, the normal exception will be thrown as if the attempt had never
        been made.  This is useful for some recent Gentoo and Debian
        distributions, which turn off plain password authentication in a
        misguided belief that interactive authentication is "more secure".
        (It's not.)

        If the server requires multi-step authentication (which is very rare),
        this method will return a list of auth types permissible for the next
        step.  Otherwise, in the normal case, an empty list is returned.

        :param str username: the username to authenticate as
        :param basestring password: the password to authenticate with
        :param .threading.Event event:
            an event to trigger when the authentication attempt is complete
            (whether it was successful or not)
        :param bool fallback:
            ``True`` if an attempt at an automated "interactive" password auth
            should be made if the server doesn't support normal password auth
        :return:
            `list` of auth types permissible for the next stage of
            authentication (normally empty)

        :raises BadAuthenticationType: if password authentication isn't
            allowed by the server for this user (and no event was passed in)
        :raises AuthenticationException: if the authentication failed (and no
            event was passed in)
        :raises SSHException: if there was a network error
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to send the password unless we're on a secure link
            raise SSHException('No existing session')
        if event is None:
            my_event = threading.Event()
        else:
            my_event = event
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_password(username, password, my_event)
        if event is not None:
            # caller wants to wait for event themselves
            return []
        try:
            return self.auth_handler.wait_for_response(my_event)
        except BadAuthenticationType as e:
            # if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
            if not fallback or ('keyboard-interactive' not in e.allowed_types):
                raise
            try:
                # fake an interactive session: answer any single-field prompt
                # with the supplied password
                def handler(title, instructions, fields):
                    if len(fields) > 1:
                        raise SSHException('Fallback authentication failed.')
                    if len(fields) == 0:
                        # for some reason, at least on os x, a 2nd request will
                        # be made with zero fields requested. maybe it's just
                        # to try to fake out automated scripting of the exact
                        # type we're doing here. *shrug* :)
                        return []
                    return [password]
                return self.auth_interactive(username, handler)
            except SSHException:
                # attempt failed; just raise the original exception
                raise e
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param .PKey key: the private key to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=''):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
peroidically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty).
:raises BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user
:raises AuthenticationException: if the authentication failed
:raises SSHException: if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(username, handler, my_event, submethods)
return self.auth_handler.wait_for_response(my_event)
def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
"""
Authenticate to the Server using GSS-API / SSPI.
:param str username: The username to authenticate as
:param str gss_host: The target host
:param bool gss_deleg_creds: Delegate credentials or not
:return: list of auth types permissible for the next stage of
authentication (normally empty)
:rtype: list
:raise BadAuthenticationType: if gssapi-with-mic isn't
allowed by the server (and no event was passed in)
:raise AuthenticationException: if the authentication failed (and no
event was passed in)
:raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds, my_event)
return self.auth_handler.wait_for_response(my_event)
    def auth_gssapi_keyex(self, username):
        """
        Authenticate to the server with GSS-API / SSPI if GSS-API Key
        Exchange was the used key exchange method.

        :param str username: The username to authenticate as

        :return: list of auth types permissible for the next stage of
            authentication (normally empty)
        :rtype: list
        :raise BadAuthenticationType: if GSS-API Key Exchange was not
            performed (and no event was passed in)
        :raise AuthenticationException: if the authentication failed (and no
            event was passed in)
        :raise SSHException: if there was a network error
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to authenticate unless we're on a secure link
            raise SSHException('No existing session')
        my_event = threading.Event()
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_gssapi_keyex(username, my_event)
        return self.auth_handler.wait_for_response(my_event)
    def set_log_channel(self, name):
        """
        Set the channel for this transport's logging.  The default is
        ``"paramiko.transport"`` but it can be set to anything you want.  (See
        the `.logging` module for more info.)  SSH Channels will log to a
        sub-channel of the one specified.

        :param str name: new channel name for logging

        .. versionadded:: 1.1
        """
        self.log_name = name
        self.logger = util.get_logger(name)
        # keep the packetizer's logger in sync with ours
        self.packetizer.set_log(self.logger)
    def get_log_channel(self):
        """
        Return the channel name used for this transport's logging.

        :return: channel name as a `str`

        .. versionadded:: 1.2
        """
        return self.log_name
    def set_hexdump(self, hexdump):
        """
        Turn on/off logging a hex dump of protocol traffic at DEBUG level in
        the logs.  Normally you would want this off (which is the default),
        but if you are debugging something, it may be useful.

        :param bool hexdump:
            ``True`` to log protocol traffic (in hex) to the log; ``False``
            otherwise.
        """
        self.packetizer.set_hexdump(hexdump)
    def get_hexdump(self):
        """
        Return ``True`` if the transport is currently logging hex dumps of
        protocol traffic.

        :return: ``True`` if hex dumps are being logged, else ``False``.

        .. versionadded:: 1.4
        """
        return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
Turn on/off compression. This will only have an affect before starting
the transport (ie before calling `connect`, etc). By default,
compression is off since it negatively affects interactive sessions.
:param bool compress:
``True`` to ask the remote client/server to compress traffic;
``False`` to refuse compression
.. versionadded:: 1.5.2
"""
if compress:
self._preferred_compression = ('zlib@openssh.com', 'zlib', 'none')
else:
self._preferred_compression = ('none',)
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around ``'getpeername'`` on the underlying
socket. If the socket-like object has no ``'getpeername'`` method,
then ``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
tuple.
"""
gp = getattr(self.sock, 'getpeername', None)
if gp is None:
return 'unknown', 0
return gp()
def stop_thread(self):
self.active = False
self.packetizer.close()
while self.is_alive() and (self is not threading.current_thread()):
self.join(10)
### internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
    def _get_modulus_pack(self):
        """used by KexGex to find primes for group exchange"""
        # may be None if no moduli file was loaded; KexGex must handle that
        return self._modulus_pack
def _next_channel(self):
"""you are holding the lock"""
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
    def _unlink_channel(self, chanid):
        """used by a Channel to remove itself from the active channel list"""
        self._channels.delete(chanid)
    def _send_message(self, data):
        # unconditional send; see _send_user_message for the variant that
        # blocks during key (re)negotiation
        self.packetizer.send_message(data)
    def _send_user_message(self, data):
        """
        send a message, but block if we're in key negotiation.  this is used
        for user-initiated requests.
        """
        start = time.time()
        while True:
            self.clear_to_send.wait(0.1)
            if not self.active:
                self._log(DEBUG, 'Dropping user packet because connection is dead.')
                return
            self.clear_to_send_lock.acquire()
            if self.clear_to_send.is_set():
                # NOTE: we break while still *holding* the lock; it is
                # released by the finally clause below, after the send.
                break
            self.clear_to_send_lock.release()
            if time.time() > start + self.clear_to_send_timeout:
                raise SSHException('Key-exchange timed out waiting for key negotiation')
        try:
            self._send_message(data)
        finally:
            self.clear_to_send_lock.release()
    def _set_K_H(self, k, h):
        """used by a kex object to set the K (root key) and H (exchange hash)"""
        self.K = k
        self.H = h
        if self.session_id is None:
            # the session ID is the H from the very first key exchange and
            # never changes afterwards, even across rekeys
            self.session_id = h
    def _expect_packet(self, *ptypes):
        """used by a kex object to register the next packet type it expects to see"""
        # checked by run(): any other packet type raises SSHException
        self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
self.host_key = key
    def _compute_key(self, id, nbytes):
        """id is 'A' - 'F' for the various keys used by ssh"""
        # key derivation per RFC 4253 sec 7.2:
        # first block = HASH(K || H || letter || session_id)
        m = Message()
        m.add_mpint(self.K)
        m.add_bytes(self.H)
        m.add_byte(b(id))
        m.add_bytes(self.session_id)
        out = sofar = sha1(m.asbytes()).digest()
        # extend with HASH(K || H || output-so-far) until nbytes are available
        while len(out) < nbytes:
            m = Message()
            m.add_mpint(self.K)
            m.add_bytes(self.H)
            m.add_bytes(sofar)
            digest = sha1(m.asbytes()).digest()
            out += digest
            sofar += digest
        return out[:nbytes]
    def _get_cipher(self, name, key, iv):
        """Construct a cipher object for ``name`` from the negotiated key/IV."""
        if name not in self._cipher_info:
            raise SSHException('Unknown client cipher ' + name)
        if name in ('arcfour128', 'arcfour256'):
            # arcfour cipher
            cipher = self._cipher_info[name]['class'].new(key)
            # as per RFC 4345, the first 1536 bytes of keystream
            # generated by the cipher MUST be discarded
            cipher.encrypt(" " * 1536)
            return cipher
        elif name.endswith("-ctr"):
            # CTR modes, we need a counter
            # counter width equals the cipher block size; initial value taken
            # from the negotiated IV
            counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
            return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
        else:
            return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def _sanitize_window_size(self, window_size):
if window_size is None:
window_size = self.default_window_size
return clamp_value(MIN_WINDOW_SIZE, window_size, MAX_WINDOW_SIZE)
def _sanitize_packet_size(self, max_packet_size):
if max_packet_size is None:
max_packet_size = self.default_max_packet_size
return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
    def run(self):
        """Main loop of the transport thread: read, dispatch, and clean up."""
        # (use the exposed "run" method, because if we specify a thread target
        # of a private method, threading.Thread will keep a reference to it
        # indefinitely, creating a GC cycle and not letting Transport ever be
        # GC'd. it's a bug in Thread.)
        # Hold reference to 'sys' so we can test sys.modules to detect
        # interpreter shutdown.
        self.sys = sys
        # active=True occurs before the thread is launched, to avoid a race
        _active_threads.append(self)
        if self.server_mode:
            self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & xffffffff))
        else:
            self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & xffffffff))
        try:
            try:
                # protocol startup: banner exchange, then initial kex
                self.packetizer.write_all(b(self.local_version + '\r\n'))
                self._check_banner()
                self._send_kex_init()
                self._expect_packet(MSG_KEXINIT)
                # main dispatch loop: route each incoming packet by type
                while self.active:
                    if self.packetizer.need_rekey() and not self.in_kex:
                        self._send_kex_init()
                    try:
                        ptype, m = self.packetizer.read_message()
                    except NeedRekeyException:
                        continue
                    if ptype == MSG_IGNORE:
                        continue
                    elif ptype == MSG_DISCONNECT:
                        self._parse_disconnect(m)
                        self.active = False
                        self.packetizer.close()
                        break
                    elif ptype == MSG_DEBUG:
                        self._parse_debug(m)
                        continue
                    if len(self._expected_packet) > 0:
                        if ptype not in self._expected_packet:
                            raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
                        self._expected_packet = tuple()
                        # packet types 30-41 are reserved for kex engines
                        if (ptype >= 30) and (ptype <= 41):
                            self.kex_engine.parse_next(ptype, m)
                            continue
                    if ptype in self._handler_table:
                        self._handler_table[ptype](self, m)
                    elif ptype in self._channel_handler_table:
                        chanid = m.get_int()
                        chan = self._channels.get(chanid)
                        if chan is not None:
                            self._channel_handler_table[ptype](chan, m)
                        elif chanid in self.channels_seen:
                            self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
                        else:
                            self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
                            self.active = False
                            self.packetizer.close()
                    elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
                        self.auth_handler._handler_table[ptype](self.auth_handler, m)
                    else:
                        # no handler at all: tell the peer we can't deal with it
                        self._log(WARNING, 'Oops, unhandled type %d' % ptype)
                        msg = Message()
                        msg.add_byte(cMSG_UNIMPLEMENTED)
                        msg.add_int(m.seqno)
                        self._send_message(msg)
            except SSHException as e:
                self._log(ERROR, 'Exception: ' + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            except EOFError as e:
                self._log(DEBUG, 'EOF in transport thread')
                #self._log(DEBUG, util.tb_strings())
                self.saved_exception = e
            except socket.error as e:
                if type(e.args) is tuple:
                    if e.args:
                        emsg = '%s (%d)' % (e.args[1], e.args[0])
                    else: # empty tuple, e.g. socket.timeout
                        emsg = str(e) or repr(e)
                else:
                    emsg = e.args
                self._log(ERROR, 'Socket exception: ' + emsg)
                self.saved_exception = e
            except Exception as e:
                self._log(ERROR, 'Unknown exception: ' + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            # shutdown: unlink channels, wake all waiters, close the socket
            _active_threads.remove(self)
            for chan in list(self._channels.values()):
                chan._unlink()
            if self.active:
                self.active = False
                self.packetizer.close()
                if self.completion_event is not None:
                    self.completion_event.set()
                if self.auth_handler is not None:
                    self.auth_handler.abort()
                for event in self.channel_events.values():
                    event.set()
                try:
                    self.lock.acquire()
                    self.server_accept_cv.notify()
                finally:
                    self.lock.release()
            self.sock.close()
        except:
            # Don't raise spurious 'NoneType has no attribute X' errors when we
            # wake up during interpreter shutdown. Or rather -- raise
            # everything *if* sys.modules (used as a convenient sentinel)
            # appears to still exist.
            if self.sys.modules is not None:
                raise
### protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
if self.local_kex_init is None:
# remote side wants to renegotiate
self._send_kex_init()
self._parse_kex_init(m)
self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except ProxyCommandFailure:
raise
except Exception as e:
raise SSHException('Error reading SSH protocol banner' + str(e))
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
if buf[:4] != 'SSH-':
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
# pull off any attached comment
comment = ''
i = buf.find(' ')
if i >= 0:
comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
if len(segs) < 3:
raise SSHException('Invalid SSH banner')
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
    def _send_kex_init(self):
        """
        announce to the other side that we'd like to negotiate keys, and what
        kind of key negotiation we support.
        """
        # hold back user packets while keys are being renegotiated
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        self.in_kex = True
        if self.server_mode:
            if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
                # can't do group-exchange if we don't have a pack of potential primes
                pkex = list(self.get_security_options().kex)
                pkex.remove('diffie-hellman-group-exchange-sha1')
                self.get_security_options().kex = pkex
            available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
                                                self._preferred_keys))
        else:
            available_server_keys = self._preferred_keys
        # KEXINIT layout per RFC 4253 sec 7.1: cookie, then name-lists for
        # kex / host keys / ciphers / macs / compression / languages, each
        # given once per direction (client-to-server, server-to-client)
        m = Message()
        m.add_byte(cMSG_KEXINIT)
        m.add_bytes(os.urandom(16))
        m.add_list(self._preferred_kex)
        m.add_list(available_server_keys)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_compression)
        m.add_list(self._preferred_compression)
        # empty language lists, first_kex_packet_follows=False, reserved=0
        m.add_string(bytes())
        m.add_string(bytes())
        m.add_boolean(False)
        m.add_int(0)
        # save a copy for later (needed to compute a hash)
        self.local_kex_init = m.asbytes()
        self._send_message(m)
    def _parse_kex_init(self, m):
        """Parse the peer's KEXINIT and agree on kex, host key, ciphers,
        macs, and compression for each direction."""
        cookie = m.get_bytes(16)
        kex_algo_list = m.get_list()
        server_key_algo_list = m.get_list()
        client_encrypt_algo_list = m.get_list()
        server_encrypt_algo_list = m.get_list()
        client_mac_algo_list = m.get_list()
        server_mac_algo_list = m.get_list()
        client_compress_algo_list = m.get_list()
        server_compress_algo_list = m.get_list()
        client_lang_list = m.get_list()
        server_lang_list = m.get_list()
        kex_follows = m.get_boolean()
        unused = m.get_int()
        self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) +
                  ' client encrypt:' + str(client_encrypt_algo_list) +
                  ' server encrypt:' + str(server_encrypt_algo_list) +
                  ' client mac:' + str(client_mac_algo_list) +
                  ' server mac:' + str(server_mac_algo_list) +
                  ' client compress:' + str(client_compress_algo_list) +
                  ' server compress:' + str(server_compress_algo_list) +
                  ' client lang:' + str(client_lang_list) +
                  ' server lang:' + str(server_lang_list) +
                  ' kex follows?' + str(kex_follows))
        # as a server, we pick the first item in the client's list that we support.
        # as a client, we pick the first item in our list that the server supports.
        if self.server_mode:
            agreed_kex = list(filter(self._preferred_kex.__contains__, kex_algo_list))
        else:
            agreed_kex = list(filter(kex_algo_list.__contains__, self._preferred_kex))
        if len(agreed_kex) == 0:
            raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
        self.kex_engine = self._kex_info[agreed_kex[0]](self)
        # host key type: restricted to keys we actually have, in server mode
        if self.server_mode:
            available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
                                                self._preferred_keys))
            agreed_keys = list(filter(available_server_keys.__contains__, server_key_algo_list))
        else:
            agreed_keys = list(filter(server_key_algo_list.__contains__, self._preferred_keys))
        if len(agreed_keys) == 0:
            raise SSHException('Incompatible ssh peer (no acceptable host key)')
        self.host_key_type = agreed_keys[0]
        if self.server_mode and (self.get_server_key() is None):
            raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
        # ciphers: "local" is our outbound direction, "remote" is inbound
        if self.server_mode:
            agreed_local_ciphers = list(filter(self._preferred_ciphers.__contains__,
                                               server_encrypt_algo_list))
            agreed_remote_ciphers = list(filter(self._preferred_ciphers.__contains__,
                                                client_encrypt_algo_list))
        else:
            agreed_local_ciphers = list(filter(client_encrypt_algo_list.__contains__,
                                               self._preferred_ciphers))
            agreed_remote_ciphers = list(filter(server_encrypt_algo_list.__contains__,
                                                self._preferred_ciphers))
        if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
            raise SSHException('Incompatible ssh server (no acceptable ciphers)')
        self.local_cipher = agreed_local_ciphers[0]
        self.remote_cipher = agreed_remote_ciphers[0]
        self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
        # macs, per direction, same selection rule as ciphers
        if self.server_mode:
            agreed_remote_macs = list(filter(self._preferred_macs.__contains__, client_mac_algo_list))
            agreed_local_macs = list(filter(self._preferred_macs.__contains__, server_mac_algo_list))
        else:
            agreed_local_macs = list(filter(client_mac_algo_list.__contains__, self._preferred_macs))
            agreed_remote_macs = list(filter(server_mac_algo_list.__contains__, self._preferred_macs))
        if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
            raise SSHException('Incompatible ssh server (no acceptable macs)')
        self.local_mac = agreed_local_macs[0]
        self.remote_mac = agreed_remote_macs[0]
        # compression, per direction
        if self.server_mode:
            agreed_remote_compression = list(filter(self._preferred_compression.__contains__, client_compress_algo_list))
            agreed_local_compression = list(filter(self._preferred_compression.__contains__, server_compress_algo_list))
        else:
            agreed_local_compression = list(filter(client_compress_algo_list.__contains__, self._preferred_compression))
            agreed_remote_compression = list(filter(server_compress_algo_list.__contains__, self._preferred_compression))
        if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
            raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
        self.local_compression = agreed_local_compression[0]
        self.remote_compression = agreed_remote_compression[0]
        self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
                  (agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
                   self.remote_mac, self.local_compression, self.remote_compression))
        # save for computing hash later...
        # now wait! openssh has a bug (and others might too) where there are
        # actually some extra bytes (one NUL byte in openssh's case) added to
        # the end of the packet but not parsed. turns out we need to throw
        # away those bytes because they aren't part of the hash.
        self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
def _activate_inbound(self):
    """Switch on the newly negotiated encryption parameters for inbound traffic.

    Derives the inbound IV, cipher key and MAC key from the key-exchange
    secret via ``_compute_key`` and installs them on the packetizer.
    """
    block_size = self._cipher_info[self.remote_cipher]['block-size']
    # The key-derivation letter depends on direction: in server mode the
    # remote (inbound) stream is client-to-server, so use 'A'/'C'; as a
    # client it is server-to-client, so use 'B'/'D'.
    # NOTE(review): letters appear to follow the SSH transport key-derivation
    # scheme (RFC 4253 §7.2) — confirm against _compute_key.
    if self.server_mode:
        IV_in = self._compute_key('A', block_size)
        key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
    else:
        IV_in = self._compute_key('B', block_size)
        key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
    engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
    mac_size = self._mac_info[self.remote_mac]['size']
    mac_engine = self._mac_info[self.remote_mac]['class']
    # initial mac keys are done in the hash's natural size (not the
    # potentially truncated transmission size)
    if self.server_mode:
        mac_key = self._compute_key('E', mac_engine().digest_size)
    else:
        mac_key = self._compute_key('F', mac_engine().digest_size)
    self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
    compress_in = self._compression_info[self.remote_compression][1]
    # zlib@openssh.com compression is deliberately deferred until after
    # authentication; plain zlib (or an authenticated session) starts now.
    if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
        self._log(DEBUG, 'Switching on inbound compression ...')
        self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
    """Switch on the newly negotiated encryption parameters for outbound traffic.

    Sends NEWKEYS to the peer first: everything after that message goes out
    under the new keys.
    """
    m = Message()
    m.add_byte(cMSG_NEWKEYS)
    self._send_message(m)
    block_size = self._cipher_info[self.local_cipher]['block-size']
    # Direction-dependent key-derivation letters (mirror of _activate_inbound):
    # the server writes the server-to-client stream ('B'/'D'), the client
    # writes client-to-server ('A'/'C').
    if self.server_mode:
        IV_out = self._compute_key('B', block_size)
        key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
    else:
        IV_out = self._compute_key('A', block_size)
        key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
    engine = self._get_cipher(self.local_cipher, key_out, IV_out)
    mac_size = self._mac_info[self.local_mac]['size']
    mac_engine = self._mac_info[self.local_mac]['class']
    # initial mac keys are done in the hash's natural size (not the
    # potentially truncated transmission size)
    if self.server_mode:
        mac_key = self._compute_key('F', mac_engine().digest_size)
    else:
        mac_key = self._compute_key('E', mac_engine().digest_size)
    # CTR-mode ciphers need the sdctr flag on the packetizer.
    sdctr = self.local_cipher.endswith('-ctr')
    self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key, sdctr)
    compress_out = self._compression_info[self.local_compression][0]
    # zlib@openssh.com compression is deferred until after authentication.
    if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
        self._log(DEBUG, 'Switching on outbound compression ...')
        self.packetizer.set_outbound_compressor(compress_out())
    if not self.packetizer.need_rekey():
        self.in_kex = False
    # we always expect to receive NEWKEYS now
    self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
    """Mark the session as authenticated and start any deferred compression.

    The zlib@openssh.com variant deliberately delays compression until after
    successful authentication; enable it now for each direction that
    negotiated it.
    """
    self.authenticated = True
    # delayed initiation of compression
    if self.local_compression == 'zlib@openssh.com':
        make_compressor = self._compression_info[self.local_compression][0]
        self._log(DEBUG, 'Switching on outbound compression ...')
        self.packetizer.set_outbound_compressor(make_compressor())
    if self.remote_compression == 'zlib@openssh.com':
        make_decompressor = self._compression_info[self.remote_compression][1]
        self._log(DEBUG, 'Switching on inbound compression ...')
        self.packetizer.set_inbound_compressor(make_decompressor())
def _parse_newkeys(self, m):
    """Handle the peer's NEWKEYS message: activate inbound keys and finish kex.

    :param m: the (empty) NEWKEYS message; unused beyond dispatch.
    """
    self._log(DEBUG, 'Switch to new keys ...')
    self._activate_inbound()
    # can also free a bunch of stuff here
    self.local_kex_init = self.remote_kex_init = None
    self.K = None
    self.kex_engine = None
    if self.server_mode and (self.auth_handler is None):
        # create auth handler for server mode
        self.auth_handler = AuthHandler(self)
    if not self.initial_kex_done:
        # this was the first key exchange
        self.initial_kex_done = True
    # send an event?
    if self.completion_event is not None:
        self.completion_event.set()
    # it's now okay to send data again (if this was a re-key)
    if not self.packetizer.need_rekey():
        self.in_kex = False
    # Wake any writers blocked during the (re-)key exchange.
    self.clear_to_send_lock.acquire()
    try:
        self.clear_to_send.set()
    finally:
        self.clear_to_send_lock.release()
    return
def _parse_disconnect(self, m):
    """Log a DISCONNECT message received from the peer."""
    reason_code = m.get_int()
    description = m.get_text()
    text = 'Disconnect (code %d): %s' % (reason_code, description)
    self._log(INFO, text)
def _parse_global_request(self, m):
    """Handle a GLOBAL_REQUEST from the peer and reply if requested.

    ``ok`` ends up either a bool or a tuple; a tuple means "success, with
    extra response data" and is unpacked into the reply below.
    """
    kind = m.get_text()
    self._log(DEBUG, 'Received global request "%s"' % kind)
    want_reply = m.get_boolean()
    if not self.server_mode:
        # Clients reject all server-initiated global requests.
        self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
        ok = False
    elif kind == 'tcpip-forward':
        address = m.get_text()
        port = m.get_int()
        ok = self.server_object.check_port_forward_request(address, port)
        if ok:
            # Wrap the (possibly server-assigned) port so it is echoed back
            # as extra data in the success reply.
            ok = (ok,)
    elif kind == 'cancel-tcpip-forward':
        address = m.get_text()
        port = m.get_int()
        self.server_object.cancel_port_forward_request(address, port)
        ok = True
    else:
        ok = self.server_object.check_global_request(kind, m)
    extra = ()
    # Normalize: a tuple result means success plus payload for the reply.
    if type(ok) is tuple:
        extra = ok
        ok = True
    if want_reply:
        msg = Message()
        if ok:
            msg.add_byte(cMSG_REQUEST_SUCCESS)
            msg.add(*extra)
        else:
            msg.add_byte(cMSG_REQUEST_FAILURE)
        self._send_message(msg)
def _parse_request_success(self, m):
    """Record a successful reply to our pending global request."""
    self._log(DEBUG, 'Global request successful.')
    # Stash the reply message, then wake whoever is waiting on the event.
    self.global_response = m
    event = self.completion_event
    if event is not None:
        event.set()
def _parse_request_failure(self, m):
    """Record a denied reply to our pending global request."""
    self._log(DEBUG, 'Global request denied.')
    # A None response signals failure to whoever waits on the event.
    self.global_response = None
    event = self.completion_event
    if event is not None:
        event.set()
def _parse_channel_open_success(self, m):
    """Finish opening a channel we previously requested from the peer."""
    chanid = m.get_int()
    remote_chanid = m.get_int()
    remote_window = m.get_int()
    remote_max_packet = m.get_int()
    chan = self._channels.get(chanid)
    if chan is None:
        self._log(WARNING, 'Success for unrequested channel! [??]')
        return
    self.lock.acquire()
    try:
        chan._set_remote_channel(remote_chanid, remote_window, remote_max_packet)
        self._log(DEBUG, 'Secsh channel %d opened.' % chanid)
        # Wake the thread blocked in open_channel(), if any.
        event = self.channel_events.pop(chanid, None)
        if event is not None:
            event.set()
    finally:
        self.lock.release()
    return
def _parse_channel_open_failure(self, m):
    """Handle the peer's refusal to open a channel we requested.

    Saves a `ChannelException` for the requesting thread to re-raise, drops
    the half-open channel, and wakes the waiter.
    """
    chanid = m.get_int()
    reason = m.get_int()
    reason_str = m.get_text()
    lang = m.get_text()  # language tag; read to consume the field, unused
    reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
    self._log(ERROR, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
    self.lock.acquire()
    try:
        self.saved_exception = ChannelException(reason, reason_text)
        if chanid in self.channel_events:
            self._channels.delete(chanid)
            if chanid in self.channel_events:
                self.channel_events[chanid].set()
                del self.channel_events[chanid]
    finally:
        self.lock.release()
    return
def _parse_channel_open(self, m):
    """Handle an incoming CHANNEL_OPEN request from the peer.

    Client side: only accepts agent-forwarding, x11 and forwarded-tcpip
    channels for which a handler was registered.  Server side: defers the
    accept/reject decision to ``self.server_object``.
    """
    kind = m.get_text()
    chanid = m.get_int()
    initial_window_size = m.get_int()
    max_packet_size = m.get_int()
    reject = False
    if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None):
        self._log(DEBUG, 'Incoming forward agent connection')
        self.lock.acquire()
        try:
            my_chanid = self._next_channel()
        finally:
            self.lock.release()
    elif (kind == 'x11') and (self._x11_handler is not None):
        origin_addr = m.get_text()
        origin_port = m.get_int()
        self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
        self.lock.acquire()
        try:
            my_chanid = self._next_channel()
        finally:
            self.lock.release()
    elif (kind == 'forwarded-tcpip') and (self._tcp_handler is not None):
        server_addr = m.get_text()
        server_port = m.get_int()
        origin_addr = m.get_text()
        origin_port = m.get_int()
        self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
        self.lock.acquire()
        try:
            my_chanid = self._next_channel()
        finally:
            self.lock.release()
    elif not self.server_mode:
        # As a client, any other server-initiated channel kind is refused.
        self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
        reject = True
        reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
    else:
        self.lock.acquire()
        try:
            my_chanid = self._next_channel()
        finally:
            self.lock.release()
        if kind == 'direct-tcpip':
            # handle direct-tcpip requests coming from the client
            dest_addr = m.get_text()
            dest_port = m.get_int()
            origin_addr = m.get_text()
            origin_port = m.get_int()
            reason = self.server_object.check_channel_direct_tcpip_request(
                my_chanid, (origin_addr, origin_port), (dest_addr, dest_port))
        else:
            reason = self.server_object.check_channel_request(kind, my_chanid)
        if reason != OPEN_SUCCEEDED:
            self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
            reject = True
    if reject:
        msg = Message()
        msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
        msg.add_int(chanid)
        msg.add_int(reason)
        msg.add_string('')
        msg.add_string('en')
        self._send_message(msg)
        return
    # Accepted: register the new channel and confirm to the peer.
    chan = Channel(my_chanid)
    self.lock.acquire()
    try:
        self._channels.put(my_chanid, chan)
        self.channels_seen[my_chanid] = True
        chan._set_transport(self)
        chan._set_window(self.default_window_size, self.default_max_packet_size)
        chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
    finally:
        self.lock.release()
    m = Message()
    m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
    m.add_int(chanid)
    m.add_int(my_chanid)
    m.add_int(self.default_window_size)
    m.add_int(self.default_max_packet_size)
    self._send_message(m)
    self._log(DEBUG, 'Secsh channel %d (%s) opened.', my_chanid, kind)
    # Hand the channel to the matching registered handler, or queue it for
    # accept() in server mode.
    if kind == 'auth-agent@openssh.com':
        self._forward_agent_handler(chan)
    elif kind == 'x11':
        self._x11_handler(chan, (origin_addr, origin_port))
    elif kind == 'forwarded-tcpip':
        chan.origin_addr = (origin_addr, origin_port)
        self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
    else:
        self._queue_incoming_channel(chan)
def _parse_debug(self, m):
    """Log a DEBUG message sent by the remote side."""
    # All three fields must be consumed in order, even though only the
    # message text itself is logged.
    always_display = m.get_boolean()
    debug_text = m.get_string()
    language_tag = m.get_string()
    self._log(DEBUG, 'Debug msg: {0}'.format(util.safe_string(debug_text)))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
# Dispatch table: transport-level message type -> handler defined above.
# (Presumably class-scope of a Transport-like class whose header is outside
# this chunk — the handlers all take ``self``.)
_handler_table = {
    MSG_NEWKEYS: _parse_newkeys,
    MSG_GLOBAL_REQUEST: _parse_global_request,
    MSG_REQUEST_SUCCESS: _parse_request_success,
    MSG_REQUEST_FAILURE: _parse_request_failure,
    MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
    MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
    MSG_CHANNEL_OPEN: _parse_channel_open,
    MSG_KEXINIT: _negotiate_keys,
}
# Channel-scoped messages are forwarded to the corresponding Channel method.
_channel_handler_table = {
    MSG_CHANNEL_SUCCESS: Channel._request_success,
    MSG_CHANNEL_FAILURE: Channel._request_failed,
    MSG_CHANNEL_DATA: Channel._feed,
    MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
    MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
    MSG_CHANNEL_REQUEST: Channel._handle_request,
    MSG_CHANNEL_EOF: Channel._handle_eof,
    MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions (object):
    """
    Simple object containing the security preferences of an ssh transport.
    These are tuples of acceptable ciphers, digests, key types, and key
    exchange algorithms, listed in order of preference.

    Changing the contents and/or order of these fields affects the underlying
    `.Transport` (but only if you change them before starting the session).

    If you try to add an algorithm that paramiko doesn't recognize,
    ``ValueError`` will be raised.  If you try to assign something besides a
    tuple to one of the fields, ``TypeError`` will be raised.
    """
    #__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
    __slots__ = '_transport'

    def __init__(self, transport):
        self._transport = transport

    def __repr__(self):
        """
        Returns a string representation of this object, for debugging.
        """
        return '<paramiko.SecurityOptions for %s>' % repr(self._transport)

    def _set(self, name, orig, x):
        """Validate *x* against the transport's ``orig`` registry and store it
        on the transport under ``name``.

        :raises TypeError: if *x* is neither a tuple nor a list.
        :raises ValueError: if *x* names an algorithm the transport doesn't know.
        """
        # Accept a list for convenience, but store a tuple so callers cannot
        # mutate the preference order behind the transport's back.
        if type(x) is list:
            x = tuple(x)
        if type(x) is not tuple:
            raise TypeError('expected tuple or list')
        possible = list(getattr(self._transport, orig).keys())
        forbidden = [n for n in x if n not in possible]
        if len(forbidden) > 0:
            # Fix: the old message was a bare 'unknown cipher' even when the
            # field being set was macs/kex/keys/compression, and it never said
            # which name was rejected.  Name the offenders.
            raise ValueError('unknown algorithm(s): %s' % ', '.join(forbidden))
        setattr(self._transport, name, x)

    @property
    def ciphers(self):
        """Symmetric encryption ciphers"""
        return self._transport._preferred_ciphers

    @ciphers.setter
    def ciphers(self, x):
        self._set('_preferred_ciphers', '_cipher_info', x)

    @property
    def digests(self):
        """Digest (one-way hash) algorithms"""
        return self._transport._preferred_macs

    @digests.setter
    def digests(self, x):
        self._set('_preferred_macs', '_mac_info', x)

    @property
    def key_types(self):
        """Public-key algorithms"""
        return self._transport._preferred_keys

    @key_types.setter
    def key_types(self, x):
        self._set('_preferred_keys', '_key_info', x)

    @property
    def kex(self):
        """Key exchange algorithms"""
        return self._transport._preferred_kex

    @kex.setter
    def kex(self, x):
        self._set('_preferred_kex', '_kex_info', x)

    @property
    def compression(self):
        """Compression algorithms"""
        return self._transport._preferred_compression

    @compression.setter
    def compression(self, x):
        self._set('_preferred_compression', '_compression_info', x)
class ChannelMap (object):
    """Thread-safe mapping of channel id -> `.Channel`.

    Channels are held through weak references, so an entry vanishes on its
    own once no other reference to the channel remains.
    """

    def __init__(self):
        # (id -> Channel)
        self._map = weakref.WeakValueDictionary()
        self._lock = threading.Lock()

    def put(self, chanid, chan):
        """Register *chan* under *chanid*."""
        with self._lock:
            self._map[chanid] = chan

    def get(self, chanid):
        """Return the channel for *chanid*, or ``None`` if unknown."""
        with self._lock:
            return self._map.get(chanid, None)

    def delete(self, chanid):
        """Drop *chanid* from the map; unknown ids are silently ignored."""
        with self._lock:
            try:
                del self._map[chanid]
            except KeyError:
                pass

    def values(self):
        """Return a snapshot list of all live channels."""
        with self._lock:
            return list(self._map.values())

    def __len__(self):
        with self._lock:
            return len(self._map)
| mit |
nojhan/weboob-devel | modules/seloger/test.py | 4 | 1349 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import itertools
from weboob.capabilities.housing import Query
from weboob.tools.test import BackendTest
class SeLogerTest(BackendTest):
    """Live integration test of the 'seloger' housing backend."""
    MODULE = 'seloger'

    def test_seloger(self):
        """Build a query from cities matching 'Ferté' and expect housing hits."""
        query = Query()
        query.area_min = 20
        query.cost_max = 1000
        query.cities = []
        for match in self.backend.search_city(u'Ferté'):
            match.backend = self.backend.name
            query.cities.append(match)
        # Only consider the first 20 results to keep the test quick.
        housings = list(itertools.islice(self.backend.search_housings(query), 0, 20))
        self.assertTrue(len(housings) > 0)
        # Filling one result should complete at least the phone field.
        self.backend.fillobj(housings[0], 'phone')
| agpl-3.0 |
opencord/voltha | tests/itests/voltha/test_voltha_alarm_filters.py | 1 | 6502 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import main
import simplejson
from google.protobuf.json_format import MessageToDict
from common.utils.consulhelpers import get_endpoint_from_consul
from tests.itests.test_utils import get_pod_ip, \
run_long_running_command_with_timeout
from tests.itests.voltha.rest_base import RestBase
from voltha.protos.device_pb2 import Device
from voltha.protos.voltha_pb2 import AlarmFilter
from testconfig import config
# ~~~~~~~ Common variables ~~~~~~~
# Consul agent used to resolve service endpoints in the docker-compose setup.
LOCAL_CONSUL = "localhost:8500"

# Supported orchestration environments for this integration test.
ENV_DOCKER_COMPOSE = 'docker-compose'
ENV_K8S_SINGLE_NODE = 'k8s-single-node'

# Default to docker-compose unless overridden via nose testconfig
# (test_parameters.orch_env).
orch_env = ENV_DOCKER_COMPOSE
if 'test_parameters' in config and 'orch_env' in config['test_parameters']:
    orch_env = config['test_parameters']['orch_env']
print 'orchestration-environment: %s' % orch_env

# Shell command templates (kafkacat); '{}' is filled with the broker endpoint.
COMMANDS = dict(
    kafka_client_run="kafkacat -b {} -L",
    kafka_client_send_msg='echo hello | kafkacat -b {} -P -t voltha.alarms -c 1',
    kafka_client_alarm_check="kafkacat -o end -b {} -C -t voltha.alarms -c 2",
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class VolthaAlarmFilterTests(RestBase):
    """Integration tests: alarms from filtered devices must not reach Kafka.

    Requires a running Voltha deployment (docker-compose or single-node k8s)
    plus kafkacat on the test host.
    """

    # Get endpoint info (resolved once at class-definition time, based on the
    # orchestration environment selected above).
    if orch_env == ENV_K8S_SINGLE_NODE:
        rest_endpoint = get_pod_ip('voltha') + ':8443'
        kafka_endpoint = get_pod_ip('kafka')
    else:
        rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'voltha-envoy-8443')
        kafka_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'kafka')

    # Construct the base_url
    base_url = 'https://' + rest_endpoint

    # ~~~~~~~~~~~~ Tests ~~~~~~~~~~~~

    def test_1_alarm_topic_exists(self):
        """The voltha.alarms Kafka topic must exist (creating it if needed)."""
        # Produce a message to ensure that the topic exists
        cmd = COMMANDS['kafka_client_send_msg'].format(self.kafka_endpoint)
        run_long_running_command_with_timeout(cmd, 5)

        # We want to make sure that the topic is available on the system
        expected_pattern = ['voltha.alarms']

        # Start the kafka client to retrieve details on topics
        cmd = COMMANDS['kafka_client_run'].format(self.kafka_endpoint)
        kafka_client_output = run_long_running_command_with_timeout(cmd, 20)

        # Loop through the kafka client output to find the topic
        found = False
        for out in kafka_client_output:
            if all(ep in out for ep in expected_pattern):
                found = True
                break
        self.assertTrue(found,
                        'Failed to find topic {}'.format(expected_pattern))

    def test_2_alarm_generated_by_adapter(self):
        """Alarms flow for an unfiltered device and are suppressed for a filtered one."""
        # Verify that REST calls can be made
        self.verify_rest()

        # Create a new device
        device_not_filtered = self.add_device('00:00:00:00:00:01')
        device_filtered = self.add_device('00:00:00:00:00:02')
        self.add_device_id_filter(device_filtered['id'])

        # Activate the new device
        self.activate_device(device_not_filtered['id'])
        self.activate_device(device_filtered['id'])

        # The simulated olt devices should start generating alarms periodically
        # We should see alarms generated for the non filtered device
        self.get_alarm_event(device_not_filtered['id'])
        # We should not see any alarms from the filtered device
        self.get_alarm_event(device_filtered['id'], True)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def verify_rest(self):
        """Make sure the Voltha REST interface is available."""
        self.get('/api/v1')

    def add_device(self, mac_address):
        """Create a new simulated OLT device; returns the device dict."""
        device = Device(
            type='simulated_olt',
            mac_address=mac_address
        )
        device = self.post('/api/v1/devices', MessageToDict(device),
                           expected_http_code=200)
        return device

    def add_device_id_filter(self, device_id):
        """Create an alarm filter with a single rule matching *device_id*."""
        rules = list()
        rule = dict()
        rule['key'] = 'device_id'
        rule['value'] = device_id
        rules.append(rule)

        alarm_filter = AlarmFilter(rules=rules)
        alarm_filter = self.post('/api/v1/alarm_filters', MessageToDict(alarm_filter),
                                 expected_http_code=200)
        return alarm_filter

    def activate_device(self, device_id):
        """Enable the simulated device; this triggers random alarm generation."""
        path = '/api/v1/devices/{}'.format(device_id)
        self.post(path + '/enable', expected_http_code=200)
        device = self.get(path)
        self.assertEqual(device['admin_state'], 'ENABLED')

    def get_alarm_event(self, device_id, expect_failure=False):
        """Consume Kafka alarms and assert (non-)presence of *device_id*.

        With expect_failure=True, asserts that NO alarm for the device shows
        up (i.e. the filter worked).  Returns the last parsed alarm, if any.
        """
        cmd = COMMANDS['kafka_client_alarm_check'].format(self.kafka_endpoint)
        kafka_client_output = run_long_running_command_with_timeout(cmd, 30)

        # Verify the kafka client output
        found = False
        alarm_data = None
        for out in kafka_client_output:
            # Catch any error that might occur while reading the kafka messages
            try:
                alarm_data = simplejson.loads(out)
                print alarm_data
                if not alarm_data or 'resource_id' not in alarm_data:
                    continue
                elif alarm_data['resource_id'] == device_id:
                    found = True
                    break
            except Exception as e:
                continue

        if not expect_failure:
            self.assertTrue(
                found,
                'Failed to find kafka alarm with device id:{}'.format(device_id))
        else:
            self.assertFalse(
                found,
                'Found a kafka alarm with device id:{}. It should have been filtered'.format(
                    device_id))

        return alarm_data
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    main()
| apache-2.0 |
ryanahall/django | tests/middleware/test_security.py | 37 | 7769 | from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(TestCase):
    """Tests for django.middleware.security.SecurityMiddleware.

    Fixes applied: several @override_settings decorators used wrong setting
    names (HSTS_SECONDS, SECURE_CONTENT_TYPE_NO_SNIFF, BROWSER_XSS_FILTER),
    which the middleware never reads — those tests only passed because the
    middleware defaults happened to match.  They now override the real
    SECURE_* settings, consistent with the other tests in this class.
    """

    # Shared factory for building WSGI test requests.
    request = RequestFactory()

    @property
    def middleware(self):
        # Imported lazily so override_settings is already in effect when the
        # middleware reads its settings at construction time.
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        # Extra WSGI environ entries that make a request look like HTTPS.
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        """Build an HttpResponse, applying any extra headers passed via kwargs."""
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        """Run a request/response pair through the middleware.

        Returns either the middleware's early redirect (from process_request)
        or the processed response.
        """
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    def process_request(self, method, *args, **kwargs):
        """Run a single request through the middleware's process_request."""
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With SECURE_HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    # Fixed: was HSTS_SECONDS, a setting the middleware never reads.
    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=False))

    # Fixed: was HSTS_SECONDS, a setting the middleware never reads.
    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With SECURE_HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS
        True, the middleware adds a "strict-transport-security" header with the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS
        False, the middleware adds a "strict-transport-security" header without
        the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With SECURE_CONTENT_TYPE_NOSNIFF set to True, the middleware adds an
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")

    # Fixed: was SECURE_CONTENT_TYPE_NO_SNIFF (typo), which doesn't exist.
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With SECURE_CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With SECURE_BROWSER_XSS_FILTER set to True, the middleware adds an
        "x-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    # Fixed: was BROWSER_XSS_FILTER, a setting the middleware never reads.
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With SECURE_BROWSER_XSS_FILTER set to False, the middleware does not
        add an "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SECURE_SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SECURE_SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SECURE_SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
| bsd-3-clause |
talkincode/ToughPORTAL | toughportal/common/pyforms/rules.py | 1 | 2384 | #!/usr/bin/env python
#coding=utf-8
from toughportal.console.libs import pyforms
# Validation rules built on the project's pyforms helpers.  Each is a
# regexp validator carrying a (Chinese) user-facing error message; the
# messages are runtime strings and are kept verbatim.
not_null = pyforms.notnull
is_not_empty = pyforms.regexp('.+', u"不允许为空")
is_date = pyforms.regexp('(\d{4})-(\d{2}-(\d\d))', u"日期格式:yyyy-MM-dd")
is_email = pyforms.regexp('[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$', u"email格式,比如name@domain.com")
is_chars = pyforms.regexp("^[A-Za-z]+$", u"必须是英文字符串")
# Parameterized validators: alphanumeric strings of exact / bounded length.
is_alphanum = lambda x: pyforms.regexp("^[A-Za-z0-9]{%s}$" % x, u"必须是长度为%s的数字字母组合" % x)
is_alphanum2 = lambda x, y: pyforms.regexp("^[A-Za-z0-9]{%s,%s}$" % (x, y), u"必须是长度为%s到%s的数字字母组合" % (x, y))
is_number = pyforms.regexp("^[0-9]*$", u"必须是数字")
is_number2 = pyforms.regexp("^[1-9]\d*$", u'必须是大于0的正整数')
is_number3 = pyforms.regexp('^(([1-9]\d*)|0)(\.\d{1,3})?$', u"支持包含(最大3位)小数点 xx.xxxxx")
is_numberOboveZore = pyforms.regexp("^\\d+$", u"必须为大于等于0的整数")
# Fix: the old pattern "^[\u4e00-\u9fa5],{0,}$" matched a single character
# followed by commas, and as a byte string its '\u' escapes were inert.
# Use a unicode pattern matching one or more CJK ideographs.
is_cn = pyforms.regexp(u"^[\u4e00-\u9fa5]+$", u"必须是汉字")
# Fix: char class was [a-zA-z], which accidentally includes '[', '\', ']',
# '^', '_' and '`' (codepoints between 'Z' and 'a').
is_url = pyforms.regexp('[a-zA-Z]+://[^\s]*', u"url格式 xxxx://xxx")
is_phone = pyforms.regexp('^(\(\d{3,4}\)|\d{3,4}-)?\d{7,8}$', u"固定电话号码格式:0000-00000000")
is_idcard = pyforms.regexp('^\d{15}$|^\d{18}$|^\d{17}[Xx]$', u"身份证号码格式")
is_ip = pyforms.regexp("(^$)|\d+\.\d+\.\d+\.\d+", u"ip格式:xxx.xxx.xxx.xxx")
is_rmb = pyforms.regexp('^(([1-9]\d*)|0)(\.\d{1,2})?$', u"人民币金额 xx.xx")
len_of = lambda x, y: pyforms.regexp("[\s\S]{%s,%s}$" % (x, y), u"长度必须为%s到%s" % (x, y))
is_alphanum3 = lambda x, y: pyforms.regexp("^[A-Za-z0-9\_\-]{%s,%s}$" % (x, y), u"必须是长度为%s到%s的数字字母与下划线组合" % (x, y))
# Time-of-day range (may cross midnight), e.g. 19:00-09:20; empty allowed.
is_period = pyforms.regexp("(^$)|^([01][0-9]|2[0-3]):[0-5][0-9]-([01][0-9]|2[0-3]):[0-5][0-9]$", u"时间段,hh:mm-hh:mm,支持跨天,如 19:00-09:20")
is_telephone = pyforms.regexp("^1[0-9]{10}$", u"必须是手机号码")
is_time = pyforms.regexp('(\d{4})-(\d{2}-(\d\d))\s([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]', u"时间格式:yyyy-MM-dd hh:mm:ss")
is_time_hm = pyforms.regexp('^([01][0-9]|2[0-3]):[0-5][0-9]$', u"时间格式: hh:mm")

# Default CSS attribute dicts for form widgets (Bootstrap classes).
input_style = {"class": "form-control"}
button_style = {"class": "btn btn-primary"}
button_style_block = {"class": "btn btn-block"}
if __name__ == "__main__":
print is_period.valid("") == True | agpl-3.0 |
fhaoquan/kbengine | kbe/src/lib/python/Lib/test/test_dict.py | 77 | 29052 | import unittest
from test import support
import collections, random, string
import collections.abc
import gc, weakref
import pickle
class DictTest(unittest.TestCase):
def test_invalid_keyword_arguments(self):
class Custom(dict):
pass
for invalid in {1 : 2}, Custom({1 : 2}):
with self.assertRaises(TypeError):
dict(**invalid)
with self.assertRaises(TypeError):
{}.update(**invalid)
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(dict(), {})
self.assertIsNot(dict(), {})
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [(''.join(random.sample(string.ascii_letters, 8)), i)
for i in range(n)]
random.shuffle(items)
formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
dictliteral = '{' + ', '.join(formatted_items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assertIs(not {}, True)
self.assertTrue({1: 2})
self.assertIs(bool({}), False)
self.assertIs(bool({1: 2}), True)
def test_keys(self):
d = {}
self.assertEqual(set(d.keys()), set())
d = {'a': 1, 'b': 2}
k = d.keys()
self.assertEqual(set(k), {'a', 'b'})
self.assertIn('a', k)
self.assertIn('b', k)
self.assertIn('a', d)
self.assertIn('b', d)
self.assertRaises(TypeError, d.keys, None)
self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
def test_values(self):
d = {}
self.assertEqual(set(d.values()), set())
d = {1:2}
self.assertEqual(set(d.values()), {2})
self.assertRaises(TypeError, d.values, None)
self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
def test_items(self):
d = {}
self.assertEqual(set(d.items()), set())
d = {1:2}
self.assertEqual(set(d.items()), {(1, 2)})
self.assertRaises(TypeError, d.items, None)
self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
def test_contains(self):
d = {}
self.assertNotIn('a', d)
self.assertFalse('a' in d)
self.assertTrue('a' not in d)
d = {'a': 1, 'b': 2}
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
    """Basic subscription semantics, plus error propagation from __eq__/__hash__."""
    d = {'a': 1, 'b': 2}
    self.assertEqual(d['a'], 1)
    self.assertEqual(d['b'], 2)
    d['c'] = 3
    d['a'] = 4
    self.assertEqual(d['c'], 3)
    self.assertEqual(d['a'], 4)
    del d['b']
    self.assertEqual(d, {'a': 4, 'c': 3})
    # __getitem__ requires exactly one argument
    self.assertRaises(TypeError, d.__getitem__)

    # NOTE: Exc is defined *below* but referenced here — legal, because
    # __eq__ resolves the name lazily via the enclosing scope at call time,
    # and the KeyError lookup for 23 (hash 23 vs BadEq's hash 24) never
    # actually calls __eq__.
    class BadEq(object):
        def __eq__(self, other):
            raise Exc()
        def __hash__(self):
            return 24

    d = {}
    d[BadEq()] = 42
    self.assertRaises(KeyError, d.__getitem__, 23)

    class Exc(Exception): pass

    # An exception raised by the key's __hash__ must propagate from lookup.
    class BadHash(object):
        fail = False
        def __hash__(self):
            if self.fail:
                raise Exc()
            else:
                return 42

    x = BadHash()
    d[x] = 42
    x.fail = True
    self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
    """update() with dicts, mapping-like objects, iterables, and failures.

    The three successive ``FailingUserDict`` definitions deliberately shadow
    one another; each exercises a different failure point in the update
    protocol (keys(), iteration, __getitem__).
    """
    d = {}
    d.update({1:100})
    d.update({2:20})
    d.update({1:1, 2:2, 3:3})
    self.assertEqual(d, {1:1, 2:2, 3:3})

    # update() with no arguments is a no-op.
    d.update()
    self.assertEqual(d, {1:1, 2:2, 3:3})

    self.assertRaises((TypeError, AttributeError), d.update, None)

    # A non-dict object exposing keys()/__getitem__ works like a mapping.
    class SimpleUserDict:
        def __init__(self):
            self.d = {1:1, 2:2, 3:3}
        def keys(self):
            return self.d.keys()
        def __getitem__(self, i):
            return self.d[i]
    d.clear()
    d.update(SimpleUserDict())
    self.assertEqual(d, {1:1, 2:2, 3:3})

    class Exc(Exception): pass

    d.clear()
    # Failure point 1: keys() itself raises.
    class FailingUserDict:
        def keys(self):
            raise Exc
    self.assertRaises(Exc, d.update, FailingUserDict())

    # Failure point 2: the key iterator raises mid-iteration.
    class FailingUserDict:
        def keys(self):
            class BogonIter:
                def __init__(self):
                    self.i = 1
                def __iter__(self):
                    return self
                def __next__(self):
                    if self.i:
                        self.i = 0
                        return 'a'
                    raise Exc
            return BogonIter()
        def __getitem__(self, key):
            return key
    self.assertRaises(Exc, d.update, FailingUserDict())

    # Failure point 3: iteration succeeds but __getitem__ raises.
    class FailingUserDict:
        def keys(self):
            class BogonIter:
                def __init__(self):
                    self.i = ord('a')
                def __iter__(self):
                    return self
                def __next__(self):
                    if self.i <= ord('z'):
                        rtn = chr(self.i)
                        self.i += 1
                        return rtn
                    raise StopIteration
            return BogonIter()
        def __getitem__(self, key):
            raise Exc
    self.assertRaises(Exc, d.update, FailingUserDict())

    # An iterable (non-mapping) argument whose iterator raises.
    class badseq(object):
        def __iter__(self):
            return self
        def __next__(self):
            raise Exc()

    self.assertRaises(Exc, {}.update, badseq())

    # Sequence elements must be exactly 2-tuples.
    self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
    def test_fromkeys(self):
        # dict.fromkeys(): default None values, explicit value, generators,
        # subclass behavior (__new__/__init__/__setitem__ overrides), and the
        # C fast path for dict arguments.
        self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        d = {}
        self.assertIsNot(d.fromkeys('abc'), d)
        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
        self.assertEqual(d.fromkeys([]), {})
        def g():
            yield 1
        self.assertEqual(d.fromkeys(g()), {1:None})
        self.assertRaises(TypeError, {}.fromkeys, 3)
        class dictlike(dict): pass
        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
        self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
        self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
        # fromkeys honors a __new__ that returns a non-dict mapping
        class mydict(dict):
            def __new__(cls):
                return collections.UserDict()
        ud = mydict.fromkeys('ab')
        self.assertEqual(ud, {'a':None, 'b':None})
        self.assertIsInstance(ud, collections.UserDict)
        self.assertRaises(TypeError, dict.fromkeys)
        class Exc(Exception): pass
        class baddict1(dict):
            def __init__(self):
                raise Exc()
        self.assertRaises(Exc, baddict1.fromkeys, [1])
        class BadSeq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, dict.fromkeys, BadSeq())
        class baddict2(dict):
            def __setitem__(self, key, value):
                raise Exc()
        self.assertRaises(Exc, baddict2.fromkeys, [1])
        # test fast path for dictionary inputs
        d = dict(zip(range(6), range(6)))
        self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
        # __new__ returning an already-populated dict: fromkeys adds to it
        class baddict3(dict):
            def __new__(cls):
                return d
        d = {i : i for i in range(10)}
        res = d.copy()
        res.update(a=None, b=None, c=None)
        self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
    def test_copy(self):
        # dict.copy() produces an equal dict and takes no arguments.
        d = {1:1, 2:2, 3:3}
        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
        self.assertEqual({}.copy(), {})
        self.assertRaises(TypeError, d.copy, None)
    def test_get(self):
        # dict.get(): missing key -> None or supplied default; present key
        # ignores the default; arity checks.
        d = {}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        d = {'a': 1, 'b': 2}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        self.assertEqual(d.get('a'), 1)
        self.assertEqual(d.get('a', 3), 1)
        self.assertRaises(TypeError, d.get)
        self.assertRaises(TypeError, d.get, None, None, None)
    def test_setdefault(self):
        # dict.setdefault()
        # Inserts the default only when the key is absent; propagates
        # exceptions raised by the key's __hash__.
        d = {}
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key0', [])
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key', []).append(3)
        self.assertEqual(d['key'][0], 3)
        d.setdefault('key', []).append(4)
        self.assertEqual(len(d['key']), 2)
        self.assertRaises(TypeError, d.setdefault)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.setdefault, x, [])
    def test_setdefault_atomic(self):
        # Issue #13521: setdefault() calls __hash__ and __eq__ only once.
        class Hashed(object):
            def __init__(self):
                self.hash_count = 0
                self.eq_count = 0
            def __hash__(self):
                self.hash_count += 1
                return 42
            def __eq__(self, other):
                self.eq_count += 1
                return id(self) == id(other)
        hashed1 = Hashed()
        y = {hashed1: 5}
        hashed2 = Hashed()
        # hashed2 collides with hashed1 (same hash), so exactly one __eq__
        # call happens across the two keys during the probe.
        y.setdefault(hashed2, [])
        self.assertEqual(hashed1.hash_count, 1)
        self.assertEqual(hashed2.hash_count, 1)
        self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_setitem_atomic_at_resize(self):
        # d[k] = v must call __hash__/__eq__ only once even when the
        # insertion triggers an internal table resize.
        class Hashed(object):
            def __init__(self):
                self.hash_count = 0
                self.eq_count = 0
            def __hash__(self):
                self.hash_count += 1
                return 42
            def __eq__(self, other):
                self.eq_count += 1
                return id(self) == id(other)
        hashed1 = Hashed()
        # 5 items
        y = {hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3}
        hashed2 = Hashed()
        # 6th item forces a resize
        y[hashed2] = []
        self.assertEqual(hashed1.hash_count, 1)
        self.assertEqual(hashed2.hash_count, 1)
        self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_popitem(self):
        # dict.popitem()
        # Two identically-built dicts must pop items in the same order;
        # popping an empty dict raises KeyError.
        for copymode in -1, +1:
            # -1: b has same structure as a
            # +1: b is a.copy()
            for log2size in range(12):
                size = 2**log2size
                a = {}
                b = {}
                for i in range(size):
                    a[repr(i)] = i
                    if copymode < 0:
                        b[repr(i)] = i
                if copymode > 0:
                    b = a.copy()
                for i in range(size):
                    ka, va = ta = a.popitem()
                    self.assertEqual(va, int(ka))
                    kb, vb = tb = b.popitem()
                    self.assertEqual(vb, int(kb))
                    self.assertFalse(copymode < 0 and ta != tb)
                self.assertFalse(a)
                self.assertFalse(b)
        d = {}
        self.assertRaises(KeyError, d.popitem)
    def test_pop(self):
        # Tests for pop with specified key
        # Missing key raises KeyError unless a default is given; exceptions
        # from the key's __hash__ propagate.
        d = {}
        k, v = 'abc', 'def'
        d[k] = v
        self.assertRaises(KeyError, d.pop, 'ghi')
        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)
        self.assertRaises(KeyError, d.pop, k)
        self.assertEqual(d.pop(k, v), v)
        d[k] = v
        self.assertEqual(d.pop(k, 1), v)
        self.assertRaises(TypeError, d.pop)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.pop, x)
    def test_mutating_iteration(self):
        # changing dict size during iteration
        # Growing the dict inside the loop must raise RuntimeError.
        d = {}
        d[1] = 1
        with self.assertRaises(RuntimeError):
            for i in d:
                d[i+1] = 1
    def test_mutating_lookup(self):
        # changing dict during a lookup (issue #14417)
        # A key whose __eq__ deletes entries from the dict being probed must
        # not corrupt the table or crash the interpreter.
        class NastyKey:
            mutate_dict = None
            def __init__(self, value):
                self.value = value
            def __hash__(self):
                # hash collision!
                return 1
            def __eq__(self, other):
                if NastyKey.mutate_dict:
                    mydict, key = NastyKey.mutate_dict
                    NastyKey.mutate_dict = None
                    del mydict[key]
                return self.value == other.value
            key1 = NastyKey(1)
            key2 = NastyKey(2)
            d = {key1: 1}
            NastyKey.mutate_dict = (d, key1)
            d[key2] = 2
            self.assertEqual(d, {key2: 2})
    def test_repr(self):
        # repr() of empty, simple, and self-referential dicts; exceptions
        # from a value's __repr__ propagate.
        d = {}
        self.assertEqual(repr(d), '{}')
        d[1] = 2
        self.assertEqual(repr(d), '{1: 2}')
        d = {}
        d[1] = d
        self.assertEqual(repr(d), '{1: {...}}')
        class Exc(Exception): pass
        class BadRepr(object):
            def __repr__(self):
                raise Exc()
        d = {1: BadRepr()}
        self.assertRaises(Exc, repr, d)
    def test_eq(self):
        # Dict equality; exceptions raised by a key's __eq__ during the
        # comparison must propagate.
        self.assertEqual({}, {})
        self.assertEqual({1: 2}, {1: 2})
        class Exc(Exception): pass
        class BadCmp(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 1
        d1 = {BadCmp(): 1}
        d2 = {1: 1}
        with self.assertRaises(Exc):
            d1 == d2
    def test_keys_contained(self):
        # Run the set-like comparison checks for both keys() and items() views.
        self.helper_keys_contained(lambda x: x.keys())
        self.helper_keys_contained(lambda x: x.items())
    def helper_keys_contained(self, fn):
        # Test rich comparisons against dict key views, which should behave the
        # same as sets.  `fn` maps a dict to the view under test.
        empty = fn(dict())
        empty2 = fn(dict())
        smaller = fn({1:1, 2:2})
        larger = fn({1:1, 2:2, 3:3})
        larger2 = fn({1:1, 2:2, 3:3})
        larger3 = fn({4:1, 2:2, 3:3})
        self.assertTrue(smaller < larger)
        self.assertTrue(smaller <= larger)
        self.assertTrue(larger > smaller)
        self.assertTrue(larger >= smaller)
        self.assertFalse(smaller >= larger)
        self.assertFalse(smaller > larger)
        self.assertFalse(larger <= smaller)
        self.assertFalse(larger < smaller)
        # larger3 is not a superset of smaller (key 1 vs 4), so all subset
        # relations are false in both directions.
        self.assertFalse(smaller < larger3)
        self.assertFalse(smaller <= larger3)
        self.assertFalse(larger3 > smaller)
        self.assertFalse(larger3 >= smaller)
        # Inequality strictness
        self.assertTrue(larger2 >= larger)
        self.assertTrue(larger2 <= larger)
        self.assertFalse(larger2 > larger)
        self.assertFalse(larger2 < larger)
        self.assertTrue(larger == larger2)
        self.assertTrue(smaller != larger)
        # There is an optimization on the zero-element case.
        self.assertTrue(empty == empty2)
        self.assertFalse(empty != empty2)
        self.assertFalse(empty == smaller)
        self.assertTrue(empty != smaller)
        # With the same size, an elementwise compare happens
        self.assertTrue(larger != larger3)
        self.assertFalse(larger == larger3)
    def test_errors_in_view_containment_check(self):
        # Exceptions raised by a value's __eq__ during view comparisons
        # (==, !=, <=, >=, <, >) must propagate.
        class C:
            def __eq__(self, other):
                raise RuntimeError
        d1 = {1: C()}
        d2 = {1: C()}
        with self.assertRaises(RuntimeError):
            d1.items() == d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() != d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() <= d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() >= d2.items()
        d3 = {1: C(), 2: C()}
        with self.assertRaises(RuntimeError):
            d2.items() < d3.items()
        with self.assertRaises(RuntimeError):
            d3.items() > d2.items()
    def test_dictview_set_operations_on_keys(self):
        # keys() views support set algebra: -, &, |, ^.
        k1 = {1:1, 2:2}.keys()
        k2 = {1:1, 2:2, 3:3}.keys()
        k3 = {4:4}.keys()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {1,2})
        self.assertEqual(k2 - k1, {3})
        self.assertEqual(k3 - k1, {4})
        self.assertEqual(k1 & k2, {1,2})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {1,2,3})
        self.assertEqual(k1 ^ k2, {3})
        self.assertEqual(k1 ^ k3, {1,2,4})
    def test_dictview_set_operations_on_items(self):
        # items() views support set algebra over (key, value) pairs.
        k1 = {1:1, 2:2}.items()
        k2 = {1:1, 2:2, 3:3}.items()
        k3 = {4:4}.items()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {(1,1), (2,2)})
        self.assertEqual(k2 - k1, {(3,3)})
        self.assertEqual(k3 - k1, {(4,4)})
        self.assertEqual(k1 & k2, {(1,1), (2,2)})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
        self.assertEqual(k1 ^ k2, {(3,3)})
        self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
    def test_dictview_mixed_set_operations(self):
        # Views compare and combine with plain sets, in either operand order.
        # Just a few for .keys()
        self.assertTrue({1:1}.keys() == {1})
        self.assertTrue({1} == {1:1}.keys())
        self.assertEqual({1:1}.keys() | {2}, {1, 2})
        self.assertEqual({2} | {1:1}.keys(), {1, 2})
        # And a few for .items()
        self.assertTrue({1:1}.items() == {(1,1)})
        self.assertTrue({(1,1)} == {1:1}.items())
        self.assertEqual({1:1}.items() | {2}, {(1,1), 2})
        self.assertEqual({2} | {1:1}.items(), {(1,1), 2})
    def test_missing(self):
        # Make sure dict doesn't have a __missing__ method
        self.assertFalse(hasattr(dict, "__missing__"))
        self.assertFalse(hasattr({}, "__missing__"))
        # Test several cases:
        # (D) subclass defines __missing__ method returning a value
        # (E) subclass defines __missing__ method raising RuntimeError
        # (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
        class D(dict):
            def __missing__(self, key):
                return 42
        d = D({1: 2, 3: 4})
        self.assertEqual(d[1], 2)
        self.assertEqual(d[3], 4)
        self.assertNotIn(2, d)
        self.assertNotIn(2, d.keys())
        self.assertEqual(d[2], 42)
        class E(dict):
            def __missing__(self, key):
                raise RuntimeError(key)
        e = E()
        with self.assertRaises(RuntimeError) as c:
            e[42]
        self.assertEqual(c.exception.args, (42,))
        class F(dict):
            def __init__(self):
                # An instance variable __missing__ should have no effect
                self.__missing__ = lambda key: None
        f = F()
        with self.assertRaises(KeyError) as c:
            f[42]
        self.assertEqual(c.exception.args, (42,))
        class G(dict):
            pass
        g = G()
        with self.assertRaises(KeyError) as c:
            g[42]
        self.assertEqual(c.exception.args, (42,))
    def test_tuple_keyerror(self):
        # SF #1576657
        # KeyError for a tuple key carries the tuple itself, not its contents.
        d = {}
        with self.assertRaises(KeyError) as c:
            d[(1,)]
        self.assertEqual(c.exception.args, ((1,),))
    def test_bad_key(self):
        # Dictionary lookups should fail if __eq__() raises an exception.
        class CustomException(Exception):
            pass
        class BadDictKey:
            def __hash__(self):
                return hash(self.__class__)
            def __eq__(self, other):
                if isinstance(other, self.__class__):
                    raise CustomException
                return other
        d = {}
        x1 = BadDictKey()
        x2 = BadDictKey()
        d[x1] = 1
        # Every operation that probes the table must surface the exception.
        for stmt in ['d[x2] = 2',
                     'z = d[x2]',
                     'x2 in d',
                     'd.get(x2)',
                     'd.setdefault(x2, 42)',
                     'd.pop(x2)',
                     'd.update({x2: 2})']:
            with self.assertRaises(CustomException):
                exec(stmt, locals())
    def test_resize1(self):
        # Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
        # This version got an assert failure in debug build, infinite loop in
        # release build.  Unfortunately, provoking this kind of stuff requires
        # a mix of inserts and deletes hitting exactly the right hash codes in
        # exactly the right order, and I can't think of a randomized approach
        # that would be *likely* to hit a failing case in reasonable time.
        d = {}
        for i in range(5):
            d[i] = i
        for i in range(5):
            del d[i]
        for i in range(5, 9):  # i==8 was the problem
            d[i] = i
    def test_resize2(self):
        # Another dict resizing bug (SF bug #1456209).
        # This caused Segmentation faults or Illegal instructions.
        # X keys all collide (hash 5); clearing the dict from __eq__ during
        # the resize lookup used to leave dangling internal pointers.
        class X(object):
            def __hash__(self):
                return 5
            def __eq__(self, other):
                if resizing:
                    d.clear()
                return False
        d = {}
        resizing = False
        d[X()] = 1
        d[X()] = 2
        d[X()] = 3
        d[X()] = 4
        d[X()] = 5
        # now trigger a resize
        resizing = True
        d[9] = 6
    def test_empty_presized_dict_in_freelist(self):
        # Bug #3537: if an empty but presized dict with a size larger
        # than 7 was in the freelist, it triggered an assertion failure
        # The 1//0 aborts construction after the table was presized.
        with self.assertRaises(ZeroDivisionError):
            d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
                 'f': None, 'g': None, 'h': None}
        d = {}
    def test_container_iterator(self):
        # Bug #3680: tp_traverse was not implemented for dictiter and
        # dictview objects.
        # Build a reference cycle through each view/iterator kind and check
        # the GC can still collect it.
        class C(object):
            pass
        views = (dict.items, dict.values, dict.keys)
        for v in views:
            obj = C()
            ref = weakref.ref(obj)
            container = {obj: 1}
            obj.v = v(container)
            obj.x = iter(obj.v)
            del obj, container
            gc.collect()
            self.assertIs(ref(), None, "Cycle was not collected")
    def _not_tracked(self, t):
        # Assert that `t` is not tracked by the cyclic GC.
        # Nested containers can take several collections to untrack
        gc.collect()
        gc.collect()
        self.assertFalse(gc.is_tracked(t), t)
    def _tracked(self, t):
        # Assert that `t` is (and stays) tracked by the cyclic GC.
        self.assertTrue(gc.is_tracked(t), t)
        gc.collect()
        gc.collect()
        self.assertTrue(gc.is_tracked(t), t)
    @support.cpython_only
    def test_track_literals(self):
        # Test GC-optimization of dict literals
        x, y, z, w = 1.5, "a", (1, None), []
        # Dicts of immutable, untracked contents are untracked themselves.
        self._not_tracked({})
        self._not_tracked({x:(), y:x, z:1})
        self._not_tracked({1: "a", "b": 2})
        self._not_tracked({1: 2, (None, True, False, ()): int})
        self._not_tracked({1: object()})
        # Dicts with mutable elements are always tracked, even if those
        # elements are not tracked right now.
        self._tracked({1: []})
        self._tracked({1: ([],)})
        self._tracked({1: {}})
        self._tracked({1: set()})
    @support.cpython_only
    def test_track_dynamic(self):
        # Test GC-optimization of dynamically-created dicts
        class MyObject(object):
            pass
        x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
        # Inserting only immutable values keeps the dict untracked.
        d = dict()
        self._not_tracked(d)
        d[1] = "a"
        self._not_tracked(d)
        d[y] = 2
        self._not_tracked(d)
        d[z] = 3
        self._not_tracked(d)
        self._not_tracked(d.copy())
        # A mutable value (list) makes the dict tracked ...
        d[4] = w
        self._tracked(d)
        self._tracked(d.copy())
        # ... but replacing it does not untrack again (d was re-examined).
        d[4] = None
        self._not_tracked(d)
        self._not_tracked(d.copy())
        # dd isn't tracked right now, but it may mutate and therefore d
        # which contains it must be tracked.
        d = dict()
        dd = dict()
        d[1] = dd
        self._not_tracked(dd)
        self._tracked(d)
        dd[1] = d
        self._tracked(dd)
        # fromkeys / update mirror the same rules.
        d = dict.fromkeys([x, y, z])
        self._not_tracked(d)
        dd = dict()
        dd.update(d)
        self._not_tracked(dd)
        d = dict.fromkeys([x, y, z, o])
        self._tracked(d)
        dd = dict()
        dd.update(d)
        self._tracked(dd)
        # Keyword-argument construction.
        d = dict(x=x, y=y, z=z)
        self._not_tracked(d)
        d = dict(x=x, y=y, z=z, w=w)
        self._tracked(d)
        d = dict()
        d.update(x=x, y=y, z=z)
        self._not_tracked(d)
        d.update(w=w)
        self._tracked(d)
        # Pair-list construction.
        d = dict([(x, y), (z, 1)])
        self._not_tracked(d)
        d = dict([(x, y), (z, w)])
        self._tracked(d)
        d = dict()
        d.update([(x, y), (z, 1)])
        self._not_tracked(d)
        d.update([(x, y), (z, w)])
        self._tracked(d)
    @support.cpython_only
    def test_track_subtypes(self):
        # Dict subtypes are always tracked
        class MyDict(dict):
            pass
        self._tracked(MyDict())
    def test_iterator_pickling(self):
        # A key iterator survives a pickle round-trip, both fresh and
        # partially consumed.
        data = {1:"a", 2:"b", 3:"c"}
        it = iter(data)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(sorted(it), sorted(data))
        it = pickle.loads(d)
        try:
            drop = next(it)
        except StopIteration:
            return
        d = pickle.dumps(it)
        it = pickle.loads(d)
        del data[drop]
        self.assertEqual(sorted(it), sorted(data))
    def test_itemiterator_pickling(self):
        data = {1:"a", 2:"b", 3:"c"}
        # dictviews aren't picklable, only their iterators
        itorg = iter(data.items())
        d = pickle.dumps(itorg)
        it = pickle.loads(d)
        # note that the type of the unpickled iterator
        # is not necessarily the same as the original. It is
        # merely an object supporting the iterator protocol, yielding
        # the same objects as the original one.
        # self.assertEqual(type(itorg), type(it))
        self.assertTrue(isinstance(it, collections.abc.Iterator))
        self.assertEqual(dict(it), data)
        # A partially consumed iterator also round-trips.
        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        del data[drop[0]]
        self.assertEqual(dict(it), data)
    def test_valuesiterator_pickling(self):
        data = {1:"a", 2:"b", 3:"c"}
        # data.values() isn't picklable, only its iterator
        it = iter(data.values())
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(sorted(list(it)), sorted(list(data.values())))
        # A partially consumed values iterator also round-trips.
        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        values = list(it) + [drop]
        self.assertEqual(sorted(values), sorted(list(data.values())))
    def test_instance_dict_getattr_str_subclass(self):
        # Attribute access works with str-subclass names, both through
        # getattr() and through direct __dict__ indexing.
        class Foo:
            def __init__(self, msg):
                self.msg = msg
        f = Foo('123')
        class _str(str):
            pass
        self.assertEqual(f.msg, getattr(f, _str('msg')))
        self.assertEqual(f.msg, f.__dict__[_str('msg')])
    def test_object_set_item_single_instance_non_str_key(self):
        # An instance __dict__ accepts non-string keys alongside attributes.
        class Foo: pass
        f = Foo()
        f.__dict__[1] = 1
        f.a = 'a'
        self.assertEqual(f.__dict__, {1:1, 'a':'a'})
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol test suite against builtin dict."""
    type2test = dict
class Dict(dict):
    """Trivial dict subclass exercised by SubclassMappingTests."""
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol test suite against a dict subclass."""
    type2test = Dict
def test_main():
    # regrtest entry point: run all three suites defined in this module.
    support.run_unittest(
        DictTest,
        GeneralMappingTests,
        SubclassMappingTests,
    )
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
crosswalk-project/chromium-crosswalk-efl | tools/perf/benchmarks/spaceport.py | 33 | 4300 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs spaceport.io's PerfMarks benchmark."""
import json
import logging
import os

from telemetry import benchmark
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
# Human-readable descriptions for each spaceport.io sub-benchmark, keyed by
# chart name; attached to the reported values via DESCRIPTIONS.get(chart).
DESCRIPTIONS = {
    'canvasDrawImageFullClear':
        'Using a canvas element to render. Bitmaps are blitted to the canvas '
        'using the "drawImage" function and the canvas is fully cleared at '
        'the beginning of each frame.',
    'canvasDrawImageFullClearAlign':
        'Same as canvasDrawImageFullClear except all "x" and "y" values are '
        'rounded to the nearest integer. This can be more efficient on '
        'translate on certain browsers.',
    'canvasDrawImagePartialClear':
        'Using a canvas element to render. Bitmaps are blitted to the canvas '
        'using the "drawImage" function and pixels drawn in the last frame '
        'are cleared to the clear color at the beginning of each frame. '
        'This is generally slower on hardware accelerated implementations, '
        'but sometimes faster on CPU-based implementations.',
    'canvasDrawImagePartialClearAlign':
        'Same as canvasDrawImageFullClearAlign but only partially clearing '
        'the canvas each frame.',
    'css2dBackground':
        'Using div elements that have a background image specified using CSS '
        'styles. These div elements are translated, scaled, and rotated using '
        'CSS-2D transforms.',
    'css2dImg':
        'Same as css2dBackground, but using img elements instead of div '
        'elements.',
    'css3dBackground':
        'Same as css2dBackground, but using CSS-3D transforms.',
    # Fixed typos: referenced the non-existent chart name "css2dImage" and
    # misspelled "transforms".
    'css3dImg':
        'Same as css2dImg but using CSS-3D transforms.',
}
class _SpaceportMeasurement(page_test.PageTest):
  """Drives the spaceport.io PerfMarks page and records per-test scores."""

  def __init__(self):
    super(_SpaceportMeasurement, self).__init__()

  def CustomizeBrowserOptions(self, options):
    # Disable vsync so rendering throughput is not capped at refresh rate.
    options.AppendExtraBrowserArgs('--disable-gpu-vsync')

  def ValidateAndMeasurePage(self, page, tab, results):
    """Starts the in-page test suite, polls for results, and reports them.

    The page logs each result via console.log as "name: value"; we hook
    console.log to collect those pairs into window.__results.
    """
    tab.WaitForJavaScriptExpression(
        '!document.getElementById("start-performance-tests").disabled', 60)
    # NOTE(review): "!key_val.length == 2" below compares a boolean to 2 and
    # is always false, so the length guard is a no-op. Kept as-is to avoid
    # changing which console lines get captured.
    tab.ExecuteJavaScript("""
        window.__results = {};
        window.console.log = function(str) {
          if (!str) return;
          var key_val = str.split(': ');
          if (!key_val.length == 2) return;
          __results[key_val[0]] = key_val[1];
        };
        document.getElementById('start-performance-tests').click();
        """)
    num_results = 0
    num_tests_in_spaceport = 24
    # Poll until all sub-tests have reported, logging progress as we go.
    while num_results < num_tests_in_spaceport:
      tab.WaitForJavaScriptExpression(
          'Object.keys(window.__results).length > %d' % num_results, 180)
      num_results = tab.EvaluateJavaScript(
          'Object.keys(window.__results).length')
      logging.info('Completed test %d of %d' %
                   (num_results, num_tests_in_spaceport))
    # Parse with json.loads rather than eval: the string originates in the
    # page under test, and eval would execute arbitrary Python expressions.
    result_dict = json.loads(tab.EvaluateJavaScript(
        'JSON.stringify(window.__results)'))
    for key in result_dict:
      chart, trace = key.split('.', 1)
      results.AddValue(scalar.ScalarValue(
          results.current_page, '%s.%s' % (chart, trace),
          'objects (bigger is better)', float(result_dict[key]),
          important=False, description=DESCRIPTIONS.get(chart)))
    results.AddValue(list_of_scalar_values.ListOfScalarValues(
        results.current_page, 'Score', 'objects (bigger is better)',
        [float(x) for x in result_dict.values()],
        description='Combined score for all parts of the spaceport benchmark.'))
# crbug.com/166703: This test frequently times out on Windows.
@benchmark.Disabled('mac', 'win')
class Spaceport(benchmark.Benchmark):
  """spaceport.io's PerfMarks benchmark."""
  test = _SpaceportMeasurement

  def CreatePageSet(self, options):
    # Serve the checked-in copy of the benchmark from the local tree.
    spaceport_dir = os.path.join(util.GetChromiumSrcDir(), 'chrome', 'test',
                                 'data', 'third_party', 'spaceport')
    ps = page_set.PageSet(file_path=spaceport_dir)
    ps.AddPageWithDefaultRunNavigate('file://index.html')
    return ps
| bsd-3-clause |
google/embedding-tests | models/bert/extract_features.py | 1 | 13765 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import codecs
import collections
import json
import re
import modeling
import tokenization
import tensorflow as tf
# Command-line interface for the feature-extraction script.
flags = tf.flags

FLAGS = flags.FLAGS

# Input text file: one example per line, either a single sentence or a
# "text_a ||| text_b" pair (see read_examples).
flags.DEFINE_string("input_file", None, "")

# Destination for the JSON-lines output, one record per input line.
flags.DEFINE_string("output_file", None, "")

# Comma-separated encoder layer indexes to export (negative counts from top).
flags.DEFINE_string("layers", "-1,-2,-3,-4", "")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

flags.DEFINE_string("master", None,
                    "If using a TPU, the address of the master.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")

flags.DEFINE_bool(
    "use_one_hot_embeddings", False,
    "If True, tf.one_hot will be used for embedding lookups, otherwise "
    "tf.nn.embedding_lookup will be used. On TPUs, this should be True "
    "since it is much faster.")
class InputExample(object):
  """One raw input line: an id plus one or two pieces of text.

  `text_b` is None for single-sentence inputs; `unique_id` maps the
  eventual predictions back to this input line.
  """

  def __init__(self, unique_id, text_a, text_b):
    self.unique_id, self.text_a, self.text_b = unique_id, text_a, text_b
class InputFeatures(object):
  """A single set of features of data.

  Holds the padded, model-ready tensors for one example: wordpiece tokens,
  their vocabulary ids, the attention mask, and the segment (type) ids.
  """

  def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
    self.unique_id = unique_id
    self.tokens = tokens
    (self.input_ids,
     self.input_mask,
     self.input_type_ids) = input_ids, input_mask, input_type_ids
def input_fn_builder(features, seq_length):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  # Materialize the feature columns up front so the closure captures plain
  # Python lists rather than InputFeatures objects.
  all_unique_ids = [f.unique_id for f in features]
  all_input_ids = [f.input_ids for f in features]
  all_input_mask = [f.input_mask for f in features]
  all_input_type_ids = [f.input_type_ids for f in features]

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    dataset = tf.data.Dataset.from_tensor_slices({
        "unique_ids":
            tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_type_ids":
            tf.constant(
                all_input_type_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
    })
    return dataset.batch(batch_size=batch_size, drop_remainder=False)

  return input_fn
def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  The returned model_fn builds a frozen (inference-only) BERT graph,
  restores weights from `init_checkpoint`, and emits the encoder outputs
  for each layer in `layer_indexes` as predictions.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    unique_ids = features["unique_ids"]
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    input_type_ids = features["input_type_ids"]

    # is_training=False disables dropout; this script only extracts features.
    model = modeling.BertModel(
        config=bert_config,
        is_training=False,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=input_type_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)

    if mode != tf.estimator.ModeKeys.PREDICT:
      raise ValueError("Only PREDICT modes are supported: %s" % (mode))

    tvars = tf.trainable_variables()
    scaffold_fn = None
    (assignment_map,
     initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
         tvars, init_checkpoint)
    if use_tpu:
      # On TPU, checkpoint restoration must happen inside the scaffold so it
      # runs on the worker rather than at graph-construction time.
      def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    all_layers = model.get_all_encoder_layers()

    predictions = {
        "unique_id": unique_ids,
    }

    # Export each requested encoder layer under a stable, index-based key.
    for (i, layer_index) in enumerate(layer_indexes):
      predictions["layer_output_%d" % i] = all_layers[layer_index]

    output_spec = tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
def convert_examples_to_features(examples, seq_length, tokenizer):
  """Converts `InputExample`s into a list of model-ready `InputFeatures`.

  Tokenizes, truncates to `seq_length` (including special tokens), adds
  [CLS]/[SEP], and zero-pads every field to exactly `seq_length`.
  """

  features = []
  for (ex_index, example) in enumerate(examples):
    tokens_a = tokenizer.tokenize(example.text_a)

    tokens_b = None
    if example.text_b:
      tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
      # Modifies `tokens_a` and `tokens_b` in place so that the total
      # length is less than the specified length.
      # Account for [CLS], [SEP], [SEP] with "- 3"
      _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
    else:
      # Account for [CLS] and [SEP] with "- 2"
      if len(tokens_a) > seq_length - 2:
        tokens_a = tokens_a[0:(seq_length - 2)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0     0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    input_type_ids = []
    tokens.append("[CLS]")
    input_type_ids.append(0)
    for token in tokens_a:
      tokens.append(token)
      input_type_ids.append(0)
    tokens.append("[SEP]")
    input_type_ids.append(0)

    if tokens_b:
      for token in tokens_b:
        tokens.append(token)
        input_type_ids.append(1)
      tokens.append("[SEP]")
      input_type_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < seq_length:
      input_ids.append(0)
      input_mask.append(0)
      input_type_ids.append(0)

    assert len(input_ids) == seq_length
    assert len(input_mask) == seq_length
    assert len(input_type_ids) == seq_length

    # Log the first few examples so runs can be sanity-checked by eye.
    if ex_index < 5:
      tf.logging.info("*** Example ***")
      tf.logging.info("unique_id: %s" % (example.unique_id))
      tf.logging.info("tokens: %s" % " ".join(
          [tokenization.printable_text(x) for x in tokens]))
      tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
      tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
      tf.logging.info(
          "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

    features.append(
        InputFeatures(
            unique_id=example.unique_id,
            tokens=tokens,
            input_ids=input_ids,
            input_mask=input_mask,
            input_type_ids=input_type_ids))
  return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
  """Read a list of `InputExample`s from an input file.

  Each line is either a single sentence or a "text_a ||| text_b" pair;
  ids are assigned sequentially from zero.
  """
  examples = []
  unique_id = 0
  with tf.gfile.GFile(input_file, "r") as reader:
    while True:
      raw = tokenization.convert_to_unicode(reader.readline())
      if not raw:
        break
      stripped = raw.strip()
      match = re.match(r"^(.*) \|\|\| (.*)$", stripped)
      if match:
        text_a, text_b = match.group(1), match.group(2)
      else:
        text_a, text_b = stripped, None
      examples.append(
          InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
      unique_id += 1
  return examples
def main(_):
  """Extracts per-token, per-layer BERT activations for the input examples.

  Reads one example per line from FLAGS.input_file, runs each through an
  Estimator built from the given BERT checkpoint, and writes one JSON line
  per example to FLAGS.output_file containing the activations of the
  layers selected by FLAGS.layers.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  layer_indexes = [int(x) for x in FLAGS.layers.split(",")]

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      master=FLAGS.master,
      tpu_config=tf.contrib.tpu.TPUConfig(
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  examples = read_examples(FLAGS.input_file)

  features = convert_examples_to_features(
      examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)

  # Index features by unique_id so prediction results (which arrive keyed by
  # unique_id) can be matched back to their tokens.
  unique_id_to_feature = {}
  for feature in features:
    unique_id_to_feature[feature.unique_id] = feature

  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      layer_indexes=layer_indexes,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      predict_batch_size=FLAGS.batch_size)

  input_fn = input_fn_builder(
      features=features, seq_length=FLAGS.max_seq_length)

  with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file,
                                               "w")) as writer:
    for result in estimator.predict(input_fn, yield_single_examples=True):
      unique_id = int(result["unique_id"])
      feature = unique_id_to_feature[unique_id]
      output_json = collections.OrderedDict()
      output_json["linex_index"] = unique_id
      all_features = []
      for (i, token) in enumerate(feature.tokens):
        all_layers = []
        for (j, layer_index) in enumerate(layer_indexes):
          layer_output = result["layer_output_%d" % j]
          layers = collections.OrderedDict()
          layers["index"] = layer_index
          # Round to 6 decimals to keep the JSON output compact.
          layers["values"] = [
              round(float(x), 6) for x in layer_output[i:(i + 1)].flat
          ]
          all_layers.append(layers)
        # Renamed from `features`: the old name shadowed the list of
        # InputFeatures built above, which was confusing to readers.
        token_output = collections.OrderedDict()
        token_output["token"] = token
        token_output["layers"] = all_layers
        all_features.append(token_output)
      output_json["features"] = all_features
      writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
  # These flags have no usable defaults, so require them explicitly before
  # handing control to the TF app runner (which parses flags and calls main).
  flags.mark_flag_as_required("input_file")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("init_checkpoint")
  flags.mark_flag_as_required("output_file")
  tf.app.run()
| apache-2.0 |
KohlsTechnology/ansible | lib/ansible/utils/module_docs_fragments/ios.py | 58 | 5333 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for the Cisco IOS network modules.

    The DOCUMENTATION string below is YAML that Ansible's doc tooling merges
    into each module that declares this fragment; it is data, not prose, so
    its content must stay in sync with the ios_* module argument specs.
    """

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  authorize:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli) and C(become: yes)."
      - For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html).
      - HORIZONTALLINE
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands. If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
    type: bool
    default: 'no'
  auth_pass:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli) and C(become: yes) with C(become_pass)."
      - For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html).
      - HORIZONTALLINE
      - Specifies the password to use if required to enter privileged mode
        on the remote device. If I(authorize) is false, then this argument
        does nothing. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
  provider:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
      - For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html).
      - HORIZONTALLINE
      - A dict object containing connection details.
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands. If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device. This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
      authorize:
        description:
          - Instructs the module to enter privileged mode on the remote device
            before sending any commands. If not specified, the device will
            attempt to execute all commands in non-privileged mode. If the value
            is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
        type: bool
        default: 'no'
      auth_pass:
        description:
          - Specifies the password to use if required to enter privileged mode
            on the remote device. If I(authorize) is false, then this argument
            does nothing. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
notes:
  - For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
  - For more information on using Ansible to manage Cisco devices see the `Cisco integration page <https://www.ansible.com/integrations/networks/cisco>`_.
"""
| gpl-3.0 |
brianjgeiger/osf.io | api_tests/meetings/views/test_meetings_submissions_detail.py | 9 | 4700 | import pytest
from api_tests import utils as api_utils
from framework.auth.core import Auth
from osf.models import PageCounter
from osf_tests.factories import ConferenceFactory, ProjectFactory, AuthUserFactory
@pytest.mark.django_db
class TestMeetingSubmissionsDetail:
    """Tests for the single-submission detail endpoint of a meeting."""

    @pytest.fixture()
    def meeting(self):
        return ConferenceFactory(name='OSF 2019', endpoint='osf2019')

    @pytest.fixture()
    def base_url(self, meeting):
        return '/_/meetings/{}/submissions/'.format(meeting.endpoint)

    @pytest.fixture()
    def user(self):
        return AuthUserFactory(fullname='Grapes McGee')

    @pytest.fixture()
    def meeting_one_submission(self, meeting, user):
        # Public submission tagged as a 'poster' and added to the meeting.
        submission = ProjectFactory(title='Submission One', is_public=True, creator=user)
        meeting.submissions.add(submission)
        submission.add_tag('poster', Auth(user))
        return submission

    @pytest.fixture()
    def meeting_submission_no_category(self, meeting, user):
        # Public submission with an attached file but no poster/talk tag.
        submission = ProjectFactory(title='Submission One', is_public=True, creator=user)
        meeting.submissions.add(submission)
        api_utils.create_test_file(submission, user, create_guid=False)
        return submission

    @pytest.fixture()
    def meeting_one_private_submission(self, meeting, user):
        # Private submission: must not be readable through the endpoint.
        submission = ProjectFactory(title='Submission One', is_public=False, creator=user)
        meeting.submissions.add(submission)
        submission.add_tag('poster', Auth(user))
        return submission

    @pytest.fixture()
    def random_project(self, meeting, user):
        # Public, poster-tagged project that was never added to the meeting.
        project = ProjectFactory(title='Submission One', is_public=True, creator=user)
        project.add_tag('poster', Auth(user))
        return project

    @pytest.fixture()
    def file(self, user, meeting_one_submission):
        # Attach a file to the submission and record 10 downloads for it.
        file = api_utils.create_test_file(meeting_one_submission, user, create_guid=False)
        self.mock_download(meeting_one_submission, file, 10)
        return file

    def mock_download(self, project, file, download_count):
        # Seed a PageCounter row so the endpoint reports `download_count`.
        pc, _ = PageCounter.objects.get_or_create(
            _id='download:{}:{}'.format(project._id, file._id),
            resource=project.guids.first(),
            action='download',
            file=file
        )
        pc.total = download_count
        pc.save()
        return pc

    def test_meeting_submission_detail(self, app, user, meeting, base_url, meeting_one_submission,
            meeting_one_private_submission, random_project, meeting_submission_no_category, file):
        # test_get_poster_submission
        url = '{}{}/'.format(base_url, meeting_one_submission._id)
        res = app.get(url)
        assert res.status_code == 200
        data = res.json['data']
        assert data['id'] == meeting_one_submission._id
        assert data['type'] == 'meeting-submissions'
        assert data['attributes']['title'] == meeting_one_submission.title
        assert data['attributes']['author_name'] == user.family_name
        assert data['attributes']['download_count'] == 10
        assert 'date_created' in data['attributes']
        assert data['attributes']['meeting_category'] == 'poster'
        assert '/_/meetings/{}/submissions/{}'.format(meeting.endpoint, meeting_one_submission._id) in data['links']['self']
        assert data['relationships']['author']['data']['id'] == user._id
        assert file._id in data['links']['download']

        # test_get_private_submission
        url = '{}{}/'.format(base_url, meeting_one_private_submission._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401

        # Restricting even logged in contributor from viewing private submission
        url = '{}{}/'.format(base_url, meeting_one_private_submission._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

        # test_get_random_project_not_affiliated_with_meeting
        url = '{}{}/'.format(base_url, random_project._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404
        assert res.json['errors'][0]['detail'] == 'This is not a submission to OSF 2019.'

        # test_get_invalid_submission
        url = '{}{}/'.format(base_url, 'jjjjj')
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404

        # test_get_meeting_submission_with_no_category
        url = '{}{}/'.format(base_url, meeting_submission_no_category._id)
        res = app.get(url)
        assert res.status_code == 200
        # Second submission type given by default if none exists (following legacy logic)
        assert res.json['data']['attributes']['meeting_category'] == 'talk'
| apache-2.0 |
perlygatekeeper/glowing-robot | Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py | 9 | 16468 | """
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
try:
from cryptography.x509 import UnsupportedExtension
except ImportError:
# UnsupportedExtension is gone in cryptography >= 2.1.0
class UnsupportedExtension(Exception):
pass
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
from ..packages import six
import sys
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']

# SNI always works.
HAS_SNI = True

# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}

# Register protocols only when both the stdlib and PyOpenSSL support them,
# since either side may have been built without a given SSL/TLS version.
if hasattr(ssl, 'PROTOCOL_SSLv3') and hasattr(OpenSSL.SSL, 'SSLv3_METHOD'):
    _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD

if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD

if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD

# Translate stdlib ssl verify modes to PyOpenSSL ones, and back.
_stdlib_to_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED:
        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict(
    (v, k) for k, v in _stdlib_to_openssl_verify.items()
)

# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384

# Remember the original util attributes so extract_from_urllib3() can
# undo the monkey-patching performed by inject_into_urllib3().
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext


log = logging.getLogger(__name__)
def inject_into_urllib3():
    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'

    # Fail fast with ImportError if cryptography/pyOpenSSL are too old.
    _validate_dependencies_met()

    # Patch both the top-level util module and util.ssl_, since callers
    # may have imported either binding.
    util.SSLContext = PyOpenSSLContext
    util.ssl_.SSLContext = PyOpenSSLContext
    util.HAS_SNI = HAS_SNI
    util.ssl_.HAS_SNI = HAS_SNI
    util.IS_PYOPENSSL = True
    util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
    'Undo monkey-patching by :func:`inject_into_urllib3`.'

    # Restore the originals captured at import time.
    util.SSLContext = orig_util_SSLContext
    util.ssl_.SSLContext = orig_util_SSLContext
    util.HAS_SNI = orig_util_HAS_SNI
    util.ssl_.HAS_SNI = orig_util_HAS_SNI
    util.IS_PYOPENSSL = False
    util.ssl_.IS_PYOPENSSL = False
def _validate_dependencies_met():
    """
    Verifies that PyOpenSSL's package-level dependencies have been met.
    Throws `ImportError` if they are not met.
    """
    # Method added in `cryptography==1.1`; not available in older versions
    from cryptography.x509.extensions import Extensions
    if getattr(Extensions, "get_extension_for_class", None) is None:
        raise ImportError("'cryptography' module missing required functionality. "
                          "Try upgrading to v1.3.4 or newer.")

    # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
    # attribute is only present on those versions.
    from OpenSSL.crypto import X509
    x509 = X509()
    if getattr(x509, "_x509", None) is None:
        raise ImportError("'pyOpenSSL' module missing required functionality. "
                          "Try upgrading to v0.14 or newer.")
def _dnsname_to_stdlib(name):
"""
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
If the name cannot be idna-encoded then we return None signalling that
the name given should be skipped.
"""
def idna_encode(name):
"""
Borrowed wholesale from the Python Cryptography Project. It turns out
that we can't just safely call `idna.encode`: it can explode for
wildcard names. This avoids that problem.
"""
from pip._vendor import idna
try:
for prefix in [u'*.', u'.']:
if name.startswith(prefix):
name = name[len(prefix):]
return prefix.encode('ascii') + idna.encode(name)
return idna.encode(name)
except idna.core.IDNAError:
return None
# Don't send IPv6 addresses through the IDNA encoder.
if ':' in name:
return name
name = idna_encode(name)
if name is None:
return None
elif sys.version_info >= (3, 0):
name = name.decode('utf-8')
return name
def get_subj_alt_name(peer_cert):
    """
    Given an PyOpenSSL certificate, provides all the subject alternative names.

    Returns a list of ('DNS', name) and ('IP Address', addr) tuples in the
    shape expected by ssl.match_hostname(); an empty list when the SAN
    extension is absent or unreadable.
    """
    # Pass the cert to cryptography, which has much better APIs for this.
    if hasattr(peer_cert, "to_cryptography"):
        cert = peer_cert.to_cryptography()
    else:
        # This is technically using private APIs, but should work across all
        # relevant versions before PyOpenSSL got a proper API for this.
        cert = _Certificate(openssl_backend, peer_cert._x509)

    # We want to find the SAN extension. Ask Cryptography to locate it (it's
    # faster than looping in Python)
    try:
        ext = cert.extensions.get_extension_for_class(
            x509.SubjectAlternativeName
        ).value
    except x509.ExtensionNotFound:
        # No such extension, return the empty list.
        return []
    except (x509.DuplicateExtension, UnsupportedExtension,
            x509.UnsupportedGeneralNameType, UnicodeError) as e:
        # A problem has been found with the quality of the certificate. Assume
        # no SAN field is present.
        log.warning(
            "A problem was encountered with the certificate that prevented "
            "urllib3 from finding the SubjectAlternativeName field. This can "
            "affect certificate validation. The error was %s",
            e,
        )
        return []

    # We want to return dNSName and iPAddress fields. We need to cast the IPs
    # back to strings because the match_hostname function wants them as
    # strings.
    # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
    # decoded. This is pretty frustrating, but that's what the standard library
    # does with certificates, and so we need to attempt to do the same.
    # We also want to skip over names which cannot be idna encoded.
    names = [
        ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
        if name is not None
    ]
    names.extend(
        ('IP Address', str(name))
        for name in ext.get_values_for_type(x509.IPAddress)
    )

    return names
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Count of makefile()-created file objects still referencing us;
        # close() only really closes once this drops below 1.
        self._makefile_refs = 0
        self._closed = False

    def fileno(self):
        return self.socket.fileno()

    # Copy-pasted from Python 3.5 source code
    def _decref_socketios(self):
        if self._makefile_refs > 0:
            self._makefile_refs -= 1
        if self._closed:
            self.close()

    def recv(self, *args, **kwargs):
        # Translate PyOpenSSL exceptions into the socket/ssl exceptions the
        # stdlib SSL socket would raise, retrying on WantReadError.
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                # Peer closed without a TLS shutdown; treat as EOF.
                return b''
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                # Clean TLS shutdown from the peer: EOF.
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # No complete TLS record yet: wait for the socket, then retry.
            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)

        # TLS 1.3 post-handshake authentication
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError("read error: %r" % e)
        else:
            return data

    def recv_into(self, *args, **kwargs):
        # Same error translation as recv(), but reading into a caller buffer.
        try:
            return self.connection.recv_into(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return 0
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return 0
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
                raise timeout('The read operation timed out')
            else:
                return self.recv_into(*args, **kwargs)

        # TLS 1.3 post-handshake authentication
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError("read error: %r" % e)

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        # Retry the TLS write until OpenSSL accepts at least part of `data`.
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                if not util.wait_for_write(self.socket, self.socket.gettimeout()):
                    raise timeout()
                continue
            except OpenSSL.SSL.SysCallError as e:
                raise SocketError(str(e))

    def sendall(self, data):
        # OpenSSL will only write 16K at a time, so send in chunks.
        total_sent = 0
        while total_sent < len(data):
            sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
            total_sent += sent

    def shutdown(self):
        # FIXME rethrow compatible exceptions should we ever use this
        self.connection.shutdown()

    def close(self):
        if self._makefile_refs < 1:
            try:
                self._closed = True
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            # File objects created by makefile() still reference this
            # socket; defer the real close until they are all released.
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        x509 = self.connection.get_peer_certificate()

        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        # Minimal dict shape expected by ssl.match_hostname().
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': get_subj_alt_name(x509)
        }

    def version(self):
        return self.connection.get_protocol_version_name()

    def _reuse(self):
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
if _fileobject:  # Platform-specific: Python 2
    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
else:  # Platform-specific: Python 3
    makefile = backport_makefile

# Attach the platform-appropriate makefile() implementation to the wrapper
# class so it matches the stdlib socket API.
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
    """
    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
    for translating the interface of the standard library ``SSLContext`` object
    to calls into PyOpenSSL.
    """
    def __init__(self, protocol):
        # Map the stdlib protocol constant to PyOpenSSL's method constant.
        self.protocol = _openssl_versions[protocol]
        self._ctx = OpenSSL.SSL.Context(self.protocol)
        self._options = 0
        self.check_hostname = False

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, value):
        self._options = value
        self._ctx.set_options(value)

    @property
    def verify_mode(self):
        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]

    @verify_mode.setter
    def verify_mode(self, value):
        self._ctx.set_verify(
            _stdlib_to_openssl_verify[value],
            _verify_callback
        )

    def set_default_verify_paths(self):
        self._ctx.set_default_verify_paths()

    def set_ciphers(self, ciphers):
        # PyOpenSSL expects a byte string for the cipher list.
        if isinstance(ciphers, six.text_type):
            ciphers = ciphers.encode('utf-8')
        self._ctx.set_cipher_list(ciphers)

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        if cafile is not None:
            cafile = cafile.encode('utf-8')
        if capath is not None:
            capath = capath.encode('utf-8')
        self._ctx.load_verify_locations(cafile, capath)
        if cadata is not None:
            self._ctx.load_verify_locations(BytesIO(cadata))

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        self._ctx.use_certificate_chain_file(certfile)
        if password is not None:
            if not isinstance(password, six.binary_type):
                password = password.encode('utf-8')
            self._ctx.set_passwd_cb(lambda *_: password)
        self._ctx.use_privatekey_file(keyfile or certfile)

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
                    server_hostname=None):
        cnx = OpenSSL.SSL.Connection(self._ctx, sock)

        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
            server_hostname = server_hostname.encode('utf-8')

        # Set the SNI hostname before connecting so it goes out in ClientHello.
        if server_hostname is not None:
            cnx.set_tlsext_host_name(server_hostname)

        cnx.set_connect_state()

        # Drive the handshake to completion, waiting on the socket whenever
        # OpenSSL reports it needs more data.
        while True:
            try:
                cnx.do_handshake()
            except OpenSSL.SSL.WantReadError:
                if not util.wait_for_read(sock, sock.gettimeout()):
                    raise timeout('select timed out')
                continue
            except OpenSSL.SSL.Error as e:
                raise ssl.SSLError('bad handshake: %r' % e)
            break

        return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
| artistic-2.0 |
reddraggone9/youtube-dl | youtube_dl/extractor/tlc.py | 11 | 2612 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from .discovery import DiscoveryIE
from ..compat import compat_urlparse
class TlcIE(DiscoveryIE):
    # tlc.com pages use the same backend as discovery.com, so reuse the
    # Discovery extractor and only override the URL pattern and tests.
    IE_NAME = 'tlc.com'
    _VALID_URL = r'http://www\.tlc\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'

    # DiscoveryIE has _TESTS
    _TESTS = [{
        'url': 'http://www.tlc.com/tv-shows/cake-boss/videos/too-big-to-fly.htm',
        'info_dict': {
            'id': '104493',
            'ext': 'mp4',
            'title': 'Too Big to Fly',
            'description': 'Buddy has taken on a high flying task.',
            'duration': 119,
            'timestamp': 1393365060,
            'upload_date': '20140225',
        },
        'params': {
            'skip_download': True,  # requires ffmpeg
        },
    }]
class TlcDeIE(InfoExtractor):
    # German TLC site; videos are hosted on Brightcove, so this extractor
    # locates the embedded player and delegates to BrightcoveLegacyIE.
    IE_NAME = 'tlc.de'
    _VALID_URL = r'http://www\.tlc\.de/sendungen/[^/]+/videos/(?P<title>[^/?]+)'

    _TEST = {
        'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001',
        'info_dict': {
            'id': '3235167922001',
            'ext': 'mp4',
            'title': 'Breaking Amish: Die Welt da draußen',
            'uploader': 'Discovery Networks - Germany',
            'description': (
                'Vier Amische und eine Mennonitin wagen in New York'
                ' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
                ' ihrem spannenden Weg.'),
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        iframe_url = self._search_regex(
            '<iframe src="(http://www\.tlc\.de/wp-content/.+?)"', webpage,
            'iframe url')
        # Otherwise we don't get the correct 'BrightcoveExperience' element,
        # example: http://www.tlc.de/sendungen/cake-boss/videos/cake-boss-cannoli-drama/
        iframe_url = iframe_url.replace('.htm?', '.php?')
        url_fragment = compat_urlparse.urlparse(url).fragment
        if url_fragment:
            # Since the fragment is not sent to the server, we always get the same iframe
            iframe_url = re.sub(r'playlist=(\d+)', 'playlist=%s' % url_fragment, iframe_url)
        iframe = self._download_webpage(iframe_url, title)

        return {
            '_type': 'url',
            'url': BrightcoveLegacyIE._extract_brightcove_url(iframe),
            'ie': BrightcoveLegacyIE.ie_key(),
        }
| unlicense |
pedrobaeza/OpenUpgrade | addons/sale_stock/report/sale_report.py | 231 | 2100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class sale_report(osv.osv):
    """Extends the sale analysis report with warehouse and shipping data."""
    _inherit = "sale.report"
    _columns = {
        # Whether the sale order has been shipped.
        'shipped': fields.boolean('Shipped', readonly=True),
        # `shipped` cast to an integer so it can be summed in group-by views.
        'shipped_qty_1': fields.integer('Shipped', readonly=True),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
        'state': fields.selection([
            ('draft', 'Quotation'),
            ('sent', 'Quotation Sent'),
            ('waiting_date', 'Waiting Schedule'),
            ('manual', 'Manual In Progress'),
            ('progress', 'In Progress'),
            ('shipping_except', 'Shipping Exception'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
            ('cancel', 'Cancelled')
            ], 'Order Status', readonly=True),
    }

    def _select(self):
        # Append the warehouse/shipping columns to the parent SELECT clause.
        return super(sale_report, self)._select() + ", s.warehouse_id as warehouse_id, s.shipped, s.shipped::integer as shipped_qty_1"

    def _group_by(self):
        # The extra selected columns must also appear in the GROUP BY clause.
        return super(sale_report, self)._group_by() + ", s.warehouse_id, s.shipped"
| agpl-3.0 |
google-research/google-research | felix/converter_for_felix_insert.py | 1 | 6197 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training data conversion for FelixInsert.
Source-target text pairs will be converted to (source) token-level edit
operations and token-level insertions.
"""
from typing import List, Optional, Sequence, Tuple
from felix import felix_constants as constants
from felix import phrase_vocabulary_optimization_utils as phrase_utils
def compute_edits_and_insertions(
    source_tokens: Sequence[str], target_tokens: Sequence[str],
    max_insertions_per_token: int, insert_after_token: bool = True
) -> Optional[Tuple[List[str], List[List[str]]]]:
  """Computes edit operations and insertions per source token.

  Note that you should add a dummy token to the beginning / end of the source
  and target token lists if you want to be able to insert before the first
  actual token (when `insert_after_token==True`) / after the last actual token
  (when `insert_after_token==False`).

  Args:
    source_tokens: List of source tokens.
    target_tokens: List of target tokens.
    max_insertions_per_token: Maximum number of tokens to insert per source
      token.
    insert_after_token: Whether to insert after the current token (the current
      behavior on Felix) or before it (the current behavior in LaserTagger and
      in the original experimental FelixInsert implementation).

  Returns:
    None if target can't be obtained with the given `max_insertions_per_token`.
    Otherwise, a tuple with:
    1. List of edit operations ("KEEP" or "DELETE"), one per source token.
    2. List of inserted tokens, one per source token.
  """
  # Tokens shared by source and target (longest common subsequence).
  kept_tokens = phrase_utils.compute_lcs(source_tokens, target_tokens)
  # Added token lists between the kept source tokens.
  added_phrases = _get_added_token_lists(kept_tokens, target_tokens)
  # Regardless of input, every kept token (K_i) should be surrounded by an added
  # phrase (A_j, which can also be an empty list) on both sides, e.g.:
  # [A_0, K_0, A_1, K_1, A_2].
  # Thus the number of added phrases has to be one larger than the number of
  # kept tokens.
  assert len(added_phrases) == len(kept_tokens) + 1, (
      f'Incorrect number of added phrases: {len(added_phrases)} != '
      f'{len(kept_tokens)} + 1')
  if insert_after_token:
    return _get_edits_and_insertions(kept_tokens, source_tokens, added_phrases,
                                     max_insertions_per_token)
  else:
    # When inserting before the current token, we can simply run the same
    # algorithm as above but for reversed lists and then reverse the output.
    edits_and_insertions = _get_edits_and_insertions(
        kept_tokens[::-1], source_tokens[::-1],
        _reverse_list_of_lists(added_phrases), max_insertions_per_token)
    if edits_and_insertions is not None:
      edits, insertions = edits_and_insertions
      return edits[::-1], _reverse_list_of_lists(insertions)
    else:
      return None
def _get_added_token_lists(kept_tokens,
target_tokens):
"""Return a list of added tokens lists next to every kept token."""
added_phrases = []
# Index of the `kept_tokens` element that we are currently looking for.
kept_idx = 0
phrase = []
for token in target_tokens:
if kept_idx < len(kept_tokens) and token == kept_tokens[kept_idx]:
kept_idx += 1
added_phrases.append(phrase)
phrase = []
else:
phrase.append(token)
added_phrases.append(phrase)
return added_phrases
def _get_edits_and_insertions(
    kept_tokens, source_tokens,
    added_phrases, max_insertions_per_token
):
  """Returns edit operations and insertions per source token.

  Assumes insertions are attached *after* the token they belong to; the
  caller handles the insert-before case by running this on reversed inputs.

  Args:
    kept_tokens: Source tokens (in order) that also appear in the target.
    source_tokens: All source tokens.
    added_phrases: len(kept_tokens) + 1 token lists to be inserted around the
      kept tokens, i.e. [A_0, K_0, A_1, ..., K_{n-1}, A_n].
    max_insertions_per_token: Maximum number of tokens insertable per source
      token.

  Returns:
    None if the target cannot be realized within the insertion budget.
    Otherwise a tuple of (edit operations, inserted token lists), one element
    of each per source token.
  """
  edit_operations = []
  insertions = []
  # Index of the next kept token we expect to encounter in `source_tokens`.
  kept_idx = 0
  # Tokens that still have to be inserted before reaching the next kept
  # token (copied so it can be consumed destructively below).
  current_added_phrase = list(added_phrases[kept_idx])
  for token in source_tokens:
    if kept_idx < len(kept_tokens) and token == kept_tokens[kept_idx]:
      if current_added_phrase:
        # Couldn't insert all required tokens before the current kept token.
        return None
      kept_idx += 1
      current_added_phrase = list(added_phrases[kept_idx])
      edit_operations.append(constants.KEEP)
      # Insert as many tokens as possible after the current token and leave the
      # remaining to be added after next deleted tokens.
      insertions_i, current_added_phrase = (
          current_added_phrase[:max_insertions_per_token],
          current_added_phrase[max_insertions_per_token:])
      insertions.append(insertions_i)
    else:
      edit_operations.append(constants.DELETE)
      # If token i-1 is kept and token i deleted, the output will be the same
      # regardless of whether we insert new tokens after i-1 or after i.
      # However, semantically it makes more sense to insert after i since these
      # insertions typically correspond to replacing (e.g. inflecting) the ith
      # token. It also makes the tagging task easier since we need to predict
      # only one non-KEEP tag, i.e. DELETE|INSERT instead of independently
      # predicting KEEP|INSERT + DELETE.
      if (len(edit_operations) >= 2 and
          edit_operations[-2] == constants.KEEP and insertions[-1]):
        # Move the last insertion to the current token.
        insertions.append(insertions[-1])
        insertions[-2] = []
      else:
        insertions_i, current_added_phrase = (
            current_added_phrase[:max_insertions_per_token],
            current_added_phrase[max_insertions_per_token:])
        insertions.append(insertions_i)
  if current_added_phrase:
    # Tokens to be inserted remain but we've already consumed all source tokens.
    return None
  return edit_operations, insertions
def _reverse_list_of_lists(x):
"""Deep reverse of a list of lists."""
return [sublist[::-1] for sublist in x[::-1]]
| apache-2.0 |
shingonoide/odoo | addons/account/project/project.py | 273 | 2423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
    """Old-API (osv) model defining analytic journals.

    Analytic journals group analytic lines by origin (sale, purchase,
    cash, ...) the same way account journals group accounting entries.
    """
    _name = 'account.analytic.journal'
    _description = 'Analytic Journal'
    _columns = {
        'name': fields.char('Journal Name', required=True),
        'code': fields.char('Journal Code', size=8),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
        'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. When it needs for a document (eg: an invoice) to create analytic entries, Odoo will look for a matching journal of the same type."),
        'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines', copy=False),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }
    _defaults = {
        'active': True,
        'type': 'general',
        # Default company: the company of the user creating the record.
        'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
    }
class account_journal(osv.osv):
    """Extend the core account.journal model with a link to the analytic
    journal used when posting entries from this journal."""
    _inherit="account.journal"
    _columns = {
        'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sky-uk/bslint | tests/bslint_command_tests/skip_file_command_test.py | 1 | 1641 | import unittest
import os
import bslint
import bslint.messages.handler as msg_handler
import bslint.messages.error_constants as err_const
from tests.resources.common.test_methods import CommonMethods as Common
from filepaths import BSLINT_COMMAND_CONFIG_PATH
from filepaths import TEST_CONFIG_FILE_PATH
class TestSkipFileCommand(unittest.TestCase):
    """Lexer tests for the 'BSLint_skip_file comment command.

    NOTE(review): `bslint.load_config_file` mutates module-level state, so
    the "half_way" tests appear to rely on the config loaded by a preceding
    test -- confirm before reordering or parallelizing these tests.
    """

    @classmethod
    def setUpClass(cls):
        # Shared helper providing lexing/warning assertion utilities.
        cls.common = Common()

    def test_skip_file_command_skip_start(self):
        # With the skip-file command enabled, a file starting with
        # 'BSLint_skip_file should produce no warnings at all.
        active_skip_file_config_path = os.path.join(BSLINT_COMMAND_CONFIG_PATH, 'active-skip-file-config.json')
        bslint.load_config_file(user_filepath=active_skip_file_config_path)
        self.common.lex_warnings_match([], "'BSLint_skip_file \nxgygu= 22\n y=4\n sdfsf=2 \n")

    def test_skip_file_command_skip_half_way(self):
        # The command should also suppress warnings when it appears mid-file.
        self.common.lex_warnings_match([], "one = 22\ntwo = 4\n'BSLint_skip_file \n sdfsf=2 \n")

    def test_skip_file_command_skip_start_inactive(self):
        # With the command disabled in config, warnings are still reported
        # even though the skip comment is present.
        inactive_skip_file_config_path = os.path.join(BSLINT_COMMAND_CONFIG_PATH, 'inactive-skip-file-config.json')
        bslint.load_config_file(user_filepath=inactive_skip_file_config_path, default_filepath=TEST_CONFIG_FILE_PATH)
        self.common.lex_warnings_match([msg_handler.get_error_msg(err_const.TYPO_IN_CODE, [2]),
                                        msg_handler.get_error_msg(err_const.NO_SPACE_AROUND_OPERATORS, [1, 2])],
                                       "'BSLint_skip_file\nxgygu =22\ny = 4")

    def test_skip_file_command_skip_halfway_inactive(self):
        self.common.lex_warnings_match([], "one = 22\ntwo = 4\n'BSLint_skip_file\ntwo= 2\n")
| bsd-3-clause |
wangpanjun/django-rest-framework | tests/test_serializer_bulk_update.py | 79 | 3944 | """
Tests to cover bulk create and update using serializers.
"""
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from rest_framework import serializers
class BulkCreateSerializerTests(TestCase):
    """
    Creating multiple instances using serializers.
    """

    def setUp(self):
        # A plain (non-model) serializer is sufficient to exercise the
        # many=True validation behavior under test.
        class BookSerializer(serializers.Serializer):
            id = serializers.IntegerField()
            title = serializers.CharField(max_length=100)
            author = serializers.CharField(max_length=100)

        self.BookSerializer = BookSerializer

    def test_bulk_create_success(self):
        """
        Correct bulk update serialization should return the input data.
        """
        data = [
            {
                'id': 0,
                'title': 'The electric kool-aid acid test',
                'author': 'Tom Wolfe'
            }, {
                'id': 1,
                'title': 'If this is a man',
                'author': 'Primo Levi'
            }, {
                'id': 2,
                'title': 'The wind-up bird chronicle',
                'author': 'Haruki Murakami'
            }
        ]
        serializer = self.BookSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.validated_data, data)

    def test_bulk_create_errors(self):
        """
        Incorrect bulk create serialization should return errors.
        """
        data = [
            {
                'id': 0,
                'title': 'The electric kool-aid acid test',
                'author': 'Tom Wolfe'
            }, {
                'id': 1,
                'title': 'If this is a man',
                'author': 'Primo Levi'
            }, {
                'id': 'foo',
                'title': 'The wind-up bird chronicle',
                'author': 'Haruki Murakami'
            }
        ]
        # Errors are reported per item; items that validate get an empty dict.
        expected_errors = [
            {},
            {},
            {'id': ['A valid integer is required.']}
        ]

        serializer = self.BookSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), False)
        self.assertEqual(serializer.errors, expected_errors)

    def test_invalid_list_datatype(self):
        """
        Data containing list of incorrect data type should return errors.
        """
        data = ['foo', 'bar', 'baz']
        serializer = self.BookSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), False)

        # The error message embeds the python 2/3 string type name,
        # hence the six.text_type lookup.
        text_type_string = six.text_type.__name__
        message = 'Invalid data. Expected a dictionary, but got %s.' % text_type_string
        expected_errors = [
            {'non_field_errors': [message]},
            {'non_field_errors': [message]},
            {'non_field_errors': [message]}
        ]

        self.assertEqual(serializer.errors, expected_errors)

    def test_invalid_single_datatype(self):
        """
        Data containing a single incorrect data type should return errors.
        """
        data = 123
        serializer = self.BookSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), False)

        expected_errors = {'non_field_errors': ['Expected a list of items but got type "int".']}

        self.assertEqual(serializer.errors, expected_errors)

    def test_invalid_single_object(self):
        """
        Data containing only a single object, instead of a list of objects
        should return errors.
        """
        data = {
            'id': 0,
            'title': 'The electric kool-aid acid test',
            'author': 'Tom Wolfe'
        }
        serializer = self.BookSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), False)

        expected_errors = {'non_field_errors': ['Expected a list of items but got type "dict".']}

        self.assertEqual(serializer.errors, expected_errors)
| bsd-2-clause |
woodpecker1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/read_checksum_from_png.py | 207 | 1877 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def read_checksum(filehandle):
    """Return the 32-character checksum embedded in a PNG's tEXt comment.

    Returns None if no checksum comment is found in the first 2 KB.
    """
    # We expect the comment to be at the beginning of the file, so scanning
    # only the first 2048 bytes is sufficient.
    header = filehandle.read(2048)
    marker = 'tEXtchecksum\x00'
    marker_index = header.find(marker)
    if marker_index == -1:
        return
    start = marker_index + len(marker)
    return header[start:start + 32]
| bsd-3-clause |
LonelyPale/mongo-connector | tests/util.py | 69 | 1175 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for mongo-connector tests. There are no actual tests in here.
"""
import time
def wait_for(condition, max_tries=60):
    """Poll ``condition`` once per second until it is true or tries run out.

    Returns the final truth value of ``condition`` after at most
    ``max_tries`` attempts.
    """
    remaining = max_tries
    # Keep the condition check first so its call pattern matches: it is
    # evaluated once per loop iteration and once more for the return value.
    while not condition() and remaining > 1:
        time.sleep(1)
        remaining -= 1
    return condition()
def assert_soon(condition, message=None, max_tries=60):
    """Fail with AssertionError unless ``condition`` eventually holds.

    Polls via ``wait_for`` (one attempt per second) for at most
    ``max_tries`` attempts.
    """
    succeeded = wait_for(condition, max_tries=max_tries)
    if succeeded:
        return
    raise AssertionError(message or "")
| apache-2.0 |
jnfsmile/zinnia | zinnia/preview.py | 9 | 3772 | """Preview for Zinnia"""
from __future__ import division
from django.utils import six
from django.utils.text import Truncator
from django.utils.html import strip_tags
from django.utils.functional import cached_property
from django.utils.encoding import python_2_unicode_compatible
from bs4 import BeautifulSoup
from zinnia.settings import PREVIEW_SPLITTERS
from zinnia.settings import PREVIEW_MAX_WORDS
from zinnia.settings import PREVIEW_MORE_STRING
@python_2_unicode_compatible
class HTMLPreview(object):
    """
    Build an HTML preview of an HTML content.
    """

    def __init__(self, content, lead='',
                 splitters=PREVIEW_SPLITTERS,
                 max_words=PREVIEW_MAX_WORDS,
                 more_string=PREVIEW_MORE_STRING):
        # _preview memoizes the computed preview (see the `preview` property).
        self._preview = None
        self.lead = lead
        self.content = content
        self.splitters = splitters
        self.max_words = max_words
        self.more_string = more_string

    @property
    def preview(self):
        """
        The preview is a cached property.
        """
        if self._preview is None:
            self._preview = self.build_preview()
        return self._preview

    @property
    def has_more(self):
        """
        Boolean telling if the preview has hidden content.
        """
        return bool(self.content and self.preview != self.content)

    def __str__(self):
        """
        Method used to render the preview in templates.
        """
        return six.text_type(self.preview)

    def build_preview(self):
        """
        Build the preview by:
        - Returning the lead attribute if not empty.
        - Checking if a split marker is present in the content
          Then split the content with the marker to build the preview.
        - Splitting the content to a fixed number of words.
        """
        if self.lead:
            return self.lead
        for splitter in self.splitters:
            if splitter in self.content:
                return self.split(splitter)
        return self.truncate()

    def truncate(self):
        """
        Truncate the content with the Truncator object.
        """
        return Truncator(self.content).words(
            self.max_words, self.more_string, html=True)

    def split(self, splitter):
        """
        Split the HTML content with a marker
        without breaking closing markups.
        """
        # Parse only the part before the marker; BeautifulSoup closes any
        # markup left open by the cut.
        soup = BeautifulSoup(self.content.split(splitter)[0],
                             'html.parser')
        # Append the "more" string to the last text node of the preview.
        last_string = soup.find_all(text=True)[-1]
        last_string.replace_with(last_string.string + self.more_string)
        return soup

    @cached_property
    def total_words(self):
        """
        Return the total of words contained
        in the content and in the lead.
        """
        return len(strip_tags('%s %s' % (self.lead, self.content)).split())

    @cached_property
    def displayed_words(self):
        """
        Return the number of words displayed in the preview.
        """
        # The words of `more_string` are not real content, so they are
        # subtracted -- but only when the preview was built from the content
        # (i.e. when there is no lead, since the lead is returned verbatim).
        return (len(strip_tags(self.preview).split()) -
                (len(self.more_string.split()) * int(not bool(self.lead))))

    @cached_property
    def remaining_words(self):
        """
        Return the number of words remaining after the preview.
        """
        return self.total_words - self.displayed_words

    @cached_property
    def displayed_percent(self):
        """
        Return the percentage of the content displayed in the preview.
        """
        return (self.displayed_words / self.total_words) * 100

    @cached_property
    def remaining_percent(self):
        """
        Return the percentage of the content remaining after the preview.
        """
        return (self.remaining_words / self.total_words) * 100
| bsd-3-clause |
Juniper/tempest | tempest/api/compute/servers/test_multiple_create.py | 3 | 1930 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common import compute
from tempest.lib import decorators
class MultipleCreateTestJSON(base.BaseV2ComputeTest):
    """Tests booting multiple servers with a single API request."""

    @decorators.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
    def test_multiple_create(self):
        # Boot two servers at once; without return_reservation_id the
        # response should list the servers themselves.
        tenant_network = self.get_tenant_network()
        body, servers = compute.create_test_server(
            self.os_primary,
            wait_until='ACTIVE',
            min_count=2,
            tenant_network=tenant_network)
        for server in servers:
            self.addCleanup(self.servers_client.delete_server, server['id'])
        # NOTE(maurosr): do status response check and also make sure that
        # reservation_id is not in the response body when the request send
        # contains return_reservation_id=False
        self.assertNotIn('reservation_id', body)
        self.assertEqual(2, len(servers))

    @decorators.idempotent_id('864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7')
    def test_multiple_create_with_reservation_return(self):
        # With return_reservation_id=True the response should expose the
        # reservation id of the boot request instead.
        body = self.create_test_server(wait_until='ACTIVE',
                                       min_count=1,
                                       max_count=2,
                                       return_reservation_id=True)
        self.assertIn('reservation_id', body)
| apache-2.0 |
Antiun/partner-contact | partner_relations/model/__init__.py | 17 | 1438 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
PADDING = 10
def get_partner_type(partner):
    """Classify a partner for relation purposes.

    :param partner: a res.partner either a company or not
    :return: 'c' for company or 'p' for person
    :rtype: str
    """
    if partner.is_company:
        return 'c'
    return 'p'
from . import res_partner
from . import res_partner_relation
from . import res_partner_relation_type
from . import res_partner_relation_all
from . import res_partner_relation_type_selection
| agpl-3.0 |
Vallher/zulip | zerver/management/commands/makemessages.py | 1 | 8262 | """
The contents of this file are taken from
[Django-admin](https://github.com/niwinz/django-jinja/blob/master/django_jinja/management/commands/makemessages.py)
Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tags names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up by Django's
``makemessages`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out there we are lucky
for once: It's simply a matter of extending two regular expressions.
Credit for the approach goes to:
http://stackoverflow.com/questions/2090717/getting-translation-strings-for-jinja2-templates-integrated-with-django-1-x
"""
from __future__ import absolute_import
from typing import Any, Dict, Iterable, Mapping, Set, Tuple
from argparse import ArgumentParser
import os
import re
import glob
import json
from six.moves import filter
from six.moves import map
from six.moves import zip
from django.core.management.commands import makemessages
from django.utils.translation import trans_real
from django.template.base import BLOCK_TAG_START, BLOCK_TAG_END
# Jinja2 whitespace-control ('-') variants of the trans/pluralize block tags;
# used by strip_whitespaces() to drop the whitespace those modifiers would trim
# before Django's extractor sees the template source.
strip_whitespace_right = re.compile(r"(%s-?\s*(trans|pluralize).*?-%s)\s+" % (BLOCK_TAG_START, BLOCK_TAG_END), re.U)
strip_whitespace_left = re.compile(r"\s+(%s-\s*(endtrans|pluralize).*?-?%s)" % (BLOCK_TAG_START, BLOCK_TAG_END), re.U)

# Patterns matching translatable strings in Handlebars templates:
# {{#tr ...}}...{{/tr}} blocks and {{t "..."}} / {{t '...'}} helpers.
regexes = ['{{#tr .*?}}(.*?){{/tr}}',
           '{{t "(.*?)"\W*}}',
           "{{t '(.*?)'\W*}}",
           ]
frontend_compiled_regexes = [re.compile(regex) for regex in regexes]
def strip_whitespaces(src):
    # type: (str) -> str
    """Drop the whitespace that Jinja2 whitespace-control tags would trim."""
    return strip_whitespace_right.sub(
        r'\1', strip_whitespace_left.sub(r'\1', src))
class Command(makemessages.Command):
    """``makemessages`` wrapper that also understands Jinja2 and Handlebars.

    Django's extraction machinery is monkey-patched (see module docstring)
    to recognize Jinja2-style ``trans``/``endtrans``/``pluralize`` tags;
    frontend strings are additionally extracted from Handlebars templates
    into per-locale JSON files.
    """

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        super(Command, self).add_arguments(parser)
        parser.add_argument('--frontend-source', type=str,
                            default='static/templates',
                            help='Name of the Handlebars template directory')
        parser.add_argument('--frontend-output', type=str,
                            default='static/locale',
                            help='Name of the frontend messages output directory')
        parser.add_argument('--frontend-namespace', type=str,
                            default='translations.json',
                            help='Namespace of the frontend locale file')

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # Run the (patched) Django extraction first, then the frontend one.
        self.handle_django_locales(*args, **options)
        self.handle_frontend_locales(*args, **options)

    def handle_frontend_locales(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Extract strings from Handlebars templates into locale JSON files."""
        self.frontend_source = options.get('frontend_source')
        self.frontend_output = options.get('frontend_output')
        self.frontend_namespace = options.get('frontend_namespace')
        self.frontend_locale = options.get('locale')
        self.frontend_exclude = options.get('exclude')
        self.frontend_all = options.get('all')

        translation_strings = self.get_translation_strings()
        self.write_translation_strings(translation_strings)

    def handle_django_locales(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Run Django's makemessages with Jinja2-aware extraction regexes."""
        # Save the extractor globals so they can be restored afterwards.
        old_endblock_re = trans_real.endblock_re
        old_block_re = trans_real.block_re
        old_constant_re = trans_real.constant_re
        old_templatize = trans_real.templatize
        # Extend the regular expressions that are used to detect
        # translation blocks with an "OR jinja-syntax" clause.
        trans_real.endblock_re = re.compile(
            trans_real.endblock_re.pattern + '|' + r"""^-?\s*endtrans\s*-?$""")
        trans_real.block_re = re.compile(
            trans_real.block_re.pattern + '|' + r"""^-?\s*trans(?:\s+(?!'|")(?=.*?=.*?)|\s*-?$)""")
        trans_real.plural_re = re.compile(
            trans_real.plural_re.pattern + '|' + r"""^-?\s*pluralize(?:\s+.+|-?$)""")
        trans_real.constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?')).*\)""")

        def my_templatize(src, origin=None):
            # Strip Jinja2 whitespace-control markers before Django
            # templatizes the source.
            new_src = strip_whitespaces(src)
            return old_templatize(new_src, origin)

        trans_real.templatize = my_templatize

        try:
            super(Command, self).handle(*args, **options)
        finally:
            # Always restore the patched globals, even if extraction failed.
            trans_real.endblock_re = old_endblock_re
            trans_real.block_re = old_block_re
            trans_real.templatize = old_templatize
            trans_real.constant_re = old_constant_re

    def extract_strings(self, data):
        # type: (str) -> Dict[str, str]
        """Return the translatable strings found in one template's source."""
        translation_strings = {}  # type: Dict[str, str]
        for regex in frontend_compiled_regexes:
            for match in regex.findall(data):
                translation_strings[match] = ""
        return translation_strings

    def get_translation_strings(self):
        # type: () -> Dict[str, str]
        """Scan every .handlebars template for translatable strings."""
        translation_strings = {}  # type: Dict[str, str]
        dirname = self.get_template_dir()
        for filename in os.listdir(dirname):
            if filename.endswith('handlebars'):
                with open(os.path.join(dirname, filename)) as reader:
                    data = reader.read()
                    translation_strings.update(self.extract_strings(data))
        return translation_strings

    def get_template_dir(self):
        # type: () -> str
        return self.frontend_source

    def get_namespace(self):
        # type: () -> str
        return self.frontend_namespace

    def get_locales(self):
        # type: () -> Iterable[str]
        """Return the locales to process, honoring --locale/--exclude/--all."""
        locale = self.frontend_locale
        exclude = self.frontend_exclude
        process_all = self.frontend_all
        paths = glob.glob('%s/*' % self.default_locale_path,)
        all_locales = [os.path.basename(path) for path in paths if os.path.isdir(path)]

        # Account for excluded locales
        if process_all:
            return all_locales
        else:
            locales = locale or all_locales
            return set(locales) - set(exclude)

    def get_base_path(self):
        # type: () -> str
        return self.frontend_output

    def get_output_paths(self):
        # type: () -> Iterable[str]
        """Yield the JSON output path per locale, creating directories."""
        base_path = self.get_base_path()
        locales = self.get_locales()
        for path in [os.path.join(base_path, locale) for locale in locales]:
            if not os.path.exists(path):
                os.makedirs(path)

            yield os.path.join(path, self.get_namespace())

    def get_new_strings(self, old_strings, translation_strings):
        # type: (Mapping[str, str], Iterable[str]) -> Dict[str, str]
        """
        Missing strings are removed, new strings are added and already
        translated strings are not touched.
        """
        new_strings = {}  # type: Dict[str, str]
        for k in translation_strings:
            new_strings[k] = old_strings.get(k, k)

        return new_strings

    def write_translation_strings(self, translation_strings):
        # type: (Iterable[str]) -> None
        """Merge the extracted strings into each locale's JSON catalog."""
        for locale, output_path in zip(self.get_locales(), self.get_output_paths()):
            self.stdout.write("[frontend] processing locale {}".format(locale))
            try:
                with open(output_path, 'r') as reader:
                    old_strings = json.load(reader)
            except (IOError, ValueError):
                # Missing or corrupt catalog: start from an empty one.
                old_strings = {}

            new_strings = self.get_new_strings(old_strings, translation_strings)
            with open(output_path, 'w') as writer:
                json.dump(new_strings, writer, indent=2)
| apache-2.0 |
philoniare/horizon | openstack_dashboard/test/test_data/nova_data.py | 22 | 30359 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from novaclient.v2 import aggregates
from novaclient.v2 import availability_zones
from novaclient.v2 import certs
from novaclient.v2 import flavor_access
from novaclient.v2 import flavors
from novaclient.v2 import floating_ips
from novaclient.v2 import hosts
from novaclient.v2 import hypervisors
from novaclient.v2 import keypairs
from novaclient.v2 import quotas
from novaclient.v2 import security_group_rules as rules
from novaclient.v2 import security_groups as sec_groups
from novaclient.v2 import servers
from novaclient.v2 import services
from novaclient.v2 import usage
from novaclient.v2 import volume_snapshots as vol_snaps
from novaclient.v2 import volume_types
from novaclient.v2 import volumes
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas as usage_quotas
from openstack_dashboard.test.test_data import utils
class FlavorExtraSpecs(dict):
    """Test double for a flavor extra-specs payload.

    Each key of ``info`` is exposed both as a mapping entry and as an
    instance attribute, and the raw payload is kept in ``_info`` so that
    ``repr`` can display it.
    """

    def __init__(self, info):
        super(FlavorExtraSpecs, self).__init__()
        # Attribute access first, then dict entries, then stash the payload
        # itself (order matters if `info` happens to contain an '_info' key).
        self.__dict__.update(info)
        self.update(info)
        self._info = info

    def __repr__(self):
        return "<FlavorExtraSpecs %s>" % self._info
SERVER_DATA = """
{
"server": {
"OS-EXT-SRV-ATTR:instance_name": "instance-00000005",
"OS-EXT-SRV-ATTR:host": "instance-host",
"OS-EXT-STS:task_state": null,
"addresses": {
"private": [
{
"version": 4,
"addr": "10.0.0.1"
}
]
},
"links": [
{
"href": "%(host)s/v1.1/%(tenant_id)s/servers/%(server_id)s",
"rel": "self"
},
{
"href": "%(host)s/%(tenant_id)s/servers/%(server_id)s",
"rel": "bookmark"
}
],
"image": {
"id": "%(image_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/images/%(image_id)s",
"rel": "bookmark"
}
]
},
"OS-EXT-STS:vm_state": "active",
"flavor": {
"id": "%(flavor_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/flavors/%(flavor_id)s",
"rel": "bookmark"
}
]
},
"id": "%(server_id)s",
"user_id": "%(user_id)s",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "",
"accessIPv6": "",
"progress": null,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "%(status)s",
"updated": "2012-02-28T19:51:27Z",
"hostId": "c461ea283faa0ab5d777073c93b126c68139e4e45934d4fc37e403c2",
"key_name": "%(key_name)s",
"name": "%(name)s",
"created": "2012-02-28T19:51:17Z",
"tenant_id": "%(tenant_id)s",
"metadata": {"someMetaLabel": "someMetaData",
"some<b>html</b>label": "<!--",
"empty": ""}
}
}
"""
USAGE_DATA = """
{
"total_memory_mb_usage": 64246.89777777778,
"total_vcpus_usage": 125.48222222222223,
"total_hours": 125.48222222222223,
"total_local_gb_usage": 0,
"tenant_id": "%(tenant_id)s",
"stop": "2012-01-31 23:59:59",
"start": "2012-01-01 00:00:00",
"server_usages": [
{
"memory_mb": %(flavor_ram)s,
"uptime": 442321,
"started_at": "2012-01-26 20:38:21",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 122.87361111111112,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
},
{
"memory_mb": %(flavor_ram)s,
"uptime": 9367,
"started_at": "2012-01-31 20:54:15",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 2.608611111111111,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
}
]
}
"""
def data(TEST):
    """Populate *TEST* with mock nova/cinder fixtures for the unit tests.

    Fills TEST with TestDataContainer instances for every nova-related
    resource (servers, flavors, volumes, security groups, quotas, usage,
    hypervisors, services, aggregates, hosts, ...) and loads each container
    with representative objects built from the real novaclient/cinderclient
    resource classes.

    Fix vs. previous revision: the dead ``global current_int_id`` statement
    inside ``get_id`` was removed -- no such module-level global exists; the
    integer counter is kept on the function attribute
    ``get_id.current_int_id``.
    """
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.volume_snapshots = utils.TestDataContainer()
    TEST.volume_types = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()

    # Data return by novaclient.
    # It is used if API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3775",
         "name": 'test_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Volume name',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
         "name": '',
         "status": 'in-use',
         "size": 10,
         "display_name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
         "name": 'my_volume',
         "status": 'in-use',
         "size": 30,
         "display_name": 'My Volume',
         "display_description": '',
         "device": "/dev/hdk",
         "created_at": '2011-05-01 11:54:33',
         "volume_type": 'vol_type_2',
         "attachments": [{"id": "2", "server_id": '1',
                          "device": "/dev/hdk"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3771",
         "name": 'non_bootable_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Non Bootable Volume',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': 1,
                                         'name': 'vol_type_1'})
    vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': 2,
                                         'name': 'vol_type_2'})
    TEST.volume_types.add(vol_type1, vol_type2)

    # Flavors
    flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                               'name': 'm1.tiny',
                               'vcpus': 1,
                               'disk': 0,
                               'ram': 512,
                               'swap': 0,
                               'extra_specs': {},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 0})
    flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               'name': 'm1.massive',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': {'Trusted': True, 'foo': 'bar'},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
                               'name': 'm1.secret',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': {},
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_4 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
                               'name': 'm1.metadata',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': FlavorExtraSpecs(
                                   {'key': 'key_mock',
                                    'value': 'value_mock'}),
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "1",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "2",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):
        def get_id(is_uuid):
            # The integer counter lives on the function object itself
            # (get_id.current_int_id); no module-level global is involved.
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"default",
                                                "description": u"default"})
        sec_group_2 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"other_group",
                                                "description": u"NotDefault."})
        sec_group_3 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"another_group",
                                                "description": u"NotDefault."})

        rule = {'id': get_id(is_uuid),
                'group': {},
                'ip_protocol': u"tcp",
                'from_port': u"80",
                'to_port': u"80",
                'parent_group_id': sec_group_1.id,
                'ip_range': {'cidr': u"0.0.0.0/32"}}

        icmp_rule = {'id': get_id(is_uuid),
                     'group': {},
                     'ip_protocol': u"icmp",
                     'from_port': u"9",
                     'to_port': u"5",
                     'parent_group_id': sec_group_1.id,
                     'ip_range': {'cidr': u"0.0.0.0/32"}}

        group_rule = {'id': 3,
                      'group': {},
                      'ip_protocol': u"tcp",
                      'from_port': u"80",
                      'to_port': u"80",
                      'parent_group_id': sec_group_1.id,
                      'source_group_id': sec_group_1.id}

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {"rules": [rule_obj, rule_obj2, rule_obj3],
                "groups": [sec_group_1, sec_group_2, sec_group_3]}

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = ['floating_ips', 'fixed_ips',
                            'security_groups', 'security_group_rules']
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'ram': {'used': 0,
                                'quota': 10000},
                        'cores': {'used': 0,
                                  'quota': 20},
                        'floating_ips': {'used': 0,
                                         'quota': 10},
                        'security_groups': {'used': 0,
                                            'quota': 10},
                        'volumes': {'used': 0,
                                    'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])
    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {"absolute": {"maxImageMeta": 128,
                           "maxPersonality": 5,
                           "maxPersonalitySize": 10240,
                           "maxSecurityGroupRules": 20,
                           "maxSecurityGroups": 10,
                           "maxServerMeta": 128,
                           "maxTotalCores": 20,
                           "maxTotalFloatingIps": 10,
                           "maxTotalInstances": 10,
                           "maxTotalKeypairs": 100,
                           "maxTotalRAMSize": 10000,
                           "totalCoresUsed": 0,
                           "totalInstancesUsed": 0,
                           "totalKeyPairsUsed": 0,
                           "totalRAMUsed": 0,
                           "totalSecurityGroupsUsed": 0}}
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {"host": "http://nova.example.com:8774",
            "name": "server_1",
            "status": "ACTIVE",
            "tenant_id": TEST.tenants.first().id,
            "user_id": TEST.user.id,
            "server_id": "1",
            "flavor_id": flavor_1.id,
            "image_id": TEST.images.first().id,
            "key_name": keypair.name}
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2",
                 "status": "BUILD",
                 "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": u'\u4e91\u89c4\u5219',
                 "status": "ACTIVE",
                 "tenant_id": tenant3.id,
                 "server_id": "3"})
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_4",
                 "status": "PAUSED",
                 "server_id": "4"})
    server_4 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3, server_4)

    # VNC Console Data
    console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
                            u'type': u'novnc'}}
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
                            u'type': u'spice'}}
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {u'console': {u'url': u'http://example.com:6080/rdp_auto.html',
                            u'type': u'rdp'}}
    TEST.servers.rdp_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {'id': 1,
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_2 = {'id': 2,
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    # this floating ip is for lbaas tests
    fip_3 = {'id': 3,
             'fixed_ip': '10.0.0.5',
             # the underlying class maps the instance id to port id
             'instance_id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2),
                              generate_fip(fip_3))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)),
                          nova.FloatingIp(generate_fip(fip_3)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {'id': str(uuid.uuid4()),
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_4 = {'id': str(uuid.uuid4()),
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {"tenant_id": TEST.tenant.id,
                  "instance_name": server_1.name,
                  "flavor_name": flavor_1.name,
                  "flavor_vcpus": flavor_1.vcpus,
                  "flavor_disk": flavor_1.disk,
                  "flavor_ram": flavor_1.ram}
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {"tenant_id": tenant3.id,
                    "instance_name": server_3.name,
                    "flavor_name": flavor_1.name,
                    "flavor_vcpus": flavor_1.vcpus,
                    "flavor_disk": flavor_1.disk,
                    "flavor_ram": flavor_1.ram}
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    volume_snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
         'display_name': 'test snapshot',
         'display_description': 'vol snap!',
         'size': 40,
         'status': 'available',
         'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'})
    volume_snapshot2 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
         'display_name': '',
         'display_description': 'vol snap 2!',
         'size': 80,
         'status': 'available',
         'volume_id': '3b189ac8-9166-ac7f-90c9-16c8bf9e01ac'})
    TEST.volume_snapshots.add(volume_snapshot)
    TEST.volume_snapshots.add(volume_snapshot2)

    cert_data = {'private_key': 'private',
                 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(availability_zones.AvailabilityZone(
        availability_zones.AvailabilityZoneManager(None),
        {
            'zoneName': 'nova',
            'zoneState': {'available': True},
            'hosts': {
                "host001": {
                    "nova-network": {
                        "active": True,
                        "available": True,
                    },
                },
            },
        },
    ))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack001", "id": 3},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
            "servers": [{"name": "test_name", "uuid": "test_uuid"}]
        },
    )
    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack002", "id": 4},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
            "servers": [{"name": "test_name_2", "uuid": "test_uuid_2"}]
        },
    )
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "instance-host", "id": 5},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-conductor",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })
    service_2 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })
    service_3 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "down",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack002",
        "disabled_reason": None,
    })
    service_4 = services.Service(services.ServiceManager(None), {
        "status": "disabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack003",
        "disabled_reason": None,
    })
    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)
    TEST.services.add(service_4)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "foo",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 1,
        "metadata": {"foo": "testing", "bar": "testing"},
    })
    aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "bar",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 2,
        "metadata": {"foo": "testing", "bar": "testing"},
    })
    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })
    host2 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack002",
        "service": "nova-conductor",
        "zone": "testing",
    })
    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })
    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })
    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)
| apache-2.0 |
songfj/calibre | src/calibre/library/catalogs/epub_mobi.py | 14 | 25308 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import datetime, os, time
from collections import namedtuple
from calibre import strftime
from calibre.customize import CatalogPlugin
from calibre.customize.conversion import OptionRecommendation, DummyReporter
from calibre.library import current_library_name
from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.localization import calibre_langcode_to_name, canonicalize_lang, get_lang
# Record type describing one CLI option of the catalog generator: the flag
# string, its default value, the destination attribute on the opts object,
# the optparse action, and the help text.
Option = namedtuple('Option', ['option', 'default', 'dest', 'action', 'help'])
class EPUB_MOBI(CatalogPlugin):
    'ePub catalog generator'

    # CatalogPlugin identification/registration metadata.
    name = 'Catalog_EPUB_MOBI'
    description = 'AZW3/EPUB/MOBI catalog generator'
    supported_platforms = ['windows', 'osx', 'linux']
    minimum_calibre_version = (0, 7, 40)
    author = 'Greg Riker'
    version = (1, 0, 0)
    # Output formats this plugin can produce.
    file_types = set(['azw3', 'epub', 'mobi'])

    # Allowed bounds (inches) for the cover thumbnail size; run() clamps
    # opts.thumb_width into this range.
    THUMB_SMALLEST = "1.0"
    THUMB_LARGEST = "2.0"

    # Command-line options exposed by `calibredb catalog` / ebook-convert.
    cli_options = [Option('--catalog-title',  # {{{
                       default='My Books',
                       dest='catalog_title',
                       action=None,
                       help=_('Title of generated catalog used as title in metadata.\n'
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--cross-reference-authors',
                       default=False,
                       dest='cross_reference_authors',
                       action='store_true',
                       help=_("Create cross-references in Authors section for books with multiple authors.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--debug-pipeline',
                       default=None,
                       dest='debug_pipeline',
                       action=None,
                       help=_("Save the output from different stages of the conversion "
                              "pipeline to the specified "
                              "directory. Useful if you are unsure at which stage "
                              "of the conversion process a bug is occurring.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--exclude-genre',
                       default='\[.+\]|^\+$',
                       dest='exclude_genre',
                       action=None,
                       help=_("Regex describing tags to exclude as genres.\n"
                              "Default: '%default' excludes bracketed tags, e.g. '[Project Gutenberg]', and '+', the default tag for read books.\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--exclusion-rules',
                       default="(('Catalogs','Tags','Catalog'),)",
                       dest='exclusion_rules',
                       action=None,
                       help=_("Specifies the rules used to exclude books from the generated catalog.\n"
                              "The model for an exclusion rule is either\n('<rule name>','Tags','<comma-separated list of tags>') or\n"
                              "('<rule name>','<custom column>','<pattern>').\n"
                              "For example:\n"
                              "(('Archived books','#status','Archived'),)\n"
                              "will exclude a book with a value of 'Archived' in the custom column 'status'.\n"
                              "When multiple rules are defined, all rules will be applied.\n"
                              "Default: \n" + '"' + '%default' + '"' + "\n"
                              "Applies to AZW3, ePub, MOBI output formats")),
                   Option('--generate-authors',
                       default=False,
                       dest='generate_authors',
                       action='store_true',
                       help=_("Include 'Authors' section in catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--generate-descriptions',
                       default=False,
                       dest='generate_descriptions',
                       action='store_true',
                       help=_("Include 'Descriptions' section in catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--generate-genres',
                       default=False,
                       dest='generate_genres',
                       action='store_true',
                       help=_("Include 'Genres' section in catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--generate-titles',
                       default=False,
                       dest='generate_titles',
                       action='store_true',
                       help=_("Include 'Titles' section in catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--generate-series',
                       default=False,
                       dest='generate_series',
                       action='store_true',
                       help=_("Include 'Series' section in catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--generate-recently-added',
                       default=False,
                       dest='generate_recently_added',
                       action='store_true',
                       help=_("Include 'Recently Added' section in catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--genre-source-field',
                       default=_('Tags'),
                       dest='genre_source_field',
                       action=None,
                       help=_("Source field for Genres section.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--header-note-source-field',
                       default='',
                       dest='header_note_source_field',
                       action=None,
                       help=_("Custom field containing note text to insert in Description header.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--merge-comments-rule',
                       default='::',
                       dest='merge_comments_rule',
                       action=None,
                       help=_("#<custom field>:[before|after]:[True|False] specifying:\n"
                              " <custom field> Custom field containing notes to merge with Comments\n"
                              " [before|after] Placement of notes with respect to Comments\n"
                              " [True|False] - A horizontal rule is inserted between notes and Comments\n"
                              "Default: '%default'\n"
                              "Applies to AZW3, ePub, MOBI output formats")),
                   Option('--output-profile',
                       default=None,
                       dest='output_profile',
                       action=None,
                       help=_("Specifies the output profile. In some cases, an output profile is required to optimize the catalog for the device. For example, 'kindle' or 'kindle_dx' creates a structured Table of Contents with Sections and Articles.\n"  # noqa
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--prefix-rules',
                       default="(('Read books','tags','+','\u2713'),('Wishlist item','tags','Wishlist','\u00d7'))",
                       dest='prefix_rules',
                       action=None,
                       help=_("Specifies the rules used to include prefixes indicating read books, wishlist items and other user-specified prefixes.\n"
                              "The model for a prefix rule is ('<rule name>','<source field>','<pattern>','<prefix>').\n"
                              "When multiple rules are defined, the first matching rule will be used.\n"
                              "Default:\n" + '"' + '%default' + '"' + "\n"
                              "Applies to AZW3, ePub, MOBI output formats")),
                   Option('--preset',
                       default=None,
                       dest='preset',
                       action=None,
                       help=_("Use a named preset created with the GUI Catalog builder.\n"
                              "A preset specifies all settings for building a catalog.\n"
                              "Default: '%default'\n"
                              "Applies to AZW3, ePub, MOBI output formats")),
                   Option('--use-existing-cover',
                       default=False,
                       dest='use_existing_cover',
                       action='store_true',
                       help=_("Replace existing cover when generating the catalog.\n"
                              "Default: '%default'\n"
                              "Applies to: AZW3, ePub, MOBI output formats")),
                   Option('--thumb-width',
                       default='1.0',
                       dest='thumb_width',
                       action=None,
                       help=_("Size hint (in inches) for book covers in catalog.\n"
                              "Range: 1.0 - 2.0\n"
                              "Default: '%default'\n"
                              "Applies to AZW3, ePub, MOBI output formats")),
                   ]
    # }}}

    def run(self, path_to_output, opts, db, notification=DummyReporter()):
        """Build the catalog sources, then convert them to *path_to_output*.

        Normalizes/validates ``opts`` (preset expansion, output profile,
        rule tuples, thumbnail width), drives CatalogBuilder to generate
        the catalog OPF/HTML, then runs the conversion Plumber to produce
        the final AZW3/EPUB/MOBI file.  Returns 1 on a bad --preset,
        otherwise falls through to ``catalog.error`` (builder error list).
        """
        from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder
        from calibre.utils.logging import default_log as log
        from calibre.utils.config import JSONConfig

        # If preset specified from the cli, insert stored options from JSON file
        if hasattr(opts, 'preset') and opts.preset:
            available_presets = JSONConfig("catalog_presets")
            if not opts.preset in available_presets:
                if available_presets:
                    print(_('Error: Preset "%s" not found.' % opts.preset))
                    print(_('Stored presets: %s' % ', '.join([p for p in sorted(available_presets.keys())])))
                else:
                    print(_('Error: No stored presets.'))
                return 1

            # Copy the relevant preset values to the opts object
            for item in available_presets[opts.preset]:
                if not item in ['exclusion_rules_tw', 'format', 'prefix_rules_tw']:
                    setattr(opts, item, available_presets[opts.preset][item])

            # Provide an unconnected device
            opts.connected_device = {
                'is_device_connected': False,
                'kind': None,
                'name': None,
                'save_template': None,
                'serial': None,
                'storage': None,
            }

            # Convert prefix_rules and exclusion_rules from JSON lists to tuples
            prs = []
            for rule in opts.prefix_rules:
                prs.append(tuple(rule))
            opts.prefix_rules = tuple(prs)

            ers = []
            for rule in opts.exclusion_rules:
                ers.append(tuple(rule))
            opts.exclusion_rules = tuple(ers)

        opts.log = log
        # Output format is taken from the target file's extension.
        opts.fmt = self.fmt = path_to_output.rpartition('.')[2]

        # Add local options
        opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y'))
        opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d'))
        opts.connected_kindle = False

        # Finalize output_profile
        op = opts.output_profile
        if op is None:
            op = 'default'

        # A connected Kindle overrides the requested profile; DX models are
        # identified by serial-number prefix B004/B005.
        if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower():
            opts.connected_kindle = True
            if opts.connected_device['serial'] and \
               opts.connected_device['serial'][:4] in ['B004', 'B005']:
                op = "kindle_dx"
            else:
                op = "kindle"

        # Text-clipping lengths depend on the (kindle) profile.
        opts.description_clip = 380 if op.endswith('dx') or 'kindle' not in op else 100
        opts.author_clip = 100 if op.endswith('dx') or 'kindle' not in op else 60
        opts.output_profile = op

        opts.basename = "Catalog"
        # GUI invocations attach a 'sync' attribute; its absence means CLI.
        opts.cli_environment = not hasattr(opts, 'sync')

        # Hard-wired to always sort descriptions by author, with series after non-series
        opts.sort_descriptions_by_author = True

        build_log = []

        build_log.append(u"%s('%s'): Generating %s %sin %s environment, locale: '%s'" %
                         (self.name,
                          current_library_name(),
                          self.fmt,
                          'for %s ' % opts.output_profile if opts.output_profile else '',
                          'CLI' if opts.cli_environment else 'GUI',
                          calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False))
                         )

        # If exclude_genre is blank, assume user wants all tags as genres
        if opts.exclude_genre.strip() == '':
            # opts.exclude_genre = '\[^.\]'
            # build_log.append(" converting empty exclude_genre to '\[^.\]'")
            # 'a^' is a regex that can never match, i.e. exclude nothing.
            opts.exclude_genre = 'a^'
            build_log.append(" converting empty exclude_genre to 'a^'")
        if opts.connected_device['is_device_connected'] and \
           opts.connected_device['kind'] == 'device':
            if opts.connected_device['serial']:
                # Mask all but the first four serial characters in the log.
                build_log.append(u" connected_device: '%s' #%s%s " %
                                 (opts.connected_device['name'],
                                  opts.connected_device['serial'][0:4],
                                  'x' * (len(opts.connected_device['serial']) - 4)))
                for storage in opts.connected_device['storage']:
                    if storage:
                        build_log.append(u" mount point: %s" % storage)
            else:
                build_log.append(u" connected_device: '%s'" % opts.connected_device['name'])
                try:
                    for storage in opts.connected_device['storage']:
                        if storage:
                            build_log.append(u" mount point: %s" % storage)
                except:
                    build_log.append(u" (no mount points)")
        else:
            build_log.append(u" connected_device: '%s'" % opts.connected_device['name'])

        opts_dict = vars(opts)
        if opts_dict['ids']:
            build_log.append(" book count: %d" % len(opts_dict['ids']))

        # Collect the enabled catalog sections; if none were requested from
        # the CLI, enable them all.
        sections_list = []
        if opts.generate_authors:
            sections_list.append('Authors')
        if opts.generate_titles:
            sections_list.append('Titles')
        if opts.generate_series:
            sections_list.append('Series')
        if opts.generate_genres:
            sections_list.append('Genres')
        if opts.generate_recently_added:
            sections_list.append('Recently Added')
        if opts.generate_descriptions:
            sections_list.append('Descriptions')

        if not sections_list:
            if opts.cli_environment:
                opts.log.warn('*** No Section switches specified, enabling all Sections ***')
                opts.generate_authors = True
                opts.generate_titles = True
                opts.generate_series = True
                opts.generate_genres = True
                opts.generate_recently_added = True
                opts.generate_descriptions = True
                sections_list = ['Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions']
            else:
                opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***')
                return ["No Included Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"]
        if opts.fmt == 'mobi' and sections_list == ['Descriptions']:
            # MOBI TOC structure requires at least an Authors section.
            warning = _("\n*** Adding 'By Authors' Section required for MOBI output ***")
            opts.log.warn(warning)
            sections_list.insert(0, 'Authors')
            opts.generate_authors = True

        opts.log(u" Sections: %s" % ', '.join(sections_list))
        opts.section_list = sections_list

        # Limit thumb_width to 1.0" - 2.0"
        try:
            if float(opts.thumb_width) < float(self.THUMB_SMALLEST):
                log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST))
                opts.thumb_width = self.THUMB_SMALLEST
            if float(opts.thumb_width) > float(self.THUMB_LARGEST):
                log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_LARGEST))
                opts.thumb_width = self.THUMB_LARGEST
            opts.thumb_width = "%.2f" % float(opts.thumb_width)
        except:
            # Unparseable value: fall back to the smallest allowed size.
            log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST))
            opts.thumb_width = "1.0"

        # eval prefix_rules if passed from command line
        # NOTE(review): eval() of a CLI-supplied string executes arbitrary
        # code; acceptable only because the string comes from the local user.
        if type(opts.prefix_rules) is not tuple:
            try:
                opts.prefix_rules = eval(opts.prefix_rules)
            except:
                log.error("malformed --prefix-rules: %s" % opts.prefix_rules)
                raise
            for rule in opts.prefix_rules:
                if len(rule) != 4:
                    log.error("incorrect number of args for --prefix-rules: %s" % repr(rule))

        # eval exclusion_rules if passed from command line
        if type(opts.exclusion_rules) is not tuple:
            try:
                opts.exclusion_rules = eval(opts.exclusion_rules)
            except:
                log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules)
                raise
            for rule in opts.exclusion_rules:
                if len(rule) != 3:
                    log.error("incorrect number of args for --exclusion-rules: %s" % repr(rule))

        # Display opts
        keys = sorted(opts_dict.keys())
        build_log.append(" opts:")
        for key in keys:
            if key in ['catalog_title', 'author_clip', 'connected_kindle', 'creator',
                       'cross_reference_authors', 'description_clip', 'exclude_book_marker',
                       'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt',
                       'genre_source_field', 'header_note_source_field', 'merge_comments_rule',
                       'output_profile', 'prefix_rules', 'preset', 'read_book_marker',
                       'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync',
                       'thumb_width', 'use_existing_cover', 'wishlist_tag']:
                build_log.append(" %s: %s" % (key, repr(opts_dict[key])))
        if opts.verbose:
            log('\n'.join(line for line in build_log))

        # Capture start_time
        opts.start_time = time.time()

        self.opts = opts

        if opts.verbose:
            log.info(" Begin catalog source generation (%s)" %
                     str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))

        # Launch the Catalog builder
        catalog = CatalogBuilder(db, opts, self, report_progress=notification)

        try:
            catalog.build_sources()
            if opts.verbose:
                log.info(" Completed catalog source generation (%s)\n" %
                         str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))
        except (AuthorSortMismatchException, EmptyCatalogException), e:
            # Known build failures: log and fall through (Python 2 syntax).
            log.error(" *** Terminated catalog generation: %s ***" % e)
        except:
            log.error(" unhandled exception in catalog generator")
            raise
        else:
            # Source generation succeeded: convert to the requested format.
            recommendations = []
            recommendations.append(('remove_fake_margins', False,
                                    OptionRecommendation.HIGH))
            recommendations.append(('comments', '', OptionRecommendation.HIGH))

            """
            >>> Use to debug generated catalog code before pipeline conversion <<<
            """
            GENERATE_DEBUG_EPUB = False
            if GENERATE_DEBUG_EPUB:
                catalog_debug_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'Catalog debug')
                setattr(opts, 'debug_pipeline', os.path.expanduser(catalog_debug_path))

            dp = getattr(opts, 'debug_pipeline', None)
            if dp is not None:
                recommendations.append(('debug_pipeline', dp,
                                        OptionRecommendation.HIGH))

            if opts.output_profile and opts.output_profile.startswith("kindle"):
                recommendations.append(('output_profile', opts.output_profile,
                                        OptionRecommendation.HIGH))
                recommendations.append(('book_producer', opts.output_profile,
                                        OptionRecommendation.HIGH))
                if opts.fmt == 'mobi':
                    recommendations.append(('no_inline_toc', True,
                                            OptionRecommendation.HIGH))
                    recommendations.append(('verbose', 2,
                                            OptionRecommendation.HIGH))

            # Use existing cover or generate new cover
            cpath = None
            existing_cover = False
            try:
                # Look for a previously generated catalog in the library
                # (title matches, author 'calibre') and reuse its cover.
                search_text = 'title:"%s" author:%s' % (
                    opts.catalog_title.replace('"', '\\"'), 'calibre')
                matches = db.search(search_text, return_matches=True, sort_results=False)
                if matches:
                    cpath = db.cover(matches[0], index_is_id=True, as_path=True)
                    if cpath and os.path.exists(cpath):
                        existing_cover = True
            except:
                pass

            if self.opts.use_existing_cover and not existing_cover:
                log.warning("no existing catalog cover found")

            if self.opts.use_existing_cover and existing_cover:
                recommendations.append(('cover', cpath, OptionRecommendation.HIGH))
                log.info("using existing catalog cover")
            else:
                from calibre.ebooks.covers import calibre_cover2
                log.info("replacing catalog cover")
                new_cover_path = PersistentTemporaryFile(suffix='.jpg')
                new_cover = calibre_cover2(opts.catalog_title, 'calibre')
                new_cover_path.write(new_cover)
                new_cover_path.close()
                recommendations.append(('cover', new_cover_path.name, OptionRecommendation.HIGH))

            # Run ebook-convert
            from calibre.ebooks.conversion.plumber import Plumber
            plumber = Plumber(os.path.join(catalog.catalog_path, opts.basename + '.opf'),
                              path_to_output, log, report_progress=notification,
                              abort_after_input_dump=False)
            plumber.merge_ui_recommendations(recommendations)
            plumber.run()

            try:
                os.remove(cpath)
            except:
                pass

            if GENERATE_DEBUG_EPUB:
                # Re-pack the debug pipeline output into an inspectable epub.
                from calibre.ebooks.epub import initialize_container
                from calibre.ebooks.tweak import zip_rebuilder
                from calibre.utils.zipfile import ZipFile
                input_path = os.path.join(catalog_debug_path, 'input')
                epub_shell = os.path.join(catalog_debug_path, 'epub_shell.zip')
                initialize_container(epub_shell, opf_name='content.opf')
                with ZipFile(epub_shell, 'r') as zf:
                    zf.extractall(path=input_path)
                os.remove(epub_shell)
                zip_rebuilder(input_path, os.path.join(catalog_debug_path, 'input.epub'))

            if opts.verbose:
                log.info(" Catalog creation complete (%s)\n" %
                         str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))

        # returns to gui2.actions.catalog:catalog_generated()
        return catalog.error
| gpl-3.0 |
ingted/crmsh | modules/ui_resource.py | 1 | 24285 | # Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
# See COPYING for license information.
from . import command
from . import completers as compl
from . import constants
from . import config
from . import utils
from . import xmlutil
from . import ui_utils
from . import options
from .msg import common_error, common_err, common_info, common_debug
from .msg import no_prog_err
from .cibconfig import cib_factory
def rm_meta_attribute(node, attr, l, force_children=False):
    """
    Collect into *l* the nvpair elements that define *attr*, looking at
    this node's meta_attributes sets and recursing into child resources.

    Children of groups are skipped unless force_children is set.
    """
    for child in node.iterchildren():
        if child.tag == "meta_attributes":
            match = xmlutil.get_attr_in_set(child, attr)
            if match is not None:
                l.append(match)
            continue
        recurse = force_children
        if not recurse:
            recurse = xmlutil.is_child_rsc(child) and child.getparent().tag != "group"
        if recurse:
            rm_meta_attribute(child, attr, l, force_children=force_children)
def get_children_with_different_attr(node, attr, value):
    """
    Return the primitive descendants of *node* whose meta attribute
    *attr* is set to something other than *value*.
    """
    differing = []
    for prim in node.xpath(".//primitive"):
        for meta_set in xmlutil.get_set_nodes(prim, "meta_attributes", create=False):
            current = xmlutil.get_attr_value(meta_set, attr)
            if current is not None and current != value:
                differing.append(prim)
                break
    return differing
def set_deep_meta_attr_node(target_node, attr, value):
    """
    Set meta attribute *attr* to *value* on *target_node*, first removing
    conflicting definitions from child resources so the new value takes
    effect.  Always returns True.
    """
    # Collect nvpairs defining `attr` in children of a clone: they would
    # shadow the value set on the clone itself.
    nvpair_l = []
    if xmlutil.is_clone(target_node):
        for c in target_node.iterchildren():
            if xmlutil.is_child_rsc(c):
                rm_meta_attribute(c, attr, nvpair_l)
    # For groups (or cloned groups), children may deliberately override the
    # attribute; whether to wipe those is governed by core.manage_children
    # ("always" / "ask" / "never").
    if config.core.manage_children != "never" and \
            (xmlutil.is_group(target_node) or
             (xmlutil.is_clone(target_node) and xmlutil.cloned_el(target_node) == "group")):
        odd_children = get_children_with_different_attr(target_node, attr, value)
        for c in odd_children:
            if config.core.manage_children == "always" or \
                    (config.core.manage_children == "ask" and
                     utils.ask("Do you want to override %s for child resource %s?" %
                               (attr, c.get("id")))):
                common_debug("force remove meta attr %s from %s" %
                             (attr, c.get("id")))
                rm_meta_attribute(c, attr, nvpair_l, force_children=True)
    # Drop the collected overrides, then prune meta_attributes sets that
    # became empty as a result.
    xmlutil.rmnodes(list(set(nvpair_l)))
    xmlutil.xml_processnodes(target_node,
                             xmlutil.is_emptynvpairs, xmlutil.rmnodes)
    # work around issue with pcs interoperability
    # by finding existing nvpairs -- if there are any, just
    # set the value in those. Otherwise fall back to adding
    # to all meta_attributes tags
    nvpairs = target_node.xpath("./meta_attributes/nvpair[@name='%s']" % (attr))
    if len(nvpairs) > 0:
        for nvpair in nvpairs:
            nvpair.set("value", value)
    else:
        for n in xmlutil.get_set_nodes(target_node, "meta_attributes", create=True):
            xmlutil.set_attr(n, attr, value)
    return True
def set_deep_meta_attr(rsc, attr, value, commit=True):
    """
    If the referenced rsc is a primitive that belongs to a group,
    then set its attribute.
    Otherwise, go up to the topmost resource which contains this
    resource and set the attribute there (i.e. if the resource is
    cloned).
    If it's a group then check its children. If any of them has
    the attribute set to a value different from the one given,
    then ask the user whether to reset them or not (exact
    behaviour depends on the value of config.core.manage_children).

    Returns True on success; emits an error and returns False otherwise.
    """
    def update_obj(obj):
        """
        set the meta attribute in the given object
        """
        node = obj.node
        obj.set_updated()
        if not (node.tag == "primitive" and
                node.getparent().tag == "group"):
            node = xmlutil.get_topmost_rsc(node)
        return set_deep_meta_attr_node(node, attr, value)

    def flatten(objs):
        # Yield elements of one-level-nested lists in order.
        for obj in objs:
            if isinstance(obj, list):
                for subobj in obj:
                    yield subobj
            else:
                yield obj

    def resolve(obj):
        # A tag object references other CIB objects; expand it to the
        # objects it refers to (dropping dangling references).
        if obj.obj_type == 'tag':
            ret = [cib_factory.find_object(o) for o in obj.node.xpath('./obj_ref/@id')]
            ret = [r for r in ret if r is not None]
            return ret
        return obj

    def is_resource(obj):
        return xmlutil.is_resource(obj.node)

    objs = cib_factory.find_objects(rsc)
    if objs is None:
        common_error("CIB is not valid!")
        return False
    # Expand tags (possibly referencing other tags) until only concrete
    # objects remain.
    while any(obj for obj in objs if obj.obj_type == 'tag'):
        objs = list(flatten(resolve(obj) for obj in objs))
    # Use a list comprehension rather than filter(): on Python 3, filter()
    # returns a lazy iterator that is always truthy -- the "Resource not
    # found" check below would never trigger -- and it would already be
    # exhausted by the debug message's join() before update_obj() ran.
    objs = [obj for obj in objs if is_resource(obj)]
    common_debug("set_deep_meta_attr: %s" % (', '.join([obj.obj_id for obj in objs])))
    if not objs:
        common_error("Resource not found: %s" % (rsc))
        return False
    ok = all(update_obj(obj) for obj in objs)
    if not ok:
        common_error("Failed to update meta attributes for %s" % (rsc))
        return False
    if not commit:
        return True
    ok = cib_factory.commit()
    if not ok:
        common_error("Failed to commit updates to %s" % (rsc))
        return False
    return True
def cleanup_resource(rsc, node=''):
    """
    Run a crm_resource cleanup for *rsc*: on the named node when *node*
    is given, otherwise on all live nodes.  Returns True on success.
    """
    if not (utils.is_name_sane(rsc) and utils.is_name_sane(node)):
        return False
    if node:
        cmd = RscMgmt.rsc_cleanup % (rsc, node)
    else:
        cmd = RscMgmt.rsc_cleanup_all % (rsc)
    return utils.ext_cmd(cmd) == 0
# Tab-completion helpers shared by the attribute-style subcommands below
# (param/meta/utilization/failcount and the trace commands).
_attrcmds = compl.choice(['delete', 'set', 'show'])
_raoperations = compl.choice(constants.ra_operations)
class RscMgmt(command.UI):
    '''
    Resources management class

    Implements the "resource" sublevel of the crm shell: status,
    start/stop/restart, promotion, migration, cleanup, attribute
    management and RA tracing.  Most commands shell out to
    crm_resource / crm_attribute / cibsecret using the command
    templates below; '%s' placeholders are filled with names that
    have been checked by utils.is_name_sane() first.
    '''
    name = "resource"
    rsc_status_all = "crm_resource -L"
    rsc_status = "crm_resource --locate -r '%s'"
    rsc_showxml = "crm_resource -q -r '%s'"
    rsc_setrole = "crm_resource --meta -r '%s' -p target-role -v '%s'"
    rsc_migrate = "crm_resource --quiet --move -r '%s' %s"
    rsc_unmigrate = "crm_resource --quiet --clear -r '%s'"
    rsc_ban = "crm_resource --ban -r '%s' %s"
    rsc_cleanup = "crm_resource -C -r '%s' -H '%s'"
    rsc_cleanup_all = "crm_resource -C -r '%s'"
    rsc_maintenance = "crm_resource -r '%s' --meta -p maintenance -v '%s'"
    # Per-verb template tables used through ui_utils.manage_attr().
    rsc_param = {
        'set': "crm_resource -r '%s' -p '%s' -v '%s'",
        'delete': "crm_resource -r '%s' -d '%s'",
        'show': "crm_resource -r '%s' -g '%s'",
    }
    rsc_meta = {
        'set': "crm_resource --meta -r '%s' -p '%s' -v '%s'",
        'delete': "crm_resource --meta -r '%s' -d '%s'",
        'show': "crm_resource --meta -r '%s' -g '%s'",
    }
    rsc_failcount = {
        'set': "crm_attribute -t status -n 'fail-count-%s' -N '%s' -v '%s' -d 0",
        'delete': "crm_attribute -t status -n 'fail-count-%s' -N '%s' -D -d 0",
        'show': "crm_attribute -t status -n 'fail-count-%s' -N '%s' -G -d 0",
    }
    rsc_utilization = {
        'set': "crm_resource -z -r '%s' -p '%s' -v '%s'",
        'delete': "crm_resource -z -r '%s' -d '%s'",
        'show': "crm_resource -z -r '%s' -g '%s'",
    }
    rsc_secret = {
        'set': "cibsecret set '%s' '%s' '%s'",
        'stash': "cibsecret stash '%s' '%s'",
        'unstash': "cibsecret unstash '%s' '%s'",
        'delete': "cibsecret delete '%s' '%s'",
        'show': "cibsecret get '%s' '%s'",
        'check': "cibsecret check '%s' '%s'",
    }
    rsc_refresh = "crm_resource -C"
    rsc_refresh_node = "crm_resource -C -H '%s'"
    rsc_reprobe = "crm_resource -C"
    rsc_reprobe_node = "crm_resource -C -H '%s'"

    def requires(self):
        # This sublevel is only usable when the pacemaker CLI tools exist.
        for program in ('crm_resource', 'crm_attribute'):
            if not utils.is_program(program):
                no_prog_err(program)
                return False
        return True

    @command.alias('show', 'list')
    @command.completers(compl.resources)
    def do_status(self, context, *resources):
        "usage: status [<rsc> ...]"
        if len(resources) > 0:
            rc = True
            for rsc in resources:
                if not utils.is_name_sane(rsc):
                    return False
                # Accumulate success across all requested resources.
                rc = rc and (utils.ext_cmd(self.rsc_status % rsc) == 0)
            return rc
        else:
            return utils.ext_cmd(self.rsc_status_all) == 0

    def _commit_meta_attr(self, context, rsc, name, value):
        """
        Perform change to resource
        """
        if not utils.is_name_sane(rsc):
            return False
        # If the user is in the middle of a configure edit session, only
        # stage the change instead of committing it.
        commit = not cib_factory.has_cib_changed()
        if not commit:
            context.info("Currently editing the CIB, changes will not be committed")
        return set_deep_meta_attr(rsc, name, value, commit=commit)

    def _commit_meta_attrs(self, context, resources, name, value):
        """
        Perform change to list of resources
        """
        for rsc in resources:
            if not utils.is_name_sane(rsc):
                return False
        commit = not cib_factory.has_cib_changed()
        if not commit:
            context.info("Currently editing the CIB, changes will not be committed")
        rc = True
        for rsc in resources:
            # Stage every change first; commit once at the end.
            rc = rc and set_deep_meta_attr(rsc, name, value, commit=False)
        if commit and rc:
            ok = cib_factory.commit()
            if not ok:
                common_error("Failed to commit updates to %s" % (rsc))
            return ok
        return rc

    @command.wait
    @command.completers(compl.resources)
    def do_start(self, context, *resources):
        "usage: start <rsc> [<rsc> ...]"
        if len(resources) == 0:
            context.error("Expected at least one resource as argument")
        return self._commit_meta_attrs(context, resources, "target-role", "Started")

    @command.wait
    @command.completers(compl.resources)
    def do_stop(self, context, *resources):
        "usage: stop <rsc> [<rsc> ...]"
        if len(resources) == 0:
            context.error("Expected at least one resource as argument")
        return self._commit_meta_attrs(context, resources, "target-role", "Stopped")

    @command.wait
    @command.completers(compl.resources)
    def do_restart(self, context, *resources):
        "usage: restart <rsc> [<rsc> ...]"
        common_info("ordering %s to stop" % ", ".join(resources))
        if not self._commit_meta_attrs(context, resources, "target-role", "Stopped"):
            return False
        # Wait for the DC to finish the stop transition before starting.
        if not utils.wait4dc("stop", not options.batch):
            return False
        common_info("ordering %s to start" % ", ".join(resources))
        return self._commit_meta_attrs(context, resources, "target-role", "Started")

    @command.wait
    @command.completers(compl.resources)
    def do_promote(self, context, rsc):
        "usage: promote <rsc>"
        if not utils.is_name_sane(rsc):
            return False
        if not xmlutil.RscState().is_ms(rsc):
            common_err("%s is not a master-slave resource" % rsc)
            return False
        return utils.ext_cmd(self.rsc_setrole % (rsc, "Master")) == 0

    def do_scores(self, context):
        "usage: scores"
        # Prefer crm_simulate; ptest is the legacy equivalent.
        if utils.is_program('crm_simulate'):
            utils.ext_cmd('crm_simulate -sL')
        elif utils.is_program('ptest'):
            utils.ext_cmd('ptest -sL')
        else:
            context.fatal_error("Need crm_simulate or ptest in path to display scores")

    @command.wait
    @command.completers(compl.resources)
    def do_demote(self, context, rsc):
        "usage: demote <rsc>"
        if not utils.is_name_sane(rsc):
            return False
        if not xmlutil.RscState().is_ms(rsc):
            common_err("%s is not a master-slave resource" % rsc)
            return False
        return utils.ext_cmd(self.rsc_setrole % (rsc, "Slave")) == 0

    @command.completers(compl.resources)
    def do_manage(self, context, rsc):
        "usage: manage <rsc>"
        return self._commit_meta_attr(context, rsc, "is-managed", "true")

    @command.completers(compl.resources)
    def do_unmanage(self, context, rsc):
        "usage: unmanage <rsc>"
        return self._commit_meta_attr(context, rsc, "is-managed", "false")

    @command.alias('move')
    @command.skill_level('administrator')
    @command.wait
    @command.completers_repeating(compl.resources, compl.nodes,
                                  compl.choice(['reboot', 'forever', 'force']))
    def do_migrate(self, context, rsc, *args):
        """usage: migrate <rsc> [<node>] [<lifetime>] [force]"""
        if not utils.is_name_sane(rsc):
            return False
        node = None
        argl = list(args)
        force = "force" in utils.fetch_opts(argl, ["force"])
        lifetime = utils.fetch_lifetime_opt(argl)
        # Whatever remains after option extraction is the target node.
        if len(argl) > 0:
            node = argl[0]
            if not xmlutil.is_our_node(node):
                context.fatal_error("Not our node: " + node)
        opts = ''
        if node:
            opts = "--node='%s'" % node
        if lifetime:
            opts = "%s --lifetime='%s'" % (opts, lifetime)
        if force or config.core.force:
            opts = "%s --force" % opts
        return utils.ext_cmd(self.rsc_migrate % (rsc, opts)) == 0

    @command.skill_level('administrator')
    @command.wait
    @command.completers_repeating(compl.resources, compl.nodes)
    def do_ban(self, context, rsc, *args):
        """usage: ban <rsc> [<node>] [<lifetime>] [force]"""
        if not utils.is_name_sane(rsc):
            return False
        node = None
        argl = list(args)
        force = "force" in utils.fetch_opts(argl, ["force"])
        lifetime = utils.fetch_lifetime_opt(argl)
        if len(argl) > 0:
            node = argl[0]
            if not xmlutil.is_our_node(node):
                context.fatal_error("Not our node: " + node)
        opts = ''
        if node:
            opts = "--node='%s'" % node
        if lifetime:
            opts = "%s --lifetime='%s'" % (opts, lifetime)
        if force or config.core.force:
            opts = "%s --force" % opts
        return utils.ext_cmd(self.rsc_ban % (rsc, opts)) == 0

    @command.alias('unmove', 'unban')
    @command.skill_level('administrator')
    @command.wait
    @command.completers(compl.resources)
    def do_unmigrate(self, context, rsc):
        "usage: unmigrate <rsc>"
        if not utils.is_name_sane(rsc):
            return False
        return utils.ext_cmd(self.rsc_unmigrate % rsc) == 0

    @command.skill_level('administrator')
    @command.wait
    @command.completers(compl.resources, compl.nodes)
    def do_cleanup(self, context, resource, node=''):
        "usage: cleanup <rsc> [<node>]"
        # Cleanup a resource on a node. Omit node to cleanup on
        # all live nodes.
        return cleanup_resource(resource, node)

    @command.wait
    @command.completers(compl.resources, compl.nodes)
    def do_operations(self, context, resource=None, node=None):
        "usage: operations [<rsc>] [<node>]"
        cmd = "crm_resource -O"
        if resource is None:
            return utils.ext_cmd(cmd)
        if node is None:
            return utils.ext_cmd("%s -r '%s'" % (cmd, resource))
        return utils.ext_cmd("%s -r '%s' -N '%s'" % (cmd, resource, node))

    @command.wait
    @command.completers(compl.resources)
    def do_constraints(self, context, resource):
        "usage: constraints <rsc>"
        return utils.ext_cmd("crm_resource -a -r '%s'" % (resource))

    @command.wait
    @command.completers(compl.resources, _attrcmds, compl.nodes)
    def do_failcount(self, context, rsc, cmd, node, value=None):
        """usage:
        failcount <rsc> set <node> <value>
        failcount <rsc> delete <node>
        failcount <rsc> show <node>"""
        return ui_utils.manage_attr(context.get_command_name(), self.rsc_failcount,
                                    rsc, cmd, node, value)

    @command.skill_level('administrator')
    @command.wait
    @command.completers(compl.resources, _attrcmds)
    def do_param(self, context, rsc, cmd, param, value=None):
        """usage:
        param <rsc> set <param> <value>
        param <rsc> delete <param>
        param <rsc> show <param>"""
        return ui_utils.manage_attr(context.get_command_name(), self.rsc_param,
                                    rsc, cmd, param, value)

    @command.skill_level('administrator')
    @command.completers(compl.resources,
                        compl.choice(['set', 'stash', 'unstash', 'delete', 'show', 'check']))
    def do_secret(self, context, rsc, cmd, param, value=None):
        """usage:
        secret <rsc> set <param> <value>
        secret <rsc> stash <param>
        secret <rsc> unstash <param>
        secret <rsc> delete <param>
        secret <rsc> show <param>
        secret <rsc> check <param>"""
        return ui_utils.manage_attr(context.get_command_name(), self.rsc_secret,
                                    rsc, cmd, param, value)

    @command.skill_level('administrator')
    @command.wait
    @command.completers(compl.resources, _attrcmds)
    def do_meta(self, context, rsc, cmd, attr, value=None):
        """usage:
        meta <rsc> set <attr> <value>
        meta <rsc> delete <attr>
        meta <rsc> show <attr>"""
        return ui_utils.manage_attr(context.get_command_name(), self.rsc_meta,
                                    rsc, cmd, attr, value)

    @command.skill_level('administrator')
    @command.wait
    @command.completers(compl.resources, _attrcmds)
    def do_utilization(self, context, rsc, cmd, attr, value=None):
        """usage:
        utilization <rsc> set <attr> <value>
        utilization <rsc> delete <attr>
        utilization <rsc> show <attr>"""
        return ui_utils.manage_attr(context.get_command_name(), self.rsc_utilization,
                                    rsc, cmd, attr, value)

    @command.completers(compl.nodes)
    def do_refresh(self, context, *args):
        'usage: refresh [<node>]'
        if len(args) == 1:
            if not utils.is_name_sane(args[0]):
                return False
            return utils.ext_cmd(self.rsc_refresh_node % args[0]) == 0
        else:
            return utils.ext_cmd(self.rsc_refresh) == 0

    @command.wait
    @command.completers(compl.nodes)
    def do_reprobe(self, context, *args):
        'usage: reprobe [<node>]'
        if len(args) == 1:
            if not utils.is_name_sane(args[0]):
                return False
            return utils.ext_cmd(self.rsc_reprobe_node % args[0]) == 0
        else:
            return utils.ext_cmd(self.rsc_reprobe) == 0

    @command.wait
    @command.completers(compl.resources, compl.choice(['on', 'off', 'true', 'false']))
    def do_maintenance(self, context, resource, on_off='true'):
        'usage: maintenance <resource> [on|off|true|false]'
        # Normalize on/off synonyms to the true/false the CIB expects.
        on_off = on_off.lower()
        if on_off not in ('on', 'true', 'off', 'false'):
            context.fatal_error("Expected <resource> [on|off|true|false]")
        elif on_off in ('on', 'true'):
            on_off = 'true'
        else:
            on_off = 'false'
        return utils.ext_cmd(self.rsc_maintenance % (resource, on_off)) == 0

    def _get_trace_rsc(self, rsc_id):
        # Resolve rsc_id to a CIB primitive object, or None with an error
        # message (tracing only makes sense for primitives).
        if not cib_factory.refresh():
            return None
        rsc = cib_factory.find_object(rsc_id)
        if not rsc:
            common_err("resource %s does not exist" % rsc_id)
            return None
        if rsc.obj_type != "primitive":
            common_err("element %s is not a primitive resource" % rsc_id)
            return None
        return rsc

    def _add_trace_op(self, rsc, op, interval):
        # Create a new <op> element with tracing enabled and attach it.
        from lxml import etree
        n = etree.Element('op')
        n.set('name', op)
        n.set('interval', interval)
        n.set(constants.trace_ra_attr, '1')
        return rsc.add_operation(n)

    def _trace_resource(self, context, rsc_id, rsc):
        # Enable tracing on all existing operations, creating start/stop
        # (and promote/demote for master-slave) operations if missing.
        op_nodes = rsc.node.xpath('.//op')

        def trace(name):
            for o in op_nodes:
                if o.get('name') == name:
                    return
            if not self._add_trace_op(rsc, name, '0'):
                context.fatal_error("Failed to add trace for %s:%s" % (rsc_id, name))
        trace('start')
        trace('stop')
        if xmlutil.is_ms(rsc.node):
            trace('promote')
            trace('demote')
        for op_node in op_nodes:
            rsc.set_op_attr(op_node, constants.trace_ra_attr, "1")

    def _trace_op(self, context, rsc_id, rsc, op):
        # Enable tracing on every configured instance of operation `op`,
        # creating one (interval 0) if none is configured.
        op_nodes = rsc.node.xpath('.//op[@name="%s"]' % (op))
        if not op_nodes:
            if op == 'monitor':
                context.fatal_error("No monitor operation configured for %s" % (rsc_id))
            if not self._add_trace_op(rsc, op, '0'):
                context.fatal_error("Failed to add trace for %s:%s" % (rsc_id, op))
        for op_node in op_nodes:
            rsc.set_op_attr(op_node, constants.trace_ra_attr, "1")

    def _trace_op_interval(self, context, rsc_id, rsc, op, interval):
        # Enable tracing on the specific op/interval pair; a missing
        # non-zero interval is an error, a missing zero interval is added.
        op_node = xmlutil.find_operation(rsc.node, op, interval)
        if op_node is None and utils.crm_msec(interval) != 0:
            context.fatal_error("Operation %s with interval %s not found in %s" % (op, interval, rsc_id))
        if op_node is None:
            if not self._add_trace_op(rsc, op, interval):
                context.fatal_error("Failed to add trace for %s:%s" % (rsc_id, op))
        else:
            rsc.set_op_attr(op_node, constants.trace_ra_attr, "1")

    @command.completers(compl.primitives, _raoperations)
    def do_trace(self, context, rsc_id, op=None, interval=None):
        'usage: trace <rsc> [<op>] [<interval>]'
        rsc = self._get_trace_rsc(rsc_id)
        if not rsc:
            return False
        if op == "probe":
            # "probe" is shorthand for the interval-0 monitor operation.
            # NOTE(review): nesting of the interval default under the probe
            # branch reconstructed from context -- confirm against upstream.
            op = "monitor"
            if interval is None:
                interval = "0"
        if op is None:
            self._trace_resource(context, rsc_id, rsc)
        elif interval is None:
            self._trace_op(context, rsc_id, rsc, op)
        else:
            self._trace_op_interval(context, rsc_id, rsc, op, interval)
        if not cib_factory.commit():
            return False
        if op is not None:
            common_info("Trace for %s:%s is written to %s/trace_ra/" %
                        (rsc_id, op, config.path.heartbeat_dir))
        else:
            common_info("Trace for %s is written to %s/trace_ra/" %
                        (rsc_id, config.path.heartbeat_dir))
        if op is not None and op != "monitor":
            common_info("Trace set, restart %s to trace the %s operation" % (rsc_id, op))
        else:
            common_info("Trace set, restart %s to trace non-monitor operations" % (rsc_id))
        return True

    def _remove_trace(self, rsc, op_node):
        # Strip the trace attribute; drop the op element entirely if it
        # only existed to carry the trace flag.
        from lxml import etree
        common_debug("op_node: %s" % (etree.tostring(op_node)))
        op_node = rsc.del_op_attr(op_node, constants.trace_ra_attr)
        if rsc.is_dummy_operation(op_node):
            rsc.del_operation(op_node)

    @command.completers(compl.primitives, _raoperations)
    def do_untrace(self, context, rsc_id, op=None, interval=None):
        'usage: untrace <rsc> [<op>] [<interval>]'
        rsc = self._get_trace_rsc(rsc_id)
        if not rsc:
            return False
        if op == "probe":
            op = "monitor"
        if op is None:
            # Remove tracing everywhere: both direct trace attributes and
            # nvpair-style ones carried inside instance_attributes of ops.
            n = 0
            for tn in rsc.node.xpath('.//*[@%s]' % (constants.trace_ra_attr)):
                self._remove_trace(rsc, tn)
                n += 1
            for tn in rsc.node.xpath('.//*[@name="%s"]' % (constants.trace_ra_attr)):
                if tn.getparent().getparent().tag == 'op':
                    self._remove_trace(rsc, tn.getparent().getparent())
                    n += 1
        else:
            op_node = xmlutil.find_operation(rsc.node, op, interval=interval)
            if op_node is None:
                common_err("operation %s does not exist in %s" % (op, rsc.obj_id))
                return False
            self._remove_trace(rsc, op_node)
        return cib_factory.commit()
| gpl-2.0 |
lshain-android-source/external-chromium_org | tools/telemetry/telemetry/core/extension_unittest.py | 23 | 6764 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import tempfile
import unittest
from telemetry.core import browser_finder
from telemetry.core import extension_to_load
from telemetry.core.chrome import extension_dict_backend
from telemetry.unittest import options_for_unittests
class ExtensionTest(unittest.TestCase):
  """Loads a simple extension into a real browser and drives its JS context."""

  def setUp(self):
    # Locate the checked-in test extension relative to this file.
    extension_path = os.path.join(os.path.dirname(__file__),
                                  '..', '..', 'unittest_data', 'simple_extension')
    options = options_for_unittests.GetCopy()
    load_extension = extension_to_load.ExtensionToLoad(
        extension_path, options.browser_type)
    options.extensions_to_load = [load_extension]
    browser_to_create = browser_finder.FindBrowser(options)
    # Tests check these for None to decide whether to skip.
    self._browser = None
    self._extension = None
    if not browser_to_create:
      # May not find a browser that supports extensions.
      return
    self._browser = browser_to_create.Create()
    self._browser.Start()
    self._extension = self._browser.extensions[load_extension]
    self.assertTrue(self._extension)

  def tearDown(self):
    if self._browser:
      self._browser.Close()

  def testExtensionBasic(self):
    """Test ExtensionPage's ExecuteJavaScript and EvaluateJavaScript."""
    if not self._extension:
      logging.warning('Did not find a browser that supports extensions, '
                      'skipping test.')
      return
    # Round-trip a value through the extension's JS context.
    self._extension.ExecuteJavaScript('setTestVar("abcdef")')
    self.assertEquals('abcdef',
                      self._extension.EvaluateJavaScript('_testVar'))

  def testDisconnect(self):
    """Test that ExtensionPage.Disconnect exists by calling it.

    EvaluateJavaScript should reconnect."""
    if not self._extension:
      logging.warning('Did not find a browser that supports extensions, '
                      'skipping test.')
      return
    self._extension.Disconnect()
    self.assertEquals(2, self._extension.EvaluateJavaScript('1+1'))
class NonExistentExtensionTest(unittest.TestCase):
  """Error handling for missing extension paths and unloaded extensions."""

  def testNonExistentExtensionPath(self):
    """Test that a non-existent extension path will raise an exception."""
    extension_path = os.path.join(os.path.dirname(__file__),
                                  '..', '..', 'unittest_data', 'foo')
    options = options_for_unittests.GetCopy()
    self.assertRaises(extension_to_load.ExtensionPathNonExistentException,
                      lambda: extension_to_load.ExtensionToLoad(
                          extension_path, options.browser_type))

  def testExtensionNotLoaded(self):
    """Querying an extension that was not loaded will return None"""
    extension_path = os.path.join(os.path.dirname(__file__),
                                  '..', '..', 'unittest_data', 'simple_extension')
    options = options_for_unittests.GetCopy()
    load_extension = extension_to_load.ExtensionToLoad(
        extension_path, options.browser_type)
    # Note: load_extension is deliberately NOT added to
    # options.extensions_to_load, so the lookup below must fail.
    browser_to_create = browser_finder.FindBrowser(options)
    with browser_to_create.Create() as b:
      b.Start()
      if b.supports_extensions:
        self.assertRaises(extension_dict_backend.ExtensionNotFoundException,
                          lambda: b.extensions[load_extension])
class MultipleExtensionTest(unittest.TestCase):
  """Loads several copies of the simple extension and checks each one works."""

  def setUp(self):
    """ Copy the manifest and background.js files of simple_extension to a
    number of temporary directories to load as extensions"""
    self._extension_dirs = [tempfile.mkdtemp()
                            for i in range(3)]  # pylint: disable=W0612
    src_extension_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
        '..', '..', 'unittest_data', 'simple_extension'))
    manifest_path = os.path.join(src_extension_dir, 'manifest.json')
    script_path = os.path.join(src_extension_dir, 'background.js')
    for d in self._extension_dirs:
      shutil.copy(manifest_path, d)
      shutil.copy(script_path, d)
    options = options_for_unittests.GetCopy()
    self._extensions_to_load = [extension_to_load.ExtensionToLoad(
                                    d, options.browser_type)
                                for d in self._extension_dirs]
    options.extensions_to_load = self._extensions_to_load
    browser_to_create = browser_finder.FindBrowser(options)
    self._browser = None
    # May not find a browser that supports extensions.
    if browser_to_create:
      self._browser = browser_to_create.Create()
      self._browser.Start()

  def tearDown(self):
    if self._browser:
      self._browser.Close()
    # Always clean up the temp dirs, even when no browser was found.
    for d in self._extension_dirs:
      shutil.rmtree(d)

  def testMultipleExtensions(self):
    if not self._browser:
      logging.warning('Did not find a browser that supports extensions, '
                      'skipping test.')
      return
    # Test contains. Use a list comprehension rather than filter(): on
    # Python 3, filter() returns an iterator and len() on it raises
    # TypeError.
    loaded_extensions = [e for e in self._extensions_to_load
                         if e in self._browser.extensions]
    self.assertEqual(len(loaded_extensions), len(self._extensions_to_load))
    for load_extension in self._extensions_to_load:
      extension = self._browser.extensions[load_extension]
      assert extension
      extension.ExecuteJavaScript('setTestVar("abcdef")')
      self.assertEquals('abcdef', extension.EvaluateJavaScript('_testVar'))
class ComponentExtensionTest(unittest.TestCase):
  """Component-extension loading: requires a public key in the manifest."""

  def testComponentExtensionBasic(self):
    extension_path = os.path.join(os.path.dirname(__file__),
                                  '..', '..', 'unittest_data', 'component_extension')
    options = options_for_unittests.GetCopy()
    load_extension = extension_to_load.ExtensionToLoad(
        extension_path, options.browser_type, is_component=True)
    options.extensions_to_load = [load_extension]
    browser_to_create = browser_finder.FindBrowser(options)
    if not browser_to_create:
      logging.warning('Did not find a browser that supports extensions, '
                      'skipping test.')
      return
    with browser_to_create.Create() as b:
      b.Start()
      extension = b.extensions[load_extension]
      extension.ExecuteJavaScript('setTestVar("abcdef")')
      self.assertEquals('abcdef', extension.EvaluateJavaScript('_testVar'))

  def testComponentExtensionNoPublicKey(self):
    # simple_extension does not have a public key.
    extension_path = os.path.join(os.path.dirname(__file__),
                                  '..', '..', 'unittest_data', 'simple_extension')
    options = options_for_unittests.GetCopy()
    self.assertRaises(extension_to_load.MissingPublicKeyException,
                      lambda: extension_to_load.ExtensionToLoad(
                          extension_path,
                          browser_type=options.browser_type,
                          is_component=True))
| bsd-3-clause |
yongtang/tensorflow | tensorflow/python/ops/nn.py | 47 | 1787 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Neural network support.
See the [Neural network](https://tensorflow.org/api_guides/python/nn) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
# pylint: disable=unused-import
from tensorflow.python.ops import ctc_ops as _ctc_ops
from tensorflow.python.ops import embedding_ops as _embedding_ops
from tensorflow.python.ops import nn_grad as _nn_grad
from tensorflow.python.ops import nn_ops as _nn_ops
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
# pylint: enable=unused-import
# Bring more nn-associated functionality into this package.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.ctc_ops import *
from tensorflow.python.ops.nn_impl import *
from tensorflow.python.ops.nn_ops import *
from tensorflow.python.ops.candidate_sampling_ops import *
from tensorflow.python.ops.embedding_ops import *
# pylint: enable=wildcard-import,unused-import
| apache-2.0 |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/kernel_tests/string_join_op_test.py | 134 | 1896 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringJoinOpTest(test.TestCase):
  """Exercises string_ops.string_join over scalar, vector and nested inputs."""

  def testStringJoin(self):
    vec = ["a", "b"]
    scalar = "a"
    nested = [["b"], ["c"]]

    with self.test_session():
      # Default separator is the empty string; scalars broadcast.
      joined = string_ops.string_join([vec, scalar])
      self.assertAllEqual(joined.eval(), [b"aa", b"ba"])

      joined = string_ops.string_join([vec, scalar], separator="--")
      self.assertAllEqual(joined.eval(), [b"a--a", b"b--a"])

      joined = string_ops.string_join([vec, scalar, vec], separator="--")
      self.assertAllEqual(joined.eval(), [b"a--a--a", b"b--a--b"])

      joined = string_ops.string_join([scalar] * 4, separator="!")
      self.assertEqual(joined.eval(), b"a!a!a!a")

      joined = string_ops.string_join([nested] * 2, separator="")
      self.assertAllEqual(joined.eval(), [[b"bb"], [b"cc"]])

      with self.assertRaises(ValueError):  # Inconsistent shapes
        string_ops.string_join([vec, nested]).eval()
# Allow running this test file directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
roadmapper/ansible | lib/ansible/modules/network/f5/bigip_traffic_selector.py | 38 | 15443 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_traffic_selector
short_description: Manage IPSec Traffic Selectors on BIG-IP
description:
- Manage IPSec Traffic Selectors on BIG-IP.
version_added: 2.8
options:
name:
description:
- Specifies the name of the traffic selector.
type: str
required: True
destination_address:
description:
- Specifies the host or network IP address to which the application traffic is destined.
- When creating a new traffic selector, this parameter is required.
type: str
source_address:
description:
- Specifies the host or network IP address from which the application traffic originates.
- When creating a new traffic selector, this parameter is required.
type: str
ipsec_policy:
description:
- Specifies the IPsec policy that tells the BIG-IP system how to handle the packets.
- When creating a new traffic selector, if this parameter is not specified, the default
is C(default-ipsec-policy).
type: str
order:
description:
- Specifies the order in which traffic is matched, if traffic can be matched to multiple
traffic selectors.
- Traffic is matched to the traffic selector with the highest priority (lowest order number).
- When creating a new traffic selector, if this parameter is not specified, the default
is C(last).
type: int
description:
description:
- Description of the traffic selector.
type: str
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a traffic selector
bigip_traffic_selector:
name: selector1
destination_address: 1.1.1.1
ipsec_policy: policy1
order: 1
source_address: 2.2.2.2
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
destination_address:
description: The new Destination IP Address.
returned: changed
type: str
sample: 1.2.3.4/32
source_address:
description: The new Source IP address.
returned: changed
type: str
sample: 2.3.4.5/32
ipsec_policy:
description: The new IPSec policy.
returned: changed
type: str
sample: /Common/policy1
order:
description: The new sort order.
returned: changed
type: int
sample: 1
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.compat.ipaddress import ip_interface
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.compat.ipaddress import ip_interface
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
    # Maps the REST API's camelCase attribute names to the module's
    # snake_case parameter names.
    api_map = {
        'destinationAddress': 'destination_address',
        'sourceAddress': 'source_address',
        'ipsecPolicy': 'ipsec_policy',
    }

    # Attribute names (API-side spelling) sent to the device on create/update.
    api_attributes = [
        'destinationAddress',
        'sourceAddress',
        'ipsecPolicy',
        'order',
        'description',
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'destination_address',
        'source_address',
        'ipsec_policy',
        'order',
        'description',
    ]

    # Parameters diffed against device state to decide whether to update.
    updatables = [
        'destination_address',
        'source_address',
        'ipsec_policy',
        'order',
        'description',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def description(self):
        # The API reports an unset description as the literal string 'none';
        # normalize both that and an absent value to None.
        value = self._values['description']
        if value is None or value == 'none':
            return None
        return value
class ModuleParameters(Parameters):
    """Parameters as supplied by the user to the Ansible module."""

    @property
    def ipsec_policy(self):
        # Fully qualify the policy name with the partition, e.g. /Common/policy1.
        if self._values['ipsec_policy'] is None:
            return None
        return fq_name(self.partition, self._values['ipsec_policy'])

    @property
    def destination_address(self):
        result = self._format_address('destination_address')
        if result == -1:
            raise F5ModuleError(
                "No IP address found in 'destination_address'."
            )
        return result

    @property
    def source_address(self):
        result = self._format_address('source_address')
        if result == -1:
            raise F5ModuleError(
                "No IP address found in 'source_address'."
            )
        return result

    @property
    def description(self):
        # '' is meaningful (it clears the description on the device), so it
        # is kept distinct from None (parameter not supplied at all).
        if self._values['description'] is None:
            return None
        elif self._values['description'] in ['none', '']:
            return ''
        return self._values['description']

    def _format_address(self, param_name):
        """Normalize an address parameter to 'addr[%rd]/prefixlen' form.

        :param param_name: key of the raw user value, either
            'source_address' or 'destination_address'.
            (Renamed from ``type``, which shadowed the builtin.)
        :returns: None when the value is absent, -1 when no address could
            be extracted, otherwise the normalized CIDR string, preserving
            a %route-domain suffix when one was given.
        """
        if self._values[param_name] is None:
            return None
        pattern = r'(?P<addr>[^%/]+)(%(?P<rd>\d+))?(/(?P<cidr>\d+))?'
        if '%' in self._values[param_name]:
            # Handle route domains, e.g. "1.2.3.4%2/24".
            matches = re.match(pattern, self._values[param_name])
            if not matches:
                return None
            addr = matches.group('addr')
            if addr is None:
                return -1
            cidr = matches.group('cidr')
            rd = matches.group('rd')
            if cidr is not None:
                ip = ip_interface(u'{0}/{1}'.format(addr, cidr))
            else:
                ip = ip_interface(u'{0}'.format(addr))
            if rd:
                result = '{0}%{1}/{2}'.format(str(ip.ip), rd, ip.network.prefixlen)
            else:
                result = '{0}/{1}'.format(str(ip.ip), ip.network.prefixlen)
            return result
        # No route domain: let ip_interface normalize 'addr[/cidr]' directly.
        return str(ip_interface(u'{0}'.format(self._values[param_name])))
class Changes(Parameters):
    def to_return(self):
        """Build a dict of the returnable values for the module result."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        # NOTE(review): deliberate broad swallow, matching the common F5
        # module boilerplate — any error while computing a returnable
        # silently yields a partial/empty result, which can hide bugs in
        # property code.
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    # Changes in the form sent to the BIG-IP REST API.
    pass
class ReportableChanges(Changes):
    # Changes in the form reported back to the Ansible user.
    pass
class Difference(object):
    """Determines which desired values differ from the device's state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a dedicated comparison property when one is defined on this
        # class; otherwise fall back to a plain inequality check.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            # No current value available; report the desired one as a change.
            return wanted
        if wanted != current:
            return wanted

    @property
    def description(self):
        # Descriptions need special 'none'/empty handling, hence the helper.
        return cmp_str_with_none(self.want.description, self.have.description)
class ModuleManager(object):
    """Orchestrates create/update/delete of a traffic selector via the iControl REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)  # desired state
        self.have = ApiParameters()  # device state; populated by update()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create: every user-supplied returnable counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff desired vs device state; return True when anything differs."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict result lets a comparison report several changed keys.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: dispatch on the desired state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface any deprecation warnings queued under '__warnings'.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True if the selector exists on the device.

        Any unparseable response or a 404 is treated as "does not exist".
        """
        uri = "https://{0}:{1}/mgmt/tm/net/ipsec/traffic-selector/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report "would change" without touching the device.
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """POST the new selector; raise F5ModuleError on a 400/403 reply."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/net/ipsec/traffic-selector/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH only the changed attributes; raise F5ModuleError on a 400 reply."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/net/ipsec/traffic-selector/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        # DELETE the selector; returns True on HTTP 200, otherwise None —
        # remove() re-checks exists() to detect a failed delete.
        uri = "https://{0}:{1}/mgmt/tm/net/ipsec/traffic-selector/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True

    def read_current_from_device(self):
        """GET the selector's current config and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/net/ipsec/traffic-selector/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Assembles the argument specification passed to AnsibleModule."""

    def __init__(self):
        self.supports_check_mode = True
        own_args = dict(
            name=dict(required=True),
            destination_address=dict(),
            source_address=dict(),
            ipsec_policy=dict(),
            order=dict(type='int'),
            description=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Shared F5 connection options first, module-specific options on top.
        spec = {}
        spec.update(f5_argument_spec)
        spec.update(own_args)
        self.argument_spec = spec
def main():
    """Module entry point: build the spec, run the manager, exit via Ansible."""
    arg_spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=arg_spec.argument_spec,
        supports_check_mode=arg_spec.supports_check_mode,
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
mychris/xbmc-command | xbmc_command/play_music.py | 1 | 5180 | # -*- coding: utf-8 -*-
import argparse
import re
from . import core
class Command(core.Command):
    """Start music playback on XBMC/Kodi, filtered by a regex.

    Exactly one of --artist, --album or --genre selects which library
    field the (case-insensitive) regex is matched against; the matching
    items replace the current audio playlist and playback starts.
    """

    def call(self, args):
        # Dispatch on whichever mutually-exclusive option was supplied.
        if args.artist:
            self.__call_artist(args.artist, args.dry)
        elif args.album:
            self.__call_album(args.album, args.dry)
        elif args.genre:
            self.__call_genre(args.genre, args.dry)

    def __call_artist(self, artist_regex, dry):
        """Play (or list, when dry) all artists whose name matches the regex."""
        regex = self.__try_regex(artist_regex)
        sort = {
            'order': 'ascending',
            'ignorearticle': True,
            'method': 'artist'
        }
        self.xbmc.AudioLibrary.GetArtists(params={'sort': sort})
        artists = self.xbmc.recv('AudioLibrary.GetArtists')['result']['artists']
        artists = [a for a in artists if regex.search(a['artist']) is not None]
        if not artists:
            raise core.CommandException("No artists found for regex '%s'" %
                                        artist_regex)
        if dry:
            for artist in artists:
                print((artist['artist'].encode('utf-8')))
            return
        self.__play({'artist': [a['artistid'] for a in artists]})

    def __call_album(self, album_regex, dry):
        """Play (or list, when dry) all albums whose label matches the regex."""
        regex = self.__try_regex(album_regex)
        sort = {
            'order': 'ascending',
            'ignorearticle': True,
            'method': 'album'
        }
        self.xbmc.AudioLibrary.GetAlbums(params={'sort': sort})
        albums = self.xbmc.recv('AudioLibrary.GetAlbums')
        albums = albums['result']['albums']
        albums = [a for a in albums if regex.search(a['label']) is not None]
        if not albums:
            raise core.CommandException("No albums found for regex '%s'" %
                                        album_regex)
        if dry:
            for album in albums:
                print((album['label'].encode('utf-8')))
            return
        self.__play({'album': [a['albumid'] for a in albums]})

    def __call_genre(self, genre_regex, dry):
        """Play (or list, when dry) all genres whose label matches the regex."""
        regex = self.__try_regex(genre_regex)
        sort = {
            'order': 'ascending',
            'ignorearticle': False,
            'method': 'genre'
        }
        self.xbmc.AudioLibrary.GetGenres(params={'sort': sort})
        genres = self.xbmc.recv('AudioLibrary.GetGenres')['result']['genres']
        genres = [g for g in genres if regex.search(g['label']) is not None]
        if not genres:
            raise core.CommandException("No genres found for regex '%s'" %
                                        genre_regex)
        if dry:
            for genre in genres:
                print((genre['label'].encode('utf-8')))
            return
        self.__play({'genre': [g['genreid'] for g in genres]})

    def __play(self, play_dict):
        """Clear the audio playlist, queue the given items, and start playback."""
        audio_pl = self.__get_playlist()
        self.xbmc.Playlist.Clear(params={'playlistid': audio_pl})
        for item_type, item_ids in list(play_dict.items()):
            if item_type == 'album':
                item_type = 'albumid'
            elif item_type == 'artist':
                item_type = 'artistid'
            elif item_type == 'genre':
                item_type = 'genreid'
            # Insert in reverse at position 0 so the final order matches the
            # library sort order.
            for item_id in item_ids[::-1]:
                self.xbmc.Playlist.Insert(params={
                    'playlistid': audio_pl,
                    'item': {item_type: item_id},
                    'position': 0
                })
        self.xbmc.Player.Open(params={'item': {'playlistid': audio_pl}})

    def __try_regex(self, reg):
        """Compile *reg* case-insensitively; raise CommandException when invalid."""
        regex_flags = re.IGNORECASE | re.UNICODE
        try:
            return re.compile(reg, regex_flags)
        except re.error:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only invalid patterns raise re.error.
            raise core.CommandException("Invalid regex '%s'" % reg)

    def __get_playlist(self):
        """Return the playlist id of XBMC's audio playlist."""
        self.xbmc.Playlist.GetPlaylists()
        playlists = self.xbmc.recv("Playlist.GetPlaylists")
        filtered = list([pl for pl in playlists['result'] if pl['type'] == 'audio'])
        return filtered[0]['playlistid']

    def create_parser(self, parser):
        parser.prog = '%s play-music' % core.PROG
        parser.formatter_class = argparse.RawTextHelpFormatter
        # Typos fixed in the help text: "specifiy" -> "specify", "tak" -> "take".
        parser.description = '''Start playing music.
You can either specify an artist, album or genre regex.
The MusicLibrary will be scanned and filtered with the specified regex.
The remaining items will be played by the AudioPlayer. If the regex matches too many items, the process may take a while, depending
on the size of your MusicLibrary.'''
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument('--artist', metavar='<regex>',
                           help='regex for artist filtering')
        group.add_argument('--album', metavar='<regex>',
                           help='regex for album filtering')
        group.add_argument('--genre', metavar='<regex>',
                           help='regex for genre filtering')
        parser.add_argument('--dry', action='store_true', default=False,
                            help='prints the filtered items, but does not start playing them')

    @property
    def short_description(self):
        return 'Start playing music'
# vim: ft=python ts=8 sts=4 sw=4 et:
| gpl-3.0 |
allenp/odoo | addons/mrp_operations/__openerp__.py | 45 | 2001 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Manufacturing Operations',
'version': '1.0',
'category': 'Manufacturing',
'description': """
This module adds state, date_start, date_stop in manufacturing order operation lines (in the 'Work Orders' tab).
================================================================================================================
Status: draft, confirm, done, cancel
When finishing/confirming, cancelling manufacturing orders set all state lines
to the according state.
Create menus:
-------------
**Manufacturing** > **Manufacturing** > **Work Orders**
Which is a view on 'Work Orders' lines in manufacturing order.
Add buttons in the form view of manufacturing order under workorders tab:
-------------------------------------------------------------------------
* start (set state to confirm), set date_start
* done (set state to done), set date_stop
* set to draft (set state to draft)
* cancel set state to cancel
When the manufacturing order becomes 'ready to produce', operations must
become 'confirmed'. When the manufacturing order is done, all operations
must become done.
The field 'Working Hours' is the delay(stop date - start date).
So, that we can compare the theoretic delay and real delay.
""",
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['mrp'],
'data': [
'data/report_paperformat.xml',
'security/ir.model.access.csv',
'mrp_operation_data.xml',
'mrp_operations_workflow.xml',
'mrp_operations_view.xml',
'mrp_operations_report.xml',
'report/mrp_workorder_analysis_view.xml',
'views/report_wcbarcode.xml',
'mrp_operations_workflow_instance.xml'
],
'demo': [
'mrp_operations_demo.yml'
],
'test': [
'test/workcenter_operations.yml',
],
'installable': True,
'auto_install': False,
}
| gpl-3.0 |
pjg101/SickRage | lib/pbr/core.py | 10 | 5507 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from distutils import core
from distutils import errors
import logging
import os
import sys
import warnings
from pbr import util
# Python 2/3 compatibility aliases used below to validate user-supplied
# metadata ("is it a string?", "is it an integer?") without depending on six.
if sys.version_info[0] == 3:
    string_type = str
    integer_types = (int,)
else:
    string_type = basestring  # flake8: noqa
    integer_types = (int, long)  # flake8: noqa
def pbr(dist, attr, value):
    """Implements the actual pbr setup() keyword.

    When used, this should be the only keyword in your setup() aside from
    `setup_requires`.

    If given as a string, the value of pbr is assumed to be the relative path
    to the setup.cfg file to use.  Otherwise, if it evaluates to true, it
    simply assumes that pbr should be used, and the default 'setup.cfg' is
    used.

    This works by reading the setup.cfg file, parsing out the supported
    metadata and command options, and using them to rebuild the
    `DistributionMetadata` object and set the newly added command options.

    The reason for doing things this way is that a custom `Distribution` class
    will not play nicely with setup_requires; however, this implementation may
    not work well with distributions that do use a `Distribution` subclass.

    :param dist: the distutils/setuptools Distribution being configured.
    :param attr: the keyword name ('pbr'); required by the entry-point API
        but unused here.
    :param value: the keyword's value (path string or truthy flag).
    :raises errors.DistutilsFileError: if the setup.cfg file does not exist.
    :raises errors.DistutilsSetupError: if parsing the setup.cfg fails.
    """
    if not value:
        return
    if isinstance(value, string_type):
        path = os.path.abspath(value)
    else:
        path = os.path.abspath('setup.cfg')
    if not os.path.exists(path):
        raise errors.DistutilsFileError(
            'The setup.cfg file %s does not exist.' % path)

    # Converts the setup.cfg file to setup() arguments
    try:
        attrs = util.cfg_to_args(path, dist.script_args)
    except Exception:
        e = sys.exc_info()[1]
        # NB: This will output to the console if no explicit logging has
        # been setup - but thats fine, this is a fatal distutils error, so
        # being pretty isn't the #1 goal.. being diagnosable is.
        logging.exception('Error parsing')
        raise errors.DistutilsSetupError(
            'Error parsing %s: %s: %s' % (path, e.__class__.__name__, e))

    # Repeat some of the Distribution initialization code with the newly
    # provided attrs
    if attrs:
        # Skips 'options' and 'licence' support which are rarely used; may
        # add back in later if demanded
        for key, val in attrs.items():
            # Prefer a dedicated setter, then a metadata attribute, then a
            # plain Distribution attribute; anything else is unknown.
            if hasattr(dist.metadata, 'set_' + key):
                getattr(dist.metadata, 'set_' + key)(val)
            elif hasattr(dist.metadata, key):
                setattr(dist.metadata, key, val)
            elif hasattr(dist, key):
                setattr(dist, key, val)
            else:
                msg = 'Unknown distribution option: %s' % repr(key)
                warnings.warn(msg)

    # Re-finalize the underlying Distribution
    try:
        super(dist.__class__, dist).finalize_options()
    except TypeError:
        # If dist is not declared as a new-style class (with object as
        # a subclass) then super() will not work on it. This is the case
        # for Python 2. In that case, fall back to doing this the ugly way
        dist.__class__.__bases__[-1].finalize_options(dist)

    # This bit comes out of distribute/setuptools
    if isinstance(dist.metadata.version, integer_types + (float,)):
        # Some people apparently take "version number" too literally :)
        dist.metadata.version = str(dist.metadata.version)
| gpl-3.0 |
applecool/AI | Best product/simulate_agents_phase4.py | 1 | 2564 | import sys
import numpy as np
from agents import CheapAgent, RandomAgent
from Agent_spamu import Agent_spamu #addition
from product import Product
if __name__ == '__main__':
    # Phase-4 simulation: the agent buys products and is scored on how many
    # good ones it accumulates and its final wealth.
    data_path = "./datasets/"
    data_group = "dataset1"
    # if you like, you can read the data_path and data_group using sys.argv
    X = np.loadtxt(data_path + data_group + "_X.csv", dtype=float, delimiter=',')  # features
    y = np.loadtxt(data_path + data_group + "_y.csv", dtype=int)  # bad (0) or good (1)
    prices = np.loadtxt(data_path + data_group + "_p.csv", dtype=float)  # prices

    value = 1000.
    num_products = X.shape[0]
    products = []
    for i in range(num_products):
        products.append(Product(X[i], value, prices[i]))

    #agent = CheapAgent("cheap")
    #agent = RandomAgent("random")
    # BUG FIX: the class imported above is `Agent_spamu`; the lowercase
    # `agent_spamu(...)` raised NameError at runtime.
    agent = Agent_spamu("spamu")

    agent_wealth = 0
    num_good_products_agent_has = 0

    # We'll gift you two random products, we'll give them to you for free
    seed = 42
    rs = np.random.RandomState(seed)

    # choose a good product
    good_products = np.nonzero(y == 1)[0]
    chosen = rs.choice(good_products)
    products[chosen].price = 0  # it's our gift to you
    agent.add_to_my_products(products[chosen], 1)
    num_good_products_agent_has += 1
    agent_wealth += products[chosen].value
    del products[chosen]
    y = np.delete(y, chosen)

    # choose a bad product
    bad_products = np.nonzero(y == 0)[0]
    chosen = rs.choice(bad_products)
    products[chosen].price = 0  # it's our gift to you
    agent.add_to_my_products(products[chosen], 0)
    del products[chosen]
    y = np.delete(y, chosen)

    # Floor division keeps this an int under Python 3 as well (range() needs
    # an int); identical result for ints under Python 2.
    num_products_you_can_choose = num_products // 2
    for _ in range(num_products_you_can_choose):
        chosen = agent.choose_one_product(products)
        agent.add_to_my_products(products[chosen], y[chosen])
        # print("Agent %s chose %s" % (agent, products[chosen]))
        agent_wealth -= products[chosen].price
        if y[chosen] == 1:  # a good product
            agent_wealth += products[chosen].value
            num_good_products_agent_has += 1
        del products[chosen]
        y = np.delete(y, chosen)

    # print() call form produces identical output on both Python 2 and 3.
    print("{}'s final wealth:\t${:,.2f}".format(agent, agent_wealth))
    print("%s has %d good products." % (agent, num_good_products_agent_has))
| mit |
qqzwc/XX-Net | code/default/python27/1.0/lib/encodings/iso8859_3.py | 593 | 13345 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-3 codec; maps bytes <-> Unicode via the module tables."""

    def encode(self,input,errors='strict'):
        # encoding_table is the inverse of decoding_table, built by gencodec.py.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding needs no cross-call state, so each chunk is independent.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charset: chunks can be decoded independently.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: inherits charmap encode() from Codec above.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: inherits charmap decode() from Codec above.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec with Python."""
    # The codec is stateless, so a single instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-3',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
u'\u02d8' # 0xA2 -> BREVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\ufffe'
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Inverse of decoding_table: maps Unicode code points back to byte values
# for this charmap codec.
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
mrquim/mrquimrepo | repo/plugin.video.live.streamspro/pyaesnew/__init__.py | 68 | 2087 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This is a pure-Python implementation of the AES algorithm and AES common
# modes of operation.
# See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
# See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
# Supported key sizes:
# 128-bit
# 192-bit
# 256-bit
# Supported modes of operation:
# ECB - Electronic Codebook
# CBC - Cipher-Block Chaining
# CFB - Cipher Feedback
# OFB - Output Feedback
# CTR - Counter
# See the README.md for API details and general information.
# Also useful, PyCrypto, a crypto library implemented in C with Python bindings:
# https://www.dlitz.net/software/pycrypto/
VERSION = [1, 3, 0]
from .aes import AES, AESModeOfOperationCTR, AESModeOfOperationCBC, AESModeOfOperationCFB, AESModeOfOperationECB, AESModeOfOperationOFB, AESModesOfOperation, Counter
from .blockfeeder import decrypt_stream, Decrypter, encrypt_stream, Encrypter
from .blockfeeder import PADDING_NONE, PADDING_DEFAULT
| gpl-2.0 |
hansroh/aquests | aquests/protocols/http2/hyper/packages/rfc3986/normalizers.py | 47 | 3332 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .compat import to_bytes
from .misc import NON_PCT_ENCODED
def normalize_scheme(scheme):
    """Lower-case the URI scheme (schemes are case-insensitive)."""
    lowered = scheme.lower()
    return lowered
def normalize_authority(authority):
    """Normalize a ``(userinfo, host, port)`` triple into an authority string.

    The userinfo percent-escapes are upper-cased and the host is
    lower-cased; empty/None pieces are simply omitted.
    """
    userinfo, host, port = authority
    parts = []
    if userinfo:
        parts.append(normalize_percent_characters(userinfo) + '@')
    if host:
        parts.append(host.lower())
    if port:
        parts.append(':' + port)
    return ''.join(parts)
def normalize_path(path):
    """Normalize percent-escapes in *path* and collapse dot segments.

    An empty path is returned unchanged.
    """
    if not path:
        return path
    return remove_dot_segments(normalize_percent_characters(path))
def normalize_query(query):
    """Upper-case percent-escapes in the query component."""
    normalized = normalize_percent_characters(query)
    return normalized
def normalize_fragment(fragment):
    """Upper-case percent-escapes in the fragment component."""
    normalized = normalize_percent_characters(fragment)
    return normalized
# Matches one percent-encoded octet, e.g. '%3a' or '%DF'.
PERCENT_MATCHER = re.compile('%[A-Fa-f0-9]{2}')
def normalize_percent_characters(s):
    """All percent characters should be upper-cased.

    For example, ``"%3afoo%DF%ab"`` should be turned into ``"%3Afoo%DF%AB"``.
    """
    # Rewrite every %XX escape in a single pass.  The previous
    # findall + repeated str.replace rescanned the whole string once per
    # distinct escape (O(n*m)); re.sub with a callable is one O(n) scan
    # and produces exactly the same result (upper-casing an already
    # upper-case escape is a no-op).  The re module caches the compiled
    # pattern, so there is no per-call compilation cost.
    return re.sub('%[A-Fa-f0-9]{2}', lambda match: match.group(0).upper(), s)
def remove_dot_segments(s):
    """Collapse ``.`` and ``..`` segments in path *s*.

    Implements the pseudo-code of RFC 3986, section 5.2.4.
    """
    output = []
    for segment in s.split('/'):
        if segment == '.':
            # '.' is the current directory: contributes nothing.
            continue
        if segment == '..':
            # '..' climbs one level: drop the previous segment, if any.
            if output:
                output.pop()
        else:
            output.append(segment)
    # An absolute path whose leading empty segment was consumed (or whose
    # output is empty) gets it back so the result keeps its leading '/'.
    if s.startswith('/') and (not output or output[0]):
        output.insert(0, '')
    # Input ending in '/.' or '/..' names a directory, so keep a trailing
    # '/' by ending the output with an empty segment.
    if s.endswith(('/.', '/..')):
        output.append('')
    return '/'.join(output)
def encode_component(uri_component, encoding):
    """Percent-encode every byte of *uri_component* that is not a safe
    ASCII character, returning the encoded string.

    ``None`` passes through unchanged.
    """
    if uri_component is None:
        return uri_component
    raw = to_bytes(uri_component, encoding)
    result = bytearray()
    # Slice out one-byte bytestrings so indexing behaves identically on
    # Python 2 (str) and Python 3 (bytes).
    for offset in range(len(raw)):
        octet = raw[offset:offset + 1]
        code = ord(octet)
        if code < 128 and octet.decode() in NON_PCT_ENCODED:
            # Safe ASCII character: copy it through verbatim.
            result.extend(octet)
        else:
            result.extend('%{0:02x}'.format(code).encode())
    return result.decode(encoding)
| mit |
bbbbanjax/CloudBot | plugins/tvdb.py | 3 | 5083 | import datetime
import requests
from lxml import etree
from cloudbot import hook
# security: parse API responses with entity resolution and network access
# disabled, so untrusted XML cannot mount XXE / external-entity attacks.
parser = etree.XMLParser(resolve_entities=False, no_network=True)
base_url = "http://thetvdb.com/api/"
def get_episodes_for_series(series_name, api_key):
    """Look up *series_name* on thetvdb.com.

    Returns a dict with keys ``error`` (message string or None),
    ``ended`` (True when the series status is 'Ended'), ``episodes``
    (list of <Episode> XML elements) and ``name`` (the canonical series
    title as reported by the API).
    """
    res = {"error": None, "ended": False, "episodes": None, "name": None}
    # http://thetvdb.com/wiki/index.php/API:GetSeries
    try:
        params = {'seriesname': series_name}
        request = requests.get(base_url + 'GetSeries.php', params=params)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        # NOTE(review): ``e`` is captured but never used.
        res["error"] = "error contacting thetvdb.com"
        return res
    query = etree.fromstring(request.content, parser=parser)
    series_id = query.xpath('//seriesid/text()')
    if not series_id:
        res["error"] = "Unknown TV series. (using www.thetvdb.com)"
        return res
    # The search may return several candidates; take the first match.
    series_id = series_id[0]
    try:
        _request = requests.get(base_url + '%s/series/%s/all/en.xml' % (api_key, series_id))
        _request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
        res["error"] = "error contacting thetvdb.com"
        return res
    series = etree.fromstring(_request.content, parser=parser)
    series_name = series.xpath('//SeriesName/text()')[0]
    if series.xpath('//Status/text()')[0] == 'Ended':
        res["ended"] = True
    res["episodes"] = series.xpath('//Episode')
    res["name"] = series_name
    return res
def get_episode_info(episode, api_key):
    """Extract display information from an <Episode> XML element.

    Returns ``(first_aired, air_date, description)`` where *description*
    looks like ``"S02E03 - Title"`` (the title part is omitted when
    unknown), or ``None`` when the element carries no parseable
    ``FirstAired`` date.  ``api_key`` is unused but kept for call-site
    compatibility.
    """
    first_aired = episode.findtext("FirstAired")
    try:
        year, month, day = (int(part) for part in first_aired.split('-'))
        air_date = datetime.date(year, month, day)
    except (ValueError, TypeError):
        # Malformed or out-of-range date string.
        return None
    season = int(episode.findtext("SeasonNumber"))
    number = int(episode.findtext("EpisodeNumber"))
    episode_num = "S%02dE%02d" % (season, number)
    episode_name = episode.findtext("EpisodeName")
    # An unannounced title is either absent (None) or the placeholder
    # "TBA"; treat both the same way.
    if episode_name == "TBA":
        episode_name = None
    if episode_name:
        episode_desc = '{} - {}'.format(episode_num, episode_name)
    else:
        episode_desc = episode_num
    return first_aired, air_date, episode_desc
@hook.command()
@hook.command('tv')
def tv_next(text, bot=None):
    """tv <series> -- Get the next episode of <series>."""
    api_key = bot.config.get("api_keys", {}).get("tvdb", None)
    if api_key is None:
        return "error: no api key set"
    episodes = get_episodes_for_series(text, api_key)
    if episodes["error"]:
        return episodes["error"]
    series_name = episodes["name"]
    ended = episodes["ended"]
    episodes = episodes["episodes"]
    if ended:
        return "{} has ended.".format(series_name)
    next_eps = []
    today = datetime.date.today()
    # Walk the episode list newest-first (presumably the XML lists them
    # oldest-first -- TODO confirm).  Each future episode *overwrites*
    # next_eps, so after the loop it holds the earliest upcoming episode,
    # optionally preceded by one airing today.
    for episode in reversed(episodes):
        ep_info = get_episode_info(episode, api_key)
        if ep_info is None:
            # Episode without a parseable air date: skip it.
            continue
        (first_aired, air_date, episode_desc) = ep_info
        if air_date > today:
            next_eps = ['{} ({})'.format(first_aired, episode_desc)]
        elif air_date == today:
            # Keep today's episode first, followed by the next future one.
            next_eps = ['Today ({})'.format(episode_desc)] + next_eps
        else:
            # we're iterating in reverse order with newest episodes last
            # so, as soon as we're past today, break out of loop
            break
    if not next_eps:
        return "There are no new episodes scheduled for {}.".format(series_name)
    if len(next_eps) == 1:
        return "The next episode of {} airs {}".format(series_name, next_eps[0])
    else:
        next_eps = ', '.join(next_eps)
        return "The next episodes of {}: {}".format(series_name, next_eps)
@hook.command()
@hook.command('tv_prev')
def tv_last(text, bot=None):
    """tv_last <series> -- Gets the most recently aired episode of <series>."""
    api_key = bot.config.get("api_keys", {}).get("tvdb", None)
    if api_key is None:
        return "error: no api key set"
    episodes = get_episodes_for_series(text, api_key)
    if episodes["error"]:
        return episodes["error"]
    series_name = episodes["name"]
    ended = episodes["ended"]
    episodes = episodes["episodes"]
    prev_ep = None
    today = datetime.date.today()
    # Walk newest-first and stop at the first episode strictly before
    # today -- that is the most recently aired one.
    for episode in reversed(episodes):
        ep_info = get_episode_info(episode, api_key)
        if ep_info is None:
            # Episode without a parseable air date: skip it.
            continue
        (first_aired, air_date, episode_desc) = ep_info
        if air_date < today:
            # iterating in reverse order, so the first episode encountered
            # before today was the most recently aired
            prev_ep = '{} ({})'.format(first_aired, episode_desc)
            break
    if not prev_ep:
        return "There are no previously aired episodes for {}.".format(series_name)
    if ended:
        return '{} ended. The last episode aired {}.'.format(series_name, prev_ep)
    return "The last episode of {} aired {}.".format(series_name, prev_ep)
| gpl-3.0 |
40023256/2015cd_midterm- | static/Brython3.1.0-20150301-090019/Lib/long_int1/__init__.py | 109 | 3805 | from browser import html, document, window
import javascript
#memorize/cache?
def _get_value(other):
if isinstance(other, LongInt):
return other.value
return other
class BigInt:
    """Adapter base class mapping Python's numeric protocol onto a
    wrapped JavaScript big-number object stored on ``self.value``.

    Subclasses (DecimalJS, BigNumberJS, BigJS) set ``self.value`` in
    their ``__init__``; every operator delegates to the JS object's
    methods and wraps the result in ``LongInt``, which is bound to one
    of the subclasses at module level.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes
    instances unhashable on Python 3; left unchanged to preserve
    behavior.
    """
    def __init__(self):
        pass
    def __abs__(self):
        return LongInt(self.value.abs())
    def __add__(self, other):
        return LongInt(self.value.plus(_get_value(other)))
    def __and__(self, other):
        # Bitwise AND is not implemented (returns None).
        pass
    def __divmod__(self, other):
        _value=_get_value(other)
        return LongInt(self.value.div(_value)), LongInt(self.value.mod(_value))
    def __div__(self, other):
        # Python 2 style division hook.
        return LongInt(self.value.div(_get_value(other)))
    def __eq__(self, other):
        return bool(self.value.eq(_get_value(other)))
    def __floordiv__(self, other):
        return LongInt(self.value.div(_get_value(other)).floor())
    def __ge__(self, other):
        return bool(self.value.gte(_get_value(other)))
    def __gt__(self, other):
        return bool(self.value.gt(_get_value(other)))
    def __index__(self):
        # Allows use where an exact int is required (slicing, hex(), ...).
        if self.value.isInt():
            return int(self.value.toNumber())
        raise TypeError("This is not an integer")
    def __le__(self, other):
        return bool(self.value.lte(_get_value(other)))
    def __lt__(self, other):
        return bool(self.value.lt(_get_value(other)))
    def __lshift__(self, shift):
        if isinstance(shift, int):
            # x << n == x * 2**n; the JS libraries expose no shift method.
            _v=LongInt(2)**shift
            return LongInt(self.value.times(_v.value))
    def __mod__(self, other):
        return LongInt(self.value.mod(_get_value(other)))
    def __mul__(self, other):
        return LongInt(self.value.times(_get_value(other)))
    def __neg__(self):
        # BUG FIX: unary minus takes no operand; the old signature
        # (self, other) made ``-x`` raise a TypeError.  Assumes the JS
        # object exposes ``neg()`` (big.js / decimal.js do) -- TODO
        # confirm for bignumber.js, which documents ``negated()``.
        return LongInt(self.value.neg())
    def __or__(self, other):
        # Bitwise OR is not implemented (returns None).
        pass
    def __pow__(self, other):
        return LongInt(self.value.pow(_get_value(other)))
    def __rshift__(self, other):
        # Right shift is not implemented (returns None).
        pass
    def __sub__(self, other):
        return LongInt(self.value.minus(_get_value(other)))
    def __repr__(self):
        # BUG FIX: instances have no ``__name__`` attribute (that lives
        # on the class object); use the class name instead.
        return "%s(%s)" % (type(self).__name__, self.value.toString(10))
    def __str__(self):
        return "%s(%s)" % (type(self).__name__, self.value.toString(10))
    def __xor__(self, other):
        # Bitwise XOR is not implemented (returns None).
        pass
_precision=20
def get_precision(value):
    """Return the number of characters in *value*'s base-10 representation."""
    if isinstance(value, LongInt):
        # Ask the wrapped JS object for its base-10 digit string.
        digits = value.value.toString(10)
    else:
        digits = value
    return len(str(digits))
class DecimalJS(BigInt):
    """Big-integer backend backed by the JavaScript ``Decimal`` constructor
    (decimal.js)."""
    def __init__(self, value=0, base=10):
        global _precision
        _prec=get_precision(value)
        if _prec > _precision:
            # Widen decimal.js's global precision so this value's digits
            # are never rounded away.
            _precision=_prec
            window.eval('Decimal.precision=%s' % _precision)
        self.value=javascript.JSConstructor(window.Decimal)(value, base)
class BigNumberJS(BigInt):
    """Big-integer backend backed by the JavaScript ``BigNumber``
    constructor (bignumber.js)."""
    def __init__(self, value=0, base=10):
        self.value=javascript.JSConstructor(window.BigNumber)(value, base)
class BigJS(BigInt):
    """Big-integer backend backed by the JavaScript ``Big`` constructor
    (big.js).

    ``__floordiv__`` and ``__pow__`` are overridden here, presumably
    because big.js lacks the methods the base class relies on; floor is
    emulated via the library's round modes (0 = down, 3 = up, per the
    original comments).
    """
    def __init__(self, value=0, base=10):
        self.value=javascript.JSConstructor(window.Big)(value, base)
    def __floordiv__(self, other):
        _v=LongInt(self.value.div(_get_value(other)))
        if _v >= 0:
            return LongInt(_v.value.round(0, 0)) #round down
        return LongInt(_v.value.round(0, 3)) #round up
    def __pow__(self, other):
        # The exponent must be a plain Python int before handing it to
        # the JS pow().
        if isinstance(other, LongInt):
            _value=int(other.value.toString(10))
        elif isinstance(other, str):
            _value=int(other)
        else:
            # BUG FIX: a plain int exponent previously fell through both
            # branches, leaving _value unbound and raising
            # UnboundLocalError on the return below.
            _value=int(other)
        return LongInt(self.value.pow(_value))
_path = __file__[:__file__.rfind('/')]+'/'
#to use decimal.js library uncomment these 2 lines
#javascript.load(_path+'decimal.min.js', ['Decimal'])
#LongInt=DecimalJS
#to use bignumber.js library uncomment these 2 lines
javascript.load(_path+'bignumber.min.js', ['BigNumber'])
LongInt=BigNumberJS
#big.js does not have a "base" so only base 10 stuff works.
#to use big.js library uncomment these 2 lines
#javascript.load(_path+'big.min.js', ['Big'])
#LongInt=BigJS
| gpl-3.0 |
FIWARE-TMForum/business-ecosystem-charging-backend | src/wstore/asset_manager/resource_plugins/test_data.py | 1 | 9865 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 - 2016 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from copy import deepcopy
PLUGIN_INFO = {
"name": "test plugin",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [
"application/x-widget+mashable-application-component",
"application/x-mashup+mashable-application-component",
"application/x-operator+mashable-application-component"
],
"formats": ["FILE"],
"form": {
"vendor": {
"type": "text",
"placeholder": "Vendor",
"default": "default vendor",
"label": "Vendor"
},
"name": {
"type": "text",
"placeholder": "Name",
"default": "default name",
"label": "Name",
"mandatory": True
},
"type": {
"type": "select",
"label": "Select",
"options": [{
"text": "Option 1",
"value": "opt1"
}, {
"text": "Option 2",
"value": "opt2"
}]
},
"is_op": {
"type": "checkbox",
"label": "Is a checkbox",
"text": "The checkbox",
"default": True
},
"area": {
"type": "textarea",
"label": "Area",
"default": "default value",
"placeholder": "placeholder"
}
}
}
PLUGIN_INFO2 = {
"name": "test plugin 5",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"formats": ["FILE"]
}
MISSING_NAME = {
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE"]
}
INVALID_NAME = {
"name": "inv&name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE"]
}
MISSING_AUTHOR = {
"name": "plugin name",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE"]
}
MISSING_FORMATS = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": []
}
MISSING_MODULE = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"media_types": [],
"formats": ["FILE"]
}
MISSING_VERSION = {
"name": "plugin name",
"author": "test author",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE"]
}
INVALID_NAME_TYPE = {
"name": 9,
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE"]
}
INVALID_AUTHOR_TYPE = {
"name": "plugin name",
"author": 10,
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE"]
}
INVALID_FORMAT_TYPE = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": "FILE"
}
INVALID_FORMAT = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": [],
"formats": ["FILE", "URL", "INV"]
}
INVALID_MEDIA_TYPE = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": "text/plain",
"formats": ["FILE", "URL"]
}
INVALID_MODULE_TYPE = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": [],
"media_types": ["text/plain"],
"formats": ["FILE", "URL"]
}
INVALID_VERSION = {
"name": "plugin name",
"author": "test author",
"version": "1.a",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"]
}
# (A second, byte-identical definition of INVALID_VERSION used to live
# here; it merely re-assigned the same dict and has been removed.  The
# canonical definition appears immediately above.)
INVALID_ACCOUNTING = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
"pull_accounting": "true"
}
BASIC_PLUGIN_DATA = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
}
INVALID_FORM_TYPE = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_TYPE['form'] = ""
INVALID_FORM_ENTRY_TYPE = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_ENTRY_TYPE["form"] = {
"name": "input"
}
INVALID_FORM_MISSING_TYPE = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_MISSING_TYPE["form"] = {
"name": {
"placeholder": "Name",
"default": "Default name",
"label": "Name",
"mandatory": True
}
}
INVALID_FORM_INV_TYPE = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_INV_TYPE["form"] = {
"name": {
"type": "invalid",
"placeholder": "Name",
"default": "Default name",
"label": "Name",
"mandatory": True
}
}
INVALID_FORM_INVALID_NAME = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_INVALID_NAME["form"] = {
"inv&name": {
"type": "text",
"placeholder": "Name",
"default": "Default name",
"label": "Name",
"mandatory": True
}
}
INVALID_FORM_CHECKBOX_DEF = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_CHECKBOX_DEF["form"] = {
"check": {
"type": "checkbox",
"default": "Default name",
"label": "Name"
}
}
INVALID_FORM_TEXT = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_TEXT["form"] = {
"textf": {
"type": "text",
"default": True,
"label": {},
"mandatory": "true"
}
}
INVALID_FORM_TEXTAREA = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_TEXTAREA["form"] = {
"textf": {
"type": "textarea",
"placeholder": 25
}
}
INVALID_FORM_SELECT = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_SELECT["form"] = {
"select": {
"type": "select",
"default": 25,
"label": 30,
"mandatory": "true",
"options": [{
"text": "value",
"value": "value"
}]
}
}
INVALID_FORM_SELECT_MISS_OPT = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_SELECT_MISS_OPT["form"] = {
"select": {
"type": "select",
}
}
INVALID_FORM_SELECT_INV_OPT = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_SELECT_INV_OPT["form"] = {
"select": {
"type": "select",
"options": "option1"
}
}
INVALID_FORM_SELECT_EMPTY_OPT = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_SELECT_EMPTY_OPT["form"] = {
"select": {
"type": "select",
"options": []
}
}
INVALID_FORM_SELECT_INV_OPT_VAL = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_SELECT_INV_OPT_VAL["form"] = {
"select": {
"type": "select",
"options": ["option1"]
}
}
INVALID_FORM_SELECT_INV_OPT_VAL2 = deepcopy(BASIC_PLUGIN_DATA)
INVALID_FORM_SELECT_INV_OPT_VAL2["form"] = {
"select": {
"type": "select",
"options": [{}, {
"text": 1,
"value": "value"
}]
}
}
INVALID_OVERRIDES = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
"overrides": ["INVALID"]
}
INVALID_FORM_ORDER_FORMAT = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
"overrides": [],
"form_order": {},
"form": {
"name": {
"type": "text",
"placeholder": "Name",
"default": "Default name",
"label": "Name",
"mandatory": True
}
}
}
INVALID_FORM_ORDER_NO_FORM = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
"overrides": [],
"form_order": []
}
INVALID_FORM_ORDER_MISSING_KEY = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
"overrides": [],
"form_order": [],
"form": {
"name": {
"type": "text",
"placeholder": "Name",
"default": "Default name",
"label": "Name",
"mandatory": True
}
}
}
INVALID_FORM_ORDER_DIFFERENT_KEY = {
"name": "plugin name",
"author": "test author",
"version": "1.0",
"module": "test.TestPlugin",
"media_types": ["text/plain"],
"formats": ["FILE", "URL"],
"overrides": [],
"form_order": ["version"],
"form": {
"name": {
"type": "text",
"placeholder": "Name",
"default": "Default name",
"label": "Name",
"mandatory": True
}
}
} | agpl-3.0 |
mbernasocchi/QGIS | python/plugins/processing/gui/BatchInputSelectionPanel.py | 30 | 10001 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BatchInputSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from pathlib import Path
from qgis.PyQt.QtCore import pyqtSignal, QCoreApplication
from qgis.PyQt.QtWidgets import QWidget, QHBoxLayout, QMenu, QPushButton, QLineEdit, QSizePolicy, QAction, QFileDialog
from qgis.PyQt.QtGui import QCursor
from qgis.core import (QgsMapLayer,
QgsRasterLayer,
QgsSettings,
QgsProject,
QgsProcessing,
QgsProcessingUtils,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterMeshLayer,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterMapLayer)
from processing.gui.MultipleInputDialog import MultipleInputDialog
from processing.tools import dataobjects
class BatchInputSelectionPanel(QWidget):
valueChanged = pyqtSignal()
def __init__(self, param, row, col, dialog):
super(BatchInputSelectionPanel, self).__init__(None)
self.param = param
self.dialog = dialog
self.row = row
self.col = col
self.horizontalLayout = QHBoxLayout(self)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setMargin(0)
self.text = QLineEdit()
self.text.setObjectName('text')
self.text.setMinimumWidth(300)
self.setValue('')
self.text.editingFinished.connect(self.textEditingFinished)
self.text.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.horizontalLayout.addWidget(self.text)
self.pushButton = QPushButton()
self.pushButton.setText('…')
self.pushButton.clicked.connect(self.showPopupMenu)
self.horizontalLayout.addWidget(self.pushButton)
self.setLayout(self.horizontalLayout)
def _panel(self):
return self.dialog.mainWidget()
def _table(self):
return self._panel().tblParameters
def showPopupMenu(self):
popupmenu = QMenu()
if not (isinstance(self.param, QgsProcessingParameterMultipleLayers)
and self.param.layerType == dataobjects.TYPE_FILE):
selectLayerAction = QAction(
QCoreApplication.translate('BatchInputSelectionPanel', 'Select from Open Layers…'), self.pushButton)
selectLayerAction.triggered.connect(self.showLayerSelectionDialog)
popupmenu.addAction(selectLayerAction)
selectFileAction = QAction(
QCoreApplication.translate('BatchInputSelectionPanel', 'Select Files…'), self.pushButton)
selectFileAction.triggered.connect(self.showFileSelectionDialog)
popupmenu.addAction(selectFileAction)
selectDirectoryAction = QAction(
QCoreApplication.translate('BatchInputSelectionPanel', 'Select Directory…'), self.pushButton)
selectDirectoryAction.triggered.connect(self.showDirectorySelectionDialog)
popupmenu.addAction(selectDirectoryAction)
popupmenu.exec_(QCursor.pos())
def showLayerSelectionDialog(self):
layers = []
if (isinstance(self.param, QgsProcessingParameterRasterLayer)
or (isinstance(self.param, QgsProcessingParameterMultipleLayers) and
self.param.layerType() == QgsProcessing.TypeRaster)):
layers = QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance())
elif isinstance(self.param, QgsProcessingParameterVectorLayer):
layers = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance())
elif isinstance(self.param, QgsProcessingParameterMapLayer):
layers = QgsProcessingUtils.compatibleLayers(QgsProject.instance())
elif (isinstance(self.param, QgsProcessingParameterMeshLayer)
or (isinstance(self.param, QgsProcessingParameterMultipleLayers) and
self.param.layerType() == QgsProcessing.TypeMesh)):
layers = QgsProcessingUtils.compatibleMeshLayers(QgsProject.instance())
else:
datatypes = [QgsProcessing.TypeVectorAnyGeometry]
if isinstance(self.param, QgsProcessingParameterFeatureSource):
datatypes = self.param.dataTypes()
elif isinstance(self.param, QgsProcessingParameterMultipleLayers):
datatypes = [self.param.layerType()]
if QgsProcessing.TypeVectorAnyGeometry not in datatypes:
layers = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), datatypes)
else:
layers = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance())
dlg = MultipleInputDialog([layer.name() for layer in layers])
dlg.exec_()
def generate_layer_id(layer):
# prefer layer name if unique
if len([l for l in layers if l.name().lower() == layer.name().lower()]) == 1:
return layer.name()
else:
# otherwise fall back to layer id
return layer.id()
if dlg.selectedoptions is not None:
selected = dlg.selectedoptions
if len(selected) == 1:
self.setValue(generate_layer_id(layers[selected[0]]))
else:
if isinstance(self.param, QgsProcessingParameterMultipleLayers):
self.text.setText(';'.join(layers[idx].id() for idx in selected))
else:
rowdif = len(selected) - (self._table().rowCount() - self.row)
for i in range(rowdif):
self._panel().addRow()
for i, layeridx in enumerate(selected):
self._table().cellWidget(i + self.row,
self.col).setValue(generate_layer_id(layers[layeridx]))
def showFileSelectionDialog(self):
self.showFileDialog(seldir=False)
def showDirectorySelectionDialog(self):
self.showFileDialog(seldir=True)
def showFileDialog(self, seldir):
settings = QgsSettings()
text = str(self.text.text())
if os.path.isdir(text):
path = text
elif not seldir and os.path.isdir(os.path.dirname(text)):
path = os.path.dirname(text)
elif settings.contains('/Processing/LastInputPath'):
path = str(settings.value('/Processing/LastInputPath'))
else:
path = ''
if not seldir:
ret, selected_filter = QFileDialog.getOpenFileNames(
self, self.tr('Select Files'), path, self.param.createFileFilter()
)
else:
ret = QFileDialog.getExistingDirectory(self, self.tr('Select Directory'), path)
if ret:
if seldir:
settings.setValue('/Processing/LastInputPath', ret)
files = []
for pp in Path(ret).rglob("*"):
if not pp.is_file():
continue
p = pp.as_posix()
if ((isinstance(self.param, QgsProcessingParameterRasterLayer)
or (isinstance(self.param, QgsProcessingParameterMultipleLayers) and self.param.layerType() == QgsProcessing.TypeRaster)) and
not QgsRasterLayer.isValidRasterFileName(p)):
continue
files.append(p)
if not files:
return
else:
files = list(ret)
settings.setValue('/Processing/LastInputPath', os.path.dirname(str(files[0])))
for i, filename in enumerate(files):
files[i] = dataobjects.getRasterSublayer(filename, self.param)
if len(files) == 1:
self.text.setText(files[0])
self.textEditingFinished()
else:
if isinstance(self.param, QgsProcessingParameterMultipleLayers):
self.text.setText(';'.join(str(f) for f in files))
else:
rowdif = len(files) - (self._table().rowCount() - self.row)
for i in range(rowdif):
self._panel().addRow()
for i, f in enumerate(files):
self._table().cellWidget(i + self.row,
self.col).setValue(f)
def textEditingFinished(self):
    """Commit the line edit's current text as the widget value and notify listeners."""
    self._value = self.text.text()
    self.valueChanged.emit()
def getValue(self):
    """Return the stored value, normalizing any falsy value to None."""
    return self._value or None
def setValue(self, value):
    """Store *value*, mirror it in the line edit, then notify listeners."""
    self._value = value
    # Map layers are shown by name; any other value is assumed to be text.
    display = value.name() if isinstance(value, QgsMapLayer) else value
    self.text.setText(display)
    self.valueChanged.emit()
| gpl-2.0 |
jylaxp/django | tests/template_tests/filter_tests/test_truncatechars_html.py | 390 | 1229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import truncatechars_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    """Exercise ``truncatechars_html`` on plain, nested and unicode markup."""

    # Shared fixture: markup with a nested inline tag and a void <br> element.
    NESTED_HTML = '<p>one <a href="#">two - three <br>four</a> five</p>'

    def test_truncate_zero(self):
        self.assertEqual(truncatechars_html(self.NESTED_HTML, 0), '...')

    def test_truncate(self):
        self.assertEqual(truncatechars_html(self.NESTED_HTML, 6), '<p>one...</p>')

    def test_truncate2(self):
        self.assertEqual(
            truncatechars_html(self.NESTED_HTML, 11),
            '<p>one <a href="#">two ...</a></p>',
        )

    def test_truncate3(self):
        # A limit longer than the content leaves the markup untouched.
        self.assertEqual(truncatechars_html(self.NESTED_HTML, 100), self.NESTED_HTML)

    def test_truncate_unicode(self):
        self.assertEqual(
            truncatechars_html('<b>\xc5ngstr\xf6m</b> was here', 5),
            '<b>\xc5n...</b>',
        )

    def test_truncate_something(self):
        # Tags do not count towards the character limit.
        self.assertEqual(truncatechars_html('a<b>b</b>c', 3), 'a<b>b</b>c')
| bsd-3-clause |
sam-m888/gprime | gprime/plugins/graph/gvhourglass.py | 1 | 14183 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Stephane Charette <stephanecharette@gmail.com>
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013-2014 Paul Franklin
# Copyright (C) 2015 Detlef Wolz <detlef.wolz@t-online.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Generate an hourglass graph using the Graphviz generator.
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
from gprime.errors import ReportError
from gprime.plug.menu import (PersonOption, BooleanOption, NumberOption,
EnumeratedListOption)
from gprime.plug.report import Report
from gprime.plug.report import utils
from gprime.plug.report import MenuReportOptions
from gprime.plug.report import stdoptions
from gprime.utils.db import get_birth_or_fallback, get_death_or_fallback
from gprime.proxy import CacheProxyDb
#------------------------------------------------------------------------
#
# Constant options items
#
#------------------------------------------------------------------------
# Colour-scheme choices offered in the report options: 'value' is the internal
# key stored in the option, 'name' the translated label shown to the user.
_COLORS = [{'name' : _("B&W outline"), 'value' : "outline"},
           {'name' : _("Colored outline"), 'value' : "colored"},
           {'name' : _("Color fill"), 'value' : "filled"}]
#------------------------------------------------------------------------
#
# HourGlassReport
#
#------------------------------------------------------------------------
class HourGlassReport(Report):
    """
    An hourglass report displays ancestors and descendants of a center person.
    """

    def __init__(self, database, options, user):
        """
        Create HourGlass object that produces the report.

        name_format       - Preferred format to display names
        incl_private      - Whether to include private data
        incid             - Whether to include IDs.
        living_people     - How to handle living people
        years_past_death  - Consider as living this many years after death
        """
        Report.__init__(self, database, options, user)
        menu = options.menu

        lang = menu.get_option_by_name('trans').get_value()
        locale = self.set_locale(lang)

        stdoptions.run_private_data_option(self, menu)
        stdoptions.run_living_people_option(self, menu, locale)
        # Wrap the database in a cache proxy: the traversal below reads the
        # same person/family records repeatedly.
        self.database = CacheProxyDb(self.database)
        self.__db = self.database

        # Book-keeping so people and family links are only emitted once.
        self.__used_people = []
        self.__family_father = []  # links allocated from family to father
        self.__family_mother = []  # links allocated from family to mother

        self.max_descend = menu.get_option_by_name('maxdescend').get_value()
        self.max_ascend = menu.get_option_by_name('maxascend').get_value()
        pid = menu.get_option_by_name('pid').get_value()
        self.center_person = self.__db.get_person_from_gid(pid)
        if self.center_person is None:
            raise ReportError(_("Person %s is not in the Database") % pid)

        # Would be nice to get rid of these 2 hard-coded arrays of colours
        # and instead allow the user to pick-and-choose whatever colour they
        # want. When/if this is done, take a look at the colour-selection
        # widget and code used in the FamilyLines graph. FIXME
        colored = {
            'male': 'dodgerblue4',
            'female': 'deeppink',
            'unknown': 'black',
            'family': 'darkgreen'
        }
        filled = {
            'male': 'lightblue',
            'female': 'lightpink',
            'unknown': 'lightgray',
            'family': 'lightyellow'
        }

        self.colorize = menu.get_option_by_name('color').get_value()
        if self.colorize == 'colored':
            self.colors = colored
        elif self.colorize == 'filled':
            self.colors = filled
        # NOTE(review): for the 'outline' scheme self.colors is never set;
        # the other methods only read it under the two branches above.

        self.roundcorners = menu.get_option_by_name('roundcorners').get_value()
        self.includeid = menu.get_option_by_name('incid').get_value()

        stdoptions.run_name_format_option(self, menu)

    def write_report(self):
        """
        Generate the report: emit the center person, then walk up the
        ancestors and down the descendants.
        """
        self.add_person(self.center_person)
        self.traverse_up(self.center_person, 1)
        self.traverse_down(self.center_person, 1)

    def traverse_down(self, person, gen):
        """
        Recursively find the descendants of the given person,
        stopping after max_descend generations.
        """
        if gen > self.max_descend:
            return
        for family_handle in person.get_family_handle_list():
            family = self.__db.get_family_from_handle(family_handle)
            self.add_family(family)
            self.doc.add_link(person.get_gid(), family.get_gid())
            for child_ref in family.get_child_ref_list():
                child_handle = child_ref.get_reference_handle()
                if child_handle not in self.__used_people:
                    # Avoid going down paths twice when descendant cousins marry
                    self.__used_people.append(child_handle)
                    child = self.__db.get_person_from_handle(child_handle)
                    self.add_person(child)
                    self.doc.add_link(family.get_gid(),
                                      child.get_gid())
                    self.traverse_down(child, gen+1)

    def traverse_up(self, person, gen):
        """
        Recursively find the ancestors of the given person,
        stopping after max_ascend generations.
        """
        if gen > self.max_ascend:
            return
        family_handle = person.get_main_parents_family_handle()
        if family_handle:
            family = self.__db.get_family_from_handle(family_handle)
            family_id = family.get_gid()
            self.add_family(family)
            self.doc.add_link(family_id, person.get_gid(),
                              head='none', tail='normal')

            # create link from family to father
            father_handle = family.get_father_handle()
            if father_handle and family_handle not in self.__family_father:
                # allocate only one father per family
                self.__family_father.append(family_handle)
                father = self.__db.get_person_from_handle(father_handle)
                self.add_person(father)
                self.doc.add_link(father.get_gid(), family_id,
                                  head='none', tail='normal')
                # no need to go up if he is a father in another family
                if father_handle not in self.__used_people:
                    self.__used_people.append(father_handle)
                    self.traverse_up(father, gen+1)

            # create link from family to mother
            mother_handle = family.get_mother_handle()
            if mother_handle and family_handle not in self.__family_mother:
                # allocate only one mother per family
                self.__family_mother.append(family_handle)
                mother = self.__db.get_person_from_handle(mother_handle)
                self.add_person(mother)
                self.doc.add_link(mother.get_gid(), family_id,
                                  head='none', tail='normal')
                # no need to go up if she is a mother in another family
                if mother_handle not in self.__used_people:
                    self.__used_people.append(mother_handle)
                    self.traverse_up(mother, gen+1)

    def add_person(self, person):
        """
        Add a person to the Graph. The node id will be the person's gid.
        The node label combines name, birth/death dates and optionally the ID,
        depending on the 'incid' option.
        """
        p_id = person.get_gid()
        name = self._name_display.display(person)
        birth_evt = get_birth_or_fallback(self.__db, person)
        if birth_evt:
            birth = self._get_date(birth_evt.get_date_object())
        else:
            birth = ""
        death_evt = get_death_or_fallback(self.__db, person)
        if death_evt:
            death = self._get_date(death_evt.get_date_object())
        else:
            death = ""

        if self.includeid == 0:  # no ID
            label = "%s \\n(%s - %s)" % (name, birth, death)
        elif self.includeid == 1:  # same line
            label = "%s (%s)\\n(%s - %s)" % (name, p_id, birth, death)
        elif self.includeid == 2:  # own line
            label = "%s \\n(%s - %s)\\n(%s)" % (name, birth, death, p_id)
        # Escape double quotes so the label stays valid in the graph output.
        label = label.replace('"', '\\\"')

        (shape, style, color, fill) = self.get_gender_style(person)
        self.doc.add_node(p_id, label, shape, color, style, fill)

    def add_family(self, family):
        """
        Add a family to the Graph. The node id will be the family's gid.
        The node label is the marriage date (if any), plus the family ID
        when requested by the 'incid' option.
        """
        family_id = family.get_gid()
        label = ""
        marriage = utils.find_marriage(self.__db, family)
        if marriage:
            label = self._get_date(marriage.get_date_object())
        if self.includeid == 1 and label:  # same line
            label = "%s (%s)" % (label, family_id)
        elif self.includeid == 1 and not label:
            label = "(%s)" % family_id
        elif self.includeid == 2 and label:  # own line
            label = "%s\\n(%s)" % (label, family_id)
        elif self.includeid == 2 and not label:
            label = "(%s)" % family_id
        color = ""
        fill = ""
        style = "solid"
        if self.colorize == 'colored':
            color = self.colors['family']
        elif self.colorize == 'filled':
            fill = self.colors['family']
            style = "filled"
        self.doc.add_node(family_id, label, "ellipse", color, style, fill)

    def get_gender_style(self, person):
        "return gender specific person style (shape, style, color, fill)"
        gender = person.get_gender()
        shape = "box"
        style = "solid"
        color = ""
        fill = ""

        if gender == person.FEMALE and self.roundcorners:
            style = "rounded"
        elif gender == person.UNKNOWN:
            shape = "hexagon"

        if self.colorize == 'colored':
            if gender == person.MALE:
                color = self.colors['male']
            elif gender == person.FEMALE:
                color = self.colors['female']
            else:
                color = self.colors['unknown']
        elif self.colorize == 'filled':
            style += ",filled"
            if gender == person.MALE:
                fill = self.colors['male']
            elif gender == person.FEMALE:
                fill = self.colors['female']
            else:
                fill = self.colors['unknown']
        return(shape, style, color, fill)
#------------------------------------------------------------------------
#
# HourGlassOptions
#
#------------------------------------------------------------------------
class HourGlassOptions(MenuReportOptions):
    """
    Defines options for the HourGlass report.
    """

    def __init__(self, name, dbase):
        MenuReportOptions.__init__(self, name, dbase)

    def add_menu_options(self, menu):
        """
        Create all the menu options for this report.

        :param menu: menu object the options are registered on.
        """
        category_name = _("Report Options")

        pid = PersonOption(_("Center Person"))
        pid.set_help(_("The Center person for the graph"))
        menu.add_option(category_name, "pid", pid)

        stdoptions.add_name_format_option(menu, category_name)

        stdoptions.add_private_data_option(menu, category_name)

        stdoptions.add_living_people_option(menu, category_name)

        # Use distinct names for the two generation options (the original
        # reused one local for both, which was easy to misread).
        max_descend = NumberOption(_('Max Descendant Generations'), 10, 1, 15)
        max_descend.set_help(_("The number of generations of descendants to "
                               "include in the graph"))
        menu.add_option(category_name, "maxdescend", max_descend)

        max_ascend = NumberOption(_('Max Ancestor Generations'), 10, 1, 15)
        max_ascend.set_help(_("The number of generations of ancestors to "
                              "include in the graph"))
        menu.add_option(category_name, "maxascend", max_ascend)

        include_id = EnumeratedListOption(_('Include GID'), 0)
        include_id.add_item(0, _('Do not include'))
        include_id.add_item(1, _('Share an existing line'))
        include_id.add_item(2, _('On a line of its own'))
        include_id.set_help(_("Whether (and where) to include GIDs"))
        menu.add_option(category_name, "incid", include_id)

        stdoptions.add_localization_option(menu, category_name)

        ################################
        category_name = _("Graph Style")
        ################################
        color = EnumeratedListOption(_("Graph coloring"), "filled")
        # Iterate the colour descriptors directly instead of indexing with
        # range(len(...)); insertion order of _COLORS is preserved.
        for color_option in _COLORS:
            color.add_item(color_option["value"], color_option["name"])
        color.set_help(_("Males will be shown with blue, females "
                         "with red. If the sex of an individual "
                         "is unknown it will be shown with gray."))
        menu.add_option(category_name, "color", color)

        roundedcorners = BooleanOption(_("Use rounded corners"), False)  # 2180
        roundedcorners.set_help(
            _("Use rounded corners to differentiate between women and men."))
        menu.add_option(category_name, "roundcorners", roundedcorners)
| gpl-2.0 |
nicoboss/Floatmotion | OpenGL/GL/ARB/texture_rgb10_a2ui.py | 9 | 1165 | '''OpenGL extension ARB.texture_rgb10_a2ui
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_rgb10_a2ui to provide a more
Python-friendly API
Overview (from the spec)
This extension adds support for the following data format:
A new texturing format for unsigned 10.10.10.2 integer textures.
OpenGL has supported RGB10 and RGB10_A2 formats for a very long time.
This extension provides a variant of RGB10_A2 which supports unsigned
integer data (in contrast to the above "unsigned normalized integer"
formats).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_rgb10_a2ui.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_rgb10_a2ui import *
from OpenGL.raw.GL.ARB.texture_rgb10_a2ui import _EXTENSION_NAME
def glInitTextureRgb10A2UiARB():
    '''Return boolean indicating whether this extension is available'''
    # Import locally so the check always queries the live extensions registry.
    from OpenGL import extensions as _extensions
    available = _extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | agpl-3.0 |
arugifa/website | website/content.py | 1 | 11581 | """Entry point to manage all content of my website.
Mainly base classes to be inherited by website's components.
"""
import logging
from abc import ABC, abstractmethod
from contextlib import AbstractAsyncContextManager
from dataclasses import dataclass
from pathlib import Path, PurePath
from typing import Callable, ClassVar, List, Union
import aiofiles
import lxml.etree
import lxml.html
from lxml.cssselect import CSSSelector
from website import exceptions
from website.blog.models import Category, Tag
from website.models import Document
from website.update import AsyncPrompt
from website.utils import BaseCommandLine
DocumentPath = Union[Path, PurePath] # For prod and tests
logger = logging.getLogger(__name__)
class DocumentPrompt(AsyncPrompt):
    """Interactive prompt used while updating documents in the database."""

    @property
    def questions(self):
        """Map model fields to the question asked when a new item is imported."""
        category_question = 'Please enter a name for the new "{uri}" category: '
        tag_question = 'Please enter a name for the new "{uri}" tag: '
        return {
            Category.name: category_question,
            Tag.name: tag_question,
        }
# Document Processing
class BaseDocumentHandler(ABC):
    """Manage the life cycle of a document in database.

    :param path:
        path of the document's source file. Every document must be written in HTML,
        and respect a naming convention. See documentation of handler subclasses
        for more info.
    :param reader:
        function to read the documents.
    :param prompt:
        function to interactively ask questions during documents import,
        when certain things cannot be inferred automatically.
    """

    #: Document model class (set by concrete subclasses).
    model: ClassVar[Document]
    #: Document parser class (set by concrete subclasses).
    parser: ClassVar['BaseDocumentSourceParser']

    def __init__(
            self, path: DocumentPath, *,
            reader: Callable = aiofiles.open, prompt: AsyncPrompt = None):
        self.path = path
        self.reader = reader
        self.prompt = prompt or DocumentPrompt()

        self._document = None  # Used for caching purpose
        self._source = None  # Used for caching purpose

    @property
    def document(self) -> Document:
        """Look for the document in database.

        The database lookup is performed once and cached afterwards.

        :raise ~.ItemNotFound: if the document cannot be found.
        """
        if not self._document:
            uri = self.scan_uri()
            self._document = self.model.find(uri=uri)  # Can raise ItemNotFound

        return self._document

    @document.setter
    def document(self, value: Document):
        # Overwrite the cached lookup (used by :meth:`update`).
        self._document = value

    @property
    async def source(self) -> 'BaseDocumentSourceParser':
        """Load document's source on the fly.

        The source file is read once and the parser cached afterwards.

        :raise ~.DocumentLoadingError:
            when something wrong happens while reading the source file.
        """
        if not self._source:
            self._source = await self.load()  # Can raise DocumentLoadingError

        return self._source

    # Main API

    async def insert(self, *, batch: bool = False) -> Document:
        """Insert document into database.

        :param batch:
            set to ``True`` to delay some actions requiring user input.
            Useful when several documents are processed in parallel.
        :return:
            the newly created document.
        :raise ~.ItemAlreadyExisting:
            if a conflict happens during document insertion.
        """
        # Can raise ItemAlreadyExisting.
        return await self.update(create=True, batch=batch)

    async def update(
            self, uri: str = None, create: bool = False, *,
            batch: bool = False) -> Document:
        """Update document in database.

        :param uri:
            URI the document currently has in database. If not provided, the URI will
            be retrieved from the document's :attr:`path`.
        :param create:
            create the document if it doesn't exist yet in database.
        :param batch:
            set to ``True`` to delay some actions requiring user input.
            Useful when several documents are processed in parallel.
        :return:
            the updated (or newly created) document.
        :raise ~.ItemNotFound:
            if the document cannot be found, and ``create`` is set to ``False``.
        :raise ~.ItemAlreadyExisting:
            if ``create`` is set to ``True``, but the document already exists.
        """  # noqa: E501
        if create:
            # Insertion: refuse to overwrite an existing document.
            uri = self.scan_uri()
            document = self.model(uri=uri)

            if document.exists():
                raise exceptions.ItemAlreadyExisting(uri)

            self.document = document

        elif uri:  # Renaming
            self.document = self.model.find(uri=uri)  # Can raise ItemNotFound
            self.document.uri = self.scan_uri()  # New URI

        await self.process(batch=batch)

        if not batch:
            # Batch operations can create transient documents.
            # Saving them now could raise integrity errors from the database.
            self.document.save()

            if create:
                message = "Created new %s: %s"
                # XXX: Why using logger, and not something like self.print(...)? (05/2019)
                logger.info(message, self.document.doc_type, self.document.uri)
            elif uri:
                message = "Renamed and updated existing %s: %s"
                logger.info(message, self.document.doc_type, self.document.uri)
            else:
                message = "Updated existing %s: %s"
                logger.info(message, self.document.doc_type, self.document.uri)

        return self.document

    async def rename(self, target: Path, *, batch: bool = False) -> Document:
        """Rename (and update) document in database.

        A new handler is created for ``target``; the current URI is passed to
        its :meth:`update` so the database row is re-pointed, not duplicated.

        :param target: the new path of document's source file.
        :return: the updated and renamed document.
        :raise ~.ItemNotFound: if the document doesn't exist.
        """
        # TODO: Set-up an HTTP redirection (01/2019)
        uri = self.scan_uri()
        handler = self.__class__(target, reader=self.reader, prompt=self.prompt)
        return await handler.update(uri, batch=batch)

    def delete(self) -> None:
        """Remove a document from database.

        :raise ~.ItemNotFound: if the document doesn't exist.
        """
        uri = self.document.uri
        doc_type = self.document.doc_type
        self.document.delete()  # Can raise ItemNotFound
        logger.info("Deleted %s: %s", doc_type, uri)

    # Helpers

    async def load(self) -> 'BaseDocumentSourceParser':
        """Read and prepare for parsing the source file located at :attr:`path`.

        :raise ~.DocumentLoadingError:
            when something wrong happens while reading the source file
            (e.g., file not found or unsupported format).
        """  # noqa: E501
        try:
            async with self.reader(self.path) as source_file:
                source = await source_file.read()
        except (OSError, UnicodeDecodeError) as exc:
            raise exceptions.DocumentLoadingError(self, exc)

        return self.parser(source)

    @abstractmethod
    async def process(self, *, batch: bool = False) -> None:
        """Parse :attr:`path` and update :attr:`document`'s attributes.

        :param batch:
            set to ``True`` to delay some actions requiring user input.
            Useful when several documents are processed in parallel.
        """

    # Path Scanners
    # Always process paths from right to left,
    # to be able to handle absolute or relative paths.

    def scan_uri(self) -> str:
        """Return document's URI, based on its :attr:`path`.

        The URI is the last dot-separated part of the file's stem
        (e.g. ``2019-01-01.my-article.html`` -> ``my-article``).
        """
        return self.path.stem.split('.')[-1]
class BaseDocumentSourceParser:
    """Parse HTML source of a document.

    :raise ~.DocumentMalformatted: when the given source is not valid HTML.
    """

    def __init__(self, source: str):
        try:
            self.source = lxml.html.document_fromstring(source)
        except lxml.etree.ParserError as exc:
            # Chain the original parser error for easier debugging.
            raise exceptions.DocumentMalformatted(source) from exc

    def parse_title(self) -> str:
        """Look for document's title.

        :raise ~.DocumentTitleMissing: when no title is found.
        """
        parser = CSSSelector('html head title')
        matches = parser(self.source)

        # Don't use ``assert`` for this check: assertions are stripped when
        # running under ``python -O``, which would silently accept documents
        # with a missing or empty <title>.
        if not matches:
            raise exceptions.DocumentTitleMissing(self)

        title = matches[0].text_content()

        if not title:
            raise exceptions.DocumentTitleMissing(self)

        return title

    def parse_tags(self) -> List[str]:
        """Look for document's tags.

        :return: the tags found in the ``keywords`` meta, or an empty list
            when the meta is missing or any entry is blank.
        """
        parser = CSSSelector('html head meta[name=keywords]')
        matches = parser(self.source)

        if not matches:
            return []

        tags = [tag.strip() for tag in matches[0].get('content', '').split(',')]

        # A single blank entry (empty attribute, trailing comma, ...)
        # invalidates the whole list, as before — but without relying on
        # ``assert``, which disappears under ``python -O``.
        if not all(tags):
            return []

        return tags
# Document Reading
class BaseDocumentReader(ABC, BaseCommandLine):
    """Base class for external document readers.

    Provides a subset of :func:`aiofiles.open`'s API.

    Every reader relies on a :attr:`~program` installed locally, to open, read and if
    necessary convert documents on the fly to HTML format. The result of the conversion
    should be displayed on the standard output.

    :param shell:
        alternative shell to run the reader's :attr:`~program`. Must have a similar API
        to :func:`asyncio.create_subprocess_shell`.
    """

    #: Name of the reader's binary to execute for reading documents.
    program: ClassVar[str] = None
    #: Default arguments to use when running the reader's program.
    #: Must contain a ``{path}`` placeholder (see :meth:`read`).
    arguments: ClassVar[str] = None

    def __init__(self, shell: Callable = None):
        BaseCommandLine.__init__(self, shell)

        #: Path of the document to read. Set by :meth:`__call__`.
        self.path = None

    def __call__(self, path: Union[str, Path]) -> 'DocumentOpener':
        """Open the document for further reading.

        Can be called directly or used as a context manager.

        :raise FileNotFoundError: when the document cannot be opened.
        """
        path = Path(path)

        if not path.is_file():
            raise FileNotFoundError(f"Document doesn't exist: {path}")

        self.path = Path(path)
        return DocumentOpener(self)

    async def read(self) -> str:
        """Read and convert to HTML the document located at :attr:`path`.

        Runs the reader's :attr:`~program` with :attr:`path` substituted into
        :attr:`arguments`, and returns the program's (stripped) output.

        :raise OSError:
            if the reader's :attr:`~program` cannot convert the document.
        :raise UnicodeDecodeError:
            when the conversion's result is invalid.
        """
        assert self.path is not None, "Open a file before trying to read it"
        cmdline = self.arguments.format(path=self.path)
        # Can raise OSError or UnicodeDecodeError.
        html = await self.run(cmdline)
        return html.strip()
@dataclass
class DocumentOpener(AbstractAsyncContextManager):
    """Helper for :class:`BaseDocumentReader` to open documents.

    :param reader: reader instance opening the document.
    """
    reader: BaseDocumentReader

    def __getattr__(self, name: str):
        """Delegate any unknown attribute lookup to :attr:`reader`."""
        return getattr(self.reader, name)

    async def __aenter__(self) -> BaseDocumentReader:
        """Hand back :attr:`reader` when entering the ``async with`` block."""
        return self.reader

    async def __aexit__(self, *exc) -> None:
        """No clean-up is required when leaving the context."""
        return None
| gpl-3.0 |
SEL-Columbia/commcare-hq | corehq/apps/accounting/migrations/0013_roles_and_plans_migration_p1m2.py | 3 | 32741 | # encoding: utf-8
from collections import defaultdict
import datetime
from decimal import Decimal
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.management import call_command
from south.db import db
from south.v2 import DataMigration
from django.db import models
from corehq.apps.accounting.models import (
FeatureType, SoftwarePlanEdition, SoftwareProductType,
SoftwarePlanVisibility,
)
logger = logging.getLogger(__name__)
class Migration(DataMigration):
def forwards(self, orm):
    """Bootstrap roles and software plans, then clean up stale data.

    Steps, in order:
    1. (re)create PRBAC roles and the default software plans;
    2. point every subscription at the latest active version of its plan;
    3. force the Standard-plan SMS feature rate to a 100/month limit;
    4. delete superseded plan versions, feature rates and product rates,
       re-pointing credit lines at the latest rates first so deletions
       are not blocked by protected foreign keys.
    """
    call_command('cchq_prbac_bootstrap')
    boostrap_handler = BootstrapSoftwarePlans(orm)
    boostrap_handler.bootstrap()

    # Reset Subscription plan_version to the latest version for that plan
    for subscription in orm.Subscription.objects.all():
        software_plan = subscription.plan_version.plan
        latest_version = software_plan.softwareplanversion_set.filter(
            is_active=True
        ).latest('date_created')
        if subscription.plan_version.pk != latest_version.pk:
            logger.info("%s reset to newest version."
                        % subscription.subscriber.domain)
            subscription.plan_version = latest_version
            subscription.save()

    # make sure that the default standard plan SMS FeatureRate
    # has the monthly_limit set to 100
    standard_plans = orm.DefaultProductPlan.objects.filter(
        edition=SoftwarePlanEdition.STANDARD)
    for std_plan in standard_plans:
        feature_rate = std_plan.plan.softwareplanversion_set.filter(
            is_active=True
        ).latest('date_created').feature_rates.filter(
            feature__feature_type=FeatureType.SMS
        )[0]
        if feature_rate.monthly_limit != 100:
            feature_rate.monthly_limit = 100
            feature_rate.save()

    # Drop every plan version except the latest active one; versions still
    # referenced elsewhere are protected and simply skipped.
    for plan in orm.SoftwarePlan.objects.all():
        default_version = plan.softwareplanversion_set.filter(
            is_active=True
        ).latest('date_created')
        for version in plan.softwareplanversion_set.all():
            if version.pk != default_version.pk:
                try:
                    version.delete()
                except models.ProtectedError:
                    logger.info("Skipped deleting SoftwarePlanVersion "
                                "with id %d for plan %s because it was "
                                "still being used."
                                % (version.pk, plan.name))

    # Re-point credit lines at the latest feature rate before deleting
    # orphaned feature rates below.
    for credit_line in orm.CreditLine.objects.filter(feature_rate__isnull=False).all():
        latest_rate = credit_line.feature_rate.feature.get_rate()
        if credit_line.feature_rate.pk != latest_rate.pk:
            credit_line.feature_rate = latest_rate
            credit_line.save()

    for feature_rate in orm.FeatureRate.objects.all():
        if feature_rate.softwareplanversion_set.count() == 0:
            try:
                feature_rate.delete()
            except models.ProtectedError:
                logger.info("Skipped deleting FeatureRate with id "
                            "%d because it was still being used."
                            % feature_rate.pk)

    # Same dance for product rates.
    for credit_line in orm.CreditLine.objects.filter(product_rate__isnull=False).all():
        latest_rate = credit_line.product_rate.product.get_rate()
        if credit_line.product_rate.pk != latest_rate.pk:
            credit_line.product_rate = latest_rate
            credit_line.save()

    for product_rate in orm.SoftwareProductRate.objects.all():
        if product_rate.softwareplanversion_set.count() == 0:
            try:
                product_rate.delete()
            except models.ProtectedError:
                logger.info("Skipped deleting ProductRate with id "
                            "%d because it was still being used."
                            % product_rate.pk)
def backwards(self, orm):
    # This is a one-way data migration: the forwards pass deletes stale
    # SoftwarePlanVersion/FeatureRate/SoftwareProductRate rows and rewrites
    # subscriptions, which cannot be reconstructed, so backwards is a no-op.
    pass
# Frozen ORM definitions generated by South.  The migration runner uses this
# snapshot to build the ``orm`` accessor referenced in forwards()/backwards(),
# so it reflects the schema at the time this migration was written.
# Auto-generated -- do not hand-edit field definitions here.
models = {
    u'accounting.billingaccount': {
        'Meta': {'object_name': 'BillingAccount'},
        'account_type': ('django.db.models.fields.CharField', [], {'default': "'CONTRACT'", 'max_length': '25'}),
        'billing_admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.BillingAccountAdmin']", 'null': 'True', 'symmetrical': 'False'}),
        'created_by': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
        'created_by_domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
        'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
        'date_confirmed_extra_charges': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_auto_invoiceable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
        'salesforce_account_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'})
    },
    u'accounting.billingaccountadmin': {
        'Meta': {'object_name': 'BillingAccountAdmin'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'web_user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'})
    },
    u'accounting.billingcontactinfo': {
        'Meta': {'object_name': 'BillingContactInfo'},
        'account': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounting.BillingAccount']", 'unique': 'True', 'primary_key': 'True'}),
        'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
        'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'emails': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        'first_line': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
        'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
        'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
        'second_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
        'state_province_region': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    u'accounting.billingrecord': {
        'Meta': {'object_name': 'BillingRecord'},
        'date_emailed': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'emailed_to': ('django.db.models.fields.CharField', [], {'max_length': '254', 'db_index': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
        'pdf_data_id': ('django.db.models.fields.CharField', [], {'max_length': '48'})
    },
    u'accounting.creditadjustment': {
        'Meta': {'object_name': 'CreditAdjustment'},
        'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
        'credit_line': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.CreditLine']"}),
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
        'line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.LineItem']", 'null': 'True'}),
        'note': ('django.db.models.fields.TextField', [], {}),
        'reason': ('django.db.models.fields.CharField', [], {'default': "'MANUAL'", 'max_length': '25'}),
        'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
    },
    u'accounting.creditline': {
        'Meta': {'object_name': 'CreditLine'},
        'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
        'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True', 'blank': 'True'}),
        'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']", 'null': 'True', 'blank': 'True'})
    },
    u'accounting.currency': {
        'Meta': {'object_name': 'Currency'},
        'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
        'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
        'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '20', 'decimal_places': '9'}),
        'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
    },
    u'accounting.defaultproductplan': {
        'Meta': {'object_name': 'DefaultProductPlan'},
        'edition': ('django.db.models.fields.CharField', [], {'default': "'Community'", 'max_length': '25'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
        'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
    },
    u'accounting.feature': {
        'Meta': {'object_name': 'Feature'},
        'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
    },
    u'accounting.featurerate': {
        'Meta': {'object_name': 'FeatureRate'},
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Feature']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
        'monthly_limit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'per_excess_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'})
    },
    u'accounting.invoice': {
        'Meta': {'object_name': 'Invoice'},
        'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'date_due': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
        'date_end': ('django.db.models.fields.DateField', [], {}),
        'date_paid': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'date_received': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'date_start': ('django.db.models.fields.DateField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
        'tax_rate': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'})
    },
    u'accounting.lineitem': {
        'Meta': {'object_name': 'LineItem'},
        'base_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
        'base_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
        'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True'}),
        'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
        'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
        'unit_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
    },
    u'accounting.softwareplan': {
        'Meta': {'object_name': 'SoftwarePlan'},
        'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'edition': ('django.db.models.fields.CharField', [], {'default': "'Enterprise'", 'max_length': '25'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'visibility': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '10'})
    },
    u'accounting.softwareplanversion': {
        'Meta': {'object_name': 'SoftwarePlanVersion'},
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'feature_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.FeatureRate']", 'symmetrical': 'False', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
        'product_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.SoftwareProductRate']", 'symmetrical': 'False', 'blank': 'True'}),
        'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_prbac.Role']"})
    },
    u'accounting.softwareproduct': {
        'Meta': {'object_name': 'SoftwareProduct'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
        'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
    },
    u'accounting.softwareproductrate': {
        'Meta': {'object_name': 'SoftwareProductRate'},
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
        'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProduct']"})
    },
    u'accounting.subscriber': {
        'Meta': {'object_name': 'Subscriber'},
        'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'organization': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'})
    },
    u'accounting.subscription': {
        'Meta': {'object_name': 'Subscription'},
        'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'date_start': ('django.db.models.fields.DateField', [], {}),
        'do_not_invoice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'plan_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlanVersion']"}),
        'salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
        'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscriber']"})
    },
    u'accounting.subscriptionadjustment': {
        'Meta': {'object_name': 'SubscriptionAdjustment'},
        'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
        'method': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '50'}),
        'new_date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'new_date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'new_date_start': ('django.db.models.fields.DateField', [], {}),
        'new_salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
        'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
        'reason': ('django.db.models.fields.CharField', [], {'default': "'CREATE'", 'max_length': '50'}),
        'related_subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptionadjustment_related'", 'null': 'True', 'to': u"orm['accounting.Subscription']"}),
        'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
        'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
    },
    u'django_prbac.role': {
        'Meta': {'object_name': 'Role'},
        'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
        'parameters': ('django_prbac.fields.StringSetField', [], {'default': '[]', 'blank': 'True'}),
        'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
    }
}

# South metadata: the app whose models are fully frozen above.
complete_apps = ['accounting']
class BootstrapSoftwarePlans(object):
    """
    This is a direct copy of the cchq_software_plan_bootstrap management command
    so that orm can be used to reference the objects.
    """

    def __init__(self, orm):
        # ``orm`` is the frozen South ORM for this migration; all model access
        # goes through it so the code matches the schema at migration time.
        self.orm = orm
        self.verbose = False
        self.for_tests = False

    def bootstrap(self):
        """Create the standard public plans for every product type/edition."""
        logger.info('Bootstrapping standard plans. Enterprise plans will have to be created via the admin UIs.')
        self.product_types = [p[0] for p in SoftwareProductType.CHOICES]
        self.editions = [
            SoftwarePlanEdition.COMMUNITY,
            SoftwarePlanEdition.STANDARD,
            SoftwarePlanEdition.PRO,
            SoftwarePlanEdition.ADVANCED,
            SoftwarePlanEdition.ENTERPRISE,
        ]
        self.feature_types = [f[0] for f in FeatureType.CHOICES]
        self.ensure_plans()

    def ensure_plans(self, dry_run=False):
        """Create (or reuse) a SoftwarePlan and a new SoftwarePlanVersion for
        every product type/edition pair, attach the matching role, product
        rates and feature rates, and register each plan as the default for
        its product/edition.
        """
        edition_to_features = self.ensure_features(dry_run=dry_run)
        for product_type in self.product_types:
            for edition in self.editions:
                role_slug = self.BOOTSTRAP_EDITION_TO_ROLE[edition]
                try:
                    role = self.orm['django_prbac.Role'].objects.get(slug=role_slug)
                except ObjectDoesNotExist:
                    # BUG FIX: the original call left the '%s' placeholder
                    # unfilled; supply the missing role slug.
                    logger.info("Could not find the role '%s'. Did you forget to run cchq_prbac_bootstrap?"
                                % role_slug)
                    logger.info("Aborting. You should figure this out.")
                    return
                software_plan_version = self.orm.SoftwarePlanVersion(role=role)

                product, product_rates = self.ensure_product_and_rate(product_type, edition, dry_run=dry_run)
                feature_rates = self.ensure_feature_rates(edition_to_features[edition], edition, dry_run=dry_run)
                software_plan = self.orm.SoftwarePlan(
                    name='%s Edition' % product.name, edition=edition, visibility=SoftwarePlanVisibility.PUBLIC
                )
                if dry_run:
                    logger.info("[DRY RUN] Creating Software Plan: %s" % software_plan.name)
                else:
                    try:
                        # Reuse an existing plan of the same name; we only add
                        # a new version to it.
                        software_plan = self.orm.SoftwarePlan.objects.get(name=software_plan.name)
                        if self.verbose:
                            logger.info("Plan '%s' already exists. Using existing plan to add version."
                                        % software_plan.name)
                    except self.orm.SoftwarePlan.DoesNotExist:
                        software_plan.save()
                        if self.verbose:
                            logger.info("Creating Software Plan: %s" % software_plan.name)

                    software_plan_version.plan = software_plan
                    software_plan_version.save()
                    # Rates must be saved before being attached to the
                    # version's many-to-many fields.
                    for product_rate in product_rates:
                        product_rate.save()
                        software_plan_version.product_rates.add(product_rate)
                    for feature_rate in feature_rates:
                        feature_rate.save()
                        software_plan_version.feature_rates.add(feature_rate)
                    software_plan_version.save()

                default_product_plan = self.orm.DefaultProductPlan(product_type=product.product_type, edition=edition)
                if dry_run:
                    logger.info("[DRY RUN] Setting plan as default for product '%s' and edition '%s'." %
                                (product.product_type, default_product_plan.edition))
                else:
                    try:
                        default_product_plan = self.orm.DefaultProductPlan.objects.get(
                            product_type=product.product_type, edition=edition
                        )
                        if self.verbose:
                            logger.info("Default for product '%s' and edition "
                                        "'%s' already exists." % (
                                            product.product_type, default_product_plan.edition
                                        ))
                    except ObjectDoesNotExist:
                        default_product_plan.plan = software_plan
                        default_product_plan.save()
                        if self.verbose:
                            logger.info("Setting plan as default for product '%s' and edition '%s'." %
                                        (product.product_type,
                                         default_product_plan.edition))

    def ensure_product_and_rate(self, product_type, edition, dry_run=False):
        """
        Ensures that all the necessary SoftwareProducts and SoftwareProductRates are created for the plan.
        """
        if self.verbose:
            logger.info('Ensuring Products and Product Rates')

        product = self.orm.SoftwareProduct(name='%s %s' % (product_type, edition), product_type=product_type)
        if edition == SoftwarePlanEdition.ENTERPRISE:
            product.name = "Dimagi Only %s" % product.name

        product_rates = []
        # Monthly product fees per edition (Community is free by default).
        BOOTSTRAP_PRODUCT_RATES = {
            SoftwarePlanEdition.COMMUNITY: [
                self.orm.SoftwareProductRate(),  # use all the defaults
            ],
            SoftwarePlanEdition.STANDARD: [
                self.orm.SoftwareProductRate(monthly_fee=Decimal('100.00')),
            ],
            SoftwarePlanEdition.PRO: [
                self.orm.SoftwareProductRate(monthly_fee=Decimal('500.00')),
            ],
            SoftwarePlanEdition.ADVANCED: [
                self.orm.SoftwareProductRate(monthly_fee=Decimal('1000.00')),
            ],
            SoftwarePlanEdition.ENTERPRISE: [
                self.orm.SoftwareProductRate(monthly_fee=Decimal('0.00')),
            ],
        }

        for product_rate in BOOTSTRAP_PRODUCT_RATES[edition]:
            if dry_run:
                logger.info("[DRY RUN] Creating Product: %s" % product)
                logger.info("[DRY RUN] Corresponding product rate of $%d created." % product_rate.monthly_fee)
            else:
                try:
                    # Reuse an existing product of the same name.
                    product = self.orm.SoftwareProduct.objects.get(name=product.name)
                    if self.verbose:
                        logger.info("Product '%s' already exists. Using "
                                    "existing product to add rate."
                                    % product.name)
                except self.orm.SoftwareProduct.DoesNotExist:
                    product.save()
                    if self.verbose:
                        logger.info("Creating Product: %s" % product)
                if self.verbose:
                    logger.info("Corresponding product rate of $%d created."
                                % product_rate.monthly_fee)
            product_rate.product = product
            product_rates.append(product_rate)
        return product, product_rates

    def ensure_features(self, dry_run=False):
        """
        Ensures that all the Features necessary for the plans are created.
        """
        if self.verbose:
            logger.info('Ensuring Features')

        edition_to_features = defaultdict(list)
        for edition in self.editions:
            for feature_type in self.feature_types:
                feature = self.orm.Feature(name='%s %s' % (feature_type, edition), feature_type=feature_type)
                if edition == SoftwarePlanEdition.ENTERPRISE:
                    feature.name = "Dimagi Only %s" % feature.name
                if dry_run:
                    logger.info("[DRY RUN] Creating Feature: %s" % feature)
                else:
                    try:
                        # Reuse an existing feature of the same name.
                        feature = self.orm.Feature.objects.get(name=feature.name)
                        if self.verbose:
                            logger.info("Feature '%s' already exists. Using "
                                        "existing feature to add rate."
                                        % feature.name)
                    except ObjectDoesNotExist:
                        feature.save()
                        if self.verbose:
                            logger.info("Creating Feature: %s" % feature)
                edition_to_features[edition].append(feature)
        return edition_to_features

    def ensure_feature_rates(self, features, edition, dry_run=False):
        """
        Ensures that all the FeatureRates necessary for the plans are created.
        """
        if self.verbose:
            logger.info('Ensuring Feature Rates')

        feature_rates = []
        # Monthly limits: tiny fixed numbers under test, real quotas
        # otherwise; -1 means unlimited (Enterprise).
        BOOTSTRAP_FEATURE_RATES = {
            SoftwarePlanEdition.COMMUNITY: {
                FeatureType.USER: self.orm.FeatureRate(monthly_limit=2 if self.for_tests else 50,
                                                       per_excess_fee=Decimal('1.00')),
                FeatureType.SMS: self.orm.FeatureRate(monthly_limit=0),  # use defaults here
            },
            SoftwarePlanEdition.STANDARD: {
                FeatureType.USER: self.orm.FeatureRate(monthly_limit=4 if self.for_tests else 100,
                                                       per_excess_fee=Decimal('1.00')),
                FeatureType.SMS: self.orm.FeatureRate(monthly_limit=3 if self.for_tests else 100),
            },
            SoftwarePlanEdition.PRO: {
                FeatureType.USER: self.orm.FeatureRate(monthly_limit=6 if self.for_tests else 500,
                                                       per_excess_fee=Decimal('1.00')),
                FeatureType.SMS: self.orm.FeatureRate(monthly_limit=5 if self.for_tests else 500),
            },
            SoftwarePlanEdition.ADVANCED: {
                FeatureType.USER: self.orm.FeatureRate(monthly_limit=8 if self.for_tests else 1000,
                                                       per_excess_fee=Decimal('1.00')),
                FeatureType.SMS: self.orm.FeatureRate(monthly_limit=7 if self.for_tests else 1000),
            },
            SoftwarePlanEdition.ENTERPRISE: {
                FeatureType.USER: self.orm.FeatureRate(monthly_limit=-1, per_excess_fee=Decimal('0.00')),
                FeatureType.SMS: self.orm.FeatureRate(monthly_limit=-1),
            },
        }
        for feature in features:
            feature_rate = BOOTSTRAP_FEATURE_RATES[edition][feature.feature_type]
            feature_rate.feature = feature
            if dry_run:
                logger.info("[DRY RUN] Creating rate for feature '%s': %s" % (feature.name, feature_rate))
            elif self.verbose:
                logger.info("Creating rate for feature '%s': %s" % (feature.name, feature_rate))
            feature_rates.append(feature_rate)
        return feature_rates

    # Role slug (created by cchq_prbac_bootstrap) backing each plan edition.
    BOOTSTRAP_EDITION_TO_ROLE = {
        SoftwarePlanEdition.COMMUNITY: 'community_plan_v0',
        SoftwarePlanEdition.STANDARD: 'standard_plan_v0',
        SoftwarePlanEdition.PRO: 'pro_plan_v0',
        SoftwarePlanEdition.ADVANCED: 'advanced_plan_v0',
        SoftwarePlanEdition.ENTERPRISE: 'enterprise_plan_v0',
    }
| bsd-3-clause |
0Chencc/CTFCrackTools | Lib/Lib/copy_reg.py | 442 | 6800 | """Helper to provide extensibility for pickle/cPickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
from types import ClassType as _ClassType
# Public API of this module.
__all__ = ["pickle", "constructor",
           "add_extension", "remove_extension", "clear_extension_cache"]

# Maps a type to the reduction function registered for it via pickle();
# pickle/cPickle consult this table when reducing instances of that type.
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
    """Register *pickle_function* as the reduction routine for *ob_type*.

    Intended only for extension types; classic classes are rejected.
    """
    if type(ob_type) is _ClassType:
        raise TypeError("copy_reg is not intended for use with classes")
    if not hasattr(pickle_function, '__call__'):
        raise TypeError("reduction functions must be callable")
    dispatch_table[ob_type] = pickle_function

    # The constructor_ob argument survives from the old "safe for
    # unpickling" protocol; a supplied value is still validated, but it is
    # no longer recorded anywhere.
    if constructor_ob is not None:
        constructor(constructor_ob)
def constructor(object):
    """Validate that *object* is callable (legacy safe-for-unpickling hook)."""
    if hasattr(object, '__call__'):
        return
    raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
    complex
except NameError:
    # complex support was compiled out of this interpreter; nothing to do.
    pass
else:
    def pickle_complex(c):
        # A complex number is rebuilt by calling complex(real, imag).
        return complex, (c.real, c.imag)

    pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
# Py_TPFLAGS_HEAPTYPE: flag bit set on heap-allocated (Python-defined) types.
_HEAPTYPE = 1<<9

# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
    assert proto < 2
    # Find the most-derived C-implemented (non-heap) base; its copy
    # constructor is what can safely rebuild the base portion of the object.
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object # not really reachable
    if base is object:
        state = None
    else:
        if base is self.__class__:
            # The object is itself a C extension type; protocols 0/1 cannot
            # express that.
            raise TypeError, "can't pickle %s objects" % base.__name__
        # Copy-construct the base part, e.g. int(self) for an int subclass.
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            # NOTE: local name shadows the builtin ``dict``; kept for
            # fidelity with the original code.
            dict = self.__dict__
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    # Only include the instance dict in the reduce tuple when non-empty.
    if dict:
        return _reconstructor, args, dict
    else:
        return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, basestring):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
# A registry of extension codes.  This is an ad-hoc compression
# mechanism.  Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it.  Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context.  (There are also some codes reserved for local use that
# don't have this restriction.)  Codes are positive ints; 0 is
# reserved.
_extension_registry = {}                # key -> code
_inverted_registry = {}                 # code -> key
_extension_cache = {}                   # code -> object
# Don't ever rebind those names:  cPickle grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
    """Register an extension code.

    Maps (module, name) <-> code in both registry directions.  Raises
    ValueError if the code is out of range or either side is already
    taken by a different registration.
    """
    code = int(code)
    # Codes must be positive and fit in a signed 32-bit int; 0 is reserved.
    if not 1 <= code <= 0x7fffffff:
        raise ValueError, "code out of range"
    key = (module, name)
    if (_extension_registry.get(key) == code and
        _inverted_registry.get(code) == key):
        return # Redundant registrations are benign
    if key in _extension_registry:
        raise ValueError("key %s is already registered with code %s" %
                         (key, _extension_registry[key]))
    if code in _inverted_registry:
        raise ValueError("code %s is already in use for key %s" %
                         (code, _inverted_registry[code]))
    # Record the mapping in both directions.
    _extension_registry[key] = code
    _inverted_registry[code] = key
def remove_extension(module, name, code):
    """Unregister an extension code.  For testing only."""
    key = (module, name)
    # Refuse unless both directions of the mapping agree with the caller.
    registered = _extension_registry.get(key) == code
    inverted = _inverted_registry.get(code) == key
    if not (registered and inverted):
        raise ValueError("key %s is not registered with code %s" %
                         (key, code))
    _extension_registry.pop(key)
    _inverted_registry.pop(code)
    # Drop any cached object for this code as well.
    _extension_cache.pop(code, None)
def clear_extension_cache():
    """Drop all cached extension objects (registrations stay intact)."""
    _extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
| gpl-3.0 |
jelmer/samba | lib/ldb/_ldb_text.py | 12 | 3564 | # Text wrapper for ldb bindings
#
# Copyright (C) 2015 Petr Viktorin <pviktori@redhat.com>
# Published under the GNU LGPLv3 or later
import sys
import functools
import ldb
def _recursive_encode(obj):
if isinstance(obj, bytes):
return obj
elif isinstance(obj, str):
return obj.encode('utf-8')
else:
return [_recursive_encode(o) for o in obj]
class _WrapBase(object):
@classmethod
def _wrap(cls, wrapped):
self = cls.__new__(cls)
self._wrapped = wrapped
return self
def __len__(self):
return len(self._wrapped)
def __eq__(self, other):
if hasattr(other, '_wrapped'):
return self._wrapped == other._wrapped
else:
return self._wrapped == other
def __ne__(self, other):
if hasattr(other, '_wrapped'):
return self._wrapped != other._wrapped
else:
return self._wrapped != other
def __lt__(self, other):
if hasattr(other, '_wrapped'):
return self._wrapped < other._wrapped
else:
return self._wrapped < other
def __le__(self, other):
if hasattr(other, '_wrapped'):
return self._wrapped >= other._wrapped
else:
return self._wrapped >= other
def __gt__(self, other):
if hasattr(other, '_wrapped'):
return self._wrapped > other._wrapped
else:
return self._wrapped > other
def __ge__(self, other):
if hasattr(other, '_wrapped'):
return self._wrapped >= other._wrapped
else:
return self._wrapped >= other
def __repr__(self):
return '%s.text' % repr(self._wrapped)
class MessageElementTextWrapper(_WrapBase):
    """Text interface for a LDB message element"""

    def __iter__(self):
        # Decode each raw value lazily as it is consumed.
        return (value.decode('utf-8') for value in self._wrapped)

    def __getitem__(self, key):
        raw = self._wrapped[key]
        return None if raw is None else raw.decode('utf-8')

    @property
    def flags(self):
        return self._wrapped.flags

    @property
    def set_flags(self):
        return self._wrapped.set_flags


# Short alias used by the message-level wrapper.
_wrap_element = MessageElementTextWrapper._wrap
class MessageTextWrapper(_WrapBase):
    """Text interface for a LDB message"""

    def __getitem__(self, key):
        element = self._wrapped[key]
        return None if element is None else _wrap_element(element)

    def get(self, *args, **kwargs):
        found = self._wrapped.get(*args, **kwargs)
        if isinstance(found, ldb.MessageElement):
            return _wrap_element(found)
        if isinstance(found, bytes):
            return found.decode('utf-8')
        return found

    def __setitem__(self, key, item):
        # Values are stored at the bytes level; encode text on the way in.
        self._wrapped[key] = _recursive_encode(item)

    def __delitem__(self, key):
        del self._wrapped[key]

    def elements(self):
        return [_wrap_element(element) for element in self._wrapped.elements()]

    def items(self):
        return [(attr, _wrap_element(element))
                for attr, element in self._wrapped.items()]

    @property
    def keys(self):
        return self._wrapped.keys

    @property
    def remove(self):
        return self._wrapped.remove

    @property
    def add(self):
        return self._wrapped.add

    @property
    def dn(self):
        return self._wrapped.dn

    @dn.setter
    def dn(self, new_value):
        self._wrapped.dn = new_value
| gpl-3.0 |
wileeam/airflow | airflow/sensors/time_delta_sensor.py | 5 | 1818 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class TimeDeltaSensor(BaseSensorOperator):
    """
    Waits for a timedelta after the task's execution_date + schedule_interval.

    In Airflow, the daily task stamped with ``execution_date`` 2016-01-01 can
    only start running on 2016-01-02, so the delta is counted from the moment
    the execution period has closed.

    :param delta: time length to wait after execution_date before succeeding
    :type delta: datetime.timedelta
    """
    @apply_defaults
    def __init__(self, delta, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.delta = delta

    def poke(self, context):
        # The wait is anchored on the end of this run's schedule interval,
        # i.e. the schedule tick that follows execution_date.
        next_tick = context['dag'].following_schedule(context['execution_date'])
        deadline = next_tick + self.delta
        self.log.info('Checking if the time (%s) has come', deadline)
        return timezone.utcnow() > deadline
| apache-2.0 |
xiaonanln/myleetcode-python | src/639. Decode Ways II.py | 1 | 101034 | MOD = 10**9 + 7
class Solution(object):
    def numDecodings(self, s):
        """Count decodings of *s* where '*' stands for any digit 1-9.

        Standard "Decode Ways" DP extended with the wildcard '*'
        (LeetCode 639): ``dp[n]`` is the number of decodings of the first
        ``n`` characters, each step adding the ways the last one-character
        and last two-character tokens can encode a letter. The answer is
        returned modulo 10**9 + 7.

        Fixes over the original: the memo's bare ``except:`` (which
        swallowed every exception, not just the cache miss) is gone — the
        per-token counters are plain pure functions now; the Python-2-only
        ``xrange`` is replaced by ``range`` (works on both 2 and 3); the
        modulus is a local constant so the method is self-contained.

        :type s: str
        :rtype: int
        """
        MOD = 10 ** 9 + 7
        if not s:
            return 0

        def ways_one(c):
            # Number of letters a one-character token can encode.
            if c == '*':
                return 9                      # any of '1'..'9'
            return 0 if c == '0' else 1

        def ways_two(a, b):
            # Number of letters the two-character token a+b can encode.
            if a == '*':
                if b == '*':
                    return 15                 # 11-19 and 21-26
                return 2 if b <= '6' else 1   # '1b' always, '2b' if b<=6
            if a == '1':
                return 9 if b == '*' else 1   # 10-19 all valid
            if a == '2':
                if b == '*':
                    return 6                  # 21-26
                return 1 if b <= '6' else 0   # 20-26 valid, 27-29 not
            return 0                          # leading '0' or >= 30

        L = len(s)
        dp = [0] * (L + 1)
        dp[0] = 1                             # empty prefix: one decoding
        dp[1] = ways_one(s[0])
        for n in range(2, L + 1):
            dp[n] = (ways_one(s[n - 1]) * dp[n - 1]
                     + ways_two(s[n - 2], s[n - 1]) * dp[n - 2]) % MOD
        return dp[L]
import cProfile
cProfile.run("""
print Solution().numDecodings("484142516*2542*469835*81*295773426439***48*8*448732*9178285365814*15833631756489137376*267***6193136*85975*98579483736*1917641533589139747*74**8571629632193243*6778798497959279*3398936**748*71*35621744516542436453121247848245*712495*4587113335454377894922919732653511**595981729922*9455881255*9661593685999*3*9752959983*7993711273987719115367*28161*74822136223836*2881137636873629*66455529133138353165223871644875232265867987*8*92218*295724355315343951824243148333317*63566838114*5851757656*98468*4*8812**534648476697775*8115326715956*765949*513768*16489*855*53657*28251534525*3*988**8769372573932511467264*81561277414611168*794127614547845857864542794491*4344215495145965*539654597765**94679*41443319*235187798642519*4788*5947225*64**589**17986269926*3187945454665515388665*6*76458891411765895993741*912715771**4*367*24714*28187627858614*673476156453417997674562258*96789*6*855*2538497167928116*457*93997244778197946742527**921246411164*86*89928155845284816**9518598831849*412823*3752744*4847122175518364864*9882272*4571785373*947762368491*613*59738494131721313*298*6864396725842855161553*39719*889*21738**2556*67*643542332*854776*671553*91135766*2*58112344862292127349842*651824754842254**44*5199365*2838*8344481836741939651976317774367685991812732398826*36346364274449*7598135*9311*9662348*761238359361593145165*7*3*39345688428793487722*8*338864998571513271626*71*493243*16*2813957*71549*8426*19922*76756729*85886998851*2474795885*9792*8*9986562434961977337226524*3621934923593348626*8773212*538512897*4779*8612461132*641745384269866622*427823*98937249417376*657**62662265359*62159473411321*27*299*961157557*482918**398839*638*55*4918846422655*726279221*5*6595**37997*6*893599393433*4916*71*14*544273416184*5176*7544823995*7591*36578254593438*1937*1958332542**83762714*17333174156*636318262976125856442941123*79**2*57*551689837556941729883192641*26135166237174*69573424244329*1*44448*429872383*6531*16843912394**71715716973**275*64845*276523*388123*277799*6**6*42474
5939881446555349**46653595*23*44292*87*4711212832493377766*1369519545857752996922225843345177762463392428968822*6989925442573764466474238748*6154*727838177919816*292653*9492417661483558196*42542**47*268825*1*2373*764351253562949191789233887179*93796296262876897992696543529*4669762363689699*14158378*9172691121954*5262776713*1982*7*5792843*19723796**18651*4358375314*74437882235466879761548674469*6338477618*4165815898*26556117**36423688*279869373778*6878715653**9954923516889526*15731554829972991313533465913*87*67888463351897217834296228*889626276113557*2**6*18524584913664773153*37295949*71*21459***42893363387818589685879**72*8558959939611859*668951593559*13165642367591815999*18*618541773592418*726369*37172612*7289*23**336*919585*484541817254**472889967539329712996*217458955*81577*34992**6292518*748*28619321363711481571593927*67859131467846651339757645*31*386487*74*6389922499913631791645379828748*625397*53918598*9*93354873865453164989*281*121*9775*93719343761698232949324*1687123958983***59133737551772795*8833975413*286981*78382562581488*2434341**2886*2862815378169746952***3*43343299338464333*397353*86**95839956493147189462415397*2617842947*89414991145793*745*429418614514837878279648669825112492*6221893*3544*761796942*444348895155*44*61*8*5955827*9*856*22975854426632757*7135456725313614446432584923476*466**84521236*541158714634*111558974*986448352*524994332871186177*341357496*91536*48*14492464*9*712441611317956468718193*6681*98645652*51875*974417827271573211412699492298*889255573167248*788461868*911575513364261248578*7*12*6694776419*268*444488494259*359628257**7679*73*381*86572255*88947*5241*7197*7618752619*6231*1*183695674864315652174*84757821564*4358362376635*4352448564377*5837766922622391597551*67*7*452656525*2721792313166432841566814147488*9*71623127956*17332862*4496881843495**47835*228282716672911274*2437641746*39281429456*513372931793*91*6547961647877543358558681*49336646*46189*99*25365441*2697**99187576399142424**39762856641*5*92861*288869139446*8298928**32*771*86
382356934*7769*52*135318*55*9772978183*14*158*15*282395*51*196625452224181837864*64654593**447973*57*48834697331441247113*2792*56*11284428318*92568916*88719629462819**2*68855339362476**25532948665*329136426969234491832962451*7569937575*8*383516334*4858347772524646*9736123169878627797*55629931758353*7817**54791363864*131144218917*664654*9943822*439218283571142672*53**966256983779*24*13521565671**46*41571332*68126248634592449343*496*459*31*5166*37*345*51119164763*89884479926186471979167473894281*7324324234421872693862*46581792335583311284*6842*243*93438552116448694**87*342*74546545893182686*24499*5514959766397569417*1575843**968*999924951186825475*4937*75982*54351746241292215*95**2158*2513*7*96845185411*3121943727884371*1*627585312913386354328859159887174535*849334762946753137*788284*3733*29**7577246875*974249*84889815535893474673*33222999353*457959265898*55*9457*3358186376*7741216988559559723*323511629956517437191*77192453669745*161196474*79538*74*925312793398415172156859482494239646847**5334662899794179*7*3833414397467*57*361852616*98165*253419585787653178*3326*619491*7**24743**7**35838352947583378822437861649733**32793264286**5*322577*6837**657472331549545914188858634*4265589657414296361696969219134917**5934211466*11566589238634*9421717715319*695897*917258129649513*3459*3332455*6741157*28799625777365*9*7338551*68728259825*4264891641**789629683249565792614969584772497846535899154183*58532875938977352*53785136696*8*891*21111466713639256756563187*5512917163*6332548873698669*68*76158946*3557*574355*611864948*6582*9771*426*849213*2**88858*43*41582434712*23713747223493426996985747449*952814797423875992794581114874555849167*474581282*342466988752461759*276475447662285578772152338*8369731956121388*856655*815*481452635518938*844*761*2758*163646*8**95621*36999125818718938291194323669513*424*4729992782**341164223983652*4386311*8687641238151*67*43428389912167825927638*677*463864295744*63925*43892135*92833211755*958117*217246375*2685336485811319*18*198724142738381357294*6719541
23472275842233873379828156689999672861*817683413126726935237563169*58379217*51479*2161195175846*8181**79626747727*81653*85765*33529*21997942*5*5*896169697996927431633*571941992228657*9*4749716*359*65**786943355144*5*593291812984812788475995*36634476459139149597295*278434642962664984*41867*51861*138853*5948537995325952851*29699*18*456*4*7339853118686**5848154996145*235185*68557333953**18388523922677351789363985544*2172615114297917788687*8348728786*287*796*522524761288136992*2395*8*6*3258*43551*28155*56*4458481197286158557*18*4554*61*925213648918344343621279869622**4515575363*75923*276*3678419468647*113369798528455191736968622389552564496729525177581971285668*16487895376*79556*771367548458668927984372682887914126*314725666867684*591629**2837*756583454385723485122334266*86951263213838832818575317411157255388786519*85789847734315*6975*762*59222915527162*24499773*544*48*6319961553127543841463326336*7476676114364399411*99371675754114449*6*254588853*821427396782581697*72572659617634*7552917424*243*512899345557381862482146135483*59182259681*2895794867173723*42114336*13362*888*9282852191534515983527*182*9228647167832739225*4*586461193173376381173178979988462544716*8956***62843*9855*886347146674*8878*3671*8172692375*8223545*7*25631828513*7584117*61**161841387315452783*8*6*6376283966911565*333*34*626253612562417**33394394*2289*781*724513873*95295123*869865*862*2979769*69221234986552*9*159574885468517*34718812*6618899382838*5876***5268637*1*5*644*9392856492**327471283272676794349*234*99493*19775161814279*378447892*83684*5338322851318*99*63899**5*34*479148225768364833*75641**919*63339498196538552498*446*91778577*323535*122*89372**74866273351241898*7283953586251478774582*15871459742973**918997417151212563249824894466123191823783*89925**22941982293656*863773859889*163683847859*96752298*2256954783673*922141334349*468391**36823857*232563*68458937888655267521344426199145442**64322*5689237672888335982**86532873932285296612578936839258698**3622224833313378639959742241377291*474*3772645
3*45*159394156711869772238248*61*2575817*798*362186785249395588*51618619441*37986382676*61*6287*447*72412826544971414469842**47558567783936652*2697247637668418*7141198791*2*7311418426*5532641142*55491936**65472472316*746*1581785886271932478537658598*2824328253**64268739767886567342442371811426**38946385274*21817*66892*484836185127915913481*6461844959328143889193577936591112*44912599221497761871244*67*476826*4174949842594797674216947654332872953424539333**9656215*6229429636935146732*79*4473388266*87*78*614*1166116477937222253236259589681158*9989445156418315558*4824768567*2225345288629417116677416*9326894795866317*831146*8271*2328259*62346*5187141987392951638656157353161615343732*659681*852*8411*29273319786283525357*26497286456*65789*685375141*84228453**58778*47879*5**874714979757*269539*48112619*528*7241978232535342*22594955*332422631*3849587827745*342*2135585975264488*2618*23571525351422693435367826*5389673*3*371*47138*68427187368893852*549433519119*767243457118181766136**325974639*9624*513943432225254165266515879282168**381158583**53*6446233416888892**39897962342*616*316113979312*11115976527522555*56528*83*4993*8*7891256297361966*8*36*18692688648442529312**792*36*332222774468*7*725145*314*2486842332427449269321*86473488142*348**713*313783954278191648**257192595673**8313197485*5*9896*6581*6519279627559133677883291919636984*61934282448647*2526*87411317433*335819553189947*79862*95*97542689*5513486**859*5*8*94998916*48286*511897*3116871*23612663191716786421*8252657565591628929316554737*6259384718233684435264766954*324986741797859*239*26237126413*2188346*838511425237234194246*3*74423197664797432435249813339518663382378116**963*9568*7615363*7692192257*7625114327142784155897*339212879956412581847*23639318688*519426*8*15173654263*42*1*311423148566669*86198918*48492113611524826*36288322497*45*6242842914739432**17783383*9336*14947781893*4**131834733135491561813735754655562322*7369691983*8469*817271*711647*3194848226*961*86989*3671459***63312632137*77*958*4**82214*9254753349*
*22*376*589242966855*7725295346356*263114123476*42941521154214717*681728331*89896157613193168*347272375*18*89391824758211275*27*858*574937546889669355*699718*342164466371523*2*736*58875*37957742*213929*2579619*826244*3951*5672428*9291878*345122347*558582**7723963175*5659934797*88**49653*589241886141827*14*7445461998247193377645153296486851*23755211269724971*58748*953*7263174693976842593*4562*87*3245866447929225*8*455936*896235616793225356897855*96848824336*466525832*3691847184*692182734179151*7289*742668796584262*38795548575*6627648648342973571955255779*99*51355843146434137965*9394*633629*436563697*8*1*85*113963183118416468*629439166224375444363*513929977*3*5399*5776714398928*15486*7*4*8647324788754968**5641581***4536*38**77*8569821*575*9743317632655362249438286117*319934144346568377431635793438526632486374*9131858*4753724474*2459565514*181266*68*559*96474564276111699643469*744314329*2234791252257*6384*5296851*16*972664411261752*37893761967243259845*73*225136888763*25359578*957497111626776489*52936762274*7779899**29*819484458823345284*811343253*68868*69938*6*242442459152549637119*4562186344867794*38617961626713432595861925*464*52123967849154211323968143886842215362549*81677731*594644*57*84474141857343339442563558542223485*28377384427187854749*53*75531692615*53611668999846612636822132178222454929969478539168*65*7*256174325422952*46823814991765792*51795627*4535*8959*62864549646548*65*3664269119562759431678243922512471*95813835385*19*499*71464*7149867959611153*53774931334*33**6962349*4*87878597899624*76227914*131647963777*6347383383973963627455136*7254*745849464662646*7*8313886*958119584175799956544842588*63*21517737494564866595568795968*5195*9385*4619228*4348155632*31149*66348995487*4716523*221796937*38643**9639569391696283*97359*255264392276872272*42713864725*44314*71*5263477563*61222463385554355*858777*896142657*41458175546*77*1217421674542*7296467317888*433342745135223*766113*88536112266783743*5*646353*6421336*758539618732857773341395865177*596951*219968*7*496885**
74877992*3**932674*8123881*891464453*46475343844985729922**376782517*653261*271425441486*25171*87162*9689617972562593*668*6*755158382*681*26917636723434682516*418621553981815*512616*361256679495926624451*28186187747513*82874949617**343214*96776775799374*7727773968624*34812*86945*78581126*38538989511515**54228676934127*6451442*73545*9725218*45865223662881162946**839*189*74753439542591284339341212*766159*697852*5221936745226925189239417522387344979485486**71293**5133279*8*293*83668116771543945*44237374424467793662194**24*361816646*122*3683941291716139718149187351*898888*8293*6911213784*29531949*9732319751466729938535554148319452126167138737393*8*955*63633782228297583686493*198557*7535526248824831683218171888654614382122665842*7*4133954984*28976*88986526*79357*47365934957457*4*6862538254842538558*97472944726539934*844712*2277217*889857948583122*776958*62813726682*812***48731744*638443691364143865824862244335458314261157547511**778434495271637449679248*4673838*1*6**33*811516716974554*2441997547*634472853*17175317*9685476814*94576491*89346*676413441551*766269523732*2339947*87*8219245746675*5267541**948947*726482354599217122855729288**626528*4*8627*86856296*261*595329*13*53625355759**9*219135*718*725*3635598514*2947*78431238752879*246354*8*5153*4**9112817123242555433194686675219*78682554837435445749999578713*871759*22465171494182114874919*2*986*8*84199967*3794*27141244394879215418951*3793257463492*23934181674*8*21359362139683712792741*784227**14*699892291865599*61*7798293131*56433276683356498*37622*29575589321464345*856721932358*678347*23**258258751*843563*1*71317621174714273**9*47282568986**736354956889388*51593646633548599****3*9**811871149438*98*49289638693*2567359255322193141*72735541828872*89191589187*2683954727188644116632*2491388*63183*63*8156926861*239528617752978657773382818*333*42556335914*272*4772228166278171*794*3486472653576837*76283139*4634554*49311229623*884653*8674194*9*69558*5472894774*2213689438*4447688*668743*6965**979645*472414377*672**25*2754977*15*63
52182871996*113827834715789619211*346598*59624**4*8767947*1136*47*82551*1493197**552991699683*64681858476514*35159*998452899559*551866776171918743144*95723*51*76512636229*42727716934*2397*62*96*34587112585*385463896448824322156118541144621*2*7591921575285938859*1*1554233469224876875648*2*885194847*64927339532*2*68253279276363191746942535*38387115*6243757588333*635*939311327*9166656574933*7976636935447966*79529696*46*28287*417533*43*65726329913354791*56449526721448561637989478*21*15*41*572841861*22*422748799714*1*12161187994367878395192646439*254*697*2265143542418*915684688969**7874423222288*997*761275644594842*427542997*4296*542342221683***325864676679*6647322*699*711777139713583777771*699*1888179*28143997367347418*753432**77665*322999621387*4465529*411225363*54*3194969895229238*4621447*325485512*42669191483468868947*8275629842722182983139833613*897618*5*695*65262479811969889794582*649322995*29*2933*4443357*45*248*4753876814675*237579675617821*28634672568248*931796**548**823266877563144958114299**313477672*3*4176844717833581*372923*981746355878*78*46626562395751438617525*232329*269155257431*51753166*37826431*31*6*2*56239657621937385373725*69784764755*379***97111**42*87561*1199814*2*7**31597*74137*181763195977*884553**84182895258864553776*156*3884873995*2493312*699772179787875138564*7955715595263625959627512*774662588275*64518762771829843642196*4273671196134956387468757*68*5916*71826174738613*348196781976515628843942*5332466959622436*3228*353437391142265947738775378*54294*86*3*97693886383121212125819*263*7*9391462596*2341*4741698**725712673663674993457*56*5**946542761646454664396*9134387765951*178833879737896*36394*85*341345515918244837*54131154495692341*3218*1885236625118395939124746*7775484*739342729429*38976722655446141144519134796*5739**235675688154817626*869111**5**97577331183995712354297579823211949713247594942584*851727119167885532244445514167196171685621*94861844743661*34246128588899214368136687515565*713755192797124*87*57832**457389*27858194*8741375**61328*7
1552968754*57418728775633821938767186364223*633997225329661574153523712876**549*3737258469828695731142957*38332232125*536954923754*99468159238*7364*8882*43439437531*4525794182969826*564852789861*88471216646392199916*9199218241*7966572*116731*97*58*433266561***116*2*6899768753*347582252878*19*7*17267*63659743*932*77956*9549*15694*2812691241956797**778*3*9*28757278*2764595*87524424344*8272246*92766643392273**2185*814319978758431226697752557263*593423*15642674771641224169376832852478**88*352629371*852988265847663314399944722626*4434*733*69461789689629765429195*4356558*45*9439*2315733625954**24384966196263697*3*31164835295748278639238*995785794756884*564519881751232124*7731837678977*4797822864226566212228*5*266154**15422216387622491455261*87*161785751672248**576454699718958*54425*74168214476389933527*26627114*8289184653*2343*873*3121791945325795151***99484562413323691722593795543*67*65111*7*7867315165457*453163872215375796216347977341563437292*47467279598369788*64456186578*41534317*3478234672961*18791193268*8313554**431748*88348974*19433544*5**73469*89799*67824556258111172413213167772**262733926246*482355355673871381*49*9864*48274882244*469374899415454365573168391162134564*21*6*83998185959151*164154119*1**9227218828*337951923411*4458**6*731*365962183*36242698233557*34*936*7341134729436*882*72926*6541**5**2256642333882926214334484711225115344711567312411273312176115576229*32313*7782762837*54*4134*9841486565*861675745996143*77*3*9*94714*5964*891348131166*75*549469185*58844665229451544258731324742759777266*282497236279558341361363*88584738957436123*7368*25759244338814592*74716*825277*9859*91*37**794535659*75262781121439**592929265*689876737291559*98781283312844*8357*889*747***4389**1**7257235197642786258779*613814*173431572*3*57*24525696*485328819936*63439**6928923576899**37*51238526858*8*63*6*41984327775884735*4252738855*46293695271248191749326181238865256814937184426522661224625736*51485713185718267939973272374169*9992355487779149787137278842631511175*3943179269744731**5
2454997874222313623798189*68*7344634*8*61152*6772125386213589238836343*477197922296337216563848328581*9312*56834*792*8939462337748258113974*4*365781324*5469946*583*1918314474843587969114246274628811246311*586163*162194*193295*92****33*2684491817446*51998*4742286**2525184321*53383528222355*69317268196897766158691*398933968362836676754**5829154989655445461263785348317654899*81*532998862958*1*5572523127648*7964427858754*17834984965139143599658967224637158296*577*419413766616*6665653744*9388576*3719272*6759*5*8*9857129136214*6388111156671*9272841477679993323527*664189499941572813283921685764461647974797818576*24364231735747**25914487*7488793452581*769*7865*28498936*714728339*18351*6361256721196**8741*159658863671444344331*761256247737194*34922159674425335118228*4511*1863333**83*7842*894829157639654467481574574853961**76459299865*7*62947467*689535378*632114399458*38734519491858422634494613628854983535*1837778*96231*75942854987576362323932821664*278*62277**881896752559768773522394*4*2179581795332114*28892*9246733323632275539313*87955913756789233571175488673145*1758715349*678376242*41387873435*1*366*32471135989423518235115751*53684582841164963*9299767221793483255983686764*343618476974373*953521*9*56887329669934627133**85842*6113624*9441*93376*456758839974*1*7248673*8837442773498*55589532474912752432133151*463486362925264813369898341*2553377977322129773774*1881473256485385266912231353828388376781224933183*778591*1*45229475894534973569245*52669738458233573338824644*966531249846293*68*91523112*6198393421*63881543*54988*784857*352579*3515487962264794726*28199664*88546796646*161261*8633911*75497995*1127513*811426522655291*63522155451221*59821547168123*524513494561798142558*341*4*551782945889*6554624*22*2271**1164*1633352387663286*573*4*46613*1*536745*7952*4436**34173651413*41*477*41858952499493193685444314233*99*92257662672669487971654761575374877*156*884535*662695434832*247314119441*1798513965453434*16572254642827475*9348*58141648174725347576988782521395487582*477198686651*571
22*77296894116134697*2282281*842938644348377*6696*54217813127*86882517678*34385*25671734453*321887396929712295231*6*2341**764538927*9*8483691965*5112412653*5*594171555*4378841634726392743246157335744516512661578938992914*9251186724825*4*184668341**3*6*9581924951*748175*821*9*2716911769758783649*5165572*99*17*94*8*15922467496156514798*273938*3826*8171*68284467**2554*44*351*5115968319*7*517833**46462785*245241516943*432587668284832*83443*67153*2365638745373*733*2856582977884446434671*614423578875773655179773*2*28137*195*3*3*23155*4*96572*4*8*595*949215734922*2155471*439727348*549548315*66294*7754444*1***59882242887115159129*84791969*954781*691332*45*8*66215496*53641*85467673198494288194191*8*22489*412*197215858982*98718*4133*7143815498833451457274236829848397*5*5328157*4145436566*555411952427853242419454647377526236446932574222933*6731612131964393631793332385325174*44896339389863186*2612*3*2*316556128924386928*3412*678725127774*161966*925818381575764*277*9367737125*382586792*959983995554555522853*81651276663*1322185646*37424494923787*782372*3*42971675873172**15558*536*3*77758612299189729315718242267*6536844647759828625358**86597654418*597927*6944255925*453134448*167297478*43877474677577*14233445529169141*33214153488623432886844*74115827153175327*8*578*457686*4821*3649765273582679*7568291482344258728931355*2699852336*552549*985149567193*58135*63833*727547*6*682*2*88335955349339429247494739*2948968*41*7468283555955*533816*7588*96*94124375871922528675514135849616685198471566251679*753894879*92*4328194*27*553*4879338947739833894129264*6182*1*13*3757366917*1336272*1957354247465587791117382631667755338567318147*3271763333*79*6975676266*4*1123449325859863*917253515*28371255543541*1646132449345515894126721266745963*56681**9277879769487319373822287349*1479739189315278758316*31623643*5649987587*4651295158878223641239*143859578415293*983*22168695827645*4746671935422392121738371121*134828**61616784853*643549346247311*699584742*6875735*1**95651*4*759893224977**882937582157314*2352
1278794138872148*851421571*6*5*36438158294374**86789*7894575358797*6763*8216326417*924*4477431366*77294173992462855964933579631634*93*487**419*14*9193*976*6265423933575526154754917*45455248596751211*4*7571488771653144336896*891436297*769*5*4344*6**89*668582412185148365737355*656*423856485996339421197472272984281149752237*85838*628878755**384153516428*114724*68697*2172573168*93495827*7*224*66443837383988616313*9569*533*91673*8294*5**5835452857915*16337*36*84854815876*81496911183*5375*215*59257885*3515533*79986*39*763397758216133312688469936477267669636283924738369243519191521979863*853949*57*615851*55712*447936**6*2716*54*44119246565*9*87722**5987987776318173818553422398*67776871126*9279*534*1*463913623734*6*85312439817952425583427125167*474525*494163314966*64484988*34239545*28435828944538*31166**1597458369589*1*67*436983272*7571*193141636661*8752*62718353674126986422*3*733256296478*191775863647**543434434846*16692883*769362774556162228262*4954*7*916158945266872954662957159763*87667363927156235714755796192871521368821132*614997635*8195351177369222794558*128144757819*1573164457738363*119133296**3*9227828727722528683692873152854653525459156537688623*621*71149*669895213857*523456*7813*7522133339766*774*6521*52463772735654997122*943965*6163668259118691415416598753*4*766371559156949*927***653449*87*17*26129382876343247273379683*72*4219121374596*5*43921897722512673227394321819*3719*799318941*82973537697861*759415943475254**867288343483936561381274532233217583587*94936228716659*5215*8315691*8795125*81*36657175722289931226362*728*1862223*174**172*4665837*44679437492852222687681*2758923152*58377*49246*55*79363*74266*192563113169*458237588763529568*5*679387829325*62858*559*57656673*8568*1864*829614*76848*72294546*7859423*8*9124*86716566439147*77*36*843373976172571975857785412*29293949175599*6657252791872521516314*131838*41213484888*34118*33656513*8682*83**646558*42852*528684**3364287939*3616757*7699866876288445222194133969637285488686339486*969276826356685*28171*1*2*1262**5**7
7**89176**74**5331*848*7116536*57819988671*735494*332854454985526*467436516*13246781**52921538*1156741279*54898928966*4*565228397939516*8114*816926**3175695666584462146*89*8759453569156712**978*237367142**34**61626299211541*12819127362867*17415*6121791834*2*29271763512718125*46765551*522725*93*111466*16373218779128881*683888486377949559695652748641195*452*48622723269*883*85*15*29*668368*95*6613697435693556343513*6113218498585776388**893272248735255389662215361394972939*67*62581*786537128879*61128*3619769*911163588761771965*3876485**9974846243497*8*6*2*4955*284*886*533121159381716368224112976847*378482*3*32*6111975296354611*7338*35948*5596*6*49924822255668549*78442171581435326413477*8733*27511783631928258753442*9*184*6241385*3356*36*262*6599364*46137334*21*3*56529261562991875458293672959544331541*6**31335769169182497*27319841364*74539948125656*34378619*47766764282577*6*62*464715*425672843223725977*263*97*842574492295589715*559641393*427811929418734553859*8451336248232921786*892792559896*6551*73*1894221635*2313131*8114*1699*31*12211*358192143*18167183*938*644674162573*7443745586*56333261165*6319*5*783326348768236592894231*31758*45532174445*671531251267161641539147372*22656642**818999331246519716*8443*962*39298629882564682457162799718565*2*87816967756176374156412524*6*993*732756531**7922*6941923371894*532176566113*39723969837725113*5*51*77285*1222*12337215229948414313*95281122712*588*755381263767939834*7279965399565584664813*2991*972382747152*5236399127899636373*736*5943256*395926318947828*435484875174345*88299491273636*7178516278643*238*728**672314896*78383733716583*14873814*9575545*8979*4*83751*7518291833*2751*4645*21*3112792324317386*5*67541*119356329**7*67452289*1896262*99917631293223*16381714922999228197182396271883329*77834377942639374581426736836143935654146649387*35*2371892735136*1*6*1671*27578649541918558477552334*38852756379265***657815*3942149739821562*2997988*48914*6*51894966763*1*6188*19*24187551324993685313781*1674962784816757237323291338177**17*889978369
18*8628619818691835865*927*2284**221774*976931116491152*7644213837*5654*971748297271832*941925*8*836494931723447398871821223**3394*6447238771636*19761237*235765832*341586*93244929384255952739*95644632242*74358*65863322949984827*87432*61*696471224478*73*91*47833587*7911**4445634275**778819931*28984824485781598372646975599575*922863241364169*7*69316*78197897993531*768226*561*7*1467*2389*612*25254416211*7922678654564134825*28*448928*5797322462944967269425297152*14**743223798113534848948221834424619359927758744668566789444732*2292621989917*937638*818*49747567*37445878*8*4137*75*294491*32816537183852128574655324938361199**86*89761856551468*183*86286816389235141627325568571714658554927125875578675951*34931412537587922938948*481*1*83971662646976***773**4517685386454353768*36926*6*36**52326*49*4758456534917772558225157441274497746933134*31457*8338529*5756536165425*77*263314*624*561791628335*958996852*465365273982581415141*86*8113255494*14666483281*51185**978232317*325882994456179*88377114155354716583*64886*442**1443*63226164346653493*43629478343*781523531**72*2813248**2633862685*59228333175212537588555513427*68718741792678211*29994*769672*9429173747982734**478358*8357821539755844*97766273372487996699*18*173642164262*78375549***748155647749*2922359263317*11176617*7964*6636*58223*521958*2958212*248897*2895422573*4778192*17767*626355637796514739*255**7171567369*287*48**343*8636322249477823386*28321118655489*377934**435632644*8427541515912711426326672*455392864*34213621765986591598948937635138*9978576268*9*4*94192882171465835*8134*3616693262139*6*28825161647639832*8*12*293874*983635773864*32741325167917243376495624117751387812478**4874892948151455668976944*2414*7*358624176533*127271425552323*55976*59233*34*61768498111488555765488571431379847273513*1329287947478424884722225257*392862233957321*3*47*3*1637*265231279681458217*9226898*1*95318878637562*5776614*671885384952168575496463121167891865*3236215*54273694893498826943874*33391593837178581*8486584794395*87938147*66**47958512619
9335197*887453*61366832166*5737234567748552778*49*6*8826*9148935895829866111431288*7556917655147*45675838927381281856*89911*22678422*17647468*39**1511888986664483**13535614548768*36133761349196599941*71961799*666*376*75328993316711*45**99821799*199536848986929**28817867613791927523353386*51235718825*44651854*627848612136*627*6226617*694559285442992545765847129*81679422996186455435511*913578*7*35424556483758853382675*71246751719*1954666*21878166*278*79815*8519*4*531298575*18293344877213258752*82689755*5887*838555*3368665554128275967158*1213539323*9362879128*22993***242*821*89766879669352166282512379778551528*196393121**4*51995*89964*17315765699573744232654137635575869791914732*6222874*51*45996286164588553*3899857*773897838116256947149219941*22544289925543946362*72*7515297*24394891773354722696*9617**8593845*5195447*843311*889477235755814833384647976*8327613499155182681888912662391*92185*93913632111364891*741479577173263487*4*3*292847785732889715*778549*761567185327191135*71**851994977219135544767796234*41*83488885771913424827913275*62399*4*51458423777*42792381635541752618366345236743777427318793388**23696932949*45537*358*68112563656**94343664893158*6399385878329988626291*36*3745656174*5785233*49327*88881*4*411361842379354556*5*438319*43123564642*29918*48317*85871517697499924817985165*8571648547189197677678977996755*677233639455626312551212**9342235*2451*8*1362**92311*561*5*3594443852671612693485588885865945674*72714551*61444239*5*8*354297**42142388877*3888924492**9*6*8364479982*4851354799652998965579748253942969384859812*215291*8*25728191598151**7953*264711871919833*1*967842**6962597*6127*399598336557489**76*8649*93*62738388249924222331833685695942778359113413196182925118671323134874285621*7467284*51733734**4684*2335147734*7*57768416*912219853485*7668444985437479523164*32235*9822237737944*2*51*69163**15618*6171142743*4*896*47136939787539*215486**1363714658496123579539252*3439936*6817488843*46678887452*16992348666246168573634*84618*9662191229818*113285815*1695843315325
29*83*2*12298412779786*83293431137671724671592833579183291614*66*2489799977914443711*77688*4542*15261621226351*6924474323593161426*6615168617373985643433184*296*4543*86119*67522467416*4776*21*6296544574573*2575451582*357744282*57868799*8213*246832**163623758871169999131859783651**6351813534888882*2944348***7673717*68988855**2*7812833*6862876577843442733743259*534*5433*858333*4324**6634*91*188992335567349453*55397864966782*752*955*872*623894*23387534638585897*537*1*9523357711847298*843439494634*6128*657837697*54394189229*5732*9563382295156397**529147321*6*137298226246912139785163192**828694241173*45*191*75535867836298825*58371*784811989517*698*1321749141*54865*51224759518467*35493284378335212558558*7641868**788*2281881597674*6681559*73121533356749337737515*8733979385114539428**119652789315571*37452**33*8142773284698654*87737*2417*6627114465712658714997854*276561*52872995642*3*9544897818*3773631**39982*345324572378*76526651532*31381478146847688348*3512699794*3462797543*4*54457124685423664421498534733*45*3426297**17*799665339*7*74288436234957*8298894792841297424123696867189561*542*6*2965715489999152423641262*3*535*866228626123152*4*33571**649358882187164861*749612266887229858*44766*25*497268*549887799933*3*12915*7177551433*49612415262739531534884*633*2195882289955147356368585418462854382352738*1475149373816155489851*8*3*17411548229925874718*374*9356578477654335283374281772722467715325812812*66169984627883698236537315697*897*295339633674987728566*5385*89834*192*95*45*1513*7932366364729273775*376577*55997326*59326888*631237791526815234536546554814188*112525773*6*5622*833326*1*92824477*8**467914479729426138*26572847395661484*471194798877661*714*84235863*16*81266*1239*46738*131336462915*71785755599345*1465884*43688565*7544*7845842797385862*8247666*683459985615*2329789426565872678839*7988114933185927*98961*2556763649523881323768226*8591833264285*8147456*5187718264568935383112194348699484*542862873115569147619*6277147*75482*32*3*84*52*9*6988712973412446*812*6578913342985*494
7163746221263989878335738831*89*286*78**7465355*7993*961488*6*334553622425662346857852288554324*5132*11*893*6*63539*98*94*3*152965323269998364778385981232359*1*734991234578826679241537*7279618889587*59694129297771*371244214821333**68*161686626877*941*2796448463291686428958833*324*69*48318888276755772536995*6*2*7*2835294173439456377791612711*615*756*49581382169757181*931*489983*5428567**98*9*8397438564*216944493649*5384451361677*692868691*6369684331284*6175*63618522*9276911736194146759484915256217744*248395*392226*4825326656729243*1463*145776461114982264722*47226956733935*4429379*3887*55654784675*1621441339833159195481337*272412537*45379*97*59398137618512464655293553357*31641*5844223113363123638911158347477896523458255*292246135572522176678675777193415571555*7762969987842695132444254581*55825284784*3317857512418796414921385425923323571*149167**77435216255*98924661622*13**89987827679657183784688843458438458*6579434*95819228813*4*99672194164464296529*1*6238837849*1781*74198235938*697426852549459*9*8168514411*336858189438719745251*37*394489*859*4*1823381164862*9127153275*712*5315153451197337767*34*26416*98323*2*447636162535162234713196786619*8298*565751648247166113679857779276171575683564**66*43519493*5366841**19*29997591318476848961925634731495634876165734132435*836736681892*7196*6*1*468*9497268*94*948**4235734936645*87391947716163*348267*67*178838956*43*855*2848276387576*5*91834666643886*348493648252987511682879477*6*7*831769356171285*1822322744*1*978752926*8525931644576518913215925763*3**2657*1738382691934*97713882337536647*331681716927*885923454357**9*48772884622*65*8*3698*965*85173791548**2*1687529492255971527567878319347916557149*7*486734*93119248295566826659633314218737794*34449354647126345*7*8594*3434785356735683*9*31*5157**341451673154*6*3256189*963859872324933**998671**7*1*7885535*2389836267955*933*549**373927*739291*2387177111*96*735*4**1*556815764*8319*6553382*1671822876*2***347511*54676*4749472*574169*562677187957788185*221*9143851*656*8*7759684221512285362
171253921333266614*925871741*6*811353**2692*74485*14528*644187348355517591*1698439787*81*82227582358545*8868*74555582421*7913194195467*78158173632434448697845**11*486664945142*212*8278481359555457*243936915*54539773319395892*43998*42*8562***518*4*4*313*911*391921451668237427177*82*58866558*149673721*7*55823521788354312455459373*61*4254687677837153*126714387355491587*2386*7*1871*699292755971319183494*55*98226863579417*82*422229245244686673*86851434316469284225118247318111**13*412875327*187335*846985427*1616766467*28216198195152424335884466*12272679**4*3285362181455633255919574643*344*3*278418143956374*3683792644575*8287776*8*6397816928215*1558359452*15**71*83943241*43*654144667864816916786714944297171421919522*42613*778*68743959*512864624339781*561929561835812864571739**19582266142549174722551*4842127*466697855*827131369442*62814*7*234585*92*3655744*315558821735*8712983473516974796435618459257982*54138489186*5*667*66482*35*2256*8869834*783274*839816291967996*9974849396*4817329978346*11588865658233*913*95*2866493*152494*277792232816254743619352626267739*664687795399195345559265**4188814484375198742661128347289*2749799936*3**88952171648192*3167*8*5422*4*95*1181559*5761553*5678*24796543552**862125699658396299*8138181292**5377784986226613248*259*6333388*73*42671398131**5*91821*78146*3566568*82774866837224*628*5172353633333**1*1554479967474429237314*8362547857434*543*1426**2178468482194*83664113386175*8*35956*6475848487542*36**4865*46546519*1692*6*365*8386824*654549841587311125971332232291637416875893722843*16387224314825218*8691*61*341*14225199676*8636911218*589332581221569*19276861489597661*237**5379129111471822*113*65936*3983818811*8833833621*492716114753884*218723*212813485*391973*99877866*16*439466726623556*7248358*9836793*536*9141*9568471146394237452*2785*151247128613516*8138426*728359517354962468493612664322*99567136437819238*87**8*6337654586*6523982949*139341727659*744372**68437351166728463652595161*366136*73916179*132*37146283729828723275859*46594*188829*48352856
6274438*377*996322*9429543986495758977343*711*9366261*14*696536492*763361*22663426674*744317732497289*5*69343498*26457982417997334*9411*5771167268764*7482*874219*91*26447526*933324231829*13992279*12*629*51865875*73449118*331*22788913745127*1485719*8416381423833*1253269256198888*46113836235*872137*633498671641267*938818658963*5*65461262449116985666169482*2353316443*265265216515165912*65859668624225183242*649435699179868148*931272733*23282*262714168715569*183964765289582817484218483744121668844432189*336359223*75662857268579713659265454*12364236*5352312612*7319*7294894*841**22*577452**41*2163*969143118526467621574371899865766442678746893132*2613487*37591753622385842472*794*28268265697927698177147352634*2*34199991217*791239312468329167884*8664*9555243*9346255598677*2*457*56697994*255362117**535*377183775514418*82*5184127396666*8*9873*489*16719527899*931887966549*9664691*49931894786743*2258577942*994921*5559686825411914866347288137248152167343*62*51248544372*5877267689633*92*582567732711581883744377*7*969931412874196796835899874598275845373134532656837*111433*581369815*43719893195*653*2755334*4*372849391*9852551892812797985412924249**261618837*621*98864519*8418*981*264316213799611182596458*53*2556756**9748821*312184117159538*353969*758116184*549129449349119785647*23164972226396972*83785*469246847261339*274389512546416*8749*27824433943876149425925647413369613662784**8*41992255733261375**736758766326673*59469991417439288764**88552544493252*36722*37*24168616696258817215*68359125127565184**971153861477*71169*3289436347361742559*55452372956816517111*2717662474*666337799978276923*69**79969**241322*7176139553226*3623966**2127789917388*71976214127659892676149845226*59*19*671675976124137959*512164*5727652*4211917442132*5853*144931352411*3794137**64**216151**621666427*54195285712121953827442154716762163545*5547526538591268172688155859*6454*593143171*38194**959*6163484738647784872299952*994591416*56*427153427*264241*687463832166*86534276*99832476173417818873899*4574*866*5715382414
*837373*913131366*811453569536513528*5525177*35993515482194782433959686887428*4*5627816921913645383457*214*7144735892458143*6*7898184391747121*44933235***2*62*7333597474461*923687754496793*67899269485*924*532*18992161381418992783344899*88597583649343*4*97279187442472*36*4144892546*4764785*49589*94372585592388*5**644649*768*13*1*45*41165789326564586*442315872525442*86525998*9*571951293*8586847*744346136759**45*7*32857531836788*5966819524668**855732489181622419596375*5**95784656329*1336883349122399335178375464756*76189284111325668527126852392*668*983484*494523**3992627231149924796285772172596523*4146223472474**465242776*42329592*3318*9*366121885284**84319479417944924693874513*891*82938395492*5*17975*792*841615163217*696395721578231139742242**5363634271897514*641664846*37*83627798*374547798892998942851619686*3999999747**9824227*1**9334253*8619222*93531315282275266*3734718919348976776324*6315863*23163999744831*894557*99661297*9715514298172*25417*4656851411776471262374625112635592**6*6849296183353196143694871262524*53542*222**9*68549*42446631*31948482*4535*217714*915*7*817893*95293789741375366299188996214492932254648578533*4277277**4134843*2*73*62541*2551374826933391411471351858232*14688337986*639194*675488312993288256*6317496*218129542222653964942*7**17511598*318798*8*452167875958241829554954*32463659752854*2532149983644158*8969874116*81155987*8949383981*4*75748651621965461339545*5715*3862712151368481*861438891721628*946884*66*442936*51563311872698451*6337387328252*276692978833381*474925*243153383536883772528434*71954212*71361481242712*99445*959998563728545*62592324469*53614*8559664634137883131619*474749*5573*9826455582*34868*884776756718916132*91389911*166*47*7*222271497691427389734845*986764124187*85962338174*683729*41931751363551756*48517478942116583*8488852599511**327439*4822129799476493838671211777*76645*8897244*89243*12244413*819*2*6943896112123*8157*6713551325232*7*4464*91*626697552975188373838*2922562875693483*415946931426248759142441284639536717*5363417827*26**
376*77583571538326711538786*972539*64737214185458267718246*758742198*547*213149*787172325381175854564688753466578417167**21*371643277189983668*59***691567*86562679879*6*1969*44**9*2347173*974541936771522247165933521*893914915271318645*566**997669236646367812935468*8288262777786*9*1*923*14925471*71*8583132845686824763227577588*1635192*2*727287*6143*788*55613*2821659199*294*9621446974774983546784283276985378121*64*7*823136336452598499789875927431*172894761211734132773584279416838256587281*6735*7**676*91294*4*823263277696*3294657372**39711142739467968*46118*7593186612421748274285742662698122246192655974151781*7**489*3678633423374877829*22611973647*7*97328823*92494*47177*8476828884124**138*6383389*22844591361257728*8519824879*22885497656211*86866122*47*22*4889647*688686*14*178252*656263896975*65338559*356995971671594856667*4213312*553554691774*155973963214736979551585611967966873175636358*612583996195812683*53863317964*14**5*98278*72366789592417998931375217*6*2976791*41*7748*47396595299**8123*2213231524613*117821687*1537454637224*8*7586*148222932282672551466*88*27*9772*3217767*91162996227371*43684153*968*9648625511747183982479*5142454836982924854848*96133114822272*23866271468195851645344823454*5*1*25323433**887*133*32741643644*272511*28*75*5267545*596228416*74182346738645177*935*6429516*5554585525331417*1*378*19277212514669356*181955169634*7844*61237597*52*119142485347846*1265196***776*7857*35754351726798429922916327265123413478*431291152664529*89123525145681374*764832*54393639353213445258671325949*7454685162938198125674864153343655378328494845369*252*9982*64*9**1194465343461234497873968587642*6*2418632524142*22832214722584888737335489*96221846646199685794*89211131365979339214*52*75*32*66852575551*5*117953745192448479665814378552618446999955614*217736858767829453**9181992466*796*42319341*1291*2736241679617165564*349*49*735691*66689251442*5*61*769143829438664571613*472558*945749575136386*88*278*731461463*1325*339663*4995495155*916*18736**7863967167*663*9163*5281684266969
4917312686629994*36*9961186*6952317594*989282396337796479645589587817*3592426382493888*66187623918*5136855*8455455*21751*757*2513*79198454994118761974575*62848*17837*49**71**5769892519237*7368*55945548769592*991749861334*4*29672974853245233423887267*18*9*647667512562743359677455931165*382198*17316769921982387276*35653952*4*47*916762799744542518394464463289*8978979**2821182881*55543296*3737*383966*252553***268827999565276475561*8192513396459*2*766983377363*954352595879*9487*3*3*5684631268466598386649*16212715*2922*481773*29169486189582*12242119781264*36826618363*3247843198613669991454919517*82256421617367*492716119192*748669692*5*5322829449992214264443434373*216*983859899762327979459754*233973417321639*974624*7817225*1*763*94*24*26273267327865493698575168492955*943284972454743498582742848344317459241557*5931144191*98599794171*162234*585221682824231743*2949*691865587698959684842581558*3*173*9137188989*43464579564658173*2668234856635437*83*51389516*22453*543*315*389244675215964855*965137986786257215529822255*2223*81326551584*85*6*2*64321417*14872262941998758153*5878237489873291536364542222863559*696215725548*67656482463759732534955*859*56421*178175*65*59*3*82997*23181151712363*2*1577524311*842295248*517845***4277982494397318249*1146*79343646*47745243*286**7*82*48*432147988641336*3937987422661692421199716751*4118231224726524724714642752589*2887642771397462192197278916*5233989528388665179*5*822511323895998927242*44*48933*836*8793787995*91439572*5883978291446*9881*74488*4132*48361386*886781758873422185349826213552*42787362986*595653121*7263885814142238131983416788*338481254153179274493773*74567**287767*839626824212733978918111837158698834*1725988935*7769726114489645123277976119352*2*96464893716*1786496229294652897522263846433*576482**886*562245395637*437*18884428*2766668*6873*8*9728985436816157648316566251169*4884*4956169628946*492*16979*62756126824248116154744*23*73327611546568331*181195481781*9378*45612319784893983**7*1642612887574318934521534*5725935996889**94717637739
515788925*2795694*363243979*127*18255382*16744199136595794**815296365595686653638723382396696198*1*3837444*84348918775871*54973612878*19278314*36496*5284181**19245864*4649*9673638477579344677*132*969527377264288*6459735713581*98661595674*819*7874361*95532882*586*1*331591518461685112331227388739993**4334439*99433785797571254*76936*35389925*6122542*78893379193775332342948*8278*227341*2321923883445147*98523111355598448*72*5894**2933223*4413*28**93182143798865*773498163282584871193546317837574598342*14259*18*43*79427697372**5993*1799957619525464263939463*4*85326378541321276*1612529451591367672*315744925444956812192*42159629*16864*85*943*86795634991*38*8579*4178566928435428224654282769498311554*97931**5*1272*32*34578*3686274224*3977272743147663791853423586974112185876465859*528*532173119239882532569**388412537*37*526745596146923652*9167814274849499*3848563**2732816165727164721938819873*8671366622462458185322371821152185*987383293939865737*126*15*11324585439*81159145514275*5577931264227455*74749*54412846*196111959*73392697999438*1*551***26674239411*883*134*676491274874*268764673931*6*69726993*49735634399123589187*28749867*3229*12833113234*411376993476*93*847119*12133*7198*217862436324345645488174147*8464139*23*888188373182852961346923639*482519836442599913967*62917988658*367522*2387*54237118754745***4562122*497*31565*3*9945719387871*475*95*9*69476*48*8737*33836*788281733348215142324171311547616741322629*26517385546975567376756*289839837931882*5368733925712742446419429661753938*79995711271254128471952151794688756*2491822826255*6266273*5721*68321965638546824774111348545972*922961887376619*688453934564*65844959422532798831727614*94*735*876811117*5*6846*3311934156151327989578587696157*56714*28*81863*74382115969*2641174*81361561*9*26*127349265134338481*45493982*1938986642239142*6646*85971394779843362*5887369*5658*1818974882573*4253231*49*51694483267*39168648746221753192799*7*2767438*7132597929271848275339985673394276*888343*991744689947**678*39**4784493664239272199*2****46432*
636487324326577*77*83272611*351449394*617257293*35*72283155795*462377523192437*8275857969262911*968*44754796822627*14627*497599551*1252891367343153543*494227797349*35*784352715*394*3679869948835166*542*3497165462145*2999142623257*772913732469*78684331787*8117982888176372219995883787938544363*43582413472333*661482236189*457*8774**64*517497377475*324979831*198662*7198*269212*3761*4963134854254831251346455595*63232752*94169*9645853564548*4848735671*348595754384*2758989476*14*2*468961775*692119569167782*62768*56*95539258288384533145248697*58963698931937415156861*84364847953*8*2*66598331346*64288564653476234461*75415371716188536*12442*95**51*4924*5483378361*1933674362619**12288591759648225*33*868179718*8697*8*88454954898*263*2*244*484754*7*43*99397316*89539865255129823833342257*978557716711429746811*83*587767641*29*638726**74371*1413263681756492991329948669*953449*257487727841*671894714592568656992*755989*4736664861256*2*721475184586711657551*415*59*258863593472393912894274471*643412187836839576*534483*1477*71354341313379532*44114244*199475735242736*4*927557374695286142676*27**89*6922821297214854816*5423*92*35862*8165665841*75684*4*65*88848839681297522535116537292*876*5895831*218888787232199514*32273794775134554479357*6292711*9971*278*913682295397459*35*35448313*39*251228944996242*56113*83*55945*2615485*9955198723829*893*85793*349552*66845**93*1452679778863882*7676855*7569*599389425156793376172*389879835**26*3*45859*1*1248653152*682534*1314917236156319239274428786661683285*49448435243961***9*959*572*1171395*4972899976966771*67363592795798*52117772*78798*17*3*2771*5663*7715*867467*7375288573912674348753739*374914*3632*191*959*3151*813*1766721591769125993*63348643*965466779686786742347*561121*1629538172**735678444588*1452868971*499268*61842771669443225*458*984*22755236962*5619*899778*4815954797861852968*949298497894139152983*2892931987*57*528**7*3752777986334866697571974*7516146275567714*9*8498316943*5471434375625656*742567475167427667186*783849795947228943**39785942317*63
3342*459*4829398*51428*44713923*8278196*4432*9*2552512558377879714161898334*4*44912*493769987699558846**193416458*3*7561598341*75*96*4149**1978295244361424215557*9757*9814661617686648838743143959*1431*327786752579281899432*9551738724116**3573846541*535335894593285367919436599714424157579273266635*89196281234121*842715181522955*74*1887851*981*8751*627223*427261*96*735267*1241838577385932197476*5355326171943165545*6331691471287422466*3488543547342844951*2466*22*99878893278127124524155236*596*849111525*2349594423**9711355452*9913752***75741211924147485741*74*7618338864712946514199259739713125565*1244775**1*5*442473271188146*7696267673*9*438998*671418*81**5286282176179*95*5659328488354543*2*818147*1374459471247338*8369*6985*922938*427343962189179462693455121718277379287917688229559233*5846411671*97917489*1218491*7*79*7157*8429295*85679588735185*2*8659628859131548*3*597788512116*6858128614*86134921392452454969993518*1877**591415235*57441581334264*4316436246729154471682924286192*572*86756173326277259472771738563694759*61191445*222274931844529818*838272168891374798454596*176391411289968*53217221311138168124373*662*33775829988*113*8717259*1796219613*164838936842161615288*8*15621225*8911449735479*9972151988*7**28218*5237986171544942697362761876536872626*9*815979192678*524714429884558823*5*9156*3913*9*898315265824334978995522947*76867*968289466*89172619389*1*7353*2271691691279689639934862291*2692772*3424*52*7*86*4783191**146552615321965437995485818*7662153*17*373433353435741466242889638*754*218*25*85644*25641265852*49732525997611558324823*8459433526**293227115*6628932*67936991895494527159793454275487745466893*63428*453186369118*9214**8725*55239449**1997615**65729*3*814*82397815*5313865*47784117*2*731331212*2162*378229*7264*9359*193119148439546613972985*269529**2*6915675628475**93*31*29398*71899*8991835689*66292*31*7351727782129144*354937*5615*4459431**5*75752274*2***42388936387796*581938612763984213491677431*71178361*819549457714874966863783867**12*99862469423681386738383*278
575575817651*8112474835748538225*6196912235249985223748337797327188*22261216675285894*72986363419996275*41372976*577943111*8659659*71**12688628581599148382336*79641*2481137916194175568537755135845*8195727655453845826*273*45467281812521299941*5*9*33269593*63956335691*284212*51855558518*88*9*45836418*42*51*497441734*6*56949267*397357666*9351623*4137*8243*531565637266721*2747852558346378*97241*22328766741455271849221*262563174157*8*4332485178593878*4*12738*4851814271474947419118*24141*93815861359*792318937626274*799345983868*574324*3548187245**5818484142*48118**487466*128362941879231874331482129588424529684958832617744618397848377416534591*542914129*488222854427*16382426775547*668139765157463**63669*119*68788181228953275433*3392657277883494*81846885*745*34733683*7843544*2347*2*535516*948387*4284539**75969195198796*81436291511*584514659283*489568298*91*77578145278141178649249344168723374795439*669444*44589815*69*1*49*99735164*693397521169821*747798*327**122718456535*3475737461813*752749877839277755398481932321*1521*439283529*487266193*22255785716266531813397948*139158981*431112322433*68494291*82312*32328964293531128*745*86*35639631773283567916782*8*62382*173*4117627*63381327655*9992*195394211691334*9391719*3543***7*95184423938*9645317*122*67251*779332649647461437614623*6*9725*11281*29869526549195568245*22**94365291253867279329229*98583161891*16*99564711221512456*94798587*4133374589549153535869369*5*6583138112643773551927659893765568246417427735648*72*73889195*623387**85759156827184957671*567881232414442467145373436771912*617132637931223266732553963321*69**32839242995418892934965527*46549842615*594*63734195222189348112565578*4*3454161156112*9123*98*8*795*885675631*15*74434*3**4971*81853932716527*394669***6212969855*1685354963*5433678965458*984168813*9882*43954144944781939856863*1499498611375742782224858141193239163547*2817594*27289*64*755988*9478*75497138129*1369*526*1*65*61444*328323***917*7144167576493263363264542699319188145*9996*49712*3*644**6886931*339597399678*169*
1298645*7518869121**891*5473563**3776885254434115712*38943722189345154**99235*32*57432764394597379375295**3136832741136261545576*8517172989937*332311888598379748*3562356*199917531956172882192583*27998674899372712*1*6612874*8*95644679596411*62*3685*75262854*28*9846*59***526548679191734197824*48561889583553732559441*4168*4*733*369**916*9657867127**225688*9*335945*66816297*73*363**538879*26*71*975994668679987*64*8315*86392473229174**9799*315987716*72468792716486*836*21661**61448*639**8**353666764456656391579741442972293*91445*84*642889*95617331248551348572444537776965*9115416387667817*54118811753145296*82*33821756*51271*66724693346761734*41744252172*8*611454*77675474941767**99653172988683197*4375734*732968*7514*674*4862496122*321747756612256527942*44744143*6*11967554545386271*47273367***9392*38688979*256838312373946192641983778826316142223*982339868941687*2*27499221*785478*3797413944114638*68*55563948*1422*115288616525*577821*98476*218123*3978*2*598*7465661*357317*754939847378885*632*42*6389714221457567911*91548952817234*7781734395215391868*977*92169665418166667186252**469274729382565762398455164**56412221471*2347*1534748*8166674282973198992278953258*5*6379388777131*6192*946891**97592337711758716932278692*125*954*955673685695183477472115*1393345254896163679364*594952387629142278*456448211*4113826145328*28835515235494919*67156932*9*977*2674922657*82678459635*3*768422275838555*325*562612*147928*19628177*96829553881*35143188*32497489634357622466493745756239346455*36*6259*4836827615375473454583213252777361387685*23429796461363215358*319*615616949*9965921*569995*516168162187786394766684458**97261656665**19**56598978*3422823922668*41276*54468345781158*3575462741697388976*6248*11772431*342378858564495866614525118*995622*18*97658*2928*94822258378887527*2381366237296*1342351783814495*995538*9492677923161462457*9157383*93*596478177*65*383766381*1376722882*122243821**87*8834354611*36641658589*57881*424192814*1439779524897*46481363**63661973815243189497229367443974*978158293875511
51593923829389439396*6494*3948945386326*3265396*94289*7*347934343847672251881254194581191*6239283366459248389927681*8757738**931369237284674*319127*784375*771829754958345842744*648*1983853145918315*26559*953451687*2665379319*92419695343*7*21747877932*26**16614393*2*7*2834236**25*69422374197669828839*87*46584*32598153*141*64*5441*49267515246356795*754*2798*25681512274628262*615**73749177711917671563*4733747896111**9*84351657*1837621547*3913425221*96278*584158611**33*2*56114849531259743*75*51519*72258815232673389964441966499334279317621154596391122*7*581*1776*25924796311376492*677777141838718535895*6759733412434582*639812419511948472*55177725181699777435913128539**1814476562234676774681*723353*9*352*2967*1744169861143851**762*6*59*351*36631265758*9595584725**233393898146951871538637566677123*791817931876896319919*91175124436277249835717693*573518727161*613874*3398475*7*6659156683268788936741184*34369*771421667954678583*6625**2715166*64333481449951828766568131*87448*8279211156639484355*2355*21*668693954116968951177*2533252*883953117958386*31412119233638134147711427493222562378*354765927927458747878*777325*3992284852582818589582217*5399**89418*6347*823685479928784363964**74212648*13488581752*95**2654171751*35*28581358221715686*494*6468915195549*12856*1*5877*751515*8731567559763427483143684794*9*851617422618326715485777191324866137171473117*3471428617776278318*617423*93198717669596255972725926119371736416*11635*21952384726278694356721276763*1285435329346267685114935685258635324*7462*416749*5*96883**82322735939247874143*237161257275185524677619**655844167714*95382768342*4542412269685463293*48*57*42575648843963554*8966397469*3633859983834991163535*555494873599829249*9341*7175154972131*193*66536596729237135395758812874*66512*455649815*65813912*13922145*339148*6139572268751531*31*9696579341658361617386875673323689856535637356139372*443**64158381*1217535737*4158996*697683973144*634498547576298438**269992887978966447158*97*385372*5*7*24162*1547*253497862*2517*72878879827783229
3643*18278*3854893762631984893369527*42416756179*6687329*14162436271959665885*6*2*47*783*4888939*44128955525523864*5448614*2893232973924379**76822776968397956776582559967712299596319**6256733268641*9724924*4389*19177*97196*4159*372438*947*769718735*1*48*886918687178*1863656*716936612772116642445687*1325436147964471*558931*814*9863914*1922*523151384334593445299184645*447515181436713934566*13*39537346614533796793174*51159736*6*95*46866131779846298*2*1*54868946167876325919463*787*8*89366724*5551136696173868613918642887817178186821136232333332594819625768125*3938884212*55*9794375*76161223984531581173485964213753955676393*321275972612436888885*397*1863735269484474781976669987982**3248423687971*867914536988789**6*9687414373425391494837522297*8372666772468511121768*83*58*17588981715723628856*519486*3294417*656434296*39*7*892615*2354499*68867855*521317675194777*28881696483225485868984816*76457*8691*39999327*67*773637449494121*82*26181135453585*1417*14*962*77261216*8*21832*7885182975576*1133*88373196*3173364889*8**3362751593878*75171385238685196527918665467634*5119428788754663599784564*4574843715381536*8239629454899415**633511*4873*97*3159**533888674564**24*42386965427181958*9*458776*23749199*63*288157373561426675315692593176153*85765*943746988*9544382848248858*56394*911587916**752515745*534*791995556*716*289766*48*6427581354151796819998*526*856*39*1*6*97196*832361*897394*192552785326166365197167941785811978888*367167*874599**2723584*463*6*8542312534885825344855443331875197489*36421335688537762*328579159658965741564698685*687366193467*92561*436*468*1888177927985663646871*282955*2912*1276315296358*1**471*52749886262553538*9285857857*5868474212487*11*86*5646641*81853643938*875935462492*45*133*82*9*55898391*34172*12537*1*62879*556*516477*1621*637217827*985552476466286233*67*95473538585*34865951922*7*42918353**647885719741*37314891*6984376513233972959*8558497385395*152155323*8*289355*582*1*74279611149*47436*31*532249961785*86*3255*36488*561811746*394547*974317717*33*322457197828
**49*44294865697134319*78114168*81*6626*77453829*3*73829561*6915993679713952*3434372813723*3*658*5391*35817458*97*6384469113371598891775*9567554825156937574*634*3615*555958362327367772*6186569324571349916942*5747917*12*27324*7794775251664975818776*3385379322444*5346382*218441127555*571*8349*38**3*478243*44666*99475*62422352578572974764288694127765**547318414848179633*3813551225928*7151157*5392151*28*7*28*58458818558348594738*98867*32793615415453127579513*86*9*8788139844937476514491429*77963155667716*653612362865719398951567215834538112243487646532254698264951825*565611949175864*11775237*565885768829899196524238424461635694938*242953959613712215*727*911122112914*78*761131*285217861543*14292*52561929*469*19663798691818**62374577996**374822139112*9938839373949259456599597865*4694*34*62452537*16739395574234287*91769678878956465722**77444774298161326798498745578*6377328333465586679129998**394424*29829778*86385493*55415*943768955583528978531935819291*9997881349*1919**9**47*989231347*4295654661573*637635685126*935272967193281233482776394**697512162*8*49771431652416239729618677697693*94*8753796347754*7422*466155349473165497558353232*7796813534974552642945567184*683*699189275986825366*1936*4495*548579127117592*655196185546783442878*4186844936759382611*77*4678219586*323558711546122*59**292*93817265*319562144*7341*751972*91236*75978*8*3**649268869221237282216359498778929*49256737445725*9*84234727281748*741483**576*3156431*8*8886*521553911573259969797578*845153674341763561843878*1911343592*348157268726142*819992776493*228614384744*553*312632*6*1228*3945*411496379653488*965498278446*82623859887615219245524141147865714397916*927487389*1631178168867917334432692642896111*6817679116592*26*84277246292711196*9845438156*415324*357688358591335247763*1416614459963*45765142117356559529*17952412929832775637*998*165774311*4523169955712928829859214321*6372*158*53915151763789366649*5639562244128754*3812*28764288799355*92*429*711334686337657988**271247987686778823351768558*926813969*3587371274
49423268595399368947737*88831518*18466*1265*36918884471*494812*1314225578676113754863197938349*454*691775626692937314**538*8*83*1112*99797976*62399152659361592694274575248*84755211426967364*5623264522158398989*946*658*8844**74*82363424449971679*9322739*6*4813251568298867325133975426651389469434767*2744792963368*5*68568*41*3527215873367*72*95771989928938848475*53196253*42793335615*5755781*1*137*99*391561622334757187633*535513228397*17867745183469477834352483862321532365**1896877823119*696493853**321491*78688238583279*11113*631723879*586797561755982638684959143351387*868*52549*1175449654**2*868*3335947458776441566*6362952249244287135651582152724654778228722583541231*3*693*3*8569791*1428494*245*2*412357647117*6222*4*453699537458958*753*1355211323746*96523845*7**1*75598*3**2259*18*8588*32442*3*73859233327746272568164*564941656549773741222*614692657*664574239839111*22484387*56746445628*4121777941161862819463216*2431*613969665*9221122413581379212841791671633426636267512416741755*7168*22943195221278953377422362*95253371191649183*218743541812*6121145418*28269894*7374274*186468**3773663*2278349888*6158718*264*7781739628967***72214338333857*9771*4366545771569289668426322545*17355556822631587432258823*32679*6124485*4984828443116**675874**41*35*6448*89447993886*791*19781365899529933*286*8541842*83499232719688*6444*23456*13736*62655155847958*54421*8561857387218693948*57436957952394157856*2258*629558**47752879953*5*66578383265329435*7116*3241328891348821998*72458326258**8524198241799476447993638453*8*527584462*6653614111465359493169***44997425213973543318426421*21*1118588157694179**39987728*4289752*832***4988783765*6*4755493633878*4592496156*37*91695196*17719758364*22819898711973265652595612848*3641344*21169712185987513664247739176161891457816684*1893*6*4*882*4243*84996467575118267341*7896**7*3213**73256539721714629548724614332717*4355**1*8348195645265*475*14981356*22*485*721698586*12*872963524*18216239721983*19786*8*69335*38821*289*7233386552362127538672*23*755*243*7846315347195
*3859432389*24876486*1469543*95634*5525436354226342737717**459119516*1677741919842998935*29*691455811479954*1*41359163563333612859957529279111594312914392186984548726178583*55725954*273*536155149378*79597*98866241869*252225876*35111*196587*727*7692*8446186356987899521886729*3512*9*869419*56841764*73453998*29994*651*13584219*68638*73*3345778129726943442363257399**8*2345693247488326262357317*42*7941616735*9356*69118*39592*149698396*265*1894*43*51383488*5778169882*9*233534967256276676656193647781997314*4172*793*8338**293346449525664779*14148211*56794429*91281632*6*651859256467*2613*845695957368658*9779598*6526355815*46688459136528866744758*678819528871846*4552178326*85**35528376*7*16*31589*214951*417*23991518*5337614*68751*793142441876*14374*432*991*3842*9*8577*8377423523912619*362876133*418548648453*5852611299*9411732664346365987*232512162984*2575*5374529*882566826*123716*43576*873931971441254887786437*7*4447765797998*513*7*81144*1568755334*71254*217648247*687712852417125*67*31477891*9*6296771979891*4*9259874116561921*3194537159*865524476331945288*5172449886574464294*8827**5769*9*87*55994222736571178657*4271939734381435828629776*37735669149293185124418246323273421311868575293756*31449668447268*48841*273*16*556659736*9*34474862926*97247313*978**57281356989*48584846135233*7214437553792377186542**115485746584258*25748936883*6*4748*93864756*42788488653151436*6989436134167*35298313475823576664456297653323791*599*61998511*8776832282127846737255*54317379921*37422658284665257395*1844696597639989141346**65*573524*1752*2412787*98384777*93482*853*82834*84389297331*76*6*918155744462293491571**4868241384423868932*159682757*99664288842722*5458**2259445*65776257396445955736627343175725861744182*18***6728*91153135487348829585187913717787315945*688*372*645918823484575149117366337178457477616354843821345*166885135847*4148915999721213***631584911722442497296742754487541246*2*36787834*7445*5*85*91845145719897*651169*9273*289893*73875*53817*9718757**75725*13195*9515863*6*88419*81159*1*786
446125772372159526*8422**9*99622212949142753313117565*971655*742569*7198738292658527763773979348*747*8381885132449999796174177894761318698799325*98574696*38*66*858817581159*9234554*91*657646*96583*68755*3517443*815472214261176843*44*25399*796994471435915968424193361321443514434786*42584595*464*584**973596749694372*85678*5*1511*986984591*269426425879936*538159932*4499*366338771462*273237919353513459731587255868853551921634295393*2988317428645797*56839185611*6647737*7*383*6488567**63*515561434467834*15346514859957791913*82911887643931621*243*68181841224876732828828*48169198386987454274891836*713*55*821555*91253347*1129847178365392*1157983375362*422363837679*2582392849797161446*5381*46254674996*6648*19198896124618*599*9574124457849862*549494997577782489954452847*6993346956*34743*4455249*5773916635*9416688616548484*6242*311274131816295787161157*2635619984842147243**1*1324419954567926574917684538*23528724*614*61313892647177382*1295914779*44838*7*86754553959839856152*34697548231*1352683563843784556465614273962834*581179354*7*11242851344548495414*37746943378571572916*345971222727921*9623146527721*945*996*92937*2891493184692324487736**317573632447667857869*343114699473222657561421*694114*6762623788823533*63*967139714329537*24*18966782512*121119*66166483144358364368*617243767673169953957*2*2*438*94618264624*469254926891645242796252895615*4525*428*11846218944911359914*85975659472*12626169271*9519458923157121*911379731845*613586496266222312625352219388*33689853966*89276138444499*85126983863*878958*48251332*57793142**6652921588*5461129252517855775357941246371339983559*65558486*22598581*4411491149963154*99*33*6168*27517853198144684647718*1974584355423448*25845*8778666461241836361833378446555*375*3273198253433584889949196485777984594993423299758965223285268*8735*32474879246962594*218621136657631269*3238*2655512259841193981977422*8752*28667747*8432*414231279**541769675515185956*263241791636482253*9537997*3556*417*5321381361214534382622977421587234223249614*72*267554718294777553*95
6515775*31*278168126216371546628674567*82379*919185428*795*8*9342589525845397155839924167**74141393734*322113*7229*6225523*1*156213783*6*494288338871356218866*863852**5865923*45959745*973*744155871*6**9998111771535627921927729745586*64793719*9664613687197785839383233415596*715213791677*22*7446597*21875696141852229877*99662858554341584*4251396463876346951*27*6413383675*6*214548438572725529*29*748473114653*813695971*634939629451632*4962**3948399*25*288815347376334391322787**4368699862652532853886787743*2657*93*471893**378*1186421573*166649*15441*275*3552915*71376392276145179154*78*7*2*6885288*352592*4132*793195*8316432599141*76989973532591252677296495269248787839682365939686*6**2722476*41223585886721*5*1*18*81134547269161483586485*132461958*2*32698396*5746282579964*65*8114383465923734855**66*7571*83396912362682974536939829523832677177613328722*46665**1*296563*385295493*434*6486978642*251552841819664638643491531*245451993361571711139*625744542696*1**269521537879763*32864925*9*257727859*282735374*91244614368*98553325*29*427*245*2427*7443*3237877834864562758*4888*17626449*8*44567936*3546*6*133349318*7189**861847853131698*32574935143384772861692132*3*811613862651781762882836989224898245962771469*87792394*7633776*95768786*842854429353798*5*48*65397882821621952493625438259819**917152*68648772437*54534*2246622764667**65577*24515*31931**48936*7763284*899859237361633122*5525246528795**8672275923192*147518553613827188497715*43*9459466478***8995*11**229*4124544*993786665793*968*16363797385599333957718*39*6651693319979257*8683287*46852*1645*8283594211719*3166246223754197225*978*26146949335*97*572*65*5239228512968313*2358**434264955893*5957*9584278823*319536541815*25619938*2641833379*918338589227288132233*7672228661488578242867458211***478862949113678897394794757128987229*138*72218*58148196695866*2532*3181898179282687828*568*3337316551***653984*7**136***46497*9813992814767113664147496161515421*251297*971325166*5*855661567153643*99383515225618375814489131*2853*16328418818387*1627*4
623*6918183394996587174837*798491964626551639616*53143867995675322585376*12749735161862**221768548391785793138*91756731*6749413*4212571*18484452*43957968*26293441334784137818721*4*85*722*62968274*8121242*77664*926*169936*3339**372488581383426549*121955384858582*7498679693*77778646492915226673793519769576*866549246163455698488315*23*231456544884827765*1*314*72254623*11475*93853886*2957*2*6747679*7159*1937*748*34625819595838817429261*4619914259838553*8583*139361255666252193443779769*87861123462113963*1471293*7915641156496852724*86*7*3617*12373*184946*8*81361396386232229999*513**4933824*6*3*77*4867989372218566*9148*5*2735291267241156793221336769615318848216121346256225126862298333896381867712*85***879391862*786895996*1*4295426783431856551414182*86277585392456542*2588*458168116853*1331557*391463565*313325959782228625431299*23397847287912455422742262*83828*439*47731437534*363246295555567*69*25646*874389*6675585649234*612384253833*143845458381472488*814*37155828588593*39814879999519987927772194915398*35955541935173914691*33112828747*4*94765*296349759*17*359911769582994*7932*2665*543717665**623165298*658*21268324576466*93*32622826448848*1718761787884*693361493987486*986*34736225*72598429*5*1*3*6915488794483611543963414*88233569*638569681**4122*85216*137*97737558*48239*7822*2839961715188*812*2344687294987*28*186*628*494458993*77455851671962168683513*236914*9536946792864145717266366246876*483*47487224152342982657*8636*91766143151313997*926256*64837687982561878481*38214575364569998557895*773835146*8*4811672348*888558511971595892*94*4556*6498162*7663997892117482721*4*828811*2923315982736426967**225734786148185298*825969189*9*929825853146861**763913396*7785564426638956*28*7379*78632**18732868*97721742966*41678183775548779252835966237637211*39513597977469873764897*4414157*838661874**53***435778621818744*766*8785357535162345353144*89685471848726545841962457953559263677983552*54163415346*193879334213756265*3169*7216*91314386617664142844392358*271349*991359478*87**8475936*658447332
1271*4759679567*883**6916544973447424*8*64963*8173753**646568893681854756*221823*44551537829766416494688992219*32975329*9*333571595589754436*5352891181*674842721*99198732175598412122*888377678773343533615526576*743159748933632951646392468955*93195349839492784*16295*866527836748521612672545292725*773263*5531**3*1553278122555435*528878*912131173*3654697176184*3776*5419*6599656826*4*1816*965*888466163812475433831831459421*83497781435843*393438447966122146521*775*4*74289169*621*71*5331144757*2378396743*6585595**8446381711749924637682837645858*1*253733*83254241267976144452697***28273365*583252927582594318658**6628*88881264783*286726797638965933472223**4862255948*31746138873521836591157748339*86951386**719866243895792*44568191*355645459*9277454**4872*671437458964152346**6699362344866938516742566162786932299416*35*23*3**9485*381498*2448539*8*9*28777236686956514364*4312593715718*791827461383822593*33*548987**1156*47819*4426649646*221868511*37358665*8512287137714*73251752119374**748378144*8885946764*87184187714899224*6564167554439631122665826156272*484971171*699191557**9*197989754*673*1*531*4134343623*85547353*7462121843228*46319513973414*225231321562*8632783317645978399462297789635*52*4357253*83*12*32951519165397*233711876355572825*896*992335287155173*8*62545*1*2**1734842*6252113*18*44334*13*682155889678144685322296419727*27997856899*3585643131278747995119954176614957*35291339434*24626*1*65*25641589693764257483*266545949*1975928482773258389679258353578*87172*548448865*8138*763318*544127584199*368*728399638986263**3827396224822495354737956392861946526246167162*7*3739357**56948938968*5378411*929214487988874*27*798934597*7528*82685***95655778265729*581784228341*31543669114**91215*529325**98*5285374655537788435614538**32676997319182*39*59888444482147562653*738*6*6*699*914611897153797448194*5682798128*859371*845759539814625154135259*223719885**158554884375942748617*7666136*3****1666174933715*769*9862548142912*9772*231916133199*6767317768371647818*395733735366859*75575586*368637*
22815198567147873*444855796445*772*47*632943275974*7238464354338858485944*43493**5811*4244929*52*1363679716*61*8*82952547317122*618246*3*2*57*791789825296*86786427266194492*84981293*658419723291531661245143418748759124838562195*865688455756137196698767293812122699615951563*28555929*2383494286*2928246863415*99921*9951753322362*3**22788*61957376631746999533976955*593897588*116*3837*161*4594933*64965164*2*21*2146531***911775*478619*6519272834*16194214*2394628923956*5469968438997848595174894329481888861*377398*868981644668*835835726957762169529955531194673851764293*8726*6367*4*882*8685*96**4135987375864672244*28496979*96*2572197843321249443*5179*6453637*367547*779695732937824241288746471394674995562615746348642567*7*68341147646*89897*9581297618942752*78637*317652413681*3235967524*939*31928*4144571*713*112*8749696254*4*6383416491885894996*38*1*164*52435277645**1753654*32245267665473325875448*254786*582749489*17*28352155452755*87*96429*3568262**4251*92746992*7**376**665661295816888431848441885497596*6481*1*9978739778*96252**15348488473824361668887726487226936439568883994628*817664451588522229143*6258883773386546*831597633736*5****6711442541273459426432475795262565*8992117246*235152871*82544235*867262897774571*7247769936235*3696768796757599375738116659*3752418289*266516951513*4685267*5*988518667654818*716825855837688569*152**5*698877874688851*3275*551691889949165854746538935626*9*81849336147*4*2*6346627578884586*49439312*3412*13941125859*9158429123*2546*84354696227628*46458332674333511238483286797*77749*726425132172361456445279498*5716851656*8522729747944*499155844*5*557*354299368*161539374783971832714183624474**243557295175999637664822448953*47418795847812*938*881268781*893569384428664*65*37675*2724*2525129494*41356*4357684683735374483*19435733278181612239714*2**1224468441565561322459822649*463*1792*53481467572573314894689*88927415*1*216334193*5856957136713357196428434752874336962117*9779295*6243587*198*66*7736387837537481*3264236679427*8*7873855572*2763*64*91*3948219*94*
*2*7296*6998488167677*946598182556461499*822*158*728976715697276255539344537425991*911257722161362847495657173599372798826942956*24899955221814432246752446442125854258476956926*9369484871462**622289596831929258354*113428133964943*8*877164*25113979967587**8713615967846925719863141*312423432165586873181275546567677771686294*661*388896*987973814*859897*33889*219563149982689232883459417123639883237784*156621919673554175286886*35423146*71*6148614*424863528725355748946**6239467547*538*599*456716612758595575941147934998276825634359131137*9464*63148159481787*3*778462461183697754461931169333418*66*438581325299**3877123449*65687326862416614124317*68728*1829*83*427529185247713519346678554158*529962768*4271746**13722858437435781789417682883416235547697661**47665953*641543945*5862**7398219*3666512**8232*78594585*38*359547463427463173123656137126*481765*92*749416*21186*4912238446332681541*256182745*367633598236579384982976*9922682436522*577*24828664*5418326324161717***8266391*93*3*9*512551326455884788*77187*53771636322941**5481633267299*888418312785322*246*7*34*18226268*53397641866*5413251262216447*5*22745922*1889768*68514349232*51736377485244153168387377661853946*524936*5*92241464742311197612394*228*556*439772**11235543237379499814*6*8288**72581611*9786893884*7719337993613186272813794184337975*99*954558978*323798318*477964234734255438824953*44588326637274*44*45264391*452*4489256293487*19231278918244236146348635324493711537114662616672187*87394*24*895254167248772882755617231*3*3*36753188458421*389261*5553151*4687476929**41283821453268294448716566231376161669533428162341772919673*3619443456759743433889794565621791697732*169*149741349786987322161938*3217773361258583253647*286626*2765638163*7262*629*9655*236426767395968574927188118578*344*34798274*9788*5951455*67713337*3*5682142251635458*4254833311829*84*762*2566331713*953675415794681171938927*5757186925*7*2734279777571943775373389*74145*9549918938*496*1*68*28511337153557946*6*7*148631345142855245225*566491231124546*148328917652568*
*267*5349*22789378221353672952344344425999*93322*47649559359989845938735*829536926993**3612815847563199826687*171184*85*36397637*136448*68612224721969625995351215116726*81*1265751*99***1344836122*95527998244*83*659715221218642*4*34339431611744*99282346*282658157568*4547382375*718699868474777772**392412778665375*8455*987117*913194578*237*5311126*1383443*76556494*7918687697848919463273385677757222466193*36569*93293*3141792*243836626841148448343734322237458**992*9*655573115658*3347287449845*328486748*2787196698299999875*39241175172142*17*352115691918216329946*92325966*2535551499*385318567579**2182489994278946**17845553947846*57467232199*641**4*12866**63651759*974169216*75299913965*36547*482772917439*59543876993741119515**3149***94*348*539*497581527*832326*274*9148616*1346898512*5387556638127443*7638888*219414*29481437*733666*762182755*96*9987845*7934321593768**33**9845655348488719841282347785631*85628*17762857866637*7217586*6182321143*49841986417*4414269548*191*4359787*7563122*6248523945282297**4991857659259**15*78679112183827787*776482391841487872342614333457564*582946971574787278991198516*64255*554*57839298*46156875773*685***372782586648515239871365845224467893829736*21483257**347747**411*98758438638**9233435527*491918342*43*2679*9518716438747*7*8154328*3599*9944844167281516366977885859229*52*86*4264174942638424742766669168665*46265664624225964892**8267685913398691919529*667**25248429289*5598856*95446*1838629*56823122356224*9476613144366*93891*112196382693883817686724*81966856*691351921*836234419172654**614734566834*239726649141*5*5884445357*1951747*79615*1*4835632*6492141321318213*395787*35316*583*9*318836399378614141741857719472478793127973867868387*6978288264*6957956876318273337981884173477128*843*2*217*75*7*32113454**6685285*72432*5631775165878412**419874*68295*2846184*23*8954*16*5484946962388568495439839986862892776689458*6618*4467891551573449*465116411619*31845*2124933177*39816*947543133374381*81887711383876788824*535785813237591842647269562*784811598**68*29489
553275225714215156293369148194524*5*27714*49715463934*67443187*157*9523345465722***39*39*836*9314684386522724*797*116*23634725352*824279*3743*89136239275124645944219786438*27472*246833828498161832158*284*7417367228828562727969*54187463563*76697415368367124647128593*424173556154289991**72324661333*259142*222*1472*6266379*657688986243262199372577541278342*71481444988237487716214*75974885348729*65919554894**89*1*875922*48911858689*5513945*254252577377842317487787783552459*3116*615712181831577*945865318*453641*93552961396478668252753*2*65**4712152316*4154*265342594526278867262*146977343*8*786195367*72*59856652781418*9717433848589239772*1689575994*796659762*541*448*141362131295135564697722*438522261*63125837789714715*913596325461*8318357585447465649441818983119772342724771*3*668232938449*949*1731173*47713485854487358*314351*944*583573792382246**822*5*147841254771887*39657*548116867837918147*884258969*235999897**21648911573*1*138*48*2433*25128515711*496761*663*786376*7111*635373439722728628**2377*45*6438183771378**972178**674798331283649211983238963946*2456427784826886867682*97484965243813911778***2*3742718368594282351474*2484*764169864395886*52833*34676212736*327*33666459869715*1283539*5584188*25592*91329***7763715*68652631847565741442241717156925264574374325464844*4956198**841249*37342*81299824312194263*58*922**82517863237697*17389*4786914*25135*25871*36752*833671256825332523782*46*3*8176942*3837791239*26*7*54788*91622*449*79582578434933293597745*74254*5*4*14151215697942719752**93217714382362114387*83239295**5895938946137327925138453966*773**1534349624526**663412225*339*977*2*27249*2883872917192558552874*447855448*52*2795*47*84366143398*15*889*986742611977942*238*584*4958279*3155466*4128816*862*733**231718527717682611559****473*77154*58*341949176775163*28234277921324113588193495323*2*986137193*8379394218474293328639191492*7477862*6885686*86761*7928444*72134179642584*6*84222582314681744675743*94534255783*42162746*611548*978*791279968864917311615772*4891546256671*36*62*45
62*7476618967389*5691972782248*81822**71698491246*889244**1193*57822952278841637**73*34434149234848*872*4917131152685577389*58489734737*7688141117766777329945*2*13857927591*998755293461676*73*522**195*887962*532**4274651*889*58923*843829238252984*8349**2759339259*45427*1723499571327*911743924522158839892319571626191259653586716997893*79913171318638969*995662438982*634883177964369294647191529697468*7558519125282*338616*7478*8826414516874171836*8486*42853261*217**3799784487366**7818854749*214*2764343896615514*214*8137**96414545159*461623756478*549*1838222732235297549*2172529726191816599*1161771384158*62879145152891*5971458957*7147821*7*67666963*867871587724411228*44899*2642158127*7975*9336178382138*3717*4623171*88861514998925943267543781292161**741*3*237921776929*263647857867335*9*39*29291218731571*9149421268*188118771343*369312529568273*1839778365*432837*9219583433445*3268518352***7**9418*445143762172222*3*7184*4713956*58278238755*94*543223*7*428714*5677187949*86697*7871725967158284*8*53215885626496728371*85699122117*382386762844*7*51314999293987433*29*8911**7****98365129911764184891335417*84983141398512*1*48542*5851**6725892*489233464656472*1*6193339*9618355176435313277514613918*22419887117745784522*785785519*3*32*2*1117763*87922781*99*284959891*18278945*7919659*7382249373273314772759*5763*71789*41412248896*686833**67163774666367**34341451295*39399683274*2567651643754458528171*9771595*5273*8229149458973384636384932382792384659*52764*77*18626845785355745*868439*65*7257224172759*1*3426844579554*57885926876593*23*58163**9415*32*69261458*2568646384538955244333923534129185949722477916*845926846716*23369163847393*596976*28429*211551*4362*6317125243*142122256386982513976**816661355*589**8236972**343**8763115871286*168999184*4*54*521886786355298125975*378242232377*6291*51247*7677*6545434338775497894613577347542248825718*49833*7494*526*775*71*3224545*5597375672822696483782**32115573646882495288462*1419418655532383736388552541399639*928372116821914385136917*262154376725*3*5*2
66412255**1681444133386766737972137*8988*5949811524522837568*4947951475392825261564*1978462649339772*74282371892574685*99425*47657969424963192216532*1167**869662716396897771*66227323*22947413561835734*38538264798428*284225*68561799388*292959*9468937129*7114912955552481399722551367635938647*38*3533946776*44486917*95229882*5933*8653189757479322973848356792666288**885*57681197286*2*53254*3912152*999746131*64*7215439712694348736*18627415988535629*737553818178*79981*56936817*3631673683914468855*25376289*689857879*91466*1726232744578864*2*5371759452932489523*242378763*445*33124822392*745588239159746424328*6824*94419877348*55567164558439816*811*112517831*14984156*4694**6841931347*8*28344*481251875924728185586561439713212463667158752*824*7835457295872*44553428*1825268733*376763864739483891864*118373318689*67*3111828*798*7*18522495794*4*686432*4573273321652856182*15375968*365541*5*784494*76644*41376558548398571*259729*2675862194*999325431892*16232342988564675378527*26533695548466793859163927*98996222521365*93*92915784*1394241*78624327361879758*358*619524932181671*23457574476*196527143*633989*942322883*7329262834193137286*893*56932*9725542*61342*432996436745246*6*25*1823*4448*418*8969691957168297535685684678535949479*53*635313613922448961485*48989343815*9*2458*369789161*421556131651352451612981659*74123125559287227549759146144826679514327594*9978**7398454287*2815*5461569544*8*68556886989713333763492329*7*9567*63749131176941224391553554*647*231271446*95969393778227858993643276*7*5825572918733*476*594*96386**783993*5387283162391453943433429663942478611*51543919344*472524918842157117*15277793789272*29279612266241852859675615*92956**3786572956234553774422223211***32*8424116219153*71773749673867918*9382*592113132281461668467**992929126*92866847475*6894*924583738*51356284678159367161495461*9999395171724821243879158**684752666469*226122156469*6366672793186317629934*57156794*716*333784368279144547535*71552*898143*7657896*8349*3735348783*238*4*79234547314927448168822*759467*81*8152246
92411791738498*932676184558334155113391973339989*6*3438*72*2739413713*1912*9488123175939157234269655787*69*65456995421979759149373149472547*79*4*175534*561381375*959761788152*93*51826894*7896671*93495925355964727*2**217884186827598368412148333*9329233663*58644*63497596156*858851279697221311348729464755485898*8942883572*1546695*7786961463577946*1751862759**2552*33939*49*346*85*396863389*3835664*7*92518639828528512837341235*57116685*482*849732*2775832477*82*4382454519*4**165*28*7546*8426649776189972723684*39934714676*7755887958557*4*997*38667419814*292*23*2932938221292*81688136154637738879369541*1223491632488487258*1511412952176*19939*7257534664711*276364641993*485347279928111472998*466472121612173135679*9923*31566159766*2525983471938425**4214674**8417191667493944*99234814631113187472618684*2618*1764627843*2662813764*639142*81*9533578*629438*9**5995864947186824488*56748287265471915377914736892654*9*1958688587198919494652891582*142*99459131897355831*49*7313923*915**833673971918553391338424346*32*863131*964929589892936542*8275**2564*69775951121**398*15223258*8*319448476*2176*9872*7799**78467575*282543119681177937*28831591*86**71613921193898444561*77*7428933913*684*52*3984967591735896937*2691688789668*85932358*1124**1326433534954341*571277279668699586528251276397178438573131451778*9*2524288414558243844483*82119319797981293312234141157244331193455892682958717884868592514613*75*716967*66*881387877935*4586*4*98*5555142195416*556974112864*418*453832*2718783581*5764199854853896483811285572428489497719256116999*9858*71167233997247553899*461519659312*7326*755*4*892429639*87384*77244133512227*8767849*58882639*279283848163431118*42*26286551*458366687676455*1453414581*72*59174*38525115156*8349845429*215927827325218662881152754813*939296886845*8115164422453476795*478*5193*4*1412971174977436966129491*4*16461226126*9859*48923875568*322*77482*3789*25286656386*2695*19*867159*77877389441572337*371276581664737*3*2812*69629848**7*44229486*337447235684624*2641*147332858*6*68829857583383138
2*9256*12261956392*357312553113335239*843229928443331*842812773349151381*56394*19751389513168384678**712389212845*9712*652354446947*3*198747*286327793186*8641963*57489149693*65241*4171381562*181*63158269587795*966688995982769473*664*979*465771*758*547*5191856525878497983*98696964788182397912*44*41423*46*6*8883489457*617887223834334*3166845157149459876*11737*292234519*99847149711*97546287127486619741664471*69616613619**2776437**8*78872222534*32389139585446992526*172753745*91*8*438896777863253615729252472*97132959245*5*5277**98*59*154*9587373**694595*49881523*5614*789161*692983811235841*88744111277*853371*5*4562*9798*319898677873*32498334662235*7253*23627238*3512465644*2875718759773*66175*51*935526822*29115821137*396*29617748*3*2156568322*71*1375495756587517683656**3*3545949*4889*51884*445649933681152333934*673674153753332854*954**78378*2*5945979259994*2526864445*3173*5967447*4656287379*21869*36845138846458194184321747449*418781385152*743848435599351122793394539596453465379257**15865**79648681181551289927*14199369**49*1281*9*5254197*148425*741672533*5156499484744744*88834292*5*7935217369881194798*3*38785439433*83*1*63996*9968156841426*237*58*838273617146***76149875*1495759291753822167771757677929355479564*493998965497*6236*5875256412312*514158*9775755*721515752814662389929244323516165751*8271833678363*12127777476**624367686*5277133861*3882779588315538*1591641727194613271158127742722637*3354349358854744756159499*113758599*163*375934564681*719834921345*31*7549931*5277446597**4677261933215554**391254**83758235*2129*72469666733*3*6286*8*19281447967*93845*174219825957*641916657464894656556573556847486*5*5941*211135139128415*929824*1965614935***9833226372425414551136182336*4*34292*3*718*1489245817566818969*55994977*853677**64949434587649313*1825421426126744*3*1*69*2824334*766123837863772389*7734*2*4*6653*797737377*4572543679637172427483577236*42*9831*57622953812911729211741*6*8488119333969615972**77224589594453392857844552166*74449643*994379796221879785687*42823*6*858373*22
4797161921531172462869*6*1692*134174612826222521*53191461462175**821762316453183*3394234816*878*2*4946511118*4*13619584988193742527*873427949139958412*87*55968799729488787174*81261*183134495936527657698652*36*5916398674*28773763647145622*711499258524674*9952*335*6967291*4686562**51159428*4678*45*784*425*881139*6495389386279*45389895*891**8417**4887846*53*35999267319453**942514*2481588977788445715*19723*58161*415*493131152*75*87*91322116*9*97234*681146271448818*45729*9549612164329979117*717543*364276941895*12*889*8378632419423*28*3**75*34683168667463993*5*961461849388254**39571774436*666495719625358*378334*185863681599297314476423*816319645912639875998*571665754236**3831236662842843565*132449471811899365749*5*4528*28455854738*4595*9*326219641659425*14591772793218*19579924948374*85571*6569*5543226447**446132438*8762745*44*5963*141416*58117642647*522*85775732675773712733365323592831866864278*287743932784*338685323*4726966519723*33372*5978*93382958757627629881678418728476576679*363724471*56*692574932124784*3979*112452*138*652583*67979345*265*7717282814*4*68178*8132331465389394*56891191762846*196855692*89438541*766*98227972*2891632*9692*162741*2768149722533*52497951*83*38*11538**115159*76*9579471718431*1586622*283291*93731522941992*82176*79369912942*27533258*8537*4647769277214921*96532611*2789489754917742276*7*837155787919169447225357185822*67987653941942164988*9546222996*9*187255379534*1*53498*98*36394268752812633839749276152125*1*5382*415211712*2*96976*452*48*71728352543262926*29929741298*7524986344457*4*14984488962669*3764229448999763148*9726*8825125782439996947*7487137771*4388147771*67314264531147184*68*3652899951472457473576715*686624491966*52696166*67252*2663281358233893*5853*61856583**75*49689394787*48*591436**87*8441134933956762257794755177693718928354124275958529238*2317625**5*94383393375942547394784429967987487573655885*1371*138452835872545459*78*611993261428818*1328556261113843*39662178724759*55712*29*922**6579*749*179274358327886476643197464*9223598149*437567
1628*47243122*4824683*96665268196*9536515*833563445*76961545427*623783439*13951267*385956*225785*7*6*69*5177157462*865573768558449374983*81293441269931*19364453923966612639*7*95972234825227512*4926493443*8*93876*2313*1127368266341963545791143215814913184*93631767317151526*83474637255929229641555944*6815697*25838746197*3*6*583*9**98629628222284589*863135**77*631119344726511749475285993428327657*3265845*4416788263996137*26*1586366819464285971776278*9844668977585184224867452215955*9358497696344582*795*786486381578297381698656164*651*98273735979558715935531499119176243491241158556*445182835*2*8718*322667544866152*56614871*1893*527118445974473563*64493526724662741252159363972593269*19938134*98863851289572782*3645894874322393781411455154645*567123787*8*627335697252186934235*73*71*8526334562*3951*8445*56214358*44414992*8665574363*5*1476*879693*2*25472327*392939984521326184787935158554369744*122882224381472514188827*5419395795669629274399583424246249376628864367443499328448389775129975972845*48543164264*84387674489787991491716877659277*421*955855739651*6585842152116127816836*8*412*57941393185663114264388*88487874*87997396661371583963965141*1643574*9881647487178183349422*157815942537417*2721766187*219884917213846361651447659835352927333222734786391*935*64534555*78*4343742*86884694549864356537*95*9184*8666282328886378*7527777733*88*47615*2795632494***784642299483*59891767*11181162517*74867282584173598319293728733*74829672675*7657449*16**366535478*8786486393638581596576734256**141347959955156866161142763837233346652912837856617*39959542681497*345954728*9136492976*88667781216813357**268577*1759416**51354121187*5*3*93*3899382338795*52463*842*9*3*832*414681147389672474452*28997*351*29442*645725254287946684*8914*8386663664457416959462693223*8316413993981*2137124189886*18*272492*3321799215675536497*6156534*9*9312986189577318631***92697*87961566214949841166***777*83559*4754*46215347787*1316978*245873127816269*464847*22143653191449775431759966499262*8538821231692831326157*92399813**3
925126983*875589759*533141631744*75967491781795827643519526929*257*4767*9*281633334159717272491825665129359142619*6662773256986142716*29767*297763481*464586656892*64413124649357*285**23683819842*791185465***86*15378362571249852*5*42*64676623*6282225239358*5552743417643454121*5312*4717998673634682*35*4853*9*1252112627263*222348653*5975676253973173288*2*28171993471981198**1456894878972*18267129*899*55*52794196119*52*58318138227442282666341338916693*12117825*28697899*936*82148916692292712*1558612735171*1625129275469355883*6435797838218*4*74469741895*7772*871273678553797568468431647656472976768343525**97838387229921366733463772117986*426*197987334146953653983153351493*1*13658913*28927691779331653879943623218**1578851913269551998226145*7**172996*73**182218727319**8*14947992881965479646569613878593*12513*5817399719*98*1*65276769435993454318924367*179862*73751*531792838141315*613666325397343*81922***69762418395*3493152*6381*348311245*859324*9921324*7735*2153928442596*1935*1576*81348382*61*4277197298362237912296482346937261265371966462***6531319728749655*2457433246356278538*36356*9*659761238**43397*4412414749739*4718996535*4363664928****3123*611794*4878439765858791*2292*1556734485*67498951566161155616**3576436339939622458*6581131*9*19*7441*462163765899*5953384*862592*85221*31361435*2666249791398585*949573115335788*4297*93*7518**1*1915659126**2813*21456567928278938*125732*662293788162258*7487479642631*766426672536952664425659872723297436*133344938574741*83381662953332788181*653355*22755639*51353815*81*3846254258616231586854615833845*652824*95422**795225*5772*78573411*19968791*4476668*69**7575951*81173*3322*35934*8791493*1*536935485237589577951939576171*7555*627397485793284718188582*791593646165137729683711389938846*571683615362*83816443126345815197*244483854152458156462749947611978543**771146566236548974964714446463213*3*316**3199219943*36979*8146264829*949*7793848912732255378851551871*41736583*541283*725716*84492639429581633346*3629685695466658159713233494761976575515429937
1528384247652339969128721169348*592*256388659*7*648362**4*81641133*11*76386539971828821182429539715563947984*235589*14135769744615444767972475*5491*46659*6*23734214161422576751473*5842728422*8357449566331984955682*9695477357**3867115959*63642482158436665569742298221*233269945393571861*2439537682535317*29377948624363271448456525754397525318936*4268486356*541399542*1365*3951683263413963498252784587477223928485194618*4155912511*92*581352*58954447482926845*77956*52*957496824*359897358221*527*394635*2996248863424*65*233492*668792729*5857*5314191*276595157877987882682188961548445368*1468663732696657714353455486732466218357588*59896*14281838533771298*3967*4787819*4113884337261862432354996659319744554742*3424*1338171368**2112853*7*81*456678**9*4189847129*37*3*2264175113777718*9676152492774626526181872*5114221*81*8155167*1542987158769751117*358533554684972186887483683464629*55*876115633615292383*614**71178761641984*6117239471747758598687121836854646219351973519786*91*5*8487615476383519*314394**548275764223491436142*1695*75336488442555225353278763676***762*79897388*33612497251221625816629683434771613975587157644156*578135147256865687644*45*8399646*98*335739959*473118549516451589847*25794388898778127386239627*9248973595262*5148488143*288*696658487*99*9757476714539*1427219873*22678448697495234549**4491369676521369234*42336661849771776*7*43169987**326778371*3*3557*38522*624741764371488**1*924331513*76868149962*5945*566664***94736651*7929*9637727*356486662362*82*6*79939949962773427*7938397427737251862488355*53346*348641942228752*7134189637697712*9*76414315396167436564828788637881*9447286376381*2*7963626271861879*33*57588187313284665894717*62371962995258**8*6*248*14527971*6416313587557*592576937*8*849794*61*134*188765516666526821*19*8744*83949*821*3**22*4858*2742399*533456437642557854774*3762625476381*8471753228*2344*618979**3122551*761*985*7*97983149789**744814*4*86382421*4582266*1*53487564*9583248*1736**97373615143941331517697681*1468993*9583226429733134373165579953169*1831**776
*683156*1136536*4921581983*4511824128447*353*866**83553642*473*327395145691*94964*419262716222969442812335*6716175837816954*3*5655*6864487755*964658795139787256638493*417764**5144317453741534565742*6741589176*671923928863854*9285341669511148*391*5864997911468629*5466495*355*78*883384146*462544673719728*9636942683885311821*35*84775675*1721293*679229645*553*51*2*327*96*7*7859659546312731916*7*52482676*5827*8653*1399585198841782958345157523144211253189149*951*39858*69*8731775751398849*8282992641354*2*19124411761771934*92234111178584*4546216937*45964969*6642643*516772787**3922515474862388459693236619362495228*122*54791***5236462464839*5717**972478228865879891943997421824921374**52759871868*495295871969233771*543593*741771533*84433836656776741555452*99***3743664716859831*6116**78*1427251*4*87832291742314*174882471374518298299897264359972875*1614155*1628712869456*989922496919**262355132953*167768663*367289872288335435767423*3534*7*612211274*7446334*914*7626*3223328133399851655928898672585449225*7*461*53*6433*3992928679328*2154396428*292826437*72344*12*1347974*77722638418321146153544*2*68578263425134641*11487129553483591814*7173991627681335*73637954*3326563*56955**76123**4921*48*74899155699*37764259395*7488*979964*949*45715*73563*39*48946488861582*96965*557*575968499244143764443545*546*49545*1914134*8252336745796598*499626234356*893945486931254183*1383634424382439295758371715*54514*3*93626693*658218259957*31615851483**38*315187857669*9466111445287974*6399256611513788*932553116*585464**81762456532467759225373547**593481935*3233846939399644333*585876764994575245896184*38179367478628619292254997932499*2357218*78337717*786971*663283*26473*171771956742372577*18*35838231572543125827412*34588*422*95634*2*2*226739529695848*178561*91221453476579581291695767969859642955414834572*12*328347685662731*322516*4195*35*17*2213434676919411915739*58*7845*315442271737831789831872188797659845192543*77349393415*31761181484479**8*218236991852*363298232523797164422*441645*472*4613*386839124736756
*74*33*145*45227518327799769*6*58541367675469*3646519426626242637375*6841*7**4738*953424*658858584*229156474255*989398*3269143127*442426*87186*69663967*28812*386724753249*491662*9**798759835396237448516699*7177464171711332654622676622*2199126984325295388635152*9447165583229757288822119769381868588*49933933437233*5*183*288423931413886*6*74662216661277592839*84*325*5386652**617974665419847862*36633*75*4856757611365481*857594685316573663449498667**41246758*4326332264534326135274461724499*531*2566*998*5245257*8*49379*89297962947379642696*62282782315125*4453491*826*92922711824*5663872*32993971297757*966542989735945*487166988738931449*88393815191*675*61*17**25*77151796715419864983**85458731262835151**446436653566993673757332542457*8*471*936562993694*91324426767*15618978858622379238414139658969*4328**889639412839*5672*5461288599415755672545926187648137188215458*661134*71695863822397622341*7*3167316**1697865251792753369**49672537183542552*9446961633585*71*176354177951*95867495387113562425321743713465751*238*2222457889*926697758399469*966627318194821542851*99699227847973159286*159629196412418715916*199855286*89*258535364*484846134766372361126264*16451187*928121863*442533*5797511*53856**5591*274*995*5311547**428249847*774388*95939189828242336932*4873394*4454*24292*3*9287*8*4297999258268966618*4*5358638317934947627357976521*174*9327381718519918*77*5*8171*63581351761679522896652*282149591684997378793*842*989*92*12322*1*2*271873745*1*5*2*21649**5812*95599983756329674664884*96*299921117*1531875681992868*8*98*8243*1278932784866332366154667546743155185*39975997547335751*2848985*765*113891928363*327866233673368726297622236384416*46*5*468*7619166418977698569696116177521369446*33854*295*973466333766723*57576296673891286956238577155**9*43549787885761*15*23739514655*765784928834269*842*355342419318*642621272382721277514*549934446493*236219*9964222639*4**74631429312*62*5144677314486769425482156924*627424377*4982886*56184233*464987287337791177253*3283666885*2469*6716575289*36849892*516*15
679666*979288627893199775497175")
""")
| apache-2.0 |
jlillest/micropython | drivers/onewire/onewire.py | 66 | 11789 | """
OneWire library ported to MicroPython by Jason Hildebrand.
TODO:
* implement and test parasite-power mode (as an init option)
* port the crc checks
The original upstream copyright and terms follow.
------------------------------------------------------------------------------
Copyright (c) 2007, Jim Studt (original old version - many contributors since)
OneWire has been maintained by Paul Stoffregen (paul@pjrc.com) since
January 2010.
26 Sept 2008 -- Robin James
Jim Studt's original library was modified by Josh Larios.
Tom Pollard, pollard@alum.mit.edu, contributed around May 20, 2008
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Much of the code was inspired by Derek Yerger's code, though I don't
think much of that remains. In any event that was..
(copyleft) 2006 by Derek Yerger - Free to distribute freely.
"""
import pyb
from pyb import disable_irq
from pyb import enable_irq
class OneWire:
    """Bit-banged Dallas/Maxim 1-Wire bus master for a pyboard data pin.

    Timing-critical bit slots are generated with ``pyb.udelay`` while IRQs
    are disabled, so that interrupt latency cannot stretch a slot beyond
    the protocol's limits.  An external pull-up resistor on the data line
    is expected (standard for 1-Wire wiring).
    """
    def __init__(self, pin):
        """
        Pass the data pin connected to your one-wire device(s), for example Pin('X1').
        The one-wire protocol allows for multiple devices to be attached.
        """
        self.data_pin = pin
        # Microsecond delays for the write slots: (d0, d1) shape a "1" bit,
        # (d2, d3) shape a "0" bit -- see _write_bit().
        self.write_delays = (1, 40, 40, 1)
        # Microsecond delays for a read slot -- see _read_bit().
        self.read_delays = (1, 1, 40)
        # cache a bunch of methods and attributes. This is necessary in _write_bit and
        # _read_bit to achieve the timing required by the OneWire protocol.
        # NOTE(review): the last cached item is pin.PULL_NONE, but every
        # unpacking site names it Pin_PULL_UP -- this relies on the external
        # pull-up resistor rather than the MCU's internal one; confirm intended.
        self.cache = (pin.init, pin.value, pin.OUT_PP, pin.IN, pin.PULL_NONE)
        pin.init(pin.IN, pin.PULL_UP)
    def reset(self):
        """
        Perform the onewire reset function.
        Returns 1 if a device asserted a presence pulse, 0 otherwise.
        If you receive 0, then check your wiring and make sure you are providing
        power and ground to your devices.
        """
        retries = 25
        self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_UP)
        # We will wait up to 250uS for
        # the bus to come high, if it doesn't then it is broken or shorted
        # and we return a 0;
        # wait until the wire is high... just in case
        while True:
            if self.data_pin.value():
                break
            retries -= 1
            if retries == 0:
                raise OSError("OneWire pin didn't go high")
            pyb.udelay(10)
        # pull the bus low for at least 480us
        self.data_pin.low()
        self.data_pin.init(self.data_pin.OUT_PP)
        pyb.udelay(480)
        # If there is a slave present, it should pull the bus low within 60us
        i = pyb.disable_irq()
        self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_UP)
        pyb.udelay(70)
        # Bus low at sample time == at least one slave answered the reset.
        presence = not self.data_pin.value()
        pyb.enable_irq(i)
        # Let the presence pulse finish before the next operation.
        pyb.udelay(410)
        return presence
    def write_bit(self, value):
        """
        Write a single bit.
        """
        pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
        self._write_bit(value, pin_init, pin_value, Pin_OUT_PP)
    def _write_bit(self, value, pin_init, pin_value, Pin_OUT_PP):
        """
        Write a single bit - requires cached methods/attributes be passed as arguments.
        See also write_bit()
        """
        d0, d1, d2, d3 = self.write_delays
        udelay = pyb.udelay
        if value:
            # write 1: short low pulse (d0), then release high for d1
            i = disable_irq()
            pin_value(0)
            pin_init(Pin_OUT_PP)
            udelay(d0)
            pin_value(1)
            enable_irq(i)
            udelay(d1)
        else:
            # write 0: long low pulse (d2), then release high for d3
            i = disable_irq()
            pin_value(0)
            pin_init(Pin_OUT_PP)
            udelay(d2)
            pin_value(1)
            enable_irq(i)
            udelay(d3)
    def write_byte(self, value):
        """
        Write a byte. The pin will go tri-state at the end of the write to avoid
        heating in a short or other mishap.
        """
        pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
        # 1-Wire is LSB-first: shift the value down one bit per slot.
        for i in range(8):
            self._write_bit(value & 1, pin_init, pin_value, Pin_OUT_PP)
            value >>= 1
        pin_init(Pin_IN, Pin_PULL_UP)
    def write_bytes(self, bytestring):
        """
        Write a sequence of bytes.
        """
        for byte in bytestring:
            self.write_byte(byte)
    def _read_bit(self, pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP):
        """
        Read a single bit - requires cached methods/attributes be passed as arguments.
        See also read_bit()
        """
        d0, d1, d2 = self.read_delays
        udelay = pyb.udelay
        pin_init(Pin_IN, Pin_PULL_UP) # TODO why do we need this?
        i = disable_irq()
        # Start the read slot with a brief low pulse, then release the bus
        # and sample: the slave holds the line low to signal a 0 bit.
        pin_value(0)
        pin_init(Pin_OUT_PP)
        udelay(d0)
        pin_init(Pin_IN, Pin_PULL_UP)
        udelay(d1)
        value = pin_value()
        enable_irq(i)
        # Wait out the remainder of the time slot.
        udelay(d2)
        return value
    def read_bit(self):
        """
        Read a single bit.
        """
        pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
        return self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
    def read_byte(self):
        """
        Read a single byte and return the value as an integer.
        See also read_bytes()
        """
        pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
        value = 0
        # Bits arrive LSB-first; assemble them into an integer.
        for i in range(8):
            bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
            value |= bit << i
        return value
    def read_bytes(self, count):
        """
        Read a sequence of N bytes.
        The bytes are returned as a bytearray.
        """
        s = bytearray(count)
        for i in range(count):
            s[i] = self.read_byte()
        return s
    def select_rom(self, rom):
        """
        Select a specific device to talk to. Pass in rom as a bytearray (8 bytes).
        """
        assert len(rom) == 8, "ROM must be 8 bytes"
        self.reset()
        self.write_byte(0x55) # ROM MATCH
        self.write_bytes(rom)
    def read_rom(self):
        """
        Read the ROM - this works if there is only a single device attached.
        """
        self.reset()
        self.write_byte(0x33) # READ ROM
        rom = self.read_bytes(8)
        # TODO: check CRC of the ROM
        return rom
    def skip_rom(self):
        """
        Send skip-rom command - this works if there is only one device attached.
        """
        self.write_byte(0xCC) # SKIP ROM
    def depower(self):
        # Tri-state the pin (input, no pull) so we stop driving the bus.
        self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_NONE)
    def scan(self):
        """
        Return a list of ROMs for all attached devices.
        Each ROM is returned as a bytes object of 8 bytes.
        """
        devices = []
        self._reset_search()
        while True:
            rom = self._search()
            if not rom:
                return devices
            devices.append(rom)
    def _reset_search(self):
        # Reset the ROM-search bookkeeping so the next _search() starts over.
        self.last_discrepancy = 0
        self.last_device_flag = False
        self.last_family_discrepancy = 0
        self.rom = bytearray(8)
    def _search(self):
        # One step of the standard 1-Wire ROM search (binary tree walk over
        # the 64 ROM bits, presumably per Maxim's published search algorithm
        # -- structure matches; confirm against app note if modifying).
        # Returns the next discovered ROM as bytes, or None when exhausted.
        # initialize for search
        id_bit_number = 1
        last_zero = 0
        rom_byte_number = 0
        rom_byte_mask = 1
        search_result = 0
        pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
        # if the last call was not the last one
        if not self.last_device_flag:
            # 1-Wire reset
            if not self.reset():
                self._reset_search()
                return None
            # issue the search command
            self.write_byte(0xF0)
            # loop to do the search
            while rom_byte_number < 8: # loop until through all ROM bytes 0-7
                # read a bit and its complement
                id_bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
                cmp_id_bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
                # check for no devices on 1-wire
                if (id_bit == 1) and (cmp_id_bit == 1):
                    break
                else:
                    # all devices coupled have 0 or 1
                    if (id_bit != cmp_id_bit):
                        search_direction = id_bit # bit write value for search
                    else:
                        # if this discrepancy if before the Last Discrepancy
                        # on a previous next then pick the same as last time
                        if (id_bit_number < self.last_discrepancy):
                            search_direction = (self.rom[rom_byte_number] & rom_byte_mask) > 0
                        else:
                            # if equal to last pick 1, if not then pick 0
                            search_direction = (id_bit_number == self.last_discrepancy)
                        # if 0 was picked then record its position in LastZero
                        if search_direction == 0:
                            last_zero = id_bit_number
                            # check for Last discrepancy in family
                            if last_zero < 9:
                                self.last_family_discrepancy = last_zero
                    # set or clear the bit in the ROM byte rom_byte_number
                    # with mask rom_byte_mask
                    if search_direction == 1:
                        self.rom[rom_byte_number] |= rom_byte_mask
                    else:
                        self.rom[rom_byte_number] &= ~rom_byte_mask
                    # serial number search direction write bit
                    #print('sd', search_direction)
                    self.write_bit(search_direction)
                    # increment the byte counter id_bit_number
                    # and shift the mask rom_byte_mask
                    id_bit_number += 1
                    rom_byte_mask <<= 1
                    # if the mask is 0 then go to new SerialNum byte rom_byte_number and reset mask
                    if rom_byte_mask == 0x100:
                        rom_byte_number += 1
                        rom_byte_mask = 1
        # if the search was successful then
        if not (id_bit_number < 65):
            # search successful so set last_discrepancy,last_device_flag,search_result
            self.last_discrepancy = last_zero
            # check for last device
            if self.last_discrepancy == 0:
                self.last_device_flag = True
            search_result = True
        # if no device found then reset counters so next 'search' will be like a first
        if not search_result or not self.rom[0]:
            self._reset_search()
            return None
        else:
            return bytes(self.rom)
| mit |
Forrestburgundy/Pi-Mission-Control | screens/wordclock/layouts/english.py | 2 | 2752 | '''This is a custom layout for the RPi InfoScreen wordclock screen.
Custom layouts can be created for the screen by creating a new file in the
"layouts" folder.
Each layout must have the following variables:
LAYOUT: The grid layout. Must be a single string.
MAP: The mapping required for various times (see notes below)
COLS: The number of columns required for the grid layout
SIZE: The size of the individual box containing your letter.
Tuple in (x, y) format.
FONTSIZE: Font size for the letter
'''
# The letter grid, one string read row-by-row by the parser
# (COLS characters per row).
LAYOUT = ("ITQISHCUBMWLRPI"
          "AOQUARTERFDHALF"
          "TWENTYSFIVEGTEN"
          "TOXPASTNYTWELVE"
          "ONESIXTHREENINE"
          "SEVENTWOXELEVEN"
          "EIGHTENFOURFIVE"
          "RPIO'CLOCKHAMPM")
# Map instructions:
# The clock rounds the time to the nearest 5 minutes, so there must be a
# setting for every five minute interval: "m00", "m05", ... "m55".
# Hours are on a 12-hour basis rather than 24-hour: "h01" ... "h12".
# Three optional keys:
#   "all": indices that are always lit regardless of time (e.g. "It is...")
#   "am":  wording/symbol to indicate morning
#   "pm":  wording/symbol to indicate afternoon/evening
# Each value is a list of character indices into LAYOUT; contiguous words
# are written as ranges for readability.
MAP = {
    "all": [0, 1, 3, 4],                                        # IT IS
    "m00": list(range(108, 115)),                               # O'CLOCK
    "m05": list(range(37, 41)) + list(range(48, 52)),           # FIVE PAST
    "m10": list(range(42, 45)) + list(range(48, 52)),           # TEN PAST
    "m15": [15] + list(range(17, 24)) + list(range(48, 52)),    # A QUARTER PAST
    "m20": list(range(30, 36)) + list(range(48, 52)),           # TWENTY PAST
    "m25": (list(range(30, 36)) + list(range(37, 41))
            + list(range(48, 52))),                             # TWENTY FIVE PAST
    "m30": list(range(26, 30)) + list(range(48, 52)),           # HALF PAST
    "m35": list(range(30, 36)) + list(range(37, 41)) + [45, 46],# TWENTY FIVE TO
    "m40": list(range(30, 36)) + [45, 46],                      # TWENTY TO
    "m45": [15] + list(range(17, 24)) + [45, 46],               # A QUARTER TO
    "m50": list(range(42, 47)),                                 # TEN TO
    "m55": list(range(37, 41)) + [45, 46],                      # FIVE TO
    "h01": list(range(60, 63)),
    "h02": list(range(80, 83)),
    "h03": list(range(66, 71)),
    "h04": list(range(97, 101)),
    "h05": list(range(101, 105)),
    "h06": list(range(63, 66)),
    "h07": list(range(75, 80)),
    "h08": list(range(90, 95)),
    "h09": list(range(71, 75)),
    "h10": list(range(94, 97)),
    "h11": list(range(84, 90)),
    "h12": list(range(54, 60)),
    "am": [116, 117],
    "pm": [118, 119],
}
# Number of columns in the grid layout.
COLS = 15
# Size of the box holding one letter, as (x, y).
SIZE = (53, 60)
# Font size of each letter.
FONTSIZE = 40
# English needs the hour incremented after 30 minutes,
# e.g. 9:40 reads "Twenty to ten".
HOUR_INCREMENT = True
| gpl-3.0 |
yousafsyed/casperjs | bin/Lib/encodings/utf_32.py | 180 | 5128 | """
Python 'utf-32' Codec
"""
import codecs, sys
### Codec APIs
# Stateless encode is the C-level UTF-32 encoder (BOM + native order).
encode = codecs.utf_32_encode
def decode(input, errors='strict'):
    """Decode *input* as UTF-32 in one shot (treated as the final chunk)."""
    output, consumed = codecs.utf_32_decode(input, errors, True)
    return (output, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-32 encoder.

    The first chunk goes through the generic UTF-32 codec, which prepends
    a BOM; every later chunk uses the endian-specific encoder matching the
    platform byte order, so the BOM is written exactly once per stream.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.encoder = None  # chosen after the first encode() call

    def _native_encoder(self):
        # Endian-specific encoder matching this platform's byte order.
        if sys.byteorder == 'little':
            return codecs.utf_32_le_encode
        return codecs.utf_32_be_encode

    def encode(self, input, final=False):
        if self.encoder is not None:
            return self.encoder(input, self.errors)[0]
        # First chunk: emit the BOM, then lock in the native encoder.
        output = codecs.utf_32_encode(input, self.errors)[0]
        self.encoder = self._native_encoder()
        return output

    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.encoder = None

    def getstate(self):
        # State reported to the caller:
        #   2: endianness hasn't been determined yet (no BOM written)
        #   0: stream is in natural order for this platform
        # (we never write in unnatural order)
        return 2 if self.encoder is None else 0

    def setstate(self, state):
        self.encoder = None if state else self._native_encoder()
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental UTF-32 decoder.

    Byte order is detected from the BOM at the start of the stream; once
    seen, the matching endian-specific decoder is used directly.
    """

    def __init__(self, errors='strict'):
        codecs.BufferedIncrementalDecoder.__init__(self, errors)
        self.decoder = None  # set once the BOM has been consumed

    def _buffer_decode(self, input, errors, final):
        if self.decoder is not None:
            return self.decoder(input, self.errors, final)
        # Endianness unknown: let the ex-decoder look for a BOM.
        output, consumed, byteorder = codecs.utf_32_ex_decode(
            input, errors, 0, final)
        if byteorder == -1:
            self.decoder = codecs.utf_32_le_decode
        elif byteorder == 1:
            self.decoder = codecs.utf_32_be_decode
        elif consumed >= 4:
            # Four or more bytes consumed without a BOM: invalid stream.
            raise UnicodeError("UTF-32 stream does not start with BOM")
        return (output, consumed)

    def reset(self):
        codecs.BufferedIncrementalDecoder.reset(self)
        self.decoder = None

    def getstate(self):
        # Additional state info from the base class must be None here,
        # as it isn't passed along to the caller.
        state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
        # Additional state info we pass to the caller:
        #   0: stream is in natural order for this platform
        #   1: stream is in unnatural order
        #   2: endianness hasn't been determined yet
        if self.decoder is None:
            return (state, 2)
        is_be = self.decoder is codecs.utf_32_be_decode
        return (state, int((sys.byteorder == "big") != is_be))

    def setstate(self, state):
        # state[1] will be ignored by BufferedIncrementalDecoder.setstate()
        codecs.BufferedIncrementalDecoder.setstate(self, state)
        endianness = state[1]
        if endianness == 0:
            self.decoder = (codecs.utf_32_be_decode
                            if sys.byteorder == "big"
                            else codecs.utf_32_le_decode)
        elif endianness == 1:
            self.decoder = (codecs.utf_32_le_decode
                            if sys.byteorder == "big"
                            else codecs.utf_32_be_decode)
        else:
            self.decoder = None
class StreamWriter(codecs.StreamWriter):
    """UTF-32 stream writer: BOM once, then native-endian output."""

    def __init__(self, stream, errors='strict'):
        self.encoder = None  # picked after the first encode()
        codecs.StreamWriter.__init__(self, stream, errors)

    def reset(self):
        codecs.StreamWriter.reset(self)
        self.encoder = None

    def encode(self, input, errors='strict'):
        if self.encoder is not None:
            return self.encoder(input, errors)
        # First chunk: the generic codec emits the BOM; afterwards switch
        # to the endian-specific encoder for this platform.
        result = codecs.utf_32_encode(input, errors)
        if sys.byteorder == 'little':
            self.encoder = codecs.utf_32_le_encode
        else:
            self.encoder = codecs.utf_32_be_encode
        return result
class StreamReader(codecs.StreamReader):
    """UTF-32 stream reader: detects byte order from the BOM, then
    shadows decode() on the instance with the endian-specific decoder."""

    def reset(self):
        codecs.StreamReader.reset(self)
        # Drop the per-instance decode shortcut (if any) so the next
        # read re-detects the byte order from the BOM.
        try:
            del self.decode
        except AttributeError:
            pass

    def decode(self, input, errors='strict'):
        output, consumed, byteorder = codecs.utf_32_ex_decode(
            input, errors, 0, False)
        if byteorder == -1:
            self.decode = codecs.utf_32_le_decode
        elif byteorder == 1:
            self.decode = codecs.utf_32_be_decode
        elif consumed >= 4:
            raise UnicodeError("UTF-32 stream does not start with BOM")
        return (output, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-32',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.