| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'
SYSTEM_PATH = u'$SYSTEM_PATH$'
def print_usage(cmd):
print(('Usage:\n' +
' {0} -h | -? | --help\n' +
' {0} [project_dir]\n' +
' {0} [-l|--line line] [project_dir|--temp-project] file[:line]\n' +
' {0} diff <left> <right>\n' +
' {0} merge <local> <remote> [base] <merged>').format(cmd))
def process_args(argv):
args = []
skip_next = False
for i, arg in enumerate(argv[1:]):
if arg == '-h' or arg == '-?' or arg == '--help':
print_usage(argv[0])
exit(0)
elif i == 0 and (arg == 'diff' or arg == 'merge' or arg == '--temp-project'):
args.append(arg)
elif arg == '-l' or arg == '--line':
args.append(arg)
skip_next = True
elif skip_next:
args.append(arg)
skip_next = False
else:
path = arg
if ':' in arg:
file_path, line_number = arg.rsplit(':', 1)
if line_number.isdigit():
args.append('-l')
args.append(line_number)
path = file_path
args.append(os.path.abspath(path))
return args
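# Illustration (hypothetical paths): process_args(['idea', 'src/app.py:42'])
# returns ['-l', '42', '/abs/cwd/src/app.py'] -- a trailing ':<digits>' suffix
# is split off into a -l option and the remaining path is made absolute.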
def try_activate_instance(args):
port_path = os.path.join(CONFIG_PATH, 'port')
token_path = os.path.join(SYSTEM_PATH, 'token')
if not (os.path.exists(port_path) and os.path.exists(token_path)):
return False
with open(port_path) as pf:
port = int(pf.read())
with open(token_path) as tf:
token = tf.read()
s = socket.socket()
s.settimeout(0.3)
try:
s.connect(('127.0.0.1', port))
except (socket.error, IOError):
return False
found = False
while True:
try:
path_len = struct.unpack('>h', s.recv(2))[0]
path = s.recv(path_len).decode('utf-8')
if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
found = True
break
except (socket.error, IOError):
return False
if found:
cmd = 'activate ' + token + '\0' + os.getcwd() + '\0' + '\0'.join(args)
if sys.version_info.major >= 3: cmd = cmd.encode('utf-8')
encoded = struct.pack('>h', len(cmd)) + cmd
s.send(encoded)
time.sleep(0.5) # don't close the socket immediately
return True
return False
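# Protocol sketch: the IDE's SocketLock server speaks length-prefixed
# messages -- a big-endian 16-bit length (struct.pack('>h', ...)) followed by
# the UTF-8 payload. The client reads the advertised config paths the same
# way and, on a match, sends "activate <token>\0<cwd>\0<args...>".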
def start_new_instance(args):
if sys.platform == 'darwin':
if len(args) > 0:
args.insert(0, '--args')
os.execvp('open', ['-a', RUN_PATH] + args)
else:
bin_file = os.path.split(RUN_PATH)[1]
os.execv(RUN_PATH, [bin_file] + args)
ide_args = process_args(sys.argv)
if not try_activate_instance(ide_args):
start_new_instance(ide_args)
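# Note: on macOS the IDE is relaunched via `open -a <app> --args ...`, while
# on other platforms execv replaces this launcher process with the IDE binary.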
|
{
"content_hash": "a8b40f66683bcf0a8314e29bd185fec0",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 85,
"avg_line_length": 28.66346153846154,
"alnum_prop": 0.5206306608520631,
"repo_name": "semonte/intellij-community",
"id": "0563b524fb54bfa20c2ae3d6480be3c2a015c4c2",
"size": "3028",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "platform/platform-resources/src/launcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "60580"
},
{
"name": "C",
"bytes": "211556"
},
{
"name": "C#",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "197528"
},
{
"name": "CMake",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "201445"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "3222009"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1895767"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "164918191"
},
{
"name": "JavaScript",
"bytes": "570364"
},
{
"name": "Jupyter Notebook",
"bytes": "93222"
},
{
"name": "Kotlin",
"bytes": "4472431"
},
{
"name": "Lex",
"bytes": "147154"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "51270"
},
{
"name": "Objective-C",
"bytes": "27941"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6680"
},
{
"name": "Python",
"bytes": "25421481"
},
{
"name": "Roff",
"bytes": "37534"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "65719"
},
{
"name": "Smalltalk",
"bytes": "338"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "77"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
}
|
import pytest
from selenium.webdriver.wpewebkit.options import Options
@pytest.fixture
def options():
return Options()
def test_set_binary_location(options):
options.binary_location = "/foo/bar"
assert options._binary_location == "/foo/bar"
def test_get_binary_location(options):
options._binary_location = "/foo/bar"
assert options.binary_location == "/foo/bar"
def test_creates_capabilities(options):
options._arguments = ["foo"]
options._binary_location = "/bar"
caps = options.to_capabilities()
opts = caps.get(Options.KEY)
assert opts
assert "foo" in opts["args"]
assert opts["binary"] == "/bar"
def test_is_a_baseoptions(options):
from selenium.webdriver.common.options import BaseOptions
assert isinstance(options, BaseOptions)
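# Usage sketch (hypothetical binary path; assumes a WPEWebKit build is installed):
#   from selenium import webdriver
#   options = Options()
#   options.binary_location = "/usr/bin/WPEWebDriver"
#   driver = webdriver.WPEWebKit(options=options)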
|
{
"content_hash": "1fb674eba16ddd80e6e3284d08a0f5e8",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 61,
"avg_line_length": 23.61764705882353,
"alnum_prop": 0.701120797011208,
"repo_name": "valfirst/selenium",
"id": "b43e01b8a3b5e4b550501874a0627453b6d702da",
"size": "1591",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "py/test/unit/selenium/webdriver/wpewebkit/wpewebkit_options_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "4443"
},
{
"name": "C",
"bytes": "82917"
},
{
"name": "C#",
"bytes": "2990022"
},
{
"name": "C++",
"bytes": "2285448"
},
{
"name": "CSS",
"bytes": "1049"
},
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "HTML",
"bytes": "1379853"
},
{
"name": "Java",
"bytes": "6286458"
},
{
"name": "JavaScript",
"bytes": "2535395"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "988077"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "1036679"
},
{
"name": "Rust",
"bytes": "45287"
},
{
"name": "Shell",
"bytes": "29804"
},
{
"name": "Starlark",
"bytes": "401750"
},
{
"name": "TypeScript",
"bytes": "126843"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
}
|
'''Definition of several collection types (list, dict, set,...)'''
import types
from JumpScale.core.pmtypes.base import BaseType, NO_DEFAULT
class Dictionary(BaseType):
'''Generic dictionary type'''
NAME = 'dictionary'
@staticmethod
def check(value):
'''Check whether provided value is a dict'''
return isinstance(value, dict)
def get_default(self, obj):
if self._default is NO_DEFAULT:
return dict()
return dict(self._default)
class List(BaseType):
'''Generic list type'''
NAME = 'list'
@staticmethod
def check(value):
'''Check whether provided value is a list'''
return isinstance(value, list)
def get_default(self, obj):
if self._default is NO_DEFAULT:
return list()
return list(self._default)
class Set(BaseType):
'''Generic set type'''
NAME = 'set'
@staticmethod
def check(value):
'''Check whether provided value is a set'''
return isinstance(value, set)
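    # Assumed completion, mirroring Dictionary and List above; the excerpt
    # ends without a default handler for Set.
    def get_default(self, obj):
        if self._default is NO_DEFAULT:
            return set()
        return set(self._default)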
|
{
"content_hash": "2097cfd35e95082194ececa49d977ee9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 24.046511627906977,
"alnum_prop": 0.6150870406189555,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "b96c923a48307cd09c17f9f7222e4e4b0ba02364",
"size": "2657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/core/pmtypes/CollectionTypes.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
"""
Monkey Patch httplib.HTTPResponse to buffer reads of headers. This can improve
performance when making large numbers of small HTTP requests. This module
also provides helper functions to make HTTP connections using
BufferedHTTPResponse.
.. warning::
If you use this, be sure that the libraries you are using do not access
the socket directly (xmlrpclib, I'm looking at you :/), and instead
make all calls through httplib.
"""
from urllib import quote
import logging
import time
# pylint: disable=E0611
from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \
HTTPResponse, HTTPSConnection, _UNKNOWN
DEFAULT_TIMEOUT = 30
logger = logging.getLogger(__name__) # pylint: disable=C0103
# pylint: disable=R0902
class BufferedHTTPResponse(HTTPResponse):
"""HTTPResponse class that buffers reading of headers"""
# pylint: disable=C0103
def __init__(self, sock, debuglevel=0, strict=0,
method=None): # pragma: no cover
self.sock = sock
self.fp = sock.makefile('rb')
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
# pylint: disable=E1101,E0203,W0201
def expect_response(self):
self.fp = self.sock.makefile('rb', 0)
version, status, reason = self._read_status()
if status != CONTINUE:
self._read_status = lambda: (version, status, reason)
self.begin()
else:
self.status = status
self.reason = reason.strip()
self.version = 11
self.msg = HTTPMessage(self.fp, 0)
self.msg.fp = None
# pylint: disable=W0232
class BufferedHTTPConnection(HTTPConnection):
"""HTTPConnection class that uses BufferedHTTPResponse"""
response_class = BufferedHTTPResponse
# pylint: disable=W0201
def connect(self):
self._connected_time = time.time()
return HTTPConnection.connect(self)
# pylint: disable=W0201
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
self._method = method
self._path = url
return HTTPConnection.putrequest(self, method, url, skip_host,
skip_accept_encoding)
# pylint: disable=E1101
def getexpect(self):
response = BufferedHTTPResponse(self.sock, strict=self.strict,
method=self._method)
response.expect_response()
return response
def getresponse(self):
response = HTTPConnection.getresponse(self)
logger.debug(("HTTP PERF: %(time).5f seconds to %(method)s "
"%(host)s:%(port)s %(path)s)"),
{'time': time.time() - self._connected_time, 'method': self._method,
'host': self.host, 'port': self.port, 'path': self._path})
return response
# pylint: disable=R0913
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False, key_file=None,
cert_file=None, timeout=None):
"""
Helper function to create an HTTPConnection object. If ssl is set True,
HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection
will be used, which is buffered for backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param device: device of the node to query
:param partition: partition on the device
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:param key_file: Private key file (not needed if cert_file has private key)
:param cert_file: Certificate file (Keystore)
:returns: HTTPConnection object
"""
path = quote('/' + device + '/' + str(partition) + path)
    return http_connect_raw(ipaddr, port, method, path,
                            headers, query_string, ssl, key_file, cert_file,
                            timeout=timeout)
# pylint: disable=W0201
def http_connect_raw(ipaddr, port, method, path, headers=None,
query_string=None, ssl=False, key_file=None,
cert_file=None, timeout=None):
"""
Helper function to create an HTTPConnection object. If ssl is set True,
HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection
will be used, which is buffered for backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:param key_file: Private key file (not needed if cert_file has private key)
:param cert_file: Certificate file (Keystore)
:returns: HTTPConnection object
"""
if timeout is None:
timeout = DEFAULT_TIMEOUT
if ssl:
conn = HTTPSConnection('%s:%s' % (ipaddr, port), key_file=key_file,
cert_file=cert_file, timeout=timeout)
else:
conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port),
timeout=timeout)
if query_string:
path += '?' + query_string
conn.path = path
conn.putrequest(method, path)
if headers:
# pylint: disable=E1103
for header, value in headers.iteritems():
conn.putheader(header, value)
# pylint: disable=E1103
conn.endheaders()
return conn
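# Usage sketch (hypothetical host/port; Python 2, like this module):
#   conn = http_connect_raw('127.0.0.1', 35357, 'GET', '/v2.0/tenants',
#                           headers={'X-Auth-Token': 'ADMIN'})
#   resp = conn.getresponse()
#   print resp.status, resp.read()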
|
{
"content_hash": "ef864301f6c636ccfcd59f11c1026c4d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 38.08383233532934,
"alnum_prop": 0.6305031446540881,
"repo_name": "admiyo/keystone",
"id": "db64175d1b425aaa2477e02d62f7c7b6d4818931",
"size": "6950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/common/bufferedhttp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32945"
},
{
"name": "JavaScript",
"bytes": "67937"
},
{
"name": "Python",
"bytes": "1339048"
},
{
"name": "Shell",
"bytes": "7400"
},
{
"name": "XSLT",
"bytes": "52086"
}
],
"symlink_target": ""
}
|
"""Warpper for testing
"""
# Coding: utf-8
# File name: test.py
# Created: 2016-07-27
# Description:
# v0.0: File created.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import embeddings
import graph
import util
import numpy as np
from matplotlib import colors
import pickle
from collections import Counter
import logging
import os
import time
from keras.optimizers import Adam
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from exp_config import *
g = graph.graph_from_pickle('data/{}.graph'.format(dataset_name))
# read ground truth if exists
#try:
# {id: com} dictionary
# correct_labels = pickle.load(open("data/{}.community", "rb"))
#except:
# correct_labels = {i: 0 for i in range(len(g.nodes()))}
if not os.path.exists("logs"):
os.makedirs("logs")
if not os.path.exists("save"):
os.makedirs("save")
exp_name = "nce_{}_e{}_ed{}_ne{}_ns{}_nw{}_wl{}_ws{}_it{}_nb{}_lr{}_ci{}_adam".format(
dataset_name, epoch, emb_dim, neg_samp, num_skip, num_walk,
walk_length, window_size, iters, num_batches, learning_rate,
contrast_iter)
logging.basicConfig(filename=os.path.join("logs", exp_name+".log"),
level=logging.INFO,
format="%(asctime)s %(message)s")
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger("").addHandler(console)
if training:
for method in methods:
method_name = method + "_" + exp_name
model = embeddings.EmbeddingNet(graph=g, epoch=epoch, emb_dim=emb_dim,
neg_samp=neg_samp, num_skip=num_skip,
num_walk=num_walk, walk_length=walk_length,
window_size=window_size, iters=iters,
contrast_iter=contrast_iter,
learning_rate=learning_rate)
model.build(optimizer='adam')
logging.info("start training {}".format(method))
start = time.time()
if method == "rand":
model.train(mode='random_walk', num_batches=num_batches,
save_dir=os.path.join("weights", method_name))
elif method == "motif":
model.train(mode='motif_walk', num_batches=num_batches,
save_dir=os.path.join("weights", method_name))
elif method == "contrast":
model.train_mce(pos='motif_walk', neg="random_walk",
num_batches=num_batches,
save_dir=os.path.join("weights", method_name))
else:
raise ValueError("methods must be in {'rand', 'motif', 'contrast'}")
elapsed_time = time.time() - start
logging.info('finish training: Time: {}[s]'.format(elapsed_time))
weights = model.get_weights()
weight_path = os.path.join("save", method_name + ".weight")
pickle.dump(weights, open(weight_path, "wb"))
if visualize:
topk = [c for c, _ in Counter(g.coms).most_common(topk_labels)]
vis_nodes = [i for i, c in g.coms.items() if c in topk]
vis_labels = [g.coms[i] for i in vis_nodes]
color_map = ["red", "blue", "green", "gold", "purple", "oragnge", "cyan"]
label_colors = [color_map[c%len(color_map)] for c in vis_labels]
fig = plt.figure(figsize=(15, 45))
fig.suptitle(exp_name, fontsize=16)
for i, method in enumerate(methods):
method_name = method + "_" + exp_name
weight_path = os.path.join("save", method_name + ".weight")
weights = pickle.load(open(weight_path, "rb"))
embed = weights[0][vis_nodes, :]
# Normalize
if normalize_embed:
embed = normalize(embed)
embed_tsne = TSNE(learning_rate=tsne_learning_rate).fit_transform(embed)
color_map = list(colors.cnames.keys())
a = plt.subplot(311 + i)
a.set_title("{} walk embedding".format(method))
a.scatter(embed_tsne[:, 0], embed_tsne[:, 1],
c=label_colors, s=30)
plt.savefig(os.path.join("save", exp_name + '.png'))
plt.show()
|
{
"content_hash": "5366b15e7dc578a296d3c640ea822301",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 86,
"avg_line_length": 35.15573770491803,
"alnum_prop": 0.5989741198414549,
"repo_name": "gear/motifwalk",
"id": "5936e5fee7fbb7bc5f1c552a8edfaad8a0b1c2c5",
"size": "4289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/src/mane/run_exp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19444158"
},
{
"name": "PostScript",
"bytes": "3502"
},
{
"name": "Python",
"bytes": "306067"
},
{
"name": "Roff",
"bytes": "13529332"
},
{
"name": "Shell",
"bytes": "4391"
},
{
"name": "TeX",
"bytes": "314493"
}
],
"symlink_target": ""
}
|
class Diffusion_Embedding(object):
def __init__(self,
source_path,
file_template,
subjects,
output_path,
diff_time = 0,
diff_alpha = 0.5,
diff_ncomp = 10,
subjects_subset = None,
output_suffix = 'embedding',
ftype = 'npy_timeseries',
surf = 'fsaverage4',
mwall = False,
tp = None,
affinity_metric = 'correlation',
realign_method = 'STATIS'):
"""
source_path : path with source timeseries/matrices, string
file_template : template name for the files with timeseries, string
subjects : the list of subjects to be inserted into file_template, list
output_path : where to output the results, string
diff_time : Diffusion time for individual embeddings, float
diff_alpha : Value of diffusion operator, float
diff_ncomp : Number of components to extract, int
subjects_subset : subjects from which the template should be created, list (optional)
output_prefix : prefix for output files, string
ftype : type of source files (npy), string
surf : surface for medial wall removal, string
mwall : is medial wall present in source?, bool
tp : timepoints to extract from source, list of arrays
affinity_metric : affinity metric to use between timeseries
"""
import numpy as np
import os
import glob
self.source_path = source_path
self.file_template = file_template
self.subjects = subjects
        if subjects_subset is None:
self.subjects_subset = subjects
else:
self.subjects_subset = subjects_subset
self.ftype = ftype
self.surf = surf
self.mwall = mwall
self.tp = tp
self.affinity_metric = affinity_metric
self.output_suffix = output_suffix
self.output_path = output_path
self.diff_time = diff_time
self.diff_alpha = diff_alpha
self.diff_ncomp = diff_ncomp
if not os.path.isdir(self.output_path):
os.mkdir(self.output_path)
def get_source_files(self):
# Get the list of input files and store it
import os
self.input_files_ = [os.path.join(self.source_path, self.file_template % s) for s in self.subjects]
self.template_subjects_ = [s for s in self.subjects_subset if s in self.subjects]
def load_data(self, index):
# Load data and optionally select timepoints from indicated source timeseries
import numpy as np
# Load data
if self.ftype == 'npy_timeseries' or self.ftype == 'npy_matrix':
data = np.load(self.input_files_[index])
        # Select timepoints if necessary (self.tp is a list of index arrays)
        if self.tp is None:
            data_sel = data
        elif len(self.tp) == 1:
            data_sel = data[:, self.tp[0]]
        elif len(self.tp) > 1:
            data_sel = data[:, self.tp[index]]
# Select rows if desired
        if self.mwall:
            self.remove_medial_wall(data_sel)
        else:
            self.current_data_ = data_sel
def remove_medial_wall(self, data_sel):
# Remove the medial wall vertices based on provided surface shape
import os
import nibabel as nib
import numpy as np
fspath = os.environ.get('FREESURFER_HOME')
nv = nib.freesurfer.io.read_geometry(os.path.join(fspath, 'subjects', self.surf, 'surf', 'lh.pial'))[0].shape[0]
lhcort = np.sort(nib.freesurfer.io.read_label(os.path.join(fspath, 'subjects', self.surf, 'label', 'lh.cortex.label')))
rhcort = np.sort(nib.freesurfer.io.read_label(os.path.join(fspath, 'subjects', self.surf, 'label', 'rh.cortex.label')))+nv
cortex = np.hstack([lhcort,rhcort])
self.current_data_ = data_sel[cortex,:]
def check_data(self):
# Check if the timeseries makes sense, i.e. does not contain NaNs or Inf
import numpy as np
nancount = np.sum(np.isnan(self.current_data_))
infcount = np.sum(np.isinf(self.current_data_))
if np.logical_and(nancount == 0, infcount == 0):
self.current_ok_ = True
else:
print "Found %d NaN and %d Inf values in the timeseries" % (nancount, infcount)
self.current_ok_ = False
def calculate_affinity(self):
import numpy as np
if not self.current_ok_:
raise ValueError('There is something wrong with the timeseries, cannot proceed')
if self.affinity_metric == 'correlation':
self.current_cmat_ = np.corrcoef(self.current_data_)
self.current_cmat_ = (self.current_cmat_ + 1) / 2
def embed_affinity(self):
import time
from mapalign import embed
stime = time.time()
self.compute_diffusion_map()
print "Diffusion embedding took %d seconds" % (time.time()-stime)
def compute_embeddings(self):
import os
import numpy as np
self.get_source_files()
self.embedded_files_ = []
tdir = os.path.join(self.output_path, 'embeddings')
if not os.path.isdir(tdir):
os.mkdir(tdir)
for i, s in enumerate(self.subjects):
f = os.path.join(tdir , s + '_' + self.output_suffix + '.npz')
if os.path.isfile(f):
print "Embedding already computed for subject %s, skipping" % s
self.embedded_files_.append(f)
continue
self.load_data(i)
self.check_data()
if self.ftype == 'npy_timeseries':
self.calculate_affinity()
self.embed_affinity()
np.savez(f, self.current_res_)
self.embedded_files_.append(f)
if len(self.embedded_files_) == len(self.subjects):
self.embedding_complete_ = True
self.template_files_ = [os.path.join(tdir, s + '_' + self.output_suffix + '.npz') for s in self.template_subjects_]
else:
self.embedding_complete_ = False
def realign_embeddings(self, filelist = None):
from pySTATIS import statis
import numpy as np
import os
if filelist is not None:
self.template_files_ = filelist
self.X_ev_ = []
self.X_em_ = []
print "Getting data for STATIS..."
for i, f in enumerate(self.template_files_):
t = np.load(f)['arr_0'].item()
n = t['vectors'][:,0]
ev = (t['vectors'].T/n).T
self.X_ev_.append(ev[:,1:11])
self.X_em_.append( (ev*t['orig_lambdas'])[:,1:11] )
print "Running STATIS..."
self.statis_ = statis.statis(self.X_ev_, self.subjects_subset, os.path.join(self.output_path,'statis_results.npy'))
def project_template_subjects(self):
"""
Create projections of individual embeddings onto the template.
This function is for subjects who participated in the template creation process.
"""
from pySTATIS import statis
import os
self.projection_path_ = os.path.join(self.output_path, 'projections')
if not os.path.isdir(self.projection_path_):
os.mkdir(self.projection_path_)
statis.project_back(self.X_em_, self.statis_['Q'], self.projection_path_, self.subjects_subset)
def compute_markov_matrix(self, skip_checks=False, overwrite=False):
"""
Slightly modified code originally written by Satrajit Ghosh (satra@mit.edu github.com/satra/mapalign)
"""
import numpy as np
import scipy.sparse as sps
L = self.current_cmat_
alpha = self.diff_alpha
use_sparse = False
if sps.issparse(L):
use_sparse = True
if not skip_checks:
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
if not _graph_is_connected(L):
raise ValueError('Graph is disconnected')
ndim = L.shape[0]
if overwrite:
L_alpha = L
else:
L_alpha = L.copy()
if alpha > 0:
# Step 2
d = np.array(L_alpha.sum(axis=1)).flatten()
d_alpha = np.power(d, -alpha)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
else:
L_alpha = d_alpha[:, np.newaxis] * L_alpha
L_alpha = L_alpha * d_alpha[np.newaxis, :]
# Step 3
d_alpha = np.power(np.array(L_alpha.sum(axis=1)).flatten(), -1)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
else:
L_alpha = d_alpha[:, np.newaxis] * L_alpha
return L_alpha
def compute_diffusion_map(self, skip_checks=False, overwrite=False):
"""
Slightly modified code originally written by Satrajit Ghosh (satra@mit.edu github.com/satra/mapalign)
Compute the diffusion maps of a symmetric similarity matrix
L : matrix N x N
L is symmetric and L(x, y) >= 0
alpha: float [0, 1]
Setting alpha=1 and the diffusion operator approximates the
Laplace-Beltrami operator. We then recover the Riemannian geometry
of the data set regardless of the distribution of the points. To
describe the long-term behavior of the point distribution of a
system of stochastic differential equations, we can use alpha=0.5
and the resulting Markov chain approximates the Fokker-Planck
diffusion. With alpha=0, it reduces to the classical graph Laplacian
normalization.
n_components: int
The number of diffusion map components to return. Due to the
spectrum decay of the eigenvalues, only a few terms are necessary to
achieve a given relative accuracy in the sum M^t.
diffusion_time: float >= 0
use the diffusion_time (t) step transition matrix M^t
t not only serves as a time parameter, but also has the dual role of
scale parameter. One of the main ideas of diffusion framework is
that running the chain forward in time (taking larger and larger
powers of M) reveals the geometric structure of X at larger and
larger scales (the diffusion process).
t = 0 empirically provides a reasonable balance from a clustering
perspective. Specifically, the notion of a cluster in the data set
is quantified as a region in which the probability of escaping this
region is low (within a certain time t).
skip_checks: bool
Avoid expensive pre-checks on input data. The caller has to make
sure that input data is valid or results will be undefined.
overwrite: bool
Optimize memory usage by re-using input matrix L as scratch space.
References
----------
[1] https://en.wikipedia.org/wiki/Diffusion_map
[2] Coifman, R.R.; S. Lafon. (2006). "Diffusion maps". Applied and
Computational Harmonic Analysis 21: 5-30. doi:10.1016/j.acha.2006.04.006
"""
M = self.compute_markov_matrix(skip_checks, overwrite)
from scipy.sparse.linalg import eigsh, eigs
import numpy as np
ndim = self.current_cmat_.shape[0]
# Step 4
func = eigs
if self.diff_ncomp is not None:
lambdas, vectors = func(M, k=self.diff_ncomp + 1)
else:
lambdas, vectors = func(M, k=max(2, int(np.sqrt(ndim))))
del M
if func == eigsh:
lambdas = lambdas[::-1]
vectors = vectors[:, ::-1]
else:
lambdas = np.real(lambdas)
vectors = np.real(vectors)
lambda_idx = np.argsort(lambdas)[::-1]
lambdas = lambdas[lambda_idx]
vectors = vectors[:, lambda_idx]
# Step 5
psi = vectors/vectors[:, [0]]
olambdas = lambdas.copy()
if self.diff_time == 0:
lambdas = lambdas[1:] / (1 - lambdas[1:])
else:
lambdas = lambdas[1:] ** float(self.diff_time)
lambda_ratio = lambdas/lambdas[0]
threshold = max(0.05, lambda_ratio[-1])
n_components_auto = np.amax(np.nonzero(lambda_ratio > threshold)[0])
n_components_auto = min(n_components_auto, ndim)
if self.diff_ncomp is None:
self.diff_ncomp = n_components_auto
self.current_emb_ = psi[:, 1:(self.diff_ncomp + 1)] * lambdas[:self.diff_ncomp][None, :]
self.current_res_ = dict(lambdas=lambdas, orig_lambdas = olambdas, vectors=vectors,
n_components=self.diff_ncomp, diffusion_time=self.diff_time,
n_components_auto=n_components_auto)
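# Math sketch of the two methods above: given an affinity matrix K with
# degrees d_i = sum_j K_ij, compute_markov_matrix forms the anisotropic
# kernel K_alpha = D^-alpha K D^-alpha and row-normalizes it into a Markov
# matrix M; compute_diffusion_map then embeds each point as
# (lambda_1^t psi_1(x), ..., lambda_n^t psi_n(x)), substituting
# lambda/(1 - lambda) for lambda^t when diffusion_time == 0 (Step 5).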
|
{
"content_hash": "7b0e534fe213c4c449385aefd2d23afe",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 130,
"avg_line_length": 36.295392953929536,
"alnum_prop": 0.5748525349062943,
"repo_name": "mfalkiewicz/diffusion_maps",
"id": "acdb25ceaf3e83848a87166608b4666f14b2d115",
"size": "13393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diffusion_embedding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "21865324"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TextpositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="textpositionsrc", parent_name="funnelarea", **kwargs
):
super(TextpositionsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "c60bd78c258ee9e0d944ee01b9524ab8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.6229885057471264,
"repo_name": "plotly/plotly.py",
"id": "09e27c8241cbb57e0d8bc129a718aaae61b954ef",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnelarea/_textpositionsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import sys
from tabulator import Stream
print('Parse csv format:')
source = 'data/table.csv'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nParse linear tsv format:')
source = 'data/table.tsv'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nParse json with dicts:')
source = 'file://data/table-dicts.json'
with Stream(source) as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nParse json with lists:')
source = 'file://data/table-lists.json'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nParse xls format:')
source = 'data/table.xls'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nParse xlsx format:')
source = 'data/table.xlsx'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nLoad from stream scheme:')
source = io.open('data/table.csv', mode='rb')
with Stream(source, headers='row1', format='csv') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nLoad from text scheme:')
source = 'text://id,name\n1,english\n2,中国人\n'
with Stream(source, headers='row1', format='csv') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nLoad from http scheme:')
source = 'https://raw.githubusercontent.com'
source += '/okfn/tabulator-py/master/data/table.csv'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nUsage of inline lists:')
source = [['id', 'name'], ['1', 'english'], ('2', '中国人')]
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nUsage of inline lists (keyed):')
source = [{'id': '1', 'name': 'english'}, {'id': '2', 'name': '中国人'}]
with Stream(source) as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nIter with keyed rows representation:')
source = [{'id': '1', 'name': 'english'}, {'id': '2', 'name': '中国人'}]
with Stream(source, headers=1) as stream:
print(stream.headers)
for row in stream.iter(keyed=True):
print(row)
print('\nTable reset and read limit:')
source = 'data/table.csv'
with Stream(source, headers='row1') as stream:
print(stream.headers)
print(stream.read(limit=1))
stream.reset()
print(stream.read(limit=1))
print('\nLate headers (on a second row):')
source = 'data/special/late_headers.csv'
with Stream(source, headers='row2') as stream:
print(stream.headers)
for row in stream:
print(row)
print('\nSpaces in headers:')
source = 'https://raw.githubusercontent.com/datasets/gdp/master/data/gdp.csv'
with Stream(source, headers='row1') as stream:
print(stream.headers)
for row in stream.read(limit=5):
print(row)
|
{
"content_hash": "a5268bc2b87f299e8593c6d0b57cf333",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 77,
"avg_line_length": 25.138461538461538,
"alnum_prop": 0.6612607099143207,
"repo_name": "frictionlessdata/tabulator-py",
"id": "21209f5b5386043fe8b4bb4fa84fa25ff2294497",
"size": "3316",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "examples/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "71830"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "176300"
}
],
"symlink_target": ""
}
|
__author__ = 'ed adkins'
import os
import datetime
import argparse
import logging
import sys
from connect import Devicemanager
from create_output_file import write_file, create_dir
FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(format=FORMAT, filename=".logs/lab_manager.log", level=logging.INFO)
LOGGER = logging.getLogger('labrat.args_capture_all_cfg')
LOGGER_OUT = logging.StreamHandler(sys.stdout)
LOGGER_OUT.setFormatter(logging.Formatter(FORMAT))
LOGGER.addHandler(LOGGER_OUT)
def user_args():
""" Get user commands and return a dictionary with the user input.
Returns:
A dict containing the user input including username, password, host file and command
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--INPUT", help="name of hosts JSON file", required=True)
parser.add_argument("-u", "--USERNAME", help="username of hosts", required=True)
parser.add_argument("-p", "--PASSWORD", help="password of hosts", required=True)
args = parser.parse_args()
    params = vars(args)
return params
def get_config(un_pw_hf_cmd):
""" Creates a file with the output from the device and the device IP address as the filename.
Args:
un_pw_hf_cmd: A list containing the username, password, host file and command.
"""
host_file_path = 'host_files/' + un_pw_hf_cmd[2]
cfg_backup = Devicemanager(un_pw_hf_cmd[0], un_pw_hf_cmd[1], host_file_path)
for device, response in cfg_backup.get_command(un_pw_hf_cmd[3]).items():
formatted_output = map(lambda line: line.rstrip('\n'), response)
create_dir(device, formatted_output)
LOGGER.info("Config capture for {0} complete...".format(device))
if __name__ == "__main__":
    params = user_args()
    get_config([params['USERNAME'],
                params['PASSWORD'],
                params['INPUT'],
                'show running-config'])
LOGGER.info("Job complete!..")
|
{
"content_hash": "a08fecdf7186ec68fe1b4dccb8c0af54",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 97,
"avg_line_length": 37.716981132075475,
"alnum_prop": 0.6688344172086043,
"repo_name": "ejmadkins/labRat",
"id": "e80674ff4a224afca65b4d9a23cdc63d93f1e4d3",
"size": "2021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "args_capture_all_cfg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28253"
}
],
"symlink_target": ""
}
|
import numpy as np
from moviepy.decorators import audio_video_fx, convert_parameter_to_seconds
def _mono_factor_getter():
return lambda t, duration: np.minimum(t / duration, 1)
def _stereo_factor_getter(nchannels):
def getter(t, duration):
factor = np.minimum(t / duration, 1)
return np.array([factor for _ in range(nchannels)]).T
return getter
@audio_video_fx
@convert_parameter_to_seconds(["duration"])
def audio_fadein(clip, duration):
"""Return an audio (or video) clip that is first mute, then the
sound arrives progressively over ``duration`` seconds.
Parameters
----------
duration : float
How long does it take for the sound to return to its normal level.
Examples
--------
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.fx(audio_fadein, "00:00:06")
"""
get_factor = (
_mono_factor_getter()
if clip.nchannels == 1
else _stereo_factor_getter(clip.nchannels)
)
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
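# Usage sketch, mirroring the docstring (hypothetical media path):
#   from moviepy.editor import VideoFileClip
#   clip = VideoFileClip("media/chaplin.mp4").fx(audio_fadein, 3)  # 3 s fade-in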
|
{
"content_hash": "6523eff70868d53ff30e41f344c365b2",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 24.977777777777778,
"alnum_prop": 0.6352313167259787,
"repo_name": "Zulko/moviepy",
"id": "e62c6412ede7507b5ba6bcb47450cca15ff43514",
"size": "1124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviepy/audio/fx/audio_fadein.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "622"
},
{
"name": "Python",
"bytes": "536587"
}
],
"symlink_target": ""
}
|
import __main__
__main__.pymol_argv = ['pymol','-qc']
#__main__.pymol_argv = ['pymol','']
import sys,time,os
import pymol
pymol.finish_launching()
sys.path.append("/home/scratch/software/Pymol-script-repo-master")
import my_elbow_angle_ab
my_file = sys.argv[1]
chain1 = sys.argv[2]
chain2 = sys.argv[3]
limit_h = sys.argv[4]
limit_l = sys.argv[5]
end = my_file.find(".pdb")
file_name = my_file[0:end]
pymol.cmd.load(my_file,file_name)
pymol.cmd.split_states(file_name)
states = pymol.cmd.get_object_list()
# skip first entry (first state is identical)
iterstates = iter(states)
next(iterstates)
my_out_name = file_name + "_elbows.dat"
my_out = open(my_out_name, "w")
for state in iterstates:
angle = my_elbow_angle_ab.elbow_angle(state,chain1,chain2,limit_h,limit_l)
my_out.write("%i\n" % angle)
my_out.close()
pymol.cmd.quit()
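# Invocation sketch (hypothetical file and residue limits):
#   $ python calc_elbow_traj_ab.py traj.pdb H L 113 107
# One elbow angle per trajectory state is written to traj_elbows.dat.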
|
{
"content_hash": "c136f65cb1e538235e69171c08aa9197",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 22.83783783783784,
"alnum_prop": 0.6887573964497041,
"repo_name": "demharters/git_scripts",
"id": "8f58cd2b871a83cc681577ecd05483d36c8f29f3",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calc_elbow_traj_ab.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "245659"
},
{
"name": "Python",
"bytes": "227443"
},
{
"name": "Shell",
"bytes": "1760"
}
],
"symlink_target": ""
}
|
from enum import Enum
from Utils import Status
class ExtensionErrorCodeEnum(Enum):
success = 1
ExtensionTempTerminalState = 4
error_parameter = 11
error_12 = 12
error_wrong_time = 13
error_same_taskid = 14
error_http_failure = 15
error_upload_status_blob = 16
error = 2
FailedRetryableSnapshotFailedNoNetwork = 76
FailedRetryableSnapshotFailedRestrictedNetwork = 761
FailedRetryableFsFreezeFailed = 201
class ExtensionErrorCodeHelper:
ExtensionErrorCodeDict = {
ExtensionErrorCodeEnum.success : Status.ExtVmHealthStateEnum.green,
ExtensionErrorCodeEnum.ExtensionTempTerminalState : Status.ExtVmHealthStateEnum.green,
ExtensionErrorCodeEnum.error : Status.ExtVmHealthStateEnum.green,
ExtensionErrorCodeEnum.error_12 : Status.ExtVmHealthStateEnum.green,
ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed : Status.ExtVmHealthStateEnum.yellow,
ExtensionErrorCodeEnum.error_parameter : Status.ExtVmHealthStateEnum.yellow,
ExtensionErrorCodeEnum.error_http_failure : Status.ExtVmHealthStateEnum.red,
ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork : Status.ExtVmHealthStateEnum.red,
ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork : Status.ExtVmHealthStateEnum.red
}
@staticmethod
def StatusCodeStringBuilder(ExtErrorCodeEnum):
return " StatusCode." + ExtErrorCodeEnum.name + ","
|
{
"content_hash": "1c43693f6ad756858a9960137040f983",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 116,
"avg_line_length": 40.86486486486486,
"alnum_prop": 0.7493386243386243,
"repo_name": "jasonzio/azure-linux-extensions",
"id": "43cbfa750d744b605bc0f647cb5edc76ea2fcdaa",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VMBackup/main/ExtensionErrorCodeHelper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39294"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "3751"
},
{
"name": "PowerShell",
"bytes": "23680"
},
{
"name": "Python",
"bytes": "3695846"
},
{
"name": "Shell",
"bytes": "21547"
}
],
"symlink_target": ""
}
|
from twisted.internet import reactor, protocol
from Config import Config
class Server(protocol.Protocol):
def connectionMade(self):
print('Unity connected')
self.transport.write(b"send from server")
def dataReceived(self, data):
print(data.decode("utf-8"))
def connectionLost(self, reason):
print('Unity disconnected')
class ServerFactory(protocol.ServerFactory):
protocol = Server
if __name__ == '__main__':
    config = Config('config.ini')
config = config.get()
factory = ServerFactory()
reactor.listenTCP(int(config['port']), factory)
print('Server start...')
reactor.run()
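# Quick manual check (assumes config.ini sets port, e.g. 9999):
#   $ echo "hello from client" | nc localhost 9999
# The server prints the received text and replies with b"send from server".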
|
{
"content_hash": "2cd6d9880586663f3a5f809c1cec723c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 51,
"avg_line_length": 21.933333333333334,
"alnum_prop": 0.6595744680851063,
"repo_name": "Dirilean/Road",
"id": "85a52eaa7ead6dc0b71634182a81f5a0ac575b3f",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/Server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "23077"
},
{
"name": "Python",
"bytes": "1289"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from ceilometer.openstack.common._i18n import _LI
from ceilometer.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
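# Configuration sketch: backdoor_port = "0" listens on a random free port,
# "4444" on that exact port (or not at all if it is taken), and "8000:8010"
# on the first unused port in the range; the chosen port is logged above.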
|
{
"content_hash": "614a082d2b86d0c640d89387a782518c",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 77,
"avg_line_length": 31.496124031007753,
"alnum_prop": 0.6300762983017475,
"repo_name": "m1093782566/openstack_org_ceilometer",
"id": "bc544762b3ae57de059d16d572e2d40f1e4bcd1e",
"size": "4774",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ceilometer/openstack/common/eventlet_backdoor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2657375"
},
{
"name": "Shell",
"bytes": "3204"
}
],
"symlink_target": ""
}
|
import sqlite3
import os, sys
# Import of my custom classes
sys.path.append(os.path.abspath("./classes/"))
from config_loader import *
CONFIGURATION_FILE = "./config.xml"
def get_configuration():
conf = ConfigLoader(CONFIGURATION_FILE)
conf.loadFile()
configuration = conf.getConfiguration()
configuration.filename = CONFIGURATION_FILE
return configuration
if __name__ == '__main__':
# Load configuration from config.xml
configuration = get_configuration()
    if configuration is None:
sys.exit(1)
connect = sqlite3.connect(configuration.database_filename)
cursor = connect.cursor()
cursor.execute("DROP TABLE IF EXISTS thermometer")
cursor.execute("DROP TABLE IF EXISTS sensor")
cursor.execute("DROP TABLE IF EXISTS measurement")
cursor.execute("CREATE TABLE thermometer (id INTEGER PRIMARY KEY, thermometer_id INTEGER, hostname TEXT, \
port TEXT, title TEXT, description TEXT, latitude TEXT, longitude TEXT)")
cursor.execute("CREATE TABLE sensor (id INTEGER PRIMARY KEY, sensor_id INTEGER, description TEXT, \
thermometer_id INTEGER, FOREIGN KEY (thermometer_id) REFERENCES thermometer(id))")
cursor.execute("CREATE TABLE measurement (id INTEGER PRIMARY KEY, thermometer_id INTEGER, \
sensor_id INTEGER, celsius TEXT, fahrenheit TEXT, humidity TEXT, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, \
FOREIGN KEY (thermometer_id) REFERENCES thermometer(id), FOREIGN KEY (sensor_id) REFERENCES sensor(id))")
connect.commit()
connect.close()
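# Example row insert against the schema above (hypothetical values):
#   cursor.execute("INSERT INTO measurement (thermometer_id, sensor_id, "
#                  "celsius, fahrenheit, humidity) VALUES (?, ?, ?, ?, ?)",
#                  (1, 1, '21.5', '70.7', '40'))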
|
{
"content_hash": "e39cd7e55a35f838e9bb161cbdaf6dd1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 113,
"avg_line_length": 36.38095238095238,
"alnum_prop": 0.737565445026178,
"repo_name": "marekhakala/temperaturehub",
"id": "23bc1ec81e59c6ee3417cb9c8b8f0870e0a5f598",
"size": "2161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Temperature_HUB_server/init_db.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "125"
},
{
"name": "Python",
"bytes": "47302"
},
{
"name": "XSLT",
"bytes": "6193"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2.types import fulfillment
from google.cloud.dialogflow_v2.types import fulfillment as gcd_fulfillment
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2
from .base import FulfillmentsTransport, DEFAULT_CLIENT_INFO
class FulfillmentsGrpcTransport(FulfillmentsTransport):
"""gRPC backend transport for Fulfillments.
Service for managing
[Fulfillments][google.cloud.dialogflow.v2.Fulfillment].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def get_fulfillment(
self,
) -> Callable[[fulfillment.GetFulfillmentRequest], fulfillment.Fulfillment]:
r"""Return a callable for the get fulfillment method over gRPC.
Retrieves the fulfillment.
Returns:
Callable[[~.GetFulfillmentRequest],
~.Fulfillment]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_fulfillment" not in self._stubs:
self._stubs["get_fulfillment"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.Fulfillments/GetFulfillment",
request_serializer=fulfillment.GetFulfillmentRequest.serialize,
response_deserializer=fulfillment.Fulfillment.deserialize,
)
return self._stubs["get_fulfillment"]
@property
def update_fulfillment(
self,
) -> Callable[
[gcd_fulfillment.UpdateFulfillmentRequest], gcd_fulfillment.Fulfillment
]:
r"""Return a callable for the update fulfillment method over gRPC.
Updates the fulfillment.
Returns:
Callable[[~.UpdateFulfillmentRequest],
~.Fulfillment]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_fulfillment" not in self._stubs:
self._stubs["update_fulfillment"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.Fulfillments/UpdateFulfillment",
request_serializer=gcd_fulfillment.UpdateFulfillmentRequest.serialize,
response_deserializer=gcd_fulfillment.Fulfillment.deserialize,
)
return self._stubs["update_fulfillment"]
def close(self):
self.grpc_channel.close()
@property
def cancel_operation(
self,
) -> Callable[[operations_pb2.CancelOperationRequest], None]:
r"""Return a callable for the cancel_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["cancel_operation"]
@property
def get_operation(
self,
) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
r"""Return a callable for the get_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
self._stubs["get_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["get_operation"]
@property
def list_operations(
self,
) -> Callable[
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
]:
r"""Return a callable for the list_operations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
self._stubs["list_operations"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
)
return self._stubs["list_operations"]
@property
def list_locations(
self,
) -> Callable[
[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_locations" not in self._stubs:
self._stubs["list_locations"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/ListLocations",
request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
response_deserializer=locations_pb2.ListLocationsResponse.FromString,
)
return self._stubs["list_locations"]
@property
def get_location(
self,
) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location" not in self._stubs:
self._stubs["get_location"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/GetLocation",
request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
response_deserializer=locations_pb2.Location.FromString,
)
return self._stubs["get_location"]
@property
def kind(self) -> str:
return "grpc"
__all__ = ("FulfillmentsGrpcTransport",)
|
{
"content_hash": "ade532aef1fee00af42370e8219725ac",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 91,
"avg_line_length": 44.61827956989247,
"alnum_prop": 0.6200747077961201,
"repo_name": "googleapis/python-dialogflow",
"id": "70eed03825651dcf3d7562859862e25e591f2a11",
"size": "17198",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dialogflow_v2/services/fulfillments/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
requirements_path = os.path.join(os.path.dirname(__file__), 'requirements.txt')
with open(requirements_path, "r") as f:
    REQUIREMENTS = [x.strip() for x in f if x.strip()]
setup(
name='django-hbs-makemessages',
version='0.9.6',
license='MIT',
description='Library providing makemessages for Handlebars.js templates',
long_description=README,
url='https://github.com/rafalp/django-hbs-makemessages',
author=u'Rafał Pitoń',
author_email='kontakt@rpiton.com',
install_requires=REQUIREMENTS,
    py_modules=['djangohbs'],
scripts=[
'django-admin-hbs.py'
],
test_suite="test.main",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
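# Editorial note: with this configuration, `pip install .` builds the package,
# puts djangohbs on the import path and installs the bundled
# django-admin-hbs.py script onto PATH.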
|
{
"content_hash": "292358ab18fd4ad75b4641549a8638bf",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 35.48717948717949,
"alnum_prop": 0.6322254335260116,
"repo_name": "rafalp/django-hbs-makemessages",
"id": "24b8bb55ba8de0df9e3afd7f620aaa8954bbe242",
"size": "1409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "21410"
}
],
"symlink_target": ""
}
|
"""Run a web flow for oauth2.
"""
import os
import socket
import webbrowser
from oauth2client import client
from oauth2client import tools
from googlecloudsdk.core import log
from googlecloudsdk.core import config
try:
# pylint:disable=g-import-not-at-top
from urlparse import parse_qsl
except ImportError:
# pylint:disable=g-import-not-at-top
from cgi import parse_qsl
class Error(Exception):
"""Exceptions for the flow module."""
class AuthRequestRejectedException(Error):
"""Exception for when the authentication request was rejected."""
class AuthRequestFailedException(Error):
"""Exception for when the authentication request was rejected."""
class ClientRedirectHandler(tools.ClientRedirectHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
  into the server's query_params and then stops serving.
"""
  # pylint:disable=invalid-name, This method is overridden from the base class.
def do_GET(self):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
query = self.path.split('?', 1)[-1]
query = dict(parse_qsl(query))
self.server.query_params = query
if 'code' in query:
page = 'oauth2_landing.html'
else:
page = 'oauth2_landing_error.html'
html_path = os.path.join(
config.GoogleCloudSDKPackageRoot(), 'core', 'credentials', page)
with open(html_path) as html_file:
self.wfile.write(html_file.read())
def Run(flow, launch_browser=True, http=None,
auth_host_name='localhost', auth_host_port_start=8085):
"""Run a web flow to get oauth2 credentials.
Args:
flow: oauth2client.OAuth2WebServerFlow, A flow that is ready to run.
launch_browser: bool, If False, give the user a URL to copy into
a browser. Requires that they paste the refresh token back into the
terminal. If True, opens a web browser in a new window.
http: httplib2.Http, The http transport to use for authentication.
auth_host_name: str, Host name for the redirect server.
auth_host_port_start: int, First port to try for serving the redirect. If
this port is taken, it will keep trying incrementing ports until 100
have been tried, then fail.
Returns:
oauth2client.Credential, A ready-to-go credential that has already been
put in the storage.
Raises:
AuthRequestRejectedException: If the request was rejected.
AuthRequestFailedException: If the request fails.
"""
if launch_browser:
success = False
port_number = auth_host_port_start
while True:
try:
httpd = tools.ClientRedirectServer((auth_host_name, port_number),
ClientRedirectHandler)
except socket.error, e:
if port_number > auth_host_port_start + 100:
success = False
break
port_number += 1
else:
success = True
break
if success:
flow.redirect_uri = ('http://%s:%s/' % (auth_host_name, port_number))
authorize_url = flow.step1_get_authorize_url()
webbrowser.open(authorize_url, new=1, autoraise=True)
message = 'Your browser has been opened to visit:'
log.err.Print('{message}\n\n {url}\n\n'.format(
message=message,
url=authorize_url,
))
httpd.handle_request()
if 'error' in httpd.query_params:
raise AuthRequestRejectedException('Unable to authenticate.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
raise AuthRequestFailedException(
'Failed to find "code" in the query parameters of the redirect.')
else:
message = ('Failed to start a local webserver listening on any port '
'between {start_port} and {end_port}. Please check your '
'firewall settings or locally running programs that may be '
'blocking or using those ports.')
log.warn(message.format(
start_port=auth_host_port_start,
end_port=port_number,
))
launch_browser = False
log.warn('Defaulting to URL copy/paste mode.')
if not launch_browser:
flow.redirect_uri = client.OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url()
message = 'Go to the following link in your browser:'
log.err.Print('{message}\n\n {url}\n\n'.format(
message=message,
url=authorize_url,
))
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError, e:
raise AuthRequestFailedException(e)
return credential
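# Illustrative sketch (editorial addition): driving Run() without a browser,
# using a flow built from placeholder client credentials. CLIENT_ID,
# CLIENT_SECRET and the scope below are hypothetical values, not part of
# this module.
if __name__ == '__main__':
  demo_flow = client.OAuth2WebServerFlow(
      client_id='CLIENT_ID',
      client_secret='CLIENT_SECRET',
      scope='https://www.googleapis.com/auth/userinfo.email')
  demo_credential = Run(demo_flow, launch_browser=False)
  log.err.Print('Refresh token: %s' % demo_credential.refresh_token)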
|
{
"content_hash": "62f19b6d5f0a2874259e870a509e20d8",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 78,
"avg_line_length": 31.063291139240505,
"alnum_prop": 0.6666666666666666,
"repo_name": "Plantain/sms-mailinglist",
"id": "98a15ec5ed5695cca4cc40ccf0af649e98a62336",
"size": "4959",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/googlecloudsdk/core/credentials/flow.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "1479"
},
{
"name": "Perl",
"bytes": "6919"
},
{
"name": "Python",
"bytes": "4968506"
},
{
"name": "R",
"bytes": "274"
},
{
"name": "Shell",
"bytes": "1540"
}
],
"symlink_target": ""
}
|
cfg = {
"slaves": ["192.168.1.2#ma"],
"source": "/home/bob/*"
}
cmd = "rsync -av {source} simple@{ip}::{module} --password-file=/etc/rsync.secrets"
from pyinotify import WatchManager, Notifier, ProcessEvent
from pyinotify import IN_DELETE, IN_CLOSE_WRITE
import signal
import threading
import sys
import subprocess
class MyProcessEvent(ProcessEvent):
def process_IN_DELETE(self, event):
self.do(event)
def process_IN_CLOSE_WRITE(self, event):
self.do(event)
def do(self, event):
def invoke(cmd):
subprocess.Popen(cmd, shell=True).communicate()
source = cfg.get("source")
for slaves in cfg.get("slaves"):
ip, module = slaves.split("#")
threading.Thread(target=invoke, args=(cmd.format(source=source, ip=ip, module=module),)).start()
class MyNotify(object):
def __init__(self, path):
self.wm = WatchManager()
mask = IN_DELETE|IN_CLOSE_WRITE
self.notifier = Notifier(self.wm, MyProcessEvent())
self.wm.add_watch(path, mask, rec=True)
def run(self):
        # a signal handler must accept (signum, frame)
        signal.signal(signal.SIGHUP, lambda signum, frame: sys.exit(0))
while True:
try:
self.notifier.process_events()
if self.notifier.check_events():
self.notifier.read_events()
except Exception:
self.notifier.stop()
break
m = MyNotify('/home/bob')
m.run()
|
{
"content_hash": "f320b5c9c1a990ea62dc12e974af1261",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 108,
"avg_line_length": 32.17777777777778,
"alnum_prop": 0.6022099447513812,
"repo_name": "nagexiucai/shityun",
"id": "3a133f2687c3b2677d630f0add364233c3f0d3b9",
"size": "1787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rsyncfs/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24477"
},
{
"name": "HTML",
"bytes": "25156"
},
{
"name": "JavaScript",
"bytes": "827410"
},
{
"name": "PHP",
"bytes": "25"
},
{
"name": "Python",
"bytes": "13560"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
}
|
import datetime
import unittest
from base import BaseTestCase
from project.server.models import User
class TestCustomerBlueprint(BaseTestCase):
def test_customer_route_requires_login(self):
        # Ensure the customer route requires a logged-in user.
response = self.client.get('/customer/items', follow_redirects=True)
self.assertIn(b'Please login', response.data)
def test_customer_login_behaves_correctly(self):
        # Ensure login behaves correctly with regard to the session.
with self.client:
self.client.post(
'/login',
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True
)
response = self.client.get('/customer/items', follow_redirects=True)
self.assertIn(b'Customers', response.data)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "cada05a547ef1be805194809457fc38b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 33.03703703703704,
"alnum_prop": 0.6423766816143498,
"repo_name": "slobodz/TeamServices",
"id": "66a992dc7fe66ab94f737269d3acbdb44ec2fc89",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/tests/test_customer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "317"
},
{
"name": "Dockerfile",
"bytes": "442"
},
{
"name": "HTML",
"bytes": "41940"
},
{
"name": "JavaScript",
"bytes": "3408"
},
{
"name": "Python",
"bytes": "157691"
},
{
"name": "Shell",
"bytes": "2912"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import # Avoid importing `importlib` from this package.
import decimal
import datetime
from importlib import import_module
import unicodedata
from django.conf import settings
from django.utils import dateformat, numberformat, datetime_safe
from django.utils.encoding import force_str
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import get_language, to_locale, check_for_language
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
'DATE_INPUT_FORMATS': ('%Y-%m-%d',),
'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M:%S.%f', '%H:%M'),
'DATETIME_INPUT_FORMATS': (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d'
),
}
def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang, format_module_path=None):
"""
Does the heavy lifting of finding format modules.
"""
if not check_for_language(lang):
return
if format_module_path is None:
format_module_path = settings.FORMAT_MODULE_PATH
format_locations = []
if format_module_path:
if isinstance(format_module_path, six.string_types):
format_module_path = [format_module_path]
for path in format_module_path:
format_locations.append(path + '.%s')
format_locations.append('django.conf.locale.%s')
locale = to_locale(lang)
locales = [locale]
if '_' in locale:
locales.append(locale.split('_')[0])
for location in format_locations:
for loc in locales:
try:
yield import_module('%s.formats' % (location % loc))
except ImportError:
pass
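# Example (editorial note): for lang='pt-br' and a hypothetical
# FORMAT_MODULE_PATH of 'myproject.formats', the generator tries, in order,
# myproject.formats.pt_BR.formats, myproject.formats.pt.formats,
# django.conf.locale.pt_BR.formats and django.conf.locale.pt.formats,
# silently skipping any module that does not exist.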
def get_format_modules(lang=None, reverse=False):
"""
Returns a list of the format modules found
"""
if lang is None:
lang = get_language()
modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH)))
if reverse:
return list(reversed(modules))
return modules
def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, returns the format for the current
language (locale), defaults to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
format_type = force_str(format_type)
if use_l10n or (use_l10n is None and settings.USE_L10N):
if lang is None:
lang = get_language()
cache_key = (format_type, lang)
try:
cached = _format_cache[cache_key]
if cached is not None:
return cached
else:
# Return the general setting by default
return getattr(settings, format_type)
except KeyError:
for module in get_format_modules(lang):
try:
val = getattr(module, format_type)
for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
if iso_input not in val:
if isinstance(val, tuple):
val = list(val)
val.append(iso_input)
_format_cache[cache_key] = val
return val
except AttributeError:
pass
_format_cache[cache_key] = None
return getattr(settings, format_type)
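# Example (editorial note): with localization active,
# get_format('DATE_INPUT_FORMATS', lang='de') returns the German input
# formats with any missing ISO_INPUT_FORMATS entries appended, so an ISO
# date such as '2014-01-31' parses under every locale.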
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
def date_format(value, format=None, use_l10n=None):
"""
Formats a datetime.date or datetime.datetime object using a
localizable format
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n))
def time_format(value, format=None, use_l10n=None):
"""
Formats a datetime.time object using a localizable format
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
"""
Formats a numeric value using localization settings
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n or (use_l10n is None and settings.USE_L10N):
lang = get_language()
else:
lang = None
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
decimal_pos,
get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
force_grouping=force_grouping
)
def localize(value, use_l10n=None):
"""
Checks if value is a localizable type (date, number...) and returns it
formatted as a string using current locale format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, bool):
return mark_safe(six.text_type(value))
elif isinstance(value, (decimal.Decimal, float) + six.integer_types):
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
else:
return value
def localize_input(value, default=None):
"""
Checks if an input value is a localizable type and returns it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, (decimal.Decimal, float) + six.integer_types):
return number_format(value)
elif isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.time):
format = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
return value.strftime(format)
return value
def sanitize_separators(value):
"""
Sanitizes a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if settings.USE_L10N and isinstance(value, six.string_types):
parts = []
decimal_separator = get_format('DECIMAL_SEPARATOR')
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
thousand_sep = get_format('THOUSAND_SEPARATOR')
for replacement in set([
thousand_sep, unicodedata.normalize('NFKD', thousand_sep)]):
value = value.replace(replacement, '')
parts.append(value)
value = '.'.join(reversed(parts))
return value
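# Illustrative example (editorial addition), assuming USE_L10N and
# USE_THOUSAND_SEPARATOR are enabled and the active locale uses ',' as
# DECIMAL_SEPARATOR and '.' as THOUSAND_SEPARATOR (e.g. 'de'):
#   sanitize_separators('1.234,56')  ->  '1234.56'
# The grouping separator is stripped and the decimal separator is
# normalized to '.', which is the form Python's numeric parsers expect.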
|
{
"content_hash": "1ccbf7806daf8719540f6f7e1b846153",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 114,
"avg_line_length": 36.03056768558952,
"alnum_prop": 0.6381044721851896,
"repo_name": "YYWen0o0/python-frame-django",
"id": "521ab5c7634549dd568dddef5af8b52a613deac8",
"size": "8251",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "django/utils/formats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53353"
},
{
"name": "JavaScript",
"bytes": "102434"
},
{
"name": "Python",
"bytes": "9808771"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
import logging
import pika
import json
import time
import Queue
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.ERROR)
class AsyncConsumer(object):
"""This is an example consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
ACK_INTERVAL = 1
def __init__(self, amqp_url, queue, exchange, routing_key):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_url: The AMQP url to connect with
"""
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._url = amqp_url
self._queue = queue
self._exchange = exchange
self._routing_key = routing_key
self._exchange_type = 'topic'
self.msg_queue = Queue.Queue()
self.ack_queue = Queue.Queue()
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
return pika.SelectConnection(pika.URLParameters(self._url),
self.on_connection_open,
stop_ioloop_on_close=False)
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
LOGGER.info('Closing connection')
self._connection.close()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(5, self.reconnect)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
        :param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
LOGGER.warning('Channel %i was closed: (%s) %s',
channel, reply_code, reply_text)
self._connection.close()
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
channel.basic_qos(prefetch_count=2000)
self.setup_exchange(self._exchange)
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self._exchange_type, durable=True, auto_delete=False)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
LOGGER.info('Exchange declared')
self.setup_queue(self._queue)
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, queue_name, durable=True, auto_delete=False)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s with %s',
self._exchange, self._queue, self._routing_key)
self._channel.queue_bind(self.on_bindok, self._queue,
self._exchange, self._routing_key)
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
LOGGER.info('Adding consumer cancellation callback')
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
method_frame)
if self._channel:
self._channel.close()
def acknowledge_message(self, delivery_tag):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
LOGGER.info('Acknowledging message %s', delivery_tag)
self._channel.basic_ack(delivery_tag)
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel unused_channel: The channel object
        :param pika.spec.Basic.Deliver basic_deliver: The Basic.Deliver method frame
        :param pika.spec.BasicProperties properties: The message properties
:param str|unicode body: The message body
"""
LOGGER.info('Received message # %s from %s: %s',
basic_deliver.delivery_tag, properties.app_id, body)
data = json.loads(body)
headers = properties.headers or {}
headers['routing_key'] = basic_deliver.routing_key
self.msg_queue.put({'delivery_tag' : basic_deliver.delivery_tag, 'body' : body, 'headers': headers})
#self.acknowledge_message(basic_deliver.delivery_tag)
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
closed, which will in-turn close the connection.
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
"""
LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
self.close_channel()
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if self._channel:
LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
LOGGER.info('Issuing consumer related RPC commands')
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message,
self._queue, )
self.schedule_ack()
def schedule_ack(self):
if self._closing:
return
LOGGER.info('Scheduling next message for %0.1f seconds',
self.ACK_INTERVAL)
self._connection.add_timeout(self.ACK_INTERVAL,
self.do_ack)
def do_ack(self):
try:
ack = self.ack_queue.get(False)
while ack is not None:
self.acknowledge_message(ack)
ack = self.ack_queue.get(False)
except Queue.Empty:
pass
self.schedule_ack()
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
"""
LOGGER.info('Queue bound')
self.start_consuming()
def close_channel(self):
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
LOGGER.info('Closing the channel')
self._channel.close()
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
LOGGER.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def run(self):
"""Run the example consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
self._connection.ioloop.start()
def stop(self):
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again because this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
"""
LOGGER.info('Stopping')
self._closing = True
self.stop_consuming()
self._connection.ioloop.start()
LOGGER.info('Stopped')
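# Editorial note on the ack hand-off: pika channels are not thread-safe, so
# basic_ack must run on the connection's IOLoop thread. on_message() only
# enqueues deliveries on msg_queue; the worker thread in main() pushes
# finished delivery tags onto ack_queue, and do_ack(), rescheduled on the
# IOLoop every ACK_INTERVAL seconds, drains that queue and performs the
# actual acknowledgements.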
def start_consuming(consumer):
consumer.run()
def main():
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
example = AsyncConsumer('amqp://guest:guest@localhost:5672/%2F','quotation_daily', 'auction', '#')
import threading
thread = threading.Thread(target = start_consuming, args = [example])
try:
thread.start()
count = 0
while True:
#print "%s/%s" % (count, example.msg_queue.qsize())
item = example.msg_queue.get()
example.ack_queue.put(item['delivery_tag'])
count += 1
except KeyboardInterrupt:
example.stop()
if __name__ == '__main__':
main()
|
{
"content_hash": "16057f343dceff3c7beed867ad280f35",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 108,
"avg_line_length": 38.208955223880594,
"alnum_prop": 0.6359375,
"repo_name": "kevenli/Flowy",
"id": "b32e02129e7eb2d951d4ebf0d6d2195781371fe8",
"size": "15360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtime/AsyncConsumer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "Java",
"bytes": "51845"
},
{
"name": "Python",
"bytes": "544911"
},
{
"name": "Shell",
"bytes": "2080"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import logging
import numpy as np
import tempfile
import copy
import gomill
from gomill import common, boards, sgf, sgf_moves, gtp_states
import utils
import analyze_board
"""
Basic Player / Bot objects;
Player should be gomill compatible envelope which actually generates
moves, resigns, ..
Bot should be the object that actually does the core work, e.g. computing
Move probability, ..
"""
class Player(object):
def __init__(self):
self.handlers = { 'name' : self.handle_name,
'quit' : self.handle_quit }
self.name = None
def genmove(self, game_state, player):
"""
game_state is gomill.gtp_states.Game_state
:returns: gomill.Move_generator_result
"""
raise NotImplementedError
def handle_name(self, args):
if self.name is None:
return self.__class__.__name__
return self.name
def handle_quit(self, args):
pass
def get_handlers(self):
return self.handlers
def __str__(self):
return "<%s>"%self.handle_name([])
class DistWrappingMaxPlayer(Player):
"""
A simple wrapping bot which chooses next move to be the one with the biggest (therefore the name)
probability. The probabilities are computed by the wrapped bot's gen_probdist().
"""
def __init__(self, bot):
super(DistWrappingMaxPlayer, self).__init__()
self.bot = bot
self.handlers['ex-dist'] = self.handle_ex_dist
self.handlers['move_probabilities'] = self.handle_move_probabilities
self.move_num = 0
def genmove(self, game_state, player):
self.move_num += 1
dist = self.bot.gen_probdist(game_state, player)
result = gtp_states.Move_generator_result()
if dist is not None:
move = np.unravel_index(np.argmax(dist), dist.shape)
result.move = move
logging.debug("%s valid moves\n%s"%(self,
utils.dist_stats(dist)))
logging.debug("%s move %d: playing %s"%(self,
self.move_num,
gomill.common.format_vertex(move)))
else:
result.pass_move = True
logging.debug("%s move %d: playing pass"%(self, self.move_num))
return result
def handle_quit(self, args):
self.bot.close()
def handle_move_probabilities(self, args):
return self.bot.move_probabilities()
def handle_ex_dist(self, args):
top = 3
if args:
try:
top = gomill.gtp_engine.interpret_int(args[0])
except IndexError:
gtp_engine.report_bad_arguments()
return self.bot.dist_stats(top)
class DistWrappingSamplingPlayer(Player):
"""
A simple wrapping bot which randomly samples next move based on the moves' probability
distribution, computed by the wrapped bot's gen_probdist().
Never passes.
"""
def __init__(self, bot):
super(DistWrappingSamplingPlayer, self).__init__()
self.bot = bot
def genmove(self, game_state, player):
dist = self.bot.gen_probdist(game_state, player)
result = gtp_states.Move_generator_result()
if dist is not None:
# choose an intersection with probability given by the dist
coord = np.random.choice((game_state.board.side ** 2), 1, p=dist.ravel())[0]
            move = (coord // game_state.board.side, coord % game_state.board.side)
result.move = move
else:
result.pass_move = True
return result
def handle_quit(self, args):
self.bot.close()
class RandomPlayer(Player):
def genmove(self, game_state, player):
result = gtp_states.Move_generator_result()
# pass
if game_state.move_history and not game_state.move_history[-1].move:
result.pass_move = True
return result
else:
for i in xrange(10):
row, col = np.random.choice(game_state.board.side, 2)
                ## TODO: this might be an incorrect move,
                # but nobody will use the RandomPlayer anyway
if not game_state.board.get(row, col):
result.move = (row, col)
return result
result.resign = True
return result
class WrappingGnuGoPlayer(Player):
def __init__(self, player, passing=True, resigning=False):
super(WrappingGnuGoPlayer, self).__init__()
self.player = player
self.passing = passing
self.resigning = resigning
hp = copy.copy(player.get_handlers())
hp.update(self.handlers)
self.handlers = hp
def genmove(self, game_state, color):
result = gtp_states.Move_generator_result()
logging.debug("%s enter"%(self))
move = self.gnu_go_move(game_state, color)
# pass if GnuGo tells us to do so
if self.passing and move == 'pass':
result.pass_move = True
return result
elif self.resigning and move == 'resign':
result.resign = True
return result
else:
logging.debug("%s not listening, descend"%(self))
return self.player.genmove(game_state, color)
def gnu_go_move(self, game_state, color):
assert isinstance(game_state.board, gomill.boards.Board) # for wingide code completion
game = gomill.sgf.Sgf_game(size=game_state.board.side)
gomill.sgf_moves.set_initial_position(game, game_state.board)
node = game.get_root()
node.set('KM', game_state.komi)
node.set('PL', color)
with tempfile.NamedTemporaryFile() as sgf_file:
sgf_file.write(game.serialise())
sgf_file.flush()
gg_move = utils.get_gnu_go_response(sgf_file.name, color)
return gg_move.lower()
class DistributionBot(object):
def __init__(self):
self.last_dist = None
self.last_player = None
def __str__(self):
return "<%s>"%(self.__class__.__name__)
def gen_probdist_raw(self, game_state, player):
"""
The core method to implement for distribution bots.
        It need not ensure the distribution is valid; masking of illegal
        moves (occupied points, suicides, ko) is handled by gen_probdist().
:return: a numpy array of floats of shape (board.side, board.side), or None for pass
the array should be normalized to 1
"""
raise NotImplementedError
def gen_probdist(self, game_state, player):
"""
Generates a correct move probability distribution for the next move,
using the gen_probdist_raw().
Correct means that it zeroes out probability of playing incorrect move,
such as move forbidden by ko, suicide and occupied points.
Stores the dist and the player.
:return: a numpy array of floats of shape (board.side, board.side), or None for pass
the array is normalized to 1
"""
dist = self.gen_probdist_raw(game_state, player)
if dist is not None:
correct_moves = analyze_board.board2correct_move_mask(game_state.board, player)
if game_state.ko_point:
correct_moves[game_state.ko_point[0]][game_state.ko_point[1]] = 0
# compute some debugging stats of the incorrect moves first
incorrect_dist = (1 - correct_moves) * dist
logging.debug("%s incorrect moves\n%s"%(self,
utils.dist_stats(incorrect_dist)))
# keep only correct moves
dist = correct_moves * dist
s = dist.sum()
if s > 0.0:
dist = dist / dist.sum()
else:
logging.debug("No valid moves, PASSING.")
dist = None
self.last_dist = dist
self.last_player = player
return self.last_dist
def move_probabilities(self):
if self.last_dist is not None:
ret = []
for row, col in np.transpose(np.nonzero(self.last_dist)):
ret.append( "%s %f"%(gomill.common.format_vertex((row, col)),
self.last_dist[row][col]))
return '\n'.join(ret)
return ''
def dist_stats(self, top=3):
if self.last_dist is not None:
return utils.dist_stats(self.last_dist, top)
return ''
def close(self):
"""Called upon exit, to allow for resource freeup."""
pass
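# Editorial note: a custom bot only needs to override gen_probdist_raw();
# masking of occupied points, suicides and ko is applied centrally by
# gen_probdist(). RandomDistBot below is the minimal example of that
# contract.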
class RandomDistBot(DistributionBot):
def gen_probdist_raw(self, game_state, player):
return np.random.random((game_state.board.side, game_state.board.side))
if __name__ == "__main__":
def test_bot():
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG)
player = DistWrappingMaxPlayer(RandomDistBot())
class State:
pass
s = State()
b = gomill.boards.Board(3)
s.board = b
b.play(1, 1, "b")
b.play(0, 1, "b")
logging.debug("\n"+gomill.ascii_boards.render_board(b))
mv = player.genmove(s, 'w').move
b.play(mv[0], mv[1], 'w')
logging.debug("\n"+gomill.ascii_boards.render_board(b))
logging.debug("best move is " + gomill.common.format_vertex(mv))
logging.debug("\n" + str(player.bot.last_dist))
logging.debug(utils.dist_stats(player.bot.last_dist))
test_bot()
|
{
"content_hash": "d8f63825036d5f14df048b900fc58839",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 101,
"avg_line_length": 34.65217391304348,
"alnum_prop": 0.5781053952321205,
"repo_name": "jmoudrik/deep-go-wrap",
"id": "7d05948108d06b6bfea4a6c5f6aeb8054b9e031d",
"size": "9564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepgo/players.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90777"
}
],
"symlink_target": ""
}
|
"""
===============================================================================
Compute a sparse inverse solution using the Gamma-Map empirical Bayesian method
===============================================================================
See Wipf et al. "A unified Bayesian framework for MEG/EEG source imaging."
NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
"""
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.inverse_sparse import gamma_map
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
# Read the evoked response and crop it
condition = 'Left visual'
evoked = mne.read_evokeds(evoked_fname, condition=condition,
baseline=(None, 0))
evoked.crop(tmin=-50e-3, tmax=300e-3)
# Read the forward solution
forward = mne.read_forward_solution(fwd_fname, surf_ori=True,
force_fixed=False)
# Read noise covariance matrix and regularize it
cov = mne.read_cov(cov_fname)
cov = mne.cov.regularize(cov, evoked.info)
# Run the Gamma-MAP method
alpha = 0.5
stc, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True,
return_residual=True)
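# Editorial note: alpha is the regularization strength; larger values
# penalize the data fit more and typically yield sparser solutions with
# fewer active sources. xyz_same_gamma=True ties the three orientation
# components of each source to a single hyperparameter.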
# View in 2D and 3D ("glass" brain like 3D plot)
# Show the sources as spheres scaled by their strength
scale_factors = np.max(np.abs(stc.data), axis=1)
scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors))
plot_sparse_source_estimates(
forward['src'], stc, bgcolor=(1, 1, 1),
modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None),
fig_name="Gamma-MAP")
# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked = mne.pick_types_evoked(evoked, meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,
proj=True)
residual = mne.pick_types_evoked(residual, meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,
proj=True)
|
{
"content_hash": "e51a71abf0b838df3302651edaf937c5",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6461342395921835,
"repo_name": "agramfort/mne-python",
"id": "4eb3208357bd783b0190dea7fab94496e34d3583",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/inverse/plot_gamma_map_inverse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "3751581"
},
{
"name": "Shell",
"bytes": "4011"
}
],
"symlink_target": ""
}
|
"""Support for a User resource in Skytap."""
from skytap.Environments import Environments
from skytap.framework.ApiClient import ApiClient
import skytap.framework.Utils as Utils
from skytap.models.SkytapResource import SkytapResource
class User(SkytapResource):
"""One Skytap User resource."""
def __init__(self, user_json):
"""Create a Skytap User object."""
super(User, self).__init__(user_json)
def _calculate_custom_data(self):
"""Add custom data.
        Standardize sso_enabled to a boolean.
Create 'name' from first_name and last_name.
Create an 'admin' flag from account_role.
If there are environments with this user, turn them into objects.
"""
if 'sso_enabled' in self.data:
self.data['sso'] = bool(self.sso_enabled)
else:
self.data['sso'] = False
self.data['name'] = self.first_name + ' ' + self.last_name
if 'account_role' in self.data:
self.data['admin'] = self.account_role == 'admin'
if 'configurations' in self.data:
if isinstance(self.data['configurations'], list):
if len(self.data['configurations']) > 0:
if isinstance(self.data['configurations'][0], dict):
self.data['configurations'] = Environments(self.data['configurations']) # noqa
def delete(self, transfer_user):
"""Delete the user."""
if isinstance(transfer_user, User):
transfer_user = transfer_user.id
if not isinstance(transfer_user, int):
raise TypeError('transfer_user must be a User or int.')
Utils.info('Deleting user: ' +
str(self.id) + ' (' + self.name + ') and transferring ' +
'resources to user id: ' + str(transfer_user))
api = ApiClient()
transfer = {"transfer_user_id": str(transfer_user)}
response = api.rest(self.url,
transfer,
'DELETE')
return response
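# Illustrative usage sketch (editorial addition). Users is assumed to be the
# collection accessor from skytap.Users, mirroring the Environments import
# above; the ids are placeholders:
#   from skytap.Users import Users
#   Users()[123].delete(transfer_user=456)  # hand user 123's resources to 456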
|
{
"content_hash": "8a054cc364be3faf794cab1438e58f81",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 103,
"avg_line_length": 38.867924528301884,
"alnum_prop": 0.5820388349514564,
"repo_name": "FulcrumIT/skytap",
"id": "10e4a0f9210b2c6a6c29d1ce372015fa5d65eceb",
"size": "2060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skytap/models/User.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88548"
}
],
"symlink_target": ""
}
|
"""A thin wrapper around datastore query RPC calls.
This provides wrappers around the internal only datastore_pb library and is
designed to be the lowest-level API to be used by all Python datastore client
libraries for executing queries. It provides a layer of protection so the actual
RPC syntax can change without affecting client libraries.
Any class, function, field or argument starting with an '_' is for INTERNAL use
only and should not be used by developers!
"""
__all__ = ['Batch',
'Batcher',
'CompositeFilter',
'CompositeOrder',
'CorrelationFilter',
'Cursor',
'FetchOptions',
'FilterPredicate',
'Order',
'PropertyFilter',
'PropertyOrder',
'Query',
'QueryOptions',
'ResultsIterator',
'make_filter',
'apply_query',
'inject_results']
import base64
import collections
import pickle
from google.net.proto import ProtocolBuffer
from google.appengine.datastore import entity_pb
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api.search import geo_util
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import datastore_v4_pb
class _BaseComponent(object):
"""A base class for query components.
Currently just implements basic == and != functions.
"""
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self is other or self.__dict__ == other.__dict__
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return equal
return not equal
def make_filter(name, op, values):
"""Constructs a FilterPredicate from the given name, op and values.
Args:
name: A non-empty string, the name of the property to filter.
op: One of PropertyFilter._OPERATORS.keys(), the operator to use.
values: A supported value, the value to compare against.
Returns:
if values is a list, a CompositeFilter that uses AND to combine all
values, otherwise a PropertyFilter for the single value.
Raises:
datastore_errors.BadPropertyError: if the property name is invalid.
datastore_errors.BadValueError: if the property did not validate correctly
or the value was an empty list.
Other exception types (like OverflowError): if the property value does not
meet type-specific criteria.
"""
datastore_types.ValidateProperty(name, values)
properties = datastore_types.ToPropertyPb(name, values)
if isinstance(properties, list):
filters = [PropertyFilter(op, prop) for prop in properties]
return CompositeFilter(CompositeFilter.AND, filters)
else:
return PropertyFilter(op, properties)
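# Example (editorial note): make_filter('age', '>=', 18) returns a single
# PropertyFilter, while make_filter('tag', '=', ['a', 'b']) returns a
# CompositeFilter AND-ing one PropertyFilter per value.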
def _make_key_value_map(entity, property_names):
"""Extracts key values from the given entity.
Args:
entity: The entity_pb.EntityProto to extract values from.
property_names: The names of the properties from which to extract values.
Returns:
A dict mapping property names to a lists of key values.
"""
value_map = dict((name, []) for name in property_names)
for prop in entity.property_list():
if prop.name() in value_map:
value_map[prop.name()].append(
datastore_types.PropertyValueToKeyValue(prop.value()))
if datastore_types.KEY_SPECIAL_PROPERTY in value_map:
value_map[datastore_types.KEY_SPECIAL_PROPERTY] = [
datastore_types.ReferenceToKeyValue(entity.key())]
return value_map
_query_converter = datastore_pbs._QueryConverter(
datastore_pbs.get_entity_converter())
class _PropertyComponent(_BaseComponent):
"""A component that operates on a specific set of properties."""
def _get_prop_names(self):
"""Returns a set of property names used by the filter."""
raise NotImplementedError
class FilterPredicate(_PropertyComponent):
"""An abstract base class for all query filters.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
"""
def __call__(self, entity):
"""Applies the filter predicate to the given entity.
Args:
entity: the datastore_pb.EntityProto to test.
Returns:
True if the given entity matches the filter, False otherwise.
"""
return self._apply(_make_key_value_map(entity, self._get_prop_names()))
def _apply(self, key_value_map):
"""Apply the given component to the comparable value map.
A filter matches a list of values if at least one value in the list
matches the filter, for example:
'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3'
Note: the values are actually represented as tuples whose first item
encodes the type; see datastore_types.PropertyValueToKeyValue().
Args:
key_value_map: A dict mapping property names to a list of
comparable values.
Return:
A boolean indicating if the given map matches the filter.
"""
raise NotImplementedError
def _prune(self, key_value_map):
"""Removes values from the given map that do not match the filter.
When doing a scan in the datastore, only index values that match the filters
are seen. When multiple values that point to the same entity are seen, the
entity only appears where the first value is found. This function removes
all values that don't match the query so that the first value in the map
is the same one the datastore would see first.
Args:
key_value_map: the comparable value map from which to remove
values. Does not need to contain values for all filtered properties.
Returns:
A value that evaluates to False if every value in a single list was
completely removed. This effectively applies the filter but is less
efficient than _apply().
"""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a pb."""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
def _to_pbs(self):
"""Internal only function to generate a list of pbs."""
return [self._to_pb()]
def _to_pb_v4(self):
"""Internal only function to generate a v4 pb."""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
class _SinglePropertyFilter(FilterPredicate):
"""Base class for a filter that operates on a single property."""
def _get_prop_name(self):
"""Returns the name of the property being filtered."""
raise NotImplementedError
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
raise NotImplementedError
def _get_prop_names(self):
return set([self._get_prop_name()])
def _apply(self, value_map):
for other_value in value_map[self._get_prop_name()]:
if self._apply_to_value(other_value):
return True
return False
def _prune(self, value_map):
if self._get_prop_name() not in value_map:
return True
values = [value for value in value_map[self._get_prop_name()]
if self._apply_to_value(value)]
value_map[self._get_prop_name()] = values
return bool(values)
class PropertyFilter(_SinglePropertyFilter):
"""An immutable filter predicate that constrains a single property."""
_OPERATORS = {
'<': datastore_pb.Query_Filter.LESS_THAN,
'<=': datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
'>': datastore_pb.Query_Filter.GREATER_THAN,
'>=': datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
'=': datastore_pb.Query_Filter.EQUAL,
}
_OPERATORS_INVERSE = dict((value, key)
for key, value in _OPERATORS.iteritems())
_OPERATORS_TO_PYTHON_OPERATOR = {
datastore_pb.Query_Filter.LESS_THAN: '<',
datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL: '<=',
datastore_pb.Query_Filter.GREATER_THAN: '>',
datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL: '>=',
datastore_pb.Query_Filter.EQUAL: '==',
}
_INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>='])
_INEQUALITY_OPERATORS_ENUM = frozenset([
datastore_pb.Query_Filter.LESS_THAN,
datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query_Filter.GREATER_THAN,
datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
])
_UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
def __init__(self, op, value):
"""Constructor.
Args:
op: A string representing the operator to use.
value: An entity_pb.Property, the property and value to compare against.
Raises:
datastore_errors.BadArgumentError if op has an unsupported value or value
is not an entity_pb.Property.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator: %r' % (op,))
if not isinstance(value, entity_pb.Property):
raise datastore_errors.BadArgumentError(
'value argument should be entity_pb.Property (%r)' % (value,))
super(PropertyFilter, self).__init__()
self._filter = datastore_pb.Query_Filter()
self._filter.set_op(self._OPERATORS[op])
self._filter.add_property().CopyFrom(value)
@property
def op(self):
raw_op = self._filter.op()
return self._OPERATORS_INVERSE.get(raw_op, str(raw_op))
@property
def value(self):
return self._filter.property(0)
def __repr__(self):
prop = self.value
name = prop.name()
value = datastore_types.FromPropertyPb(prop)
return '%s(%r, <%r, %r>)' % (self.__class__.__name__, self.op, name, value)
def _get_prop_name(self):
return self._filter.property(0).name()
def _apply_to_value(self, value):
if not hasattr(self, '_cmp_value'):
if self._filter.op() == datastore_pb.Query_Filter.EXISTS:
return True
self._cmp_value = datastore_types.PropertyValueToKeyValue(
self._filter.property(0).value())
self._condition = ('value %s self._cmp_value' %
self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op()])
return eval(self._condition)
def _has_inequality(self):
"""Returns True if the filter predicate contains inequalities filters."""
return self._filter.op() in self._INEQUALITY_OPERATORS_ENUM
@classmethod
def _from_pb(cls, filter_pb):
self = cls.__new__(cls)
self._filter = filter_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self._filter
def _to_pb_v4(self):
"""Returns a datastore_v4_pb.Filter representation of the filter."""
filter_pb = datastore_v4_pb.Filter()
prop_filter_pb = filter_pb.mutable_property_filter()
_query_converter._v3_filter_to_v4_property_filter(self._filter,
prop_filter_pb)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyFilter is unsupported.')
def __eq__(self, other):
if self.__class__ is not other.__class__:
if other.__class__ is _PropertyRangeFilter:
return [self._filter] == other._to_pbs()
return NotImplemented
return self._filter == other._filter
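# Illustrative sketch of constructing a PropertyFilter by hand. This assumes
# datastore_types.ToPropertyPb is available to build the entity_pb.Property
# value and that 'entity' is an entity_pb.EntityProto; filter predicates are
# callable on entities (see FilterPredicate above).
#
#   age_pb = datastore_types.ToPropertyPb('age', 18)
#   age_filter = PropertyFilter('>=', age_pb)
#   matches = age_filter(entity)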
class _PropertyRangeFilter(_SinglePropertyFilter):
"""A filter predicate that represents a range of values.
Since we allow multi-valued properties there is a large difference between
"x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the
first but not the second.
Since the datastore only allows a single inequality filter, multiple
inequality filters are merged into a single range filter in the
datastore (unlike equality filters). This class is used by
datastore_query.CompositeFilter to implement the same logic.
"""
_start_key_value = None
_end_key_value = None
@datastore_rpc._positional(1)
def __init__(self, start=None, start_incl=True, end=None, end_incl=True):
"""Constructs a range filter using start and end properties.
Args:
start: An entity_pb.Property to use as a lower bound, or None to indicate
no lower bound.
start_incl: A boolean that indicates if the lower bound is inclusive.
end: An entity_pb.Property to use as an upper bound, or None to indicate
no upper bound.
end_incl: A boolean that indicates if the upper bound is inclusive.
"""
if start is not None and not isinstance(start, entity_pb.Property):
raise datastore_errors.BadArgumentError(
'start argument should be entity_pb.Property (%r)' % (start,))
if end is not None and not isinstance(end, entity_pb.Property):
raise datastore_errors.BadArgumentError(
'end argument should be entity_pb.Property (%r)' % (end,))
if start and end and start.name() != end.name():
raise datastore_errors.BadArgumentError(
'start and end arguments must be on the same property (%s != %s)' %
(start.name(), end.name()))
if not start and not end:
raise datastore_errors.BadArgumentError(
'Unbounded ranges are not supported.')
super(_PropertyRangeFilter, self).__init__()
self._start = start
self._start_incl = start_incl
self._end = end
self._end_incl = end_incl
@classmethod
def from_property_filter(cls, prop_filter):
op = prop_filter._filter.op()
if op == datastore_pb.Query_Filter.GREATER_THAN:
return cls(start=prop_filter._filter.property(0), start_incl=False)
elif op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL:
return cls(start=prop_filter._filter.property(0))
elif op == datastore_pb.Query_Filter.LESS_THAN:
return cls(end=prop_filter._filter.property(0), end_incl=False)
elif op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL:
return cls(end=prop_filter._filter.property(0))
else:
raise datastore_errors.BadArgumentError(
'Unsupported operator (%s)' % (op,))
def intersect(self, other):
"""Returns a filter representing the intersection of self and other."""
if isinstance(other, PropertyFilter):
other = self.from_property_filter(other)
elif not isinstance(other, _PropertyRangeFilter):
raise datastore_errors.BadArgumentError(
'other argument should be a _PropertyRangeFilter (%r)' % (other,))
if other._get_prop_name() != self._get_prop_name():
raise datastore_errors.BadArgumentError(
'other argument must be on the same property (%s != %s)' %
(other._get_prop_name(), self._get_prop_name()))
start_source = None
if other._start:
if self._start:
result = cmp(self._get_start_key_value(), other._get_start_key_value())
if result == 0:
result = cmp(other._start_incl, self._start_incl)
if result > 0:
start_source = self
elif result < 0:
start_source = other
else:
start_source = other
elif self._start:
start_source = self
end_source = None
if other._end:
if self._end:
result = cmp(self._get_end_key_value(), other._get_end_key_value())
if result == 0:
result = cmp(self._end_incl, other._end_incl)
if result < 0:
end_source = self
elif result > 0:
end_source = other
else:
end_source = other
elif self._end:
end_source = self
if start_source:
if end_source in (start_source, None):
return start_source
result = _PropertyRangeFilter(start=start_source._start,
start_incl=start_source._start_incl,
end=end_source._end,
end_incl=end_source._end_incl)
result._start_key_value = start_source._start_key_value
result._end_key_value = end_source._end_key_value
return result
else:
return end_source or self
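# Illustrative sketch of merging two inequality filters on one property,
# assuming start_pb and end_pb are entity_pb.Property values for 'x':
#
#   lower = _PropertyRangeFilter(start=start_pb, start_incl=False)  # x > start
#   upper = _PropertyRangeFilter(end=end_pb, end_incl=False)        # x < end
#   merged = lower.intersect(upper)   # start < x < end as a single range
#
# This is the merge CompositeFilter performs when it encounters multiple
# inequality filters on the same property (see CompositeFilter below).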
def _get_start_key_value(self):
if self._start_key_value is None:
self._start_key_value = datastore_types.PropertyValueToKeyValue(
self._start.value())
return self._start_key_value
def _get_end_key_value(self):
if self._end_key_value is None:
self._end_key_value = datastore_types.PropertyValueToKeyValue(
self._end.value())
return self._end_key_value
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
if self._start:
result = cmp(self._get_start_key_value(), value)
if result > 0 or (result == 0 and not self._start_incl):
return False
if self._end:
result = cmp(self._get_end_key_value(), value)
if result < 0 or (result == 0 and not self._end_incl):
return False
return True
def _get_prop_name(self):
if self._start:
return self._start.name()
if self._end:
return self._end.name()
assert False
def _to_pbs(self):
pbs = []
if self._start:
if self._start_incl:
op = datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL
else:
op = datastore_pb.Query_Filter.GREATER_THAN
pb = datastore_pb.Query_Filter()
pb.set_op(op)
pb.add_property().CopyFrom(self._start)
pbs.append(pb)
if self._end:
if self._end_incl:
op = datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL
else:
op = datastore_pb.Query_Filter.LESS_THAN
pb = datastore_pb.Query_Filter()
pb.set_op(op)
pb.add_property().CopyFrom(self._end)
pbs.append(pb)
return pbs
def _to_pb_v4(self):
"""Returns a datastore_v4_pb.Filter representation of the filter."""
filter_pb = datastore_v4_pb.Filter()
composite_filter = filter_pb.mutable_composite_filter()
composite_filter.set_operator(datastore_v4_pb.CompositeFilter.AND)
if self._start:
if self._start_incl:
op = datastore_v4_pb.PropertyFilter.GREATER_THAN_OR_EQUAL
else:
op = datastore_v4_pb.PropertyFilter.GREATER_THAN
pb = composite_filter.add_filter().mutable_property_filter()
pb.set_operator(op)
pb.mutable_property().set_name(self._start.name())
datastore_pbs.get_entity_converter().v3_property_to_v4_value(
self._start, True, pb.mutable_value())
if self._end:
if self._end_incl:
op = datastore_v4_pb.PropertyFilter.LESS_THAN_OR_EQUAL
else:
op = datastore_v4_pb.PropertyFilter.LESS_THAN
pb = composite_filter.add_filter().mutable_property_filter()
pb.set_operator(op)
pb.mutable_property().set_name(self._end.name())
datastore_pbs.get_entity_converter().v3_property_to_v4_value(
self._end, True, pb.mutable_value())
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return (self._start == other._start and
self._end == other._end and
(self._start_incl == other._start_incl or self._start is None) and
(self._end_incl == other._end_incl or self._end is None))
class _PropertyExistsFilter(FilterPredicate):
"""A FilterPredicate that matches entities containing specific properties.
Only works as an in-memory filter. Used internally to filter out entities
that don't have all properties in a given Order.
"""
def __init__(self, names):
super(_PropertyExistsFilter, self).__init__()
self._names = frozenset(names)
def _apply(self, value_map):
for name in self._names:
if not value_map.get(name):
return False
return True
def _get_prop_names(self):
return self._names
def _prune(self, _):
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class CorrelationFilter(FilterPredicate):
"""A filter that isolates correlated values and applies a sub-filter on them.
This filter assumes that every property used by the sub-filter should be
grouped before being passed to the sub-filter. The default grouping puts
each value in its own group. Consider:
e = {a: [1, 2], b: [2, 1, 3], c: 3}
A correlation filter with a sub-filter that operates on (a, b) will be tested
against the following 3 sets of values:
{a: 1, b: 2}
{a: 2, b: 1}
{b: 3}
In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
c, the filter must be applied in parallel to the correlation filter. For
example:
CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
If 'c = 3' was included in the correlation filter, c would be grouped as well.
This would result in the following values:
{a: 1, b: 2, c: 3}
{a: 2, b: 1}
{b: 3}
If any set of correlated values match the sub-filter then the entity matches
the correlation filter.
"""
def __init__(self, subfilter):
"""Constructor.
Args:
subfilter: A FilterPredicate to apply to the correlated values
"""
self._subfilter = subfilter
@property
def subfilter(self):
return self._subfilter
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.subfilter)
def _apply(self, value_map):
base_map = dict((prop, []) for prop in self._get_prop_names())
value_maps = []
for prop in base_map:
grouped = self._group_values(prop, value_map[prop])
while len(value_maps) < len(grouped):
value_maps.append(base_map.copy())
for value, map in zip(grouped, value_maps):
map[prop] = value
return self._apply_correlated(value_maps)
def _apply_correlated(self, value_maps):
"""Applies sub-filter to the correlated value maps.
The default implementation matches when any value_map in value_maps
matches the sub-filter.
Args:
value_maps: A list of correlated value_maps.
Returns:
True if any of the correlated value maps matches the sub-filter.
"""
for map in value_maps:
if self._subfilter._apply(map):
return True
return False
def _group_values(self, prop, values):
"""A function that groups the given values.
Override this function to introduce custom grouping logic. The default
implementation assumes each value belongs in its own group.
Args:
prop: The name of the property whose values are being grouped.
values: A list of opaque values.
Returns:
A list of lists of grouped values.
"""
return [[value] for value in values]
def _get_prop_names(self):
return self._subfilter._get_prop_names()
class CompositeFilter(FilterPredicate):
"""An immutable filter predicate that combines other predicates.
This class proactively merges sub-filters that are combined using the same
operator. For example:
CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6])
is equivalent to:
CompositeFilter(AND, [f1, f2, f3, f4, f5, f6])
Currently filters can only be combined using an AND operator.
"""
AND = 'and'
_OPERATORS = frozenset([AND])
def __init__(self, op, filters):
"""Constructor.
Args:
op: The operator to use to combine the given filters
filters: A list of one or more filters to combine
Raises:
datastore_errors.BadArgumentError if op is not in CompositeFilter._OPERATORS
or filters is not a non-empty list containing only FilterPredicates.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
if not filters or not isinstance(filters, (list, tuple)):
raise datastore_errors.BadArgumentError(
'filters argument should be a non-empty list (%r)' % (filters,))
super(CompositeFilter, self).__init__()
self._op = op
flattened = []
for f in filters:
if isinstance(f, CompositeFilter) and f._op == self._op:
flattened.extend(f._filters)
elif isinstance(f, FilterPredicate):
flattened.append(f)
else:
raise datastore_errors.BadArgumentError(
'filters argument must be a list of FilterPredicates, found (%r)' %
(f,))
if op == self.AND:
filters = flattened
flattened = []
ineq_map = {}
for f in filters:
if (isinstance(f, _PropertyRangeFilter) or
(isinstance(f, PropertyFilter) and f._has_inequality())):
name = f._get_prop_name()
index = ineq_map.get(name)
if index is not None:
range_filter = flattened[index]
flattened[index] = range_filter.intersect(f)
else:
if isinstance(f, PropertyFilter):
range_filter = _PropertyRangeFilter.from_property_filter(f)
else:
range_filter = f
ineq_map[name] = len(flattened)
flattened.append(range_filter)
else:
flattened.append(f)
self._filters = tuple(flattened)
@property
def op(self):
return self._op
@property
def filters(self):
return self._filters
def __repr__(self):
op = self.op
if op == self.AND:
op = 'AND'
else:
op = str(op)
return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
def _get_prop_names(self):
names = set()
for f in self._filters:
names |= f._get_prop_names()
return names
def _apply(self, value_map):
if self._op == self.AND:
for f in self._filters:
if not f._apply(value_map):
return False
return True
raise NotImplementedError
def _prune(self, value_map):
if self._op == self.AND:
matches = collections.defaultdict(set)
for f in self._filters:
props = f._get_prop_names()
local_value_map = dict((k, v) for k, v in value_map.iteritems()
if k in props)
if not f._prune(local_value_map):
return False
for (prop, values) in local_value_map.iteritems():
matches[prop].update(values)
for prop, value_set in matches.iteritems():
value_map[prop] = sorted(value_set)
return True
raise NotImplementedError
def _to_pbs(self):
"""Returns the internal only pb representation."""
pbs = []
for f in self._filters:
pbs.extend(f._to_pbs())
return pbs
def _to_pb_v4(self):
"""Returns a datastore_v4_pb.Filter."""
if not self._filters:
return None
if len(self._filters) == 1:
return self._filters[0]._to_pb_v4()
pb = datastore_v4_pb.Filter()
comp_pb = pb.mutable_composite_filter()
if self.op == self.AND:
comp_pb.set_operator(datastore_v4_pb.CompositeFilter.AND)
else:
raise datastore_errors.BadArgumentError(
'Datastore V4 only supports CompositeFilter with AND operator.')
for f in self._filters:
comp_pb.add_filter().CopyFrom(f._to_pb_v4())
return pb
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeFilter, self).__eq__(other)
if len(self._filters) == 1:
result = self._filters[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._filters[0])
return result
return NotImplemented
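# Illustrative sketch of the proactive merging described above, assuming
# gt_pb and lt_pb are entity_pb.Property values for the same property:
#
#   f = CompositeFilter(CompositeFilter.AND,
#                       [PropertyFilter('>', gt_pb),
#                        PropertyFilter('<', lt_pb)])
#   f.filters   # a single merged _PropertyRangeFilter, not the two inputs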
class _IgnoreFilter(_SinglePropertyFilter):
"""A filter that removes all entities with the given keys."""
def __init__(self, key_value_set):
super(_IgnoreFilter, self).__init__()
self._keys = key_value_set
def _get_prop_name(self):
return datastore_types.KEY_SPECIAL_PROPERTY
def _apply_to_value(self, value):
return value not in self._keys
class _DedupingFilter(_IgnoreFilter):
"""A filter that removes duplicate keys."""
def __init__(self, key_value_set=None):
super(_DedupingFilter, self).__init__(key_value_set or set())
def _apply_to_value(self, value):
if super(_DedupingFilter, self)._apply_to_value(value):
self._keys.add(value)
return True
return False
class _BoundingCircleFilter(_SinglePropertyFilter):
"""An immutable bounding circle filter for geo locations.
An immutable filter predicate that constrains a geo location property to a
bounding circle region. The filter is inclusive at the border. The property
has to be of type V3 PointValue. V4 GeoPoint values convert to this type.
"""
def __init__(self, property_name, latitude, longitude, radius_meters):
self._property_name = property_name
self._lat_lng = geo_util.LatLng(latitude, longitude)
self._radius_meters = radius_meters
if not radius_meters >= 0:
raise datastore_errors.BadArgumentError(
'invalid radius: %r' % radius_meters)
@classmethod
def _from_v4_pb(cls, bounding_circle_v4_pb):
return _BoundingCircleFilter(bounding_circle_v4_pb.property().name(),
bounding_circle_v4_pb.center().latitude(),
bounding_circle_v4_pb.center().longitude(),
bounding_circle_v4_pb.radius_meters())
def _get_prop_name(self):
return self._property_name
def _apply_to_value(self, value):
if value[0] != entity_pb.PropertyValue.kPointValueGroup:
return False
_, latitude, longitude = value
lat_lng = geo_util.LatLng(latitude, longitude)
return self._lat_lng - lat_lng <= self._radius_meters
class _BoundingBoxFilter(_SinglePropertyFilter):
"""An immutable bounding box filter for geo locations.
An immutable filter predicate that constrains a geo location property to a
bounding box region. The filter is inclusive at the border. The property
has to be of type V3 PointValue. V4 GeoPoint values convert to this type.
"""
def __init__(self, property_name, southwest, northeast):
"""Initializes a _BoundingBoxFilter.
Args:
property_name: the name of the property to filter on.
southwest: The south-west corner of the bounding box. The type is
datastore_types.GeoPt.
northeast: The north-east corner of the bounding box. The type is
datastore_types.GeoPt.
Raises:
datastore_errors.BadArgumentError if the south-west coordinate is north of
the north-east coordinate.
"""
if southwest.lat > northeast.lat:
raise datastore_errors.BadArgumentError(
'the south-west coordinate is north of the north-east coordinate')
self._property_name = property_name
self._southwest = southwest
self._northeast = northeast
@classmethod
def _from_v4_pb(cls, bounding_box_v4_pb):
sw = datastore_types.GeoPt(bounding_box_v4_pb.southwest().latitude(),
bounding_box_v4_pb.southwest().longitude())
ne = datastore_types.GeoPt(bounding_box_v4_pb.northeast().latitude(),
bounding_box_v4_pb.northeast().longitude())
return _BoundingBoxFilter(bounding_box_v4_pb.property().name(), sw, ne)
def _get_prop_name(self):
return self._property_name
def _apply_to_value(self, value):
if value[0] != entity_pb.PropertyValue.kPointValueGroup:
return False
_, latitude, longitude = value
if not self._southwest.lat <= latitude <= self._northeast.lat:
return False
if self._southwest.lon > self._northeast.lon:
return (longitude <= self._northeast.lon
or longitude >= self._southwest.lon)
else:
return self._southwest.lon <= longitude <= self._northeast.lon
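# Illustrative sketch: a box whose south-west longitude is greater than its
# north-east longitude is treated as crossing the 180th meridian, per the
# longitude test above.
#
#   sw = datastore_types.GeoPt(10, 170)    # lat=10, lon=170
#   ne = datastore_types.GeoPt(20, -170)   # lat=20, lon=-170
#   box = _BoundingBoxFilter('location', sw, ne)
#   # Points with 10 <= lat <= 20 and lon >= 170 or lon <= -170 match.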
class Order(_PropertyComponent):
"""A base class that represents a sort order on a query.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
This class can be used as either the cmp or key arg in sorted() or
list.sort(). To provide a stable ordering, a trailing key ascending order is
always used.
"""
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
"""Constructs an order representing the reverse of the current order.
This function takes into account the effects of orders on properties not in
the group_by clause of a query. For example, consider:
SELECT A, First(B) ... GROUP BY A ORDER BY A, B
Changing the order of B would affect which value is listed in the 'First(B)'
column which would actually change the results instead of just reversing
them.
Args:
group_by: If specified, only orders on properties in group_by will be
reversed.
Returns:
A new order representing the reverse direction.
"""
raise NotImplementedError
def _key(self, lhs_value_map):
"""Creates a key for the given value map."""
raise NotImplementedError
def _cmp(self, lhs_value_map, rhs_value_map):
"""Compares the given value maps."""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a filter pb."""
raise NotImplementedError
def _to_pb_v4(self):
"""Internal only function to generate a v4 filter pb."""
raise NotImplementedError
def key_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x: self.key(x, filter_predicate)
return self.key
def cmp_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x, y: self.cmp(x, y, filter_predicate)
return self.cmp
def key(self, entity, filter_predicate=None):
"""Constructs a "key" value for the given entity based on the current order.
This function can be used as the key argument for list.sort() and sorted().
Args:
entity: The entity_pb.EntityProto to convert
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
A key value that identifies the position of the entity when sorted by
the current order.
"""
names = self._get_prop_names()
names.add(datastore_types.KEY_SPECIAL_PROPERTY)
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
value_map = _make_key_value_map(entity, names)
if filter_predicate is not None:
filter_predicate._prune(value_map)
return (self._key(value_map),
value_map[datastore_types.KEY_SPECIAL_PROPERTY])
def cmp(self, lhs, rhs, filter_predicate=None):
"""Compares the given values taking into account any filters.
This function can be used as the cmp argument for list.sort() and sorted().
This function is slightly more efficient than Order.key when comparing two
entities; however, it is much less efficient when sorting a list of entities.
Args:
lhs: An entity_pb.EntityProto
rhs: An entity_pb.EntityProto
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
An integer < 0, == 0, or > 0 representing the operator that goes between
lhs and rhs to form a true statement.
"""
names = self._get_prop_names()
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
lhs_value_map = _make_key_value_map(lhs, names)
rhs_value_map = _make_key_value_map(rhs, names)
if filter_predicate is not None:
filter_predicate._prune(lhs_value_map)
filter_predicate._prune(rhs_value_map)
result = self._cmp(lhs_value_map, rhs_value_map)
if result:
return result
if not lhs.has_key() and not rhs.has_key():
return 0
lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(lhs.key()))
rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(rhs.key()))
return cmp(lhs_key, rhs_key)
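# Illustrative sketch of sorting entities in memory with an Order, assuming
# 'entities' is a list of entity_pb.EntityProto and 'order' is any Order:
#
#   entities.sort(key=order.key)             # or: entities.sort(order.cmp)
#
# When a filter predicate is involved, the *_for_filter helpers prune values
# consistently before comparison:
#
#   entities.sort(key=order.key_for_filter(filter_predicate))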
class _ReverseOrder(_BaseComponent):
"""Reverses the comparison for the given object."""
def __init__(self, obj):
"""Constructor for _ReverseOrder.
Args:
obj: Any comparable and hashable object.
"""
super(_ReverseOrder, self).__init__()
self._obj = obj
def __hash__(self):
return hash(self._obj)
def __cmp__(self, other):
assert self.__class__ == other.__class__, (
'A datastore_query._ReverseOrder object can only be compared to '
'an object of the same type.')
return -cmp(self._obj, other._obj)
class PropertyOrder(Order):
"""An immutable class that represents a sort order for a single property."""
ASCENDING = datastore_pb.Query_Order.ASCENDING
DESCENDING = datastore_pb.Query_Order.DESCENDING
_DIRECTIONS = frozenset([ASCENDING, DESCENDING])
def __init__(self, prop, direction=ASCENDING):
"""Constructor.
Args:
prop: the name of the prop by which to sort.
direction: the direction in which to sort the given prop.
Raises:
datastore_errors.BadArgumentError if the prop name or direction is
invalid.
"""
datastore_types.ValidateString(prop,
'prop',
datastore_errors.BadArgumentError)
if direction not in self._DIRECTIONS:
raise datastore_errors.BadArgumentError('unknown direction: %r' %
(direction,))
super(PropertyOrder, self).__init__()
self.__order = datastore_pb.Query_Order()
self.__order.set_property(prop.encode('utf-8'))
self.__order.set_direction(direction)
@property
def prop(self):
return self.__order.property()
@property
def direction(self):
return self.__order.direction()
def __repr__(self):
name = self.prop
direction = self.direction
extra = ''
if direction == self.DESCENDING:
extra = ', DESCENDING'
name = repr(name).encode('utf-8')[1:-1]
return '%s(<%s>%s)' % (self.__class__.__name__, name, extra)
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
if group_by and self.__order.property() not in group_by:
return self
if self.__order.direction() == self.ASCENDING:
return PropertyOrder(self.__order.property().decode('utf-8'),
self.DESCENDING)
else:
return PropertyOrder(self.__order.property().decode('utf-8'),
self.ASCENDING)
def _get_prop_names(self):
return set([self.__order.property()])
def _key(self, lhs_value_map):
lhs_values = lhs_value_map[self.__order.property()]
if not lhs_values:
raise datastore_errors.BadArgumentError(
'Missing value for property (%s)' % self.__order.property())
if self.__order.direction() == self.ASCENDING:
return min(lhs_values)
else:
return _ReverseOrder(max(lhs_values))
def _cmp(self, lhs_value_map, rhs_value_map):
lhs_values = lhs_value_map[self.__order.property()]
rhs_values = rhs_value_map[self.__order.property()]
if not lhs_values and not rhs_values:
return 0
if not lhs_values:
raise datastore_errors.BadArgumentError(
'LHS missing value for property (%s)' % self.__order.property())
if not rhs_values:
raise datastore_errors.BadArgumentError(
'RHS missing value for property (%s)' % self.__order.property())
if self.__order.direction() == self.ASCENDING:
return cmp(min(lhs_values), min(rhs_values))
else:
return cmp(max(rhs_values), max(lhs_values))
@classmethod
def _from_pb(cls, order_pb):
self = cls.__new__(cls)
self.__order = order_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self.__order
def _to_pb_v4(self):
"""Returns a datastore_v4_pb.PropertyOrder representation of the order."""
v4_order = datastore_v4_pb.PropertyOrder()
_query_converter.v3_order_to_v4_order(self.__order, v4_order)
return v4_order
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyOrder is unsupported.')
class CompositeOrder(Order):
"""An immutable class that represents a sequence of Orders.
This class proactively flattens sub-orders that are of type CompositeOrder.
For example:
CompositeOrder([O1, CompositeOrder([O2, O3]), O4])
is equivalent to:
CompositeOrder([O1, O2, O3, O4])
"""
def __init__(self, orders):
"""Constructor.
Args:
orders: A list of Orders which are applied in order.
"""
if not isinstance(orders, (list, tuple)):
raise datastore_errors.BadArgumentError(
'orders argument should be list or tuple (%r)' % (orders,))
super(CompositeOrder, self).__init__()
flattened = []
for order in orders:
if isinstance(order, CompositeOrder):
flattened.extend(order._orders)
elif isinstance(order, Order):
flattened.append(order)
else:
raise datastore_errors.BadArgumentError(
'orders argument should only contain Order (%r)' % (order,))
self._orders = tuple(flattened)
@property
def orders(self):
return self._orders
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self.orders))
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
return CompositeOrder([order.reversed(group_by=group_by)
for order in self._orders])
def _get_prop_names(self):
names = set()
for order in self._orders:
names |= order._get_prop_names()
return names
def _key(self, lhs_value_map):
result = []
for order in self._orders:
result.append(order._key(lhs_value_map))
return tuple(result)
def _cmp(self, lhs_value_map, rhs_value_map):
for order in self._orders:
result = order._cmp(lhs_value_map, rhs_value_map)
if result != 0:
return result
return 0
def size(self):
"""Returns the number of sub-orders the instance contains."""
return len(self._orders)
def _to_pbs(self):
"""Returns an ordered list of internal only pb representations."""
return [order._to_pb() for order in self._orders]
def _to_pb_v4s(self):
"""Returns an ordered list of datastore_v4_pb.PropertyOrder."""
return [order._to_pb_v4() for order in self._orders]
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeOrder, self).__eq__(other)
if len(self._orders) == 1:
result = self._orders[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._orders[0])
return result
return NotImplemented
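# Illustrative sketch of composing orders:
#
#   order = CompositeOrder([PropertyOrder('age', PropertyOrder.DESCENDING),
#                           PropertyOrder('name')])
#   order.size()       # 2
#   order.reversed()   # age ascending, then name descending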
class FetchOptions(datastore_rpc.Configuration):
"""An immutable class that contains all options for fetching results.
These options apply to any request that pulls results from a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see option documentation below
for details.
"""
@datastore_rpc.ConfigOption
def produce_cursors(value):
"""If a Cursor should be returned with the fetched results.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'produce_cursors argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def offset(value):
"""The number of results to skip before returning the first result.
Only applies to the first request it is used with and is ignored if present
on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'offset',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def batch_size(value):
"""The number of results to attempt to retrieve in a batch.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'batch_size',
datastore_errors.BadArgumentError)
return value
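# Illustrative sketch: options are passed as keywords and merged with any
# datastore_rpc.Configuration defaults:
#
#   options = FetchOptions(offset=20, batch_size=100, produce_cursors=True)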
class QueryOptions(FetchOptions):
"""An immutable class that contains all options for running a query.
This class contains options that control the execution process (deadline,
batch_size, read_policy, etc.) and what part of the query results are returned
(keys_only, projection, offset, limit, etc.). Options that control the contents
of the query results are specified on the datastore_query.Query directly.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in FetchOptions and
datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see below for details.
"""
ORDER_FIRST = datastore_pb.Query.ORDER_FIRST
ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST
FILTER_FIRST = datastore_pb.Query.FILTER_FIRST
_HINTS = frozenset([ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST])
@datastore_rpc.ConfigOption
def keys_only(value):
"""If the query should only return keys.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'keys_only argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def projection(value):
"""A list or tuple of property names to project.
If None, the entire entity is returned.
Specifying a projection:
- may change the index requirements for the given query;
- will cause a partial entity to be returned;
- will cause only entities that contain those properties to be returned;
A partial entity only contains the property name and value for properties
in the projection ('meaning' and 'multiple' will not be set). It will also
only contain a single value for any multi-valued property. However, if a
multi-valued property is specified in the order, an inequality filter, or
the projected properties, the entity will be returned multiple times, once
for each unique combination of values.
However, projection queries are significantly faster than normal queries.
Raises:
datastore_errors.BadArgumentError if value is empty or not a list or tuple
of strings.
"""
if isinstance(value, list):
value = tuple(value)
elif not isinstance(value, tuple):
raise datastore_errors.BadArgumentError(
'projection argument should be a list or tuple (%r)' % (value,))
if not value:
raise datastore_errors.BadArgumentError(
'projection argument cannot be empty')
for prop in value:
if not isinstance(prop, basestring):
raise datastore_errors.BadArgumentError(
'projection argument should contain only strings (%r)' % (prop,))
return value
@datastore_rpc.ConfigOption
def limit(value):
"""Limit on the number of results to return.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'limit',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def prefetch_size(value):
"""Number of results to attempt to return on the initial request.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'prefetch_size',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def start_cursor(value):
"""Cursor to use a start position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'start_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def end_cursor(value):
"""Cursor to use as an end position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'end_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def hint(value):
"""Hint on how the datastore should plan the query.
Raises:
datastore_errors.BadArgumentError if value is not a known hint.
"""
if value not in QueryOptions._HINTS:
raise datastore_errors.BadArgumentError('Unknown query hint (%r)' %
(value,))
return value
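# Illustrative sketch: QueryOptions accepts its own keywords plus those of
# FetchOptions and datastore_rpc.Configuration:
#
#   options = QueryOptions(keys_only=True, limit=10)
#   options = QueryOptions(projection=('name', 'age'), batch_size=50)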
class Cursor(_BaseComponent):
"""An immutable class that represents a relative position in a query.
The position denoted by a Cursor is relative to a result in a query even
if the result has been removed from the given query, usually the position
immediately after the last result returned by a batch.
A cursor should only be used on a query with an identical signature to the
one that produced it or on a query with its sort order reversed.
"""
@datastore_rpc._positional(1)
def __init__(self, urlsafe=None, _cursor_bytes=None):
"""Constructor.
A Cursor constructed with no arguments points to the first result of any
query. If such a Cursor is used as an end_cursor no results will ever be
returned.
"""
super(Cursor, self).__init__()
if urlsafe is not None:
if _cursor_bytes is not None:
raise datastore_errors.BadArgumentError(
'Can only specify one of urlsafe and _cursor_bytes')
_cursor_bytes = self._urlsafe_to_bytes(urlsafe)
if _cursor_bytes is not None:
self.__cursor_bytes = _cursor_bytes
else:
self.__cursor_bytes = ''
def __repr__(self):
arg = self.to_websafe_string()
if arg:
arg = '<%s>' % arg
return '%s(%s)' % (self.__class__.__name__, arg)
def reversed(self):
"""DEPRECATED. It is no longer necessary to call reversed() on cursors.
A cursor returned by a query may also be used in a query whose sort order
has been reversed. This method returns a copy of the original cursor.
"""
return Cursor(_cursor_bytes=self.__cursor_bytes)
def to_bytes(self):
"""Serialize cursor as a byte string."""
return self.__cursor_bytes
@staticmethod
def from_bytes(cursor):
"""Gets a Cursor given its byte string serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_bytes.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument does not represent a
serialized cursor.
"""
return Cursor(_cursor_bytes=cursor)
def urlsafe(self):
"""Serialize cursor as a websafe string.
Returns:
A base64-encoded serialized cursor.
"""
return base64.urlsafe_b64encode(self.to_bytes())
to_websafe_string = urlsafe
@staticmethod
def from_websafe_string(cursor):
"""Gets a Cursor given its websafe serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_websafe_string.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument is not a string
type or does not represent a serialized cursor.
"""
decoded_bytes = Cursor._urlsafe_to_bytes(cursor)
return Cursor.from_bytes(decoded_bytes)
@staticmethod
def _urlsafe_to_bytes(cursor):
if not isinstance(cursor, basestring):
raise datastore_errors.BadValueError(
'cursor argument should be str or unicode (%r)' % (cursor,))
try:
decoded_bytes = base64.b64decode(
str(cursor).replace('-', '+').replace('_', '/'))
except (ValueError, TypeError), e:
raise datastore_errors.BadValueError(
'Invalid cursor %s. Details: %s' % (cursor, e))
return decoded_bytes
def advance(self, offset, query, conn):
"""Advances a Cursor by the given offset.
Args:
offset: The amount to advance the current query.
query: A Query identical to the one this cursor was created from.
conn: The datastore_rpc.Connection to use.
Returns:
A new cursor that is advanced by offset using the given query.
"""
datastore_types.ValidateInteger(offset,
'offset',
datastore_errors.BadArgumentError)
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
query_options = QueryOptions(
start_cursor=self, offset=offset, limit=0, produce_cursors=True)
return query.run(conn, query_options).next_batch(
Batcher.AT_LEAST_OFFSET).cursor(0)
def __setstate__(self, state):
if '_Cursor__compiled_cursor' in state:
self.__cursor_bytes = state['_Cursor__compiled_cursor'].Encode()
else:
self.__dict__ = state
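# Illustrative sketch of round-tripping a cursor through its websafe form,
# assuming 'batch' is a Batch produced with produce_cursors=True:
#
#   websafe = batch.cursor(0).to_websafe_string()
#   resumed = Cursor.from_websafe_string(websafe)
#   options = QueryOptions(start_cursor=resumed)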
class _QueryKeyFilter(_BaseComponent):
"""A class that implements the key filters available on a Query."""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None):
"""Constructs a _QueryKeyFilter.
If app/namespace and ancestor are not defined, the app/namespace set in the
environment is used.
Args:
app: a string representing the required app id or None.
namespace: a string representing the required namespace or None.
kind: a string representing the required kind or None.
ancestor: an entity_pb.Reference representing the required ancestor, or
None.
Raises:
datastore_errors.BadArgumentError if app and ancestor.app() do not match, or
an unexpected type is passed in for any argument.
"""
if kind is not None:
datastore_types.ValidateString(
kind, 'kind', datastore_errors.BadArgumentError)
if ancestor is not None:
if not isinstance(ancestor, entity_pb.Reference):
raise datastore_errors.BadArgumentError(
'ancestor argument should be entity_pb.Reference (%r)' %
(ancestor,))
if app is None:
app = ancestor.app()
elif app != ancestor.app():
raise datastore_errors.BadArgumentError(
'ancestor argument should match app ("%r" != "%r")' %
(ancestor.app(), app))
if namespace is None:
namespace = ancestor.name_space()
elif namespace != ancestor.name_space():
raise datastore_errors.BadArgumentError(
'ancestor argument should match namespace ("%r" != "%r")' %
(ancestor.name_space(), namespace))
pb = entity_pb.Reference()
pb.CopyFrom(ancestor)
ancestor = pb
self.__ancestor = ancestor
self.__path = ancestor.path().element_list()
else:
self.__ancestor = None
self.__path = None
super(_QueryKeyFilter, self).__init__()
self.__app = datastore_types.ResolveAppId(app).encode('utf-8')
self.__namespace = (
datastore_types.ResolveNamespace(namespace).encode('utf-8'))
self.__kind = kind and kind.encode('utf-8')
@property
def app(self):
return self.__app
@property
def namespace(self):
return self.__namespace
@property
def kind(self):
return self.__kind
@property
def ancestor(self):
return self.__ancestor
def __call__(self, entity_or_reference):
"""Apply the filter.
Accepts either an entity or a reference to avoid the need to extract keys
from entities when we have a list of entities (which is a common case).
Args:
entity_or_reference: Either an entity_pb.EntityProto or
entity_pb.Reference.
"""
if isinstance(entity_or_reference, entity_pb.Reference):
key = entity_or_reference
elif isinstance(entity_or_reference, entity_pb.EntityProto):
key = entity_or_reference.key()
else:
raise datastore_errors.BadArgumentError(
'entity_or_reference argument must be an entity_pb.EntityProto ' +
'or entity_pb.Reference (%r)' % (entity_or_reference,))
return (key.app() == self.__app and
key.name_space() == self.__namespace and
(not self.__kind or
key.path().element_list()[-1].type() == self.__kind) and
(not self.__path or
key.path().element_list()[0:len(self.__path)] == self.__path))
def _to_pb(self):
"""Returns an internal pb representation."""
pb = datastore_pb.Query()
pb.set_app(self.__app)
datastore_types.SetNamespace(pb, self.__namespace)
if self.__kind is not None:
pb.set_kind(self.__kind)
if self.__ancestor:
ancestor = pb.mutable_ancestor()
ancestor.CopyFrom(self.__ancestor)
return pb
def _to_pb_v4(self):
"""Returns a v4 internal proto representation of the query key filter.
Returns:
A tuple (datastore_v4_pb.RunQueryRequest, datastore_v4_pb.Filter).
The second tuple value is a Filter representing the ancestor portion of the
query. If there is no ancestor constraint, this value will be None
"""
pb = datastore_v4_pb.RunQueryRequest()
partition_id = pb.mutable_partition_id()
partition_id.set_dataset_id(self.__app)
if self.__namespace:
partition_id.set_namespace(self.__namespace)
if self.__kind is not None:
pb.mutable_query().add_kind().set_name(self.__kind)
ancestor_filter = None
if self.__ancestor:
ancestor_filter = datastore_v4_pb.Filter()
ancestor_prop_filter = ancestor_filter.mutable_property_filter()
ancestor_prop_filter.set_operator(
datastore_v4_pb.PropertyFilter.HAS_ANCESTOR)
prop_pb = ancestor_prop_filter.mutable_property()
prop_pb.set_name(datastore_types.KEY_SPECIAL_PROPERTY)
datastore_pbs.get_entity_converter().v3_to_v4_key(
self.ancestor,
ancestor_prop_filter.mutable_value().mutable_key_value())
return pb, ancestor_filter
class _BaseQuery(_BaseComponent):
"""A base class for query implementations."""
def run(self, conn, query_options=None):
"""Runs the query using provided datastore_rpc.Connection.
Args:
conn: The datastore_rpc.Connection to use
query_options: Optional query options to use
Returns:
A Batcher that implicitly fetches query results asynchronously.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
return Batcher(query_options, self.run_async(conn, query_options))
def run_async(self, conn, query_options=None):
"""Runs the query using the provided datastore_rpc.Connection.
Args:
conn: the datastore_rpc.Connection on which to run the query.
query_options: Optional QueryOptions with which to run the query.
Returns:
An async object that can be used to grab the first Batch. Additional
batches can be retrieved by calling Batch.next_batch/next_batch_async.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class Query(_BaseQuery):
"""An immutable class that represents a query signature.
A query signature consists of a source of entities (specified as app,
namespace and optionally kind and ancestor) as well as a FilterPredicate,
grouping and a desired ordering.
"""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None,
filter_predicate=None, group_by=None, order=None):
"""Constructor.
Args:
app: Optional app to query, derived from the environment if not specified.
namespace: Optional namespace to query, derived from the environment if
not specified.
kind: Optional kind to query.
ancestor: Optional ancestor to query, an entity_pb.Reference.
filter_predicate: Optional FilterPredicate by which to restrict the query.
group_by: Optional list of properties to group the results by.
order: Optional Order in which to return results.
Raises:
datastore_errors.BadArgumentError if any argument is invalid.
"""
super(Query, self).__init__()
if filter_predicate is not None and not isinstance(filter_predicate,
FilterPredicate):
raise datastore_errors.BadArgumentError(
'filter_predicate should be datastore_query.FilterPredicate (%r)' %
(filter_predicate,))
if isinstance(order, CompositeOrder):
if order.size() == 0:
order = None
elif isinstance(order, Order):
order = CompositeOrder([order])
elif order is not None:
raise datastore_errors.BadArgumentError(
'order should be Order (%r)' % (order,))
if group_by is not None:
if isinstance(group_by, list):
group_by = tuple(group_by)
elif not isinstance(group_by, tuple):
raise datastore_errors.BadArgumentError(
'group_by argument should be a list or tuple (%r)' % (group_by,))
if not group_by:
raise datastore_errors.BadArgumentError(
'group_by argument cannot be empty')
for prop in group_by:
if not isinstance(prop, basestring):
raise datastore_errors.BadArgumentError(
'group_by argument should contain only strings (%r)' % (prop,))
self._key_filter = _QueryKeyFilter(app=app, namespace=namespace, kind=kind,
ancestor=ancestor)
self._order = order
self._filter_predicate = filter_predicate
self._group_by = group_by
@property
def app(self):
return self._key_filter.app
@property
def namespace(self):
return self._key_filter.namespace
@property
def kind(self):
return self._key_filter.kind
@property
def ancestor(self):
return self._key_filter.ancestor
@property
def filter_predicate(self):
return self._filter_predicate
@property
def order(self):
return self._order
@property
def group_by(self):
return self._group_by
def __repr__(self):
args = []
args.append('app=%r' % self.app)
ns = self.namespace
if ns:
args.append('namespace=%r' % ns)
kind = self.kind
if kind is not None:
args.append('kind=%r' % kind)
ancestor = self.ancestor
if ancestor is not None:
websafe = base64.urlsafe_b64encode(ancestor.Encode())
args.append('ancestor=<%s>' % websafe)
filter_predicate = self.filter_predicate
if filter_predicate is not None:
args.append('filter_predicate=%r' % filter_predicate)
order = self.order
if order is not None:
args.append('order=%r' % order)
group_by = self.group_by
if group_by is not None:
args.append('group_by=%r' % (group_by,))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
if conn._api_version == datastore_rpc._DATASTORE_V4:
req = self._to_pb_v4(conn, query_options)
else:
req = self._to_pb(conn, query_options)
return Batch.create_async(self, query_options, conn, req,
start_cursor=start_cursor)
@classmethod
def _from_pb(cls, query_pb):
kind = query_pb.has_kind() and query_pb.kind().decode('utf-8') or None
ancestor = query_pb.has_ancestor() and query_pb.ancestor() or None
filter_predicate = None
if query_pb.filter_size() > 0:
filter_predicate = CompositeFilter(
CompositeFilter.AND,
[PropertyFilter._from_pb(filter_pb)
for filter_pb in query_pb.filter_list()])
order = None
if query_pb.order_size() > 0:
order = CompositeOrder([PropertyOrder._from_pb(order_pb)
for order_pb in query_pb.order_list()])
group_by = None
if query_pb.group_by_property_name_size() > 0:
group_by = tuple(name.decode('utf-8')
for name in query_pb.group_by_property_name_list())
return Query(app=query_pb.app().decode('utf-8'),
namespace=query_pb.name_space().decode('utf-8'),
kind=kind,
ancestor=ancestor,
filter_predicate=filter_predicate,
order=order,
group_by=group_by)
def _to_pb_v4(self, conn, query_options):
"""Returns a datastore_v4_pb.RunQueryRequest."""
v4_req, v4_ancestor_filter = self._key_filter._to_pb_v4()
v4_query = v4_req.mutable_query()
if self.filter_predicate:
filter_predicate_pb = self._filter_predicate._to_pb_v4()
if self.filter_predicate and v4_ancestor_filter:
comp_filter_pb = v4_query.mutable_filter().mutable_composite_filter()
comp_filter_pb.set_operator(datastore_v4_pb.CompositeFilter.AND)
comp_filter_pb.add_filter().CopyFrom(filter_predicate_pb)
comp_filter_pb.add_filter().CopyFrom(v4_ancestor_filter)
elif self.filter_predicate:
v4_query.mutable_filter().CopyFrom(filter_predicate_pb)
elif v4_ancestor_filter:
v4_query.mutable_filter().CopyFrom(v4_ancestor_filter)
if self._order:
for order in self._order._to_pb_v4s():
v4_query.add_order().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
prop_ref_pb = v4_query.add_projection().mutable_property()
prop_ref_pb.set_name(datastore_pbs.PROPERTY_NAME_KEY)
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
for prop in projection:
prop_ref_pb = v4_query.add_projection().mutable_property()
prop_ref_pb.set_name(prop)
if self._group_by:
for group_by in self._group_by:
v4_query.add_group_by().set_name(group_by)
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
v4_query.set_limit(limit)
count = QueryOptions.batch_size(query_options, conn.config)
if count is None:
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is not None:
v4_req.set_suggested_batch_size(count)
if query_options.offset:
v4_query.set_offset(query_options.offset)
if query_options.start_cursor is not None:
v4_query.set_start_cursor(query_options.start_cursor.to_bytes())
if query_options.end_cursor is not None:
v4_query.set_end_cursor(query_options.end_cursor.to_bytes())
conn._set_request_read_policy(v4_req, query_options)
conn._set_request_transaction(v4_req)
return v4_req
def _to_pb(self, conn, query_options):
"""Returns the internal only pb representation."""
pb = self._key_filter._to_pb()
if self._filter_predicate:
for f in self._filter_predicate._to_pbs():
pb.add_filter().CopyFrom(f)
if self._order:
for order in self._order._to_pbs():
pb.add_order().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
pb.set_keys_only(True)
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
pb.property_name_list().extend(projection)
if self._group_by:
pb.group_by_property_name_list().extend(self._group_by)
if QueryOptions.produce_cursors(query_options, conn.config):
pb.set_compile(True)
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
pb.set_limit(limit)
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is None:
count = QueryOptions.batch_size(query_options, conn.config)
if count is not None:
pb.set_count(count)
if query_options.offset:
pb.set_offset(query_options.offset)
if query_options.start_cursor is not None:
try:
pb.mutable_compiled_cursor().ParseFromString(
query_options.start_cursor.to_bytes())
except ProtocolBuffer.ProtocolBufferDecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if query_options.end_cursor is not None:
try:
pb.mutable_end_compiled_cursor().ParseFromString(
query_options.end_cursor.to_bytes())
except ProtocolBuffer.ProtocolBufferDecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if ((query_options.hint == QueryOptions.ORDER_FIRST and pb.order_size()) or
(query_options.hint == QueryOptions.ANCESTOR_FIRST and
pb.has_ancestor()) or
(query_options.hint == QueryOptions.FILTER_FIRST and
pb.filter_size() > 0)):
pb.set_hint(query_options.hint)
conn._set_request_read_policy(pb, query_options)
conn._set_request_transaction(pb)
return pb
def _validate_projection_and_group_by(self, projection, group_by):
"""Validates that a query's projection and group by match.
Args:
projection: A set of string property names in the projection.
group_by: A set of string property names in the group by.
Raises:
datastore_errors.BadRequestError: if the projection and group
by sets are not equal.
"""
if projection:
if group_by:
extra = set(projection) - set(group_by)
if extra:
raise datastore_errors.BadRequestError(
'projection includes properties not in the group_by argument: %s'
% extra)
elif group_by:
raise datastore_errors.BadRequestError(
'cannot specify group_by without a projection')
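# Illustrative sketch of building and running a Query, assuming 'conn' is an
# open datastore_rpc.Connection and datastore_types.ToPropertyPb is used to
# build the filter value:
#
#   query = Query(kind='Person',
#                 filter_predicate=PropertyFilter(
#                     '=', datastore_types.ToPropertyPb('name', 'bob')),
#                 order=PropertyOrder('age', PropertyOrder.DESCENDING))
#   batcher = query.run(conn, QueryOptions(limit=10))
#   batch = batcher.next_batch(Batcher.AT_LEAST_OFFSET)
#   entities = batch.results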
def apply_query(query, entities):
"""Performs the given query on a set of in-memory entities.
This function can perform queries that are impossible on the datastore (e.g. a
query with multiple inequality filters on different properties) because all
operations are done in memory. For queries that can also be executed on the
datastore, the results produced by this function may not use the same
implicit ordering as the datastore. To ensure compatibility, explicit
ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__').
Order by __key__ should always be used when a consistent result is desired
(unless there is a sort order on another globally unique property).
Args:
query: a datastore_query.Query to apply
entities: a list of entity_pb.EntityProto on which to apply the query.
Returns:
A list of entity_pb.EntityProto containing the results of the query.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument must be a datastore_query.Query (%r)' % (query,))
if not isinstance(entities, list):
raise datastore_errors.BadArgumentError(
'entities argument must be a list (%r)' % (entities,))
filtered_entities = filter(query._key_filter, entities)
if not query._order:
if query._filter_predicate:
return filter(query._filter_predicate, filtered_entities)
return filtered_entities
names = query._order._get_prop_names()
if query._filter_predicate:
names |= query._filter_predicate._get_prop_names()
exists_filter = _PropertyExistsFilter(names)
value_maps = []
for entity in filtered_entities:
value_map = _make_key_value_map(entity, names)
if exists_filter._apply(value_map) and (
not query._filter_predicate or
query._filter_predicate._prune(value_map)):
value_map['__entity__'] = entity
value_maps.append(value_map)
value_maps.sort(query._order._cmp)
return [value_map['__entity__'] for value_map in value_maps]
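# Illustrative sketch: apply_query runs entirely in memory, so it can express
# what the datastore cannot, e.g. inequality filters on two different
# properties at once. Here f_a and f_b are assumed to be FilterPredicates on
# properties 'a' and 'b':
#
#   query = Query(kind='Stat',
#                 filter_predicate=CompositeFilter(CompositeFilter.AND,
#                                                  [f_a, f_b]),
#                 order=PropertyOrder('a'))
#   results = apply_query(query, entities)   # entities: list of EntityProto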
class _AugmentedQuery(_BaseQuery):
"""A query that combines a datastore query with in-memory filters/results."""
@datastore_rpc._positional(2)
def __init__(self, query, in_memory_results=None, in_memory_filter=None,
max_filtered_count=None):
"""Constructor for _AugmentedQuery.
Do not call directly. Use the utility functions instead (e.g.
datastore_query.inject_results)
Args:
query: A datastore_query.Query object to augment.
      in_memory_results: a list of pre-sorted and filtered results to add
        to the stream of datastore results, or None.
in_memory_filter: a set of in-memory filters to apply to the datastore
results or None.
max_filtered_count: the maximum number of datastore entities that will be
filtered out by in_memory_filter if known.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
if (in_memory_filter is not None and
not isinstance(in_memory_filter, FilterPredicate)):
raise datastore_errors.BadArgumentError(
'in_memory_filter argument should be ' +
'datastore_query.FilterPredicate (%r)' % (in_memory_filter,))
if (in_memory_results is not None and
not isinstance(in_memory_results, list)):
raise datastore_errors.BadArgumentError(
          'in_memory_results argument should be a list of ' +
          'datastore_pb.EntityProto (%r)' % (in_memory_results,))
datastore_types.ValidateInteger(max_filtered_count,
'max_filtered_count',
empty_ok=True,
zero_ok=True)
self._query = query
self._max_filtered_count = max_filtered_count
self._in_memory_filter = in_memory_filter
self._in_memory_results = in_memory_results
@property
def app(self):
return self._query._key_filter.app
@property
def namespace(self):
return self._query._key_filter.namespace
@property
def kind(self):
return self._query._key_filter.kind
@property
def ancestor(self):
return self._query._key_filter.ancestor
@property
def filter_predicate(self):
return self._query._filter_predicate
@property
def order(self):
return self._query._order
@property
def group_by(self):
return self._query._group_by
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
if self._query._order:
changes = {'keys_only': False}
else:
changes = {}
if self._in_memory_filter or self._in_memory_results:
in_memory_offset = query_options.offset
in_memory_limit = query_options.limit
if in_memory_limit is not None:
if self._in_memory_filter is None:
changes['limit'] = in_memory_limit
elif self._max_filtered_count is not None:
changes['limit'] = in_memory_limit + self._max_filtered_count
else:
changes['limit'] = None
if in_memory_offset:
changes['offset'] = None
if changes.get('limit', None) is not None:
changes['limit'] += in_memory_offset
else:
in_memory_offset = None
else:
in_memory_offset = None
in_memory_limit = None
modified_query_options = QueryOptions(config=query_options, **changes)
if conn._api_version == datastore_rpc._DATASTORE_V4:
req = self._query._to_pb_v4(conn, modified_query_options)
else:
req = self._query._to_pb(conn, modified_query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
return _AugmentedBatch.create_async(self, query_options, conn, req,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
@datastore_rpc._positional(1)
def inject_results(query, updated_entities=None, deleted_keys=None):
"""Creates a query object that will inject changes into results.
Args:
query: The datastore_query.Query to augment
updated_entities: A list of entity_pb.EntityProto's that have been updated
and should take priority over any values returned by query.
deleted_keys: A list of entity_pb.Reference's for entities that have been
deleted and should be removed from query results.
Returns:
    A datastore_query._AugmentedQuery if in-memory filtering is required,
    query otherwise.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
overridden_keys = set()
if deleted_keys is not None:
if not isinstance(deleted_keys, list):
raise datastore_errors.BadArgumentError(
'deleted_keys argument must be a list (%r)' % (deleted_keys,))
deleted_keys = filter(query._key_filter, deleted_keys)
for key in deleted_keys:
overridden_keys.add(datastore_types.ReferenceToKeyValue(key))
if updated_entities is not None:
if not isinstance(updated_entities, list):
raise datastore_errors.BadArgumentError(
'updated_entities argument must be a list (%r)' % (updated_entities,))
updated_entities = filter(query._key_filter, updated_entities)
for entity in updated_entities:
overridden_keys.add(datastore_types.ReferenceToKeyValue(entity.key()))
updated_entities = apply_query(query, updated_entities)
else:
updated_entities = []
if not overridden_keys:
return query
return _AugmentedQuery(query,
in_memory_filter=_IgnoreFilter(overridden_keys),
in_memory_results=updated_entities,
max_filtered_count=len(overridden_keys))
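# Example (illustrative sketch): overlaying uncommitted changes on a query.
# `updated` and `deleted` are hypothetical lists of entity_pb.EntityProto
# and entity_pb.Reference accumulated by the caller.
def _example_inject_results(query, updated, deleted):
  # Returns `query` unchanged when neither list intersects its key filter,
  # otherwise an _AugmentedQuery that filters and merges in memory.
  return inject_results(query, updated_entities=updated,
                        deleted_keys=deleted)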
class _BatchShared(object):
"""Data shared among the batches of a query."""
def __init__(self, query, query_options, conn,
augmented_query=None, initial_offset=None):
self.__query = query
self.__query_options = query_options
self.__conn = conn
self.__augmented_query = augmented_query
self.__was_first_result_processed = False
if initial_offset is None:
initial_offset = query_options.offset or 0
self.__expected_offset = initial_offset
@property
def query(self):
return self.__query
@property
def query_options(self):
return self.__query_options
@property
def conn(self):
return self.__conn
@property
def augmented_query(self):
return self.__augmented_query
@property
def keys_only(self):
return self.__keys_only
@property
def compiled_query(self):
return self.__compiled_query
@property
def expected_offset(self):
return self.__expected_offset
@property
def index_list(self):
"""Returns the list of indexes used by the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self.__index_list
def process_batch(self, batch):
self.__expected_offset -= batch.skipped_results()
if not self.__was_first_result_processed:
self.__was_first_result_processed = True
if self.conn._api_version == datastore_rpc._DATASTORE_V4:
result_type = batch.entity_result_type()
self.__keys_only = result_type == datastore_v4_pb.EntityResult.KEY_ONLY
self.__compiled_query = None
self.__index_list = None
else:
self.__keys_only = batch.keys_only()
if batch.has_compiled_query():
self.__compiled_query = batch.compiled_query
else:
self.__compiled_query = None
try:
self.__index_list = [self.__conn.adapter.pb_to_index(index_pb)
for index_pb in batch.index_list()]
except NotImplementedError:
self.__index_list = None
class Batch(object):
"""A batch of results returned by a query.
This class contains a batch of results returned from the datastore and
relevant metadata. This metadata includes:
query: The query that produced this batch
query_options: The QueryOptions used to run the query. This does not
      contain any options passed to the .next_batch() call that created the
current batch.
start_cursor, end_cursor: These are the cursors that can be used
with a query to re-fetch this batch. They can also be used to
    find all entities before or after the given batch (by using start_cursor as
an end cursor or vice versa). start_cursor can also be advanced to
point to a position within the batch using Cursor.advance().
    skipped_results: the number of results skipped because of the offset
given to the request that generated it. This can be set either on
the original Query.run() request or in subsequent .next_batch() calls.
more_results: If this is true there are more results that can be retrieved
either by .next_batch() or Batcher.next().
This class is also able to fetch the next batch of the query using
.next_batch(). As batches of results must be fetched serially, .next_batch()
can only be called once. Additional calls to .next_batch() will return None.
When there are no more batches .next_batch() will return None as well. Note
  that batches returned by iterating over Batcher will always return None for
  .next_batch() as the Batcher handles fetching the next batch automatically.
A Batch typically represents the result of a single RPC request. The datastore
operates on a "best effort" basis so the batch returned by .next_batch()
or Query.run_async().get_result() may not have satisfied the requested offset
or number of results (specified through FetchOptions.offset and
FetchOptions.batch_size respectively). To satisfy these restrictions
additional batches may be needed (with FetchOptions that specify the remaining
offset or results needed). The Batcher class hides these limitations.
"""
__skipped_cursor = None
__end_cursor = None
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, query, query_options, conn, req,
start_cursor):
batch_shared = _BatchShared(query, query_options, conn)
batch0 = cls(batch_shared, start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared, start_cursor=Cursor()):
"""Constructor.
This class is constructed in stages (one when an RPC is sent and another
    when an RPC is completed) and should not be constructed directly.
Use Query.run_async().get_result() to create a Batch or Query.run()
to use a batcher.
This constructor does not perform verification.
Args:
      batch_shared: Data shared between batches for a single query run.
start_cursor: Optional cursor pointing before this batch.
"""
self._batch_shared = batch_shared
self.__start_cursor = start_cursor
@property
def query_options(self):
"""The QueryOptions used to retrieve the first batch."""
return self._batch_shared.query_options
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.query
@property
def results(self):
"""A list of entities in this batch."""
return self.__results
@property
def keys_only(self):
"""Whether the entities in this batch only contain keys."""
return self._batch_shared.keys_only
@property
def index_list(self):
"""Returns the list of indexes used to peform this batch's query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._batch_shared.index_list
@property
def start_cursor(self):
"""A cursor that points to the position just before the current batch."""
return self.__start_cursor
@property
def end_cursor(self):
"""A cursor that points to the position just after the current batch."""
return self.__end_cursor
@property
def skipped_results(self):
"""The number of results skipped because of an offset in the request.
An offset is satisfied before any results are returned. The start_cursor
points to the position in the query before the skipped results.
"""
return self._skipped_results
@property
def more_results(self):
"""Whether more results can be retrieved from the query."""
return self.__more_results
def next_batch(self, fetch_options=None):
"""Synchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
A new Batch of results or None if either the next batch has already been
fetched or there are no more results.
"""
async = self.next_batch_async(fetch_options)
if async is None:
return None
return async.get_result()
def _compiled_query(self):
return self._batch_shared.compiled_query
def cursor(self, index):
"""Gets the cursor that points just after the result at index - 1.
The index is relative to first result in .results. Since start_cursor
points to the position before the first skipped result, the range of
indexes this function supports is limited to
[-skipped_results, len(results)].
For example, using start_cursor=batch.cursor(i) and
end_cursor=batch.cursor(j) will return the results found in
batch.results[i:j]. Note that any result added in the range (i-1, j]
will appear in the new query's results.
Warning: Any index in the range (-skipped_results, 0) may cause
continuation to miss or duplicate results if outside a transaction.
Args:
index: An int, the index relative to the first result before which the
cursor should point.
Returns:
A Cursor that points to a position just after the result index - 1,
which if used as a start_cursor will cause the first result to be
batch.result[index].
"""
if not isinstance(index, (int, long)):
raise datastore_errors.BadArgumentError(
          'index argument should be an integer (%r)' % (index,))
if not -self._skipped_results <= index <= len(self.__results):
raise datastore_errors.BadArgumentError(
'index argument must be in the inclusive range [%d, %d]' %
(-self._skipped_results, len(self.__results)))
if index == -self._skipped_results:
return self.__start_cursor
elif (index == 0 and
self.__skipped_cursor):
return self.__skipped_cursor
elif index > 0 and self.__result_cursors:
return self.__result_cursors[index - 1]
elif index == len(self.__results):
return self.__end_cursor
else:
return self.__start_cursor.advance(index + self._skipped_results,
self._batch_shared.query,
self._batch_shared.conn)
def next_batch_async(self, fetch_options=None):
"""Asynchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
An async object that can be used to get the next Batch or None if either
the next batch has already been fetched or there are no more results.
"""
if not self.__datastore_cursor:
return None
fetch_options, next_batch = self._make_next_batch(fetch_options)
if (fetch_options is not None and
not FetchOptions.is_configuration(fetch_options)):
raise datastore_errors.BadArgumentError('Invalid fetch options.')
config = self._batch_shared.query_options.merge(fetch_options)
conn = next_batch._batch_shared.conn
requested_offset = 0
if fetch_options is not None and fetch_options.offset is not None:
requested_offset = fetch_options.offset
if conn._api_version == datastore_rpc._DATASTORE_V4:
if (self._batch_shared.expected_offset != requested_offset):
raise datastore_errors.BadArgumentError(
            'Cannot request the next batch with a different offset than '
            'expected. Expected: %s, Got: %s.'
% (self._batch_shared.expected_offset, requested_offset))
else:
next_batch.__datastore_cursor = self.__datastore_cursor
result = next_batch._make_next_rpc_call(config, self._to_pb_v4())
else:
result = next_batch._make_next_rpc_call(config,
self._to_pb(fetch_options))
self.__datastore_cursor = None
return result
def _to_pb_v4(self):
"""Returns a datastore_v4_pb.ContinueQueryRequest."""
req = datastore_v4_pb.ContinueQueryRequest()
req.set_query_handle(self.__datastore_cursor)
return req
def _to_pb(self, fetch_options=None):
req = datastore_pb.NextRequest()
if FetchOptions.produce_cursors(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config):
req.set_compile(True)
count = FetchOptions.batch_size(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config)
if count is not None:
req.set_count(count)
if fetch_options is not None and fetch_options.offset:
req.set_offset(fetch_options.offset)
req.mutable_cursor().CopyFrom(self.__datastore_cursor)
return req
def _extend(self, next_batch):
"""Combines the current batch with the next one. Called by batcher."""
self.__datastore_cursor = next_batch.__datastore_cursor
next_batch.__datastore_cursor = None
self.__more_results = next_batch.__more_results
if not self.__results:
self.__skipped_cursor = next_batch.__skipped_cursor
self.__results.extend(next_batch.__results)
self.__result_cursors.extend(next_batch.__result_cursors)
self.__end_cursor = next_batch.__end_cursor
self._skipped_results += next_batch._skipped_results
def _make_query_rpc_call(self, config, req):
"""Makes a RunQuery call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
if self._batch_shared.conn._api_version == datastore_rpc._DATASTORE_V4:
return self._batch_shared.conn._make_rpc_call(
config, 'RunQuery', req, datastore_v4_pb.RunQueryResponse(),
self.__v4_run_query_response_hook)
return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
def _make_next_rpc_call(self, config, req):
"""Makes either a Next or ContinueQuery call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
if self._batch_shared.conn._api_version == datastore_rpc._DATASTORE_V4:
return self._batch_shared.conn._make_rpc_call(
config, 'ContinueQuery', req,
datastore_v4_pb.ContinueQueryResponse(),
self.__v4_continue_query_response_hook)
return self._batch_shared.conn._make_rpc_call(config, 'Next', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
_need_index_header = 'The suggested index for this query is:'
def __v4_run_query_response_hook(self, rpc):
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError, exc:
raise
if rpc.response.has_query_handle():
self.__datastore_cursor = rpc.response.query_handle()
return self.__process_v4_query_batch(rpc.response.batch())
def __v4_continue_query_response_hook(self, rpc):
self._batch_shared.conn.check_rpc_success(rpc)
return self.__process_v4_query_batch(rpc.response.batch())
def __process_v4_query_batch(self, batch):
"""Internal method to handle QueryResultBatchs from a V4 Query."""
self._batch_shared.process_batch(batch)
if batch.has_skipped_cursor():
self.__skipped_cursor = Cursor(_cursor_bytes=batch.skipped_cursor())
self.__result_cursors = [Cursor(_cursor_bytes=result.cursor())
for result in batch.entity_result_list()
if result.has_cursor()]
if batch.has_end_cursor():
self.__end_cursor = Cursor(_cursor_bytes=batch.end_cursor())
self._skipped_results = batch.skipped_results()
if batch.more_results() != datastore_v4_pb.QueryResultBatch.NOT_FINISHED:
self._end()
else:
self.__more_results = True
self.__results = self._process_v4_results(batch.entity_result_list())
return self
def __query_result_hook(self, rpc):
"""Internal method used as get_result_hook for RunQuery/Next operation."""
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError, exc:
if isinstance(rpc.request, datastore_pb.Query):
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(
rpc.request)
props = datastore_index.GetRecommendedIndexProperties(props)
yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props)
xml = datastore_index.IndexXmlForQuery(kind, ancestor, props)
raise datastore_errors.NeedIndexError(
'\n'.join([str(exc), self._need_index_header, yaml]),
original_message=str(exc), header=self._need_index_header,
yaml_index=yaml, xml_index=xml)
raise
query_result = rpc.response
self._batch_shared.process_batch(query_result)
if query_result.has_skipped_results_compiled_cursor():
self.__skipped_cursor = Cursor(
_cursor_bytes=query_result.skipped_results_compiled_cursor().Encode())
self.__result_cursors = [Cursor(_cursor_bytes=result.Encode())
for result in
query_result.result_compiled_cursor_list()]
if query_result.has_compiled_cursor():
self.__end_cursor = Cursor(
_cursor_bytes=query_result.compiled_cursor().Encode())
self._skipped_results = query_result.skipped_results()
if query_result.more_results():
self.__datastore_cursor = query_result.cursor()
self.__more_results = True
else:
self._end()
self.__results = self._process_results(query_result.result_list())
return self
def _end(self):
"""Changes the internal state so that no more batches can be produced."""
self.__datastore_cursor = None
self.__more_results = False
def _make_next_batch(self, fetch_options):
"""Creates the object to store the next batch.
Args:
fetch_options: The datastore_query.FetchOptions passed in by the user or
None.
Returns:
A tuple containing the fetch options that should be used internally and
the object that should be used to contain the next batch.
"""
return fetch_options, Batch(self._batch_shared,
start_cursor=self.__end_cursor)
def _process_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of entity_pb.EntityProto's returned by the datastore
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_to_query_result
return [converter(result, self._batch_shared.query_options)
for result in results]
def _process_v4_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of datastore_v4_pb.EntityResults.
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_v4_to_query_result
return [converter(result.entity(), self._batch_shared.query_options)
for result in results]
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batch is unsupported.')
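# Example (illustrative sketch): using Batch.cursor(i) and Batch.cursor(j)
# to build a query window that re-fetches batch.results[i:j]. `batch` and
# `conn` are hypothetical; QueryOptions is defined earlier in this module.
def _example_batch_window(batch, conn, i, j):
  opts = QueryOptions(start_cursor=batch.cursor(i),
                      end_cursor=batch.cursor(j))
  return batch.query.run(conn, query_options=opts)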
class _AugmentedBatch(Batch):
"""A batch produced by a datastore_query._AugmentedQuery."""
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, augmented_query, query_options, conn, req,
in_memory_offset, in_memory_limit, start_cursor):
initial_offset = 0 if in_memory_offset is not None else None
batch_shared = _BatchShared(augmented_query._query,
query_options,
conn,
augmented_query,
initial_offset=initial_offset)
batch0 = cls(batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared,
in_memory_offset=None,
in_memory_limit=None,
next_index=0,
start_cursor=Cursor()):
"""A Constructor for datastore_query._AugmentedBatch.
Constructed by datastore_query._AugmentedQuery. Should not be called
directly.
"""
super(_AugmentedBatch, self).__init__(batch_shared,
start_cursor=start_cursor)
self.__in_memory_offset = in_memory_offset
self.__in_memory_limit = in_memory_limit
self.__next_index = next_index
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.augmented_query
def cursor(self, index):
raise NotImplementedError
def _extend(self, next_batch):
super(_AugmentedBatch, self)._extend(next_batch)
self.__in_memory_limit = next_batch.__in_memory_limit
self.__in_memory_offset = next_batch.__in_memory_offset
self.__next_index = next_batch.__next_index
def _process_v4_results(self, results):
"""Process V4 results by converting to V3 and calling _process_results."""
v3_results = []
is_projection = bool(self.query_options.projection)
for v4_result in results:
v3_entity = entity_pb.EntityProto()
datastore_pbs.get_entity_converter().v4_to_v3_entity(v4_result.entity(),
v3_entity,
is_projection)
v3_results.append(v3_entity)
return self._process_results(v3_results)
def _process_results(self, results):
in_memory_filter = self._batch_shared.augmented_query._in_memory_filter
if in_memory_filter:
results = filter(in_memory_filter, results)
in_memory_results = self._batch_shared.augmented_query._in_memory_results
if in_memory_results and self.__next_index < len(in_memory_results):
original_query = super(_AugmentedBatch, self).query
if original_query._order:
if results:
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i = 0
while i < len(results):
result = results[i]
result_key = original_query._order.key(result)
while next_key <= result_key:
results.insert(i, next_result)
i += 1
self.__next_index += 1
if self.__next_index >= len(in_memory_results):
break
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i += 1
elif results or not super(_AugmentedBatch, self).more_results:
results = in_memory_results + results
self.__next_index = len(in_memory_results)
if self.__in_memory_offset:
assert not self._skipped_results
offset = min(self.__in_memory_offset, len(results))
if offset:
self._skipped_results += offset
self.__in_memory_offset -= offset
results = results[offset:]
if self.__in_memory_limit is not None:
results = results[:self.__in_memory_limit]
self.__in_memory_limit -= len(results)
if self.__in_memory_limit <= 0:
self._end()
return super(_AugmentedBatch, self)._process_results(results)
def _make_next_batch(self, fetch_options):
in_memory_offset = FetchOptions.offset(fetch_options)
augmented_query = self._batch_shared.augmented_query
if in_memory_offset and (augmented_query._in_memory_filter or
augmented_query._in_memory_results):
fetch_options = FetchOptions(offset=0)
else:
in_memory_offset = None
return (fetch_options,
_AugmentedBatch(self._batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=self.__in_memory_limit,
start_cursor=self.end_cursor,
next_index=self.__next_index))
class Batcher(object):
"""A class that implements the Iterator interface for Batches.
Typically constructed by a call to Query.run().
The class hides the "best effort" nature of the datastore by potentially
making multiple requests to the datastore and merging the resulting batches.
This is accomplished efficiently by prefetching results and mixing both
non-blocking and blocking calls to the datastore as needed.
Iterating through batches is almost always more efficient than pulling all
results at once as RPC latency is hidden by asynchronously prefetching
results.
  The batches produced by this class cannot be used to fetch the next batch
(through Batch.next_batch()) as before the current batch is returned the
request for the next batch has already been sent.
"""
ASYNC_ONLY = None
AT_LEAST_OFFSET = 0
AT_LEAST_ONE = object()
def __init__(self, query_options, first_async_batch):
"""Constructor.
Although this class can be manually constructed, it is preferable to use
Query.run(query_options).
Args:
query_options: The QueryOptions used to create the first batch.
first_async_batch: The first batch produced by
Query.run_async(query_options).
"""
self.__next_batch = first_async_batch
self.__initial_offset = QueryOptions.offset(query_options) or 0
self.__skipped_results = 0
def next(self):
"""Get the next batch. See .next_batch()."""
return self.next_batch(self.AT_LEAST_ONE)
def next_batch(self, min_batch_size):
"""Get the next batch.
    The batch returned by this function cannot be used to fetch the next batch
    (through Batch.next_batch()); that method will always return None. To
    retrieve the next batch use .next() or .next_batch(N).
    This function may return a batch larger than min_batch_size, but will never
    return a smaller one unless there are no more results.
Special values can be used for min_batch_size:
    ASYNC_ONLY - Do not perform any synchronous fetches from the datastore
      even if this produces a batch with no results.
    AT_LEAST_OFFSET - Only pull enough results to satisfy the offset.
AT_LEAST_ONE - Pull batches until at least one result is returned.
Args:
min_batch_size: The minimum number of results to retrieve or one of
(ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE)
Returns:
The next Batch of results.
"""
if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET,
Batcher.AT_LEAST_ONE):
exact = False
else:
exact = True
datastore_types.ValidateInteger(min_batch_size,
'min_batch_size',
datastore_errors.BadArgumentError)
if not self.__next_batch:
raise StopIteration
batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += batch.skipped_results
if min_batch_size is not Batcher.ASYNC_ONLY:
if min_batch_size is Batcher.AT_LEAST_ONE:
min_batch_size = 1
needed_results = min_batch_size - len(batch.results)
while (batch.more_results and
(self.__skipped_results < self.__initial_offset or
needed_results > 0)):
if batch.query_options.batch_size:
batch_size = max(batch.query_options.batch_size, needed_results)
elif exact:
batch_size = needed_results
else:
batch_size = None
self.__next_batch = batch.next_batch_async(FetchOptions(
offset=max(0, self.__initial_offset - self.__skipped_results),
batch_size=batch_size))
next_batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += next_batch.skipped_results
needed_results = max(0, needed_results - len(next_batch.results))
batch._extend(next_batch)
self.__next_batch = batch.next_batch_async()
return batch
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batcher is unsupported.')
def __iter__(self):
return self
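# Example (illustrative sketch): a typical Batcher loop. Query.run()
# returns a Batcher; iterating it blocks on the current batch while the
# request for the next one is already in flight. `query` and `conn` are
# hypothetical.
def _example_iterate_batches(query, conn):
  entities = []
  opts = QueryOptions(batch_size=100)
  for batch in query.run(conn, query_options=opts):
    entities.extend(batch.results)
  return entities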
class ResultsIterator(object):
"""An iterator over the results from Batches obtained from a Batcher.
ResultsIterator implements Python's iterator protocol, so results can be
accessed with the for-statement:
> it = ResultsIterator(Query(kind='Person').run())
> for person in it:
> print 'Hi, %s!' % person['name']
At any time ResultsIterator.cursor() can be used to grab the Cursor that
points just after the last result returned by the iterator.
"""
__current_batch = None
__current_pos = 0
__last_cursor = None
def __init__(self, batcher):
"""Constructor.
Args:
batcher: A datastore_query.Batcher
"""
if not isinstance(batcher, Batcher):
raise datastore_errors.BadArgumentError(
'batcher argument should be datastore_query.Batcher (%r)' %
(batcher,))
self.__batcher = batcher
def index_list(self):
"""Returns the list of indexes used to perform the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._ensure_current_batch().index_list
def cursor(self):
"""Returns a cursor that points just after the last result returned.
If next() throws an exception, this function returns the end_cursor from
the last successful batch or throws the same exception if no batch was
successful.
"""
return (self.__last_cursor or
self._ensure_current_batch().cursor(self.__current_pos))
def _ensure_current_batch(self):
if not self.__current_batch:
self.__current_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
self.__current_pos = 0
return self.__current_batch
def _compiled_query(self):
"""Returns the compiled query associated with the iterator.
    Internal only; do not use.
"""
return self._ensure_current_batch()._compiled_query()
def next(self):
"""Returns the next query result."""
while (not self.__current_batch or
self.__current_pos >= len(self.__current_batch.results)):
try:
next_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
except:
if self.__current_batch:
self.__last_cursor = self.__current_batch.end_cursor
raise
self.__current_pos = 0
self.__current_batch = next_batch
result = self.__current_batch.results[self.__current_pos]
self.__current_pos += 1
return result
def __iter__(self):
return self
|
{
"content_hash": "a66764a6f7441f89917933ae5df57c16",
"timestamp": "",
"source": "github",
"line_count": 3384,
"max_line_length": 80,
"avg_line_length": 32.82239952718676,
"alnum_prop": 0.653626959332319,
"repo_name": "ychen820/microblog",
"id": "a116e694a22d9ef68a2d1c02b5bcc39df0c83848",
"size": "111675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/datastore/datastore_query.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""
Django settings for djangopoliticalmap project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Custom settings for django-political-map
# BACKEND
POLITICAL_MAP_BACKEND = 'googlemaps'
# MAP LANGUAGE
POLITICAL_MAP_LANGUAGE_CODE = 'en'
# GMAPS
GOOGLE_API_KEY = "AIzaSyBUSalEfGXgegMLzcZGSx45YT01okoRfOs"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#f2e&*)!1yq_lc5v5#)lez*#5d+*mzt#k$1ij4_r&pabxulb&p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'politicalplaces',
'example',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangopoliticalmap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['example/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangopoliticalmap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'politicalplaces/static')]
STATIC_ROOT = os.path.abspath('static')
STATIC_URL = '/static/'
|
{
"content_hash": "3e96764e929f0e35c0dd2cf8d88efe78",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 91,
"avg_line_length": 25.816176470588236,
"alnum_prop": 0.6955283395044147,
"repo_name": "20tab/django-political-map",
"id": "a8870bbe6f54efd62733f5e50c190f8a9ed4e577",
"size": "3511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangopoliticalmap/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "756"
},
{
"name": "HTML",
"bytes": "918"
},
{
"name": "JavaScript",
"bytes": "7352"
},
{
"name": "Python",
"bytes": "95103"
}
],
"symlink_target": ""
}
|
def api_breadcrumbs(url, endpoint):
url_parts = url.split("/")
url_iterator = iter(url_parts)
url_items = zip(url_iterator, url_iterator)
if len(url_parts) % 2 == 0:
        # This is a request with an id at the end;
        # we don't want it in the parent breadcrumbs.
parent_bread_crumbs = url_items[:-1]
else:
parent_bread_crumbs = url_items
return parent_bread_crumbs
def api_breadcrumb_filters(url, endpoint):
breadcrumb_filters = {}
url_parts = url.split("/")
endpoint_parts = endpoint.split("/")
if len(endpoint_parts) < len(url_parts) and url_parts[-1] != 'aggregates':
# if id is present in url, need to map this as well
endpoint_parts.append('{id}')
for key, value in zip(endpoint_parts, url_parts):
if key.startswith("{") and key.endswith("}"):
breadcrumb_filters[key[1:-1]] = value
return breadcrumb_filters
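# Example (illustrative sketch, hypothetical names): for the url
# "projects/12/tasks/7" and the endpoint "projects/{project_id}/tasks",
# the trailing id is mapped as well because of the length check above.
def _example_breadcrumb_filters():
    filters = api_breadcrumb_filters("projects/12/tasks/7",
                                     "projects/{project_id}/tasks")
    assert filters == {"project_id": "12", "id": "7"}
    return filters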
|
{
"content_hash": "4844e51449809142f22f675ed2d96496",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 32.785714285714285,
"alnum_prop": 0.6209150326797386,
"repo_name": "Aplopio/django_rip",
"id": "e6daa342ef64fc9779ec02bbc21badb614d16c3a",
"size": "919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rip/django_adapter/metadata_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1455"
},
{
"name": "Python",
"bytes": "446585"
}
],
"symlink_target": ""
}
|
from .models import Project
__all__ = (
'Project',
)
|
{
"content_hash": "dc2e86d707a4486d6849eb1a9820e333",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 27,
"avg_line_length": 9.833333333333334,
"alnum_prop": 0.559322033898305,
"repo_name": "argaen/kickscraper",
"id": "8cbfe53da6731733bc4709ff89f1ef73931212a8",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kickscraper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "118"
},
{
"name": "Python",
"bytes": "7240"
}
],
"symlink_target": ""
}
|
application_config_schema = {
'type': 'object',
'properties': {
'application_command': {'type': 'string'},
'local_input_files': {
'type': 'array',
'items': {'type': 'object'}
},
'local_result_files': {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9_-]+$': {'type': 'object'}
},
'additionalProperties': False
}
},
'required': ['application_command', 'local_input_files', 'local_result_files'],
'additionalProperties': False
}
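# Example (illustrative sketch): a minimal document that satisfies
# application_config_schema. Validating it with the third-party jsonschema
# package is an assumption, not a stated dependency of this project:
#   import jsonschema
#   jsonschema.validate(example_application_config, application_config_schema)
example_application_config = {
    'application_command': 'python run.py',
    'local_input_files': [{}],
    'local_result_files': {
        'stdout_log': {}
    }
}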
|
{
"content_hash": "7f8b0cb848e721e25c7622ac1dda0b17",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.47586206896551725,
"repo_name": "curious-containers/cc-container-worker",
"id": "da315936c02bc075e807c63c1bcc7c18ac1869e7",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cc_container_worker/commons/schemas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32146"
}
],
"symlink_target": ""
}
|
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import stat
import sys
import traceback
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import local
from nova.openstack.common import notifier
_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=_DEFAULT_LOG_FORMAT,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'Default: %(default)s'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error'),
cfg.StrOpt('logfile_mode',
default='0644',
help='Default file mode used when creating log files'),
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
'[%(request_id)s %(user)s %(tenant)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class ContextAdapter(logging.LoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
else:
instance_uuid = kwargs.pop('instance_uuid', None)
if instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('nova.openstack.common.notifier.log_notifier' in
CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
notifier.api.ERROR,
dict(error=record.msg))
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except ConfigParser.Error, exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
mode = int(CONF.logfile_mode, 8)
st = os.stat(logpath)
if st.st_mode != (stat.S_IFREG | mode):
os.chmod(logpath, mode)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
log_root.addHandler(PublishErrorsHandler(logging.ERROR))
for handler in log_root.handlers:
datefmt = CONF.log_date_format
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
else:
handler.setFormatter(LegacyFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
level = logging.NOTSET
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
for handler in log_root.handlers:
logger.addHandler(handler)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
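# Example (illustrative sketch): the ContextAdapter returned by getLogger()
# accepts the extra `context`/`instance_uuid` keywords consumed by
# process() above. The logger name and uuid below are hypothetical.
def _example_context_logging():
    log = getLogger('nova.example', version='1.0')
    log.audit('instance rebooted',
              instance_uuid='00000000-0000-0000-0000-000000000000')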
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class LegacyFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
|
{
"content_hash": "6c7b848c59151d648a23efa004c8044b",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 78,
"avg_line_length": 35.26526717557252,
"alnum_prop": 0.5853130580659126,
"repo_name": "gspilio/nova",
"id": "e40a803c5c010f9a2732f450128ad36d97cc0d41",
"size": "19295",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/openstack/common/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9099328"
},
{
"name": "Shell",
"bytes": "17117"
}
],
"symlink_target": ""
}
|
import numpy as np
import os
import pandas as pd
import sys
def abide_motion_wrapper(motion_thresh, age_l, age_u, n, n_perms=1000, overwrite=True):
behav_data_f = 'DATA/Phenotypic_V1_0b_preprocessed1.csv'
f_name = 'RESULTS/rsq_{:03.0f}pct_{:03.0f}subs_{:02.0f}to{:02.0f}.csv'.format(motion_thresh, n, age_l, age_u)
# By default this code will recreate files even if they already exist
# (overwrite=True)
# If you don't want to do this though, set overwrite to False and
# this step will skip over the analysis if the file already exists
if not overwrite:
# If the file exists then skip this loop
if os.path.isfile(f_name):
return
df = read_in_data(behav_data_f)
rsq_list, icc_list = split_half_outcome(df, motion_thresh, age_l, age_u, n, n_perms=n_perms)
print "R Squared list shape: " + str(rsq_list.shape)
#print "ICC list shape: " + str(icc_list.shape)
    med_rsq = np.median(rsq_list)
    # rsq_CI is used below when building results_df, so it must be computed
    rsq_CI = np.percentile(rsq_list, 97.5) - np.percentile(rsq_list, 2.5)
    med_icc = np.median(icc_list)
    #icc_CI = np.percentile(icc_list, 97.5) - np.percentile(icc_list, 2.5)
columns = [ 'motion_thresh', 'age_l', 'age_u', 'n', 'med_rsq', 'CI_95' ]
results_df = pd.DataFrame(np.array([[motion_thresh, age_l, age_u, n, med_rsq, rsq_CI ]]),
columns=columns)
results_df.to_csv(f_name)
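# Example (illustrative sketch, hypothetical values): sweep a single cell of
# the parameter grid. Requires the DATA/ csv and the per-subject time series
# files to exist locally.
def _example_abide_cell():
    abide_motion_wrapper(50, 6, 18, 30, n_perms=100, overwrite=False)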
def read_in_data(behav_data_f):
"""
Read in the data
"""
df = pd.read_csv(behav_data_f)
df = df.loc[df['func_perc_fd'].notnull(), :]
df = df.loc[df['FILE_ID']!='no_filename', :]
df['AGE_YRS'] = np.floor(df['AGE_AT_SCAN'])
return df
def split_half_outcome(df, motion_thresh, age_l, age_u, n, n_perms=100):
"""
    This function estimates how the given parameters affect split-half reliability.
    It takes in a dataframe, a motion threshold, an age upper limit (age_u), an age
    lower limit (age_l), a sample size (n), and a number of permutations (n_perms,
    defaulting to 100). It splits the data frame into two matched samples
    (split_two_matched_samples), creates a mean roi-roi correlation matrix per
    sample (make_group_corr_mat), calculates the R squared (calc_rsq) between the
    two samples' mean correlation matrices, and returns the per-permutation R
    squared and ICC values as numpy arrays.
"""
#set up data frame of average R squared to fill up later
Rsq_list = []
ICC_list = []
#Do this in each permutation
for i in range(n_perms):
#create two matched samples split on motion_thresh, age upper, age lower, and n
df_A, df_B = split_two_matched_samples(df, motion_thresh, age_l, age_u, n)
#make the matrix of all subjects roi-roi correlations, make the mean corr mat, and make covariance cor mat
#do this for A and then B
all_corr_mat_A, av_corr_mat_A, var_corr_mat_A = make_group_corr_mat(df_A)
all_corr_mat_B, av_corr_mat_B, var_corr_mat_B = make_group_corr_mat(df_B)
#calculate the R squared between the two matrices
Rsq = calc_rsq(av_corr_mat_A, av_corr_mat_B)
#calculate the ICC between the two matrices
ICC = compute_icc(av_corr_mat_A, av_corr_mat_B)
print "Iteration " + str(i) + ": R^2 = " + str(Rsq) + ", ICC = " + str(ICC)
#build up R squared output
Rsq_list += [Rsq]
ICC_list += [ICC]
return np.array(Rsq_list), np.array(ICC_list)
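# A minimal usage sketch (hypothetical parameter values; assumes the ABIDE
# phenotype file above is present):
#   df = read_in_data('DATA/Phenotypic_V1_0b_preprocessed1.csv')
#   rsqs, iccs = split_half_outcome(df, motion_thresh=50, age_l=6, age_u=10,
#                                   n=30, n_perms=10)
#   # rsqs and iccs are numpy arrays of length n_perms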
def calc_rsq(av_corr_mat_A, av_corr_mat_B):
"""
From wikipedia: https://en.wikipedia.org/wiki/Coefficient_of_determination
Rsq = 1 - (SSres / SStot)
SSres is calculated as the sum of square errors (where the error
is the difference between x and y).
SStot is calculated as the total sum of squares in y.
"""
# Get the data we need
inds = np.triu_indices_from(av_corr_mat_B, k=1)
x = av_corr_mat_A[inds]
y = av_corr_mat_B[inds]
# Calculate the error/residuals
res = y - x
SSres = np.sum(res**2)
# Sum up the total error in y
y_var = y - np.mean(y)
SStot = np.sum(y_var**2)
# R squared
Rsq = 1 - (SSres/SStot)
return Rsq
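# Worked example (hypothetical vectors standing in for the upper triangles
# of two mean correlation matrices):
#   x = np.array([0.10, 0.20, 0.30])          # sample A
#   y = np.array([0.10, 0.25, 0.35])          # sample B
#   SSres = np.sum((y - x)**2)                # = 0.005
#   SStot = np.sum((y - np.mean(y))**2)       # ~ 0.0317
#   Rsq = 1 - SSres/SStot                     # ~ 0.842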
def exclude_nan(x,y):
"""
Exclude NaN values if either entry in a pair of vectors has NaN
"""
idx = np.logical_not(np.logical_or(np.isnan(x), np.isnan(y)))
x = x[idx]
y = y[idx]
n = len(x)
return [x, y, n]
def compute_icc(av_corr_mat_A, av_corr_mat_B):
"""
This function computes the inter-class correlation (ICC) of the
two classes represented by the x and y numpy vectors.
from: http://stats.stackexchange.com/questions/63368/intra-class-correlation-and-experimental-design
and: Shrout, P. E., & Fleiss, J. L. (1979). Intraclass Correlations: Uses
in Assessing Rater Reliability. Psychological Bulletin, 86(2), 420-428. http://rokwa.x-y.net/Shrout-Fleiss-ICC.pdf
"""
inds = np.triu_indices_from(av_corr_mat_B, k=1)
x = av_corr_mat_A[inds]
y = av_corr_mat_B[inds]
if all(x == y):
return 1
[x, y, n] = exclude_nan(x,y)
## Need at least 3 data points to compute this
if n < 3:
return np.nan
    Sx = sum(x)
    Sy = sum(y)
    Sxx = sum(x * x)
    Sxy = sum((x + y) ** 2) / 2
    Syy = sum(y * y)
fact = ((Sx + Sy)**2)/(n*2)
SS_tot = Sxx + Syy - fact
SS_among = Sxy - fact
SS_error = SS_tot - SS_among
MS_error = SS_error/n
MS_among = SS_among/(n-1)
ICC = (MS_among - MS_error) / (MS_among + MS_error)
return ICC
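# Sanity checks (illustrative): identical inputs short-circuit to 1
# (compute_icc(m, m) == 1 for any matrix m), and fewer than 3 valid
# (non-NaN) pairs returns np.nan.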
def make_group_corr_mat(df):
"""
    This function reads in each subject's aal roi time series file, creates an roi-roi correlation
    matrix per subject and stacks them together. The final output is a 3d matrix of all subjects'
    roi-roi correlations, a mean roi-roi correlation matrix and an roi-roi variance matrix.
**NOTE WELL** This returns correlations transformed by the Fisher z, aka arctanh, function.
"""
# for each subject do the following
for i, (sub, f_id) in enumerate(df[['SUB_ID', 'FILE_ID']].values):
#read each subjects aal roi time series files
ts_df = pd.read_table('DATA/{}_rois_aal.1D'.format(f_id))
        #create a correlation matrix from the aal roi time series
corr_mat_r = ts_df.corr()
#the correlations need to be transformed to Fisher z, which is
#equivalent to the arctanh function.
corr_mat_z = np.arctanh(corr_mat_r)
#for the first subject, add a correlation matrix of zeros that is the same dimensions as the aal roi-roi matrix
if i == 0:
all_corr_mat = np.zeros([corr_mat_z.shape[0], corr_mat_z.shape[1], len(df)])
#now add the correlation matrix you just created for each subject to the all_corr_mat matrix (3D)
all_corr_mat[:, :, i] = corr_mat_z
    #create the mean correlation matrix (ignore NaNs - sometimes there are some...)
    av_corr_mat = np.nanmean(all_corr_mat, axis=2)
    #create the group variance matrix (ignore NaNs - sometimes there are some...)
    var_corr_mat = np.nanvar(all_corr_mat, axis=2)
return all_corr_mat, av_corr_mat, var_corr_mat
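# Note on the Fisher z step above (worked number): np.arctanh(0.5) ~ 0.5493,
# so a raw Pearson r of 0.5 is stored as ~0.55 in all_corr_mat.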
def split_two_matched_samples(df, motion_thresh, age_l, age_u, n):
"""
This function takes in a data frame, thresholds it to only include
participants whose percentage bad frames are less than motion_thresh
and participants who are between the lower and upper age limits (inclusive),
then returns two matched samples of size n. The samples are matched on
    age in years, autism diagnosis, gender and scanning site. It also samples evenly across the
    func_perc_fd (motion) distribution by drawing equally from quartile bins.
Information about the motion measure is here:
http://preprocessed-connectomes-project.org/quality-assessment-protocol/
"""
# Start by removing all participants whose data is below a certain
# motion threshold.
df_samp_motion = df.loc[df['func_perc_fd'] < motion_thresh, :]
# Then remove participants who are younger (in years) than age_l and older
# than age_u. Note that this means people who are age_l and age_u
# (eg 6 and 10) will be included in the sample.
df_samp = df_samp_motion.loc[(df_samp_motion['AGE_YRS']>=age_l)
& (df_samp_motion['AGE_YRS']<=age_u), :]
##sort subjects based on motion
sort_column_list = ['func_perc_fd']
df_motion_sorted = df_samp.sort_values(by=sort_column_list)
##rank subjects by motion
    r = range(len(df_motion_sorted))
    r_df = pd.DataFrame(r)
    r_df.columns = ['rank']
    r_df['newcol'] = df_motion_sorted.index
    r_df.set_index('newcol', inplace=True)
    r_df.index.names = [None]
    df_motion_sorted_rank = pd.concat([r_df, df_motion_sorted], axis=1)
    ##create bins of subjects in quartiles
    l = len(df_motion_sorted_rank)
    chunk = l // 4  # integer division so the bin edges are whole ranks
    chunk1 = chunk
    chunk2 = 2 * chunk
    chunk3 = 3 * chunk
    first = df_motion_sorted_rank[df_motion_sorted_rank['rank'] <= chunk1]
    second = df_motion_sorted_rank[(df_motion_sorted_rank['rank'] > chunk1) & (df_motion_sorted_rank['rank'] <= chunk2)]
    third = df_motion_sorted_rank[(df_motion_sorted_rank['rank'] > chunk2) & (df_motion_sorted_rank['rank'] <= chunk3)]
    # strict lower bound so a subject with rank == chunk3 falls in only one bin
    fourth = df_motion_sorted_rank[df_motion_sorted_rank['rank'] > chunk3]
    ##take 2n/4 from each bin
    n_samp = (n * 2) // 4
# Shuffle these remaining participants to ensure you get different sub
# samples each time you run the code.
first_rand = first.reindex(np.random.permutation(first.index))
second_rand = second.reindex(np.random.permutation(second.index))
third_rand = third.reindex(np.random.permutation(third.index))
fourth_rand = fourth.reindex(np.random.permutation(fourth.index))
# Only keep the top 2*n/4 participants.
first_samp_2n = first_rand.iloc[:n_samp, :]
second_samp_2n = second_rand.iloc[:n_samp, :]
third_samp_2n = third_rand.iloc[:n_samp, :]
fourth_samp_2n = fourth_rand.iloc[:n_samp, :]
#append these together
frames = [first_samp_2n, second_samp_2n, third_samp_2n,fourth_samp_2n]
final_df = pd.concat(frames)
# Sort these participants according to the sort columns of interest
sort_column_list = ['DSM_IV_TR', 'DX_GROUP', 'SITE_ID', 'SEX', 'AGE_YRS']
df_samp_2n_sorted = final_df.sort_values(by=sort_column_list)
# Now put all even numbered participants in group A and all odd numbered
# participants in group B.
df_grp_A = df_samp_2n_sorted.iloc[::2, :]
df_grp_B = df_samp_2n_sorted.iloc[1::2, :]
# Boom! Return these two data frames
return df_grp_A, df_grp_B
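# Sketch of the split for a hypothetical n=4: 2n = 8 subjects are kept
# (2 per motion quartile), sorted on the matching columns, and the
# interleaved slices iloc[::2] / iloc[1::2] yield two samples of 4 that
# are matched on diagnosis, site, sex, age and motion.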
if __name__ == "__main__":
    motion_thresh = float(sys.argv[1])
    age_l = float(sys.argv[2])
    age_u = float(sys.argv[3])
    n = int(sys.argv[4])
    n_perms = int(sys.argv[5])
    overwrite = int(sys.argv[6])
if overwrite == 1:
overwrite = True
elif overwrite == 0:
overwrite = False
else:
print 'invalid option for overwrite, EXITING'
sys.exit()
    abide_motion_wrapper(motion_thresh, age_l, age_u, n, n_perms=n_perms, overwrite=overwrite)
|
{
"content_hash": "bfeb725353a565f9fe0945b47d91b0f9",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 119,
"avg_line_length": 37.244299674267104,
"alnum_prop": 0.6320622704215497,
"repo_name": "neurohackweek/kids_rsfMRI_motion",
"id": "181865c9d52afb248fcc32d4e8cd0dc1fc0fbfac",
"size": "11457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "split_half_reliability/abide_motion_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "706970"
},
{
"name": "Python",
"bytes": "26395"
},
{
"name": "R",
"bytes": "3100"
},
{
"name": "Shell",
"bytes": "2322"
}
],
"symlink_target": ""
}
|
"""Dashboard reports."""
from league.dashboard.models import Game
class Report(object):
"""A convenience class for generating AGA results reports."""
def __init__(self, season, episode):
"""Build a report."""
self.season = season
self.episode = episode
self.games = Game.get_by_season_ep(season, episode)
player_sets = [game.players for game in self.games]
self.players = frozenset().union(*player_sets)
|
{
"content_hash": "9ded3fd623797c90d58319c2a7b2b447",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 30.933333333333334,
"alnum_prop": 0.6422413793103449,
"repo_name": "hwchen/league",
"id": "017cfe54eb58d8a79dcf6992bff2fb2f337b9ab6",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/league/dashboard/reports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "24392"
},
{
"name": "JavaScript",
"bytes": "143"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Nginx",
"bytes": "267"
},
{
"name": "Python",
"bytes": "74100"
},
{
"name": "Shell",
"bytes": "1295"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from audit_log.models import AuthStampedModel
class Master(AuthStampedModel):
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('description'))
is_active = models.BooleanField(default=True, verbose_name=_("is_active"))
created = models.DateTimeField(auto_now_add=True, verbose_name=_("created"))
modified = models.DateTimeField(auto_now=True, verbose_name=_("modified"))
class Meta:
abstract = True
|
{
"content_hash": "53ca25ecf6d667ca6cea1710ac55c17b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 104,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.7381818181818182,
"repo_name": "beren5000/ghosttown",
"id": "ed81ece78da525e8643ae7a6160226bc69deaeb4",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagemap/utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10673"
},
{
"name": "HTML",
"bytes": "61270"
},
{
"name": "JavaScript",
"bytes": "42"
},
{
"name": "Python",
"bytes": "33932"
}
],
"symlink_target": ""
}
|
from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport
from .metrics import METRICS_CONFIG
class ActiveDirectoryCheckV2(PerfCountersBaseCheckWithLegacySupport):
__NAMESPACE__ = 'active_directory'
def get_default_config(self):
return {'metrics': METRICS_CONFIG}
|
{
"content_hash": "d1c390cc2d38b36c13164ca28db0afc5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 104,
"avg_line_length": 33.1,
"alnum_prop": 0.7915407854984894,
"repo_name": "DataDog/integrations-core",
"id": "cd2647b3164456b6d71fe392701b48fe7ac3f676",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "active_directory/datadog_checks/active_directory/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
"""
SampleCIView - simple OpenGL based CoreImage view
"""
from Cocoa import *
from Quartz import *
import CGL
from OpenGL.GL import *
# XXX: this may or may not be a bug in the OpenGL bindings
from OpenGL.GL.APPLE.transform_hint import *
import objc
# The default pixel format
_pf = None
class SampleCIView (NSOpenGLView):
_context = objc.ivar()
_image = objc.ivar()
_lastBounds = objc.ivar(type=NSRect.__typestr__)
@classmethod
    def defaultPixelFormat(cls):
global _pf
if _pf is None:
# Making sure the context's pixel format doesn't have a recovery
# renderer is important - otherwise CoreImage may not be able to
# create deeper context's that share textures with this one.
attr = ( NSOpenGLPFAAccelerated,
NSOpenGLPFANoRecovery, NSOpenGLPFAColorSize, 32 )
_pf = NSOpenGLPixelFormat.alloc().initWithAttributes_(attr)
return _pf
def image(self):
return self._image
def setImage_dirtyRect_(self, image, r):
if self._image is not image:
self._image = image
if CGRectIsInfinite(r):
self.setNeedsDisplay_(True)
else:
self.setNeedsDisplayInRect_(r)
def setImage_(self, image):
self.setImage_dirtyRect_(image, CGRectInfinite)
def prepareOpenGL(self):
parm = 1
# Enable beam-synced updates.
self.openGLContext().setValues_forParameter_(
(parm,), NSOpenGLCPSwapInterval)
# Make sure that everything we don't need is disabled. Some of these
# are enabled by default and can slow down rendering.
glDisable(GL_ALPHA_TEST)
glDisable(GL_DEPTH_TEST)
glDisable(GL_SCISSOR_TEST)
glDisable(GL_BLEND)
glDisable(GL_DITHER)
glDisable(GL_CULL_FACE)
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE)
glDepthMask(GL_FALSE)
glStencilMask(0)
glClearColor(0.0, 0.0, 0.0, 0.0)
glHint (GL_TRANSFORM_HINT_APPLE, GL_FASTEST)
def viewBoundsDidChange_(self, bounds):
# For subclasses.
pass
def updateMatrices(self):
r = self.bounds()
if r != self._lastBounds:
self.openGLContext().update()
# Install an orthographic projection matrix (no perspective)
# with the origin in the bottom left and one unit equal to one
# device pixel.
glViewport(0, 0, r.size.width, r.size.height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, r.size.width, 0, r.size.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
self._lastBounds = r
self.viewBoundsDidChange_(r)
def drawRect_(self, r):
self.openGLContext().makeCurrentContext()
# Allocate a CoreImage rendering context using the view's OpenGL
# context as its destination if none already exists.
if self._context is None:
pf = self.pixelFormat()
if pf is None:
pf = type(self).defaultPixelFormat()
self._context=CIContext.contextWithCGLContext_pixelFormat_options_(
CGL.CGLGetCurrentContext(), pf.CGLPixelFormatObj(), None)
ir = CGRectIntegral(r)
if NSGraphicsContext.currentContextDrawingToScreen():
self.updateMatrices()
# Clear the specified subrect of the OpenGL surface then
# render the image into the view. Use the GL scissor test to
            # clip to the subrect. Ask CoreImage to generate an extra
            # pixel in case it has to interpolate (allow for hardware
            # inaccuracies).
rr = CGRectIntersection (CGRectInset(ir, -1.0, -1.0),
self._lastBounds)
glScissor(ir.origin.x, ir.origin.y, ir.size.width, ir.size.height)
glEnable(GL_SCISSOR_TEST)
glClear(GL_COLOR_BUFFER_BIT)
if self.respondsToSelector_('drawRect:inCIContext:'):
self.drawRect_inCIContext_(rr, self._context)
elif self._image is not None:
self._context.drawImage_atPoint_fromRect_(
self._image, rr.origin, rr)
glDisable(GL_SCISSOR_TEST)
# Flush the OpenGL command stream. If the view is double
# buffered this should be replaced by [[self openGLContext]
# flushBuffer].
glFlush ()
else:
# Printing the view contents. Render using CG, not OpenGL.
if self.respondsToSelector_('drawRect:inCIContext:'):
self.drawRect_inCIContext_(ir, self._context)
elif self._image is not None:
cgImage = self._context.createCGImage_fromRect_(
self._image, ir)
if cgImage is not None:
CGContextDrawImage(
NSGraphicsContext.currentContext().graphicsPort(),
ir, cgImage)
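# Typical usage from a controller (hypothetical names; any CIImage works):
#   view.setImage_(CIImage.imageWithContentsOfURL_(url))
#   # or, to repaint only the region a filter changed:
#   view.setImage_dirtyRect_(filtered_image, dirty_rect)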
|
{
"content_hash": "3b2b73b330927d19dd65b9315ca91365",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 31.078787878787878,
"alnum_prop": 0.5916536661466458,
"repo_name": "albertz/music-player",
"id": "1f5acd7aca46d20d88d6ec2244a64e0e09f832f7",
"size": "5128",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/SampleCIView.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
"""
test_networking_ovn
----------------------------------
Tests for `networking_ovn` module.
"""
from networking_ovn.ml2 import mech_driver
from networking_ovn.tests import base
class TestNetworking_ovn(base.TestCase):
def test_init(self):
# just create an instance of OVNMechDriver
mech_driver.OVNMechDriver()
|
{
"content_hash": "93061ad24ee196bf02631c9bb96da908",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 21.0625,
"alnum_prop": 0.6498516320474778,
"repo_name": "russellb/networking-ovn",
"id": "3e1e77f0d97bbb8c845a02dd0f32b2d585d44e3b",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_ovn/tests/test_networking_ovn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11939"
},
{
"name": "Shell",
"bytes": "2313"
}
],
"symlink_target": ""
}
|
import StringIO
import math
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestDriver
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PageLoadingPerfTest
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest
class MockPort(TestPort):
def __init__(self, custom_run_test=None):
super(MockPort, self).__init__(host=MockHost(), custom_run_test=custom_run_test)
class MainTest(unittest.TestCase):
def test_parse_output(self):
output = DriverOutput('\n'.join([
'Running 20 times',
'Ignoring warm-up run (1115)',
'',
'Time:',
'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
'avg 1100 ms',
'median 1101 ms',
'stdev 11 ms',
'min 1080 ms',
'max 1120 ms']), image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
self.assertEqual(test.parse_output(output),
{'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
'values': [i for i in range(1, 20)]}})
finally:
pass
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
def test_parse_output_with_failing_line(self):
output = DriverOutput('\n'.join([
'Running 20 times',
'Ignoring warm-up run (1115)',
'',
'some-unrecognizable-line',
'',
'Time:'
'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
'avg 1100 ms',
'median 1101 ms',
'stdev 11 ms',
'min 1080 ms',
'max 1120 ms']), image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
self.assertEqual(test.parse_output(output), None)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
class TestPageLoadingPerfTest(unittest.TestCase):
class MockDriver(object):
def __init__(self, values, test, measurements=None):
self._values = values
self._index = 0
self._test = test
self._measurements = measurements
def run_test(self, input, stop_when_done):
if input.test_name == self._test.force_gc_test:
return
value = self._values[self._index]
self._index += 1
if isinstance(value, str):
return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
else:
return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1], measurements=self._measurements)
def test_run(self):
port = MockPort()
test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test)
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(test.run(driver, None),
{'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
'values': [i * 1000 for i in range(2, 21)]}})
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n')
def test_run_with_memory_output(self):
port = MockPort()
test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
memory_results = {'Malloc': 10, 'JSHeap': 5}
self.maxDiff = None
driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(test.run(driver, None),
{'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
'values': [i * 1000 for i in range(2, 21)]},
'some-test:Malloc': {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
'values': [10] * 19},
'some-test:JSHeap': {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
'values': [5] * 19}})
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n'
+ 'RESULT some-test: Malloc= 10.0 bytes\nmedian= 10 bytes, stdev= 0.0 bytes, min= 10 bytes, max= 10 bytes\n'
+ 'RESULT some-test: JSHeap= 5.0 bytes\nmedian= 5 bytes, stdev= 0.0 bytes, min= 5 bytes, max= 5 bytes\n')
def test_run_with_bad_output(self):
output_capture = OutputCapture()
output_capture.capture_output()
try:
port = MockPort()
test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], test)
self.assertEqual(test.run(driver, None), None)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
class TestReplayPerfTest(unittest.TestCase):
class ReplayTestPort(MockPort):
def __init__(self, custom_run_test=None):
class ReplayTestDriver(TestDriver):
def run_test(self, text_input, stop_when_done):
return custom_run_test(text_input, stop_when_done) if custom_run_test else None
self._custom_driver_class = ReplayTestDriver
super(self.__class__, self).__init__()
def _driver_class(self):
return self._custom_driver_class
class MockReplayServer(object):
def __init__(self, wait_until_ready=True):
self.wait_until_ready = lambda: wait_until_ready
def stop(self):
pass
def _add_file(self, port, dirname, filename, content=True):
port.host.filesystem.maybe_make_directory(dirname)
port.host.filesystem.write_binary_file(port.host.filesystem.join(dirname, filename), content)
def _setup_test(self, run_test=None):
test_port = self.ReplayTestPort(run_test)
self._add_file(test_port, '/path/some-dir', 'some-test.replay', 'http://some-test/')
test = ReplayPerfTest(test_port, 'some-test.replay', '/path/some-dir/some-test.replay')
test._start_replay_server = lambda archive, record: self.__class__.MockReplayServer()
return test, test_port
def test_run_single(self):
output_capture = OutputCapture()
output_capture.capture_output()
loaded_pages = []
def run_test(test_input, stop_when_done):
if test_input.test_name == test.force_gc_test:
loaded_pages.append(test_input)
return
if test_input.test_name != "about:blank":
self.assertEqual(test_input.test_name, 'http://some-test/')
loaded_pages.append(test_input)
self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
return DriverOutput('actual text', 'actual image', 'actual checksum',
audio=None, crash=False, timeout=False, error=False)
test, port = self._setup_test(run_test)
test._archive_path = '/path/some-dir/some-test.wpr'
test._url = 'http://some-test/'
try:
driver = port.create_driver(worker_number=1, no_timeout=True)
self.assertTrue(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100))
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(len(loaded_pages), 2)
self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, '')
self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
def test_run_single_fails_without_webpagereplay(self):
output_capture = OutputCapture()
output_capture.capture_output()
test, port = self._setup_test()
test._start_replay_server = lambda archive, record: None
test._archive_path = '/path/some-dir.wpr'
test._url = 'http://some-test/'
try:
driver = port.create_driver(worker_number=1, no_timeout=True)
self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, "Web page replay didn't start.\n")
def test_prepare_fails_when_wait_until_ready_fails(self):
output_capture = OutputCapture()
output_capture.capture_output()
test, port = self._setup_test()
test._start_replay_server = lambda archive, record: self.__class__.MockReplayServer(wait_until_ready=False)
test._archive_path = '/path/some-dir.wpr'
test._url = 'http://some-test/'
try:
driver = port.create_driver(worker_number=1, no_timeout=True)
self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, "Web page replay didn't start.\n")
def test_run_single_fails_when_output_has_error(self):
output_capture = OutputCapture()
output_capture.capture_output()
loaded_pages = []
def run_test(test_input, stop_when_done):
loaded_pages.append(test_input)
self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
return DriverOutput('actual text', 'actual image', 'actual checksum',
audio=None, crash=False, timeout=False, error='some error')
test, port = self._setup_test(run_test)
test._archive_path = '/path/some-dir.wpr'
test._url = 'http://some-test/'
try:
driver = port.create_driver(worker_number=1, no_timeout=True)
self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(len(loaded_pages), 2)
self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'error: some-test.replay\nsome error\n')
def test_prepare(self):
output_capture = OutputCapture()
output_capture.capture_output()
def run_test(test_input, stop_when_done):
self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
return DriverOutput('actual text', 'actual image', 'actual checksum',
audio=None, crash=False, timeout=False, error=False)
test, port = self._setup_test(run_test)
try:
self.assertEqual(test.prepare(time_out_ms=100), True)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, 'Preparing replay for some-test.replay\nPrepared replay for some-test.replay\n')
self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-expected.png'), 'actual image')
def test_prepare_calls_run_single(self):
output_capture = OutputCapture()
output_capture.capture_output()
called = [False]
def run_single(driver, url, time_out_ms, record):
self.assertTrue(record)
self.assertEqual(url, '/path/some-dir/some-test.wpr')
called[0] = True
return False
test, port = self._setup_test()
test.run_single = run_single
try:
self.assertEqual(test.prepare(time_out_ms=100), False)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertTrue(called[0])
self.assertEqual(test._archive_path, '/path/some-dir/some-test.wpr')
self.assertEqual(test._url, 'http://some-test/')
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, "Preparing replay for some-test.replay\nFailed to prepare a replay for some-test.replay\n")
class TestPerfTestFactory(unittest.TestCase):
def test_regular_test(self):
test = PerfTestFactory.create_perf_test(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
self.assertEqual(test.__class__, PerfTest)
def test_inspector_test(self):
test = PerfTestFactory.create_perf_test(MockPort(), 'inspector/some-test', '/path/inspector/some-test')
self.assertEqual(test.__class__, ChromiumStylePerfTest)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3843dee00d5c41d87e637345f5a322d2",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 165,
"avg_line_length": 45.83136094674556,
"alnum_prop": 0.612871990187851,
"repo_name": "leighpauls/k2cro4",
"id": "259fc7854cbe87e58748331d73a1304fcb73d8b6",
"size": "17039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
}
|
from dateutil.rrule import DAILY, MONTHLY, WEEKLY, YEARLY, HOURLY, MINUTELY, SECONDLY
from django.db import models
from django.utils.translation import ugettext_lazy as _
freqs = (("YEARLY", _("Yearly")),
("MONTHLY", _("Monthly")),
("WEEKLY", _("Weekly")),
("DAILY", _("Daily")),
("HOURLY", _("Hourly")),
("MINUTELY", _("Minutely")),
("SECONDLY", _("Secondly")))
class Rule(models.Model):
"""
This defines a rule by which an event will recur. This is defined by the
rrule in the dateutil documentation.
* name - the human friendly name of this kind of recursion.
* description - a short description describing this type of recursion.
* frequency - the base recurrence period
* param - extra params required to define this type of recursion. The params
should follow this format:
param = [rruleparam:value;]*
rruleparam = see list below
value = int[,int]*
The options are: (documentation for these can be found at
http://labix.org/python-dateutil#head-470fa22b2db72000d7abe698a5783a46b0731b57)
** count
** bysetpos
** bymonth
** bymonthday
** byyearday
** byweekno
** byweekday
** byhour
** byminute
** bysecond
** byeaster
"""
name = models.CharField(_("name"), max_length=32)
description = models.TextField(_("description"))
frequency = models.CharField(_("frequency"), choices=freqs, max_length=10)
params = models.TextField(_("params"), null=True, blank=True)
class Meta:
verbose_name = _('rule')
verbose_name_plural = _('rules')
app_label = 'schedule'
def rrule_frequency(self):
        compatibility_dict = {
'DAILY': DAILY,
'MONTHLY': MONTHLY,
'WEEKLY': WEEKLY,
'YEARLY': YEARLY,
'HOURLY': HOURLY,
'MINUTELY': MINUTELY,
'SECONDLY': SECONDLY
}
        return compatibility_dict[self.frequency]
def get_params(self):
"""
>>> rule = Rule(params = "count:1;bysecond:1;byminute:1,2,4,5")
>>> rule.get_params()
{'count': 1, 'byminute': [1, 2, 4, 5], 'bysecond': 1}
"""
if self.params is None:
return {}
params = self.params.split(';')
param_dict = []
for param in params:
param = param.split(':')
if len(param) == 2:
param = (str(param[0]), [int(p) for p in param[1].split(',')])
if len(param[1]) == 1:
param = (param[0], param[1][0])
param_dict.append(param)
return dict(param_dict)
def __unicode__(self):
"""Human readable string for Rule"""
return 'Rule %s params %s' % (self.name, self.params)
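# Example of feeding a Rule into dateutil's rrule (illustrative; rrule itself
# is imported by callers, not by this module):
#   rule = Rule(frequency="WEEKLY", params="count:4;byweekday:0,2")
#   kwargs = rule.get_params()          # {'count': 4, 'byweekday': [0, 2]}
#   rrule(rule.rrule_frequency(), dtstart=datetime(2014, 1, 6), **kwargs)
#   # -> Jan 6, 8, 13 and 15 (Mondays and Wednesdays)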
|
{
"content_hash": "6bcff43084d3cc10b015825bf78f1349",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 85,
"avg_line_length": 33.42528735632184,
"alnum_prop": 0.5450481430536451,
"repo_name": "saxix/django-scheduler",
"id": "7a8eb1e4e18192e90f15ad87ca5f4c846adab774",
"size": "2908",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "schedule/models/rules.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import copy
import importlib
import logging
import logging.handlers
import os
import re
import sys
import six
from saml2 import root_logger, BINDING_URI, SAMLError
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_ARTIFACT
from saml2.attribute_converter import ac_factory
from saml2.assertion import Policy
from saml2.mdstore import MetadataStore
from saml2.saml import NAME_FORMAT_URI
from saml2.virtual_org import VirtualOrg
logger = logging.getLogger(__name__)
__author__ = 'rolandh'
COMMON_ARGS = [
"debug",
"entityid",
"xmlsec_binary",
"key_file",
"key_file_passphrase",
"cert_file",
"encryption_keypairs",
"additional_cert_files",
"metadata_key_usage",
"secret",
"accepted_time_diff",
"name",
"ca_certs",
"description",
"valid_for",
"verify_ssl_cert",
"organization",
"contact_person",
"name_form",
"virtual_organization",
"logger",
"only_use_keys_in_metadata",
"disable_ssl_certificate_validation",
"preferred_binding",
"session_storage",
"assurance_certification",
"entity_category",
"entity_category_support",
"xmlsec_path",
"extension_schemas",
"cert_handler_extra_class",
"generate_cert_func",
"generate_cert_info",
"verify_encrypt_cert_advice",
"verify_encrypt_cert_assertion",
"tmp_cert_file",
"tmp_key_file",
"validate_certificate",
"extensions",
"allow_unknown_attributes",
"crypto_backend",
"id_attr_name",
"delete_tmpfiles",
]
SP_ARGS = [
"required_attributes",
"optional_attributes",
"idp",
"aa",
"subject_data",
"want_response_signed",
"want_assertions_signed",
"want_assertions_or_response_signed",
"authn_requests_signed",
"name_form",
"endpoints",
"ui_info",
"discovery_response",
"allow_unsolicited",
"ecp",
"name_id_format",
"name_id_format_allow_create",
"logout_requests_signed",
"requested_attribute_name_format",
"hide_assertion_consumer_service",
"force_authn",
"sp_type",
"sp_type_in_metadata",
"requested_attributes",
]
AA_IDP_ARGS = [
"sign_assertion",
"sign_response",
"encrypt_assertion",
"encrypted_advice_attributes",
"encrypt_assertion_self_contained",
"want_authn_requests_signed",
"want_authn_requests_only_with_valid_cert",
"provided_attributes",
"subject_data",
"sp",
"scope",
"endpoints",
"metadata",
"ui_info",
"name_id_format",
"domain",
"name_qualifier",
"edu_person_targeted_id",
]
PDP_ARGS = ["endpoints", "name_form", "name_id_format"]
AQ_ARGS = ["endpoints"]
AA_ARGS = ["attribute", "attribute_profile"]
COMPLEX_ARGS = ["attribute_converters", "metadata", "policy"]
ALL = set(COMMON_ARGS + SP_ARGS + AA_IDP_ARGS + PDP_ARGS + COMPLEX_ARGS +
AA_ARGS)
SPEC = {
"": COMMON_ARGS + COMPLEX_ARGS,
"sp": COMMON_ARGS + COMPLEX_ARGS + SP_ARGS,
"idp": COMMON_ARGS + COMPLEX_ARGS + AA_IDP_ARGS,
"aa": COMMON_ARGS + COMPLEX_ARGS + AA_IDP_ARGS + AA_ARGS,
"pdp": COMMON_ARGS + COMPLEX_ARGS + PDP_ARGS,
"aq": COMMON_ARGS + COMPLEX_ARGS + AQ_ARGS,
}
# --------------- Logging stuff ---------------
LOG_LEVEL = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
LOG_HANDLER = {
"rotating": logging.handlers.RotatingFileHandler,
"syslog": logging.handlers.SysLogHandler,
"timerotate": logging.handlers.TimedRotatingFileHandler,
"memory": logging.handlers.MemoryHandler,
}
LOG_FORMAT = "%(asctime)s %(name)s:%(levelname)s %(message)s"
_RPA = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST, BINDING_HTTP_ARTIFACT]
_PRA = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT, BINDING_HTTP_ARTIFACT]
_SRPA = [BINDING_SOAP, BINDING_HTTP_REDIRECT, BINDING_HTTP_POST,
BINDING_HTTP_ARTIFACT]
PREFERRED_BINDING = {
"single_logout_service": _SRPA,
"manage_name_id_service": _SRPA,
"assertion_consumer_service": _PRA,
"single_sign_on_service": _RPA,
"name_id_mapping_service": [BINDING_SOAP],
"authn_query_service": [BINDING_SOAP],
"attribute_service": [BINDING_SOAP],
"authz_service": [BINDING_SOAP],
"assertion_id_request_service": [BINDING_URI],
"artifact_resolution_service": [BINDING_SOAP],
"attribute_consuming_service": _RPA
}
class ConfigurationError(SAMLError):
pass
# -----------------------------------------------------------------
class Config(object):
def_context = ""
def __init__(self, homedir="."):
self._homedir = homedir
self.entityid = None
self.xmlsec_binary = None
self.xmlsec_path = []
self.debug = False
self.key_file = None
self.key_file_passphrase = None
self.cert_file = None
self.encryption_keypairs = None
self.additional_cert_files = None
self.metadata_key_usage = 'both'
self.secret = None
self.accepted_time_diff = None
self.name = None
self.ca_certs = None
self.verify_ssl_cert = False
self.description = None
self.valid_for = None
self.organization = None
self.contact_person = None
self.name_form = None
self.name_id_format = None
self.name_id_format_allow_create = None
self.virtual_organization = None
self.logger = None
self.only_use_keys_in_metadata = True
self.logout_requests_signed = None
self.disable_ssl_certificate_validation = None
self.context = ""
self.attribute_converters = None
self.metadata = None
self.policy = None
self.serves = []
self.vorg = {}
self.preferred_binding = PREFERRED_BINDING
self.domain = ""
self.name_qualifier = ""
self.assurance_certification = []
self.entity_category = []
self.entity_category_support = []
self.crypto_backend = 'xmlsec1'
self.id_attr_name = None
self.scope = ""
self.allow_unknown_attributes = False
self.extension_schema = {}
self.cert_handler_extra_class = None
self.verify_encrypt_cert_advice = None
self.verify_encrypt_cert_assertion = None
self.generate_cert_func = None
self.generate_cert_info = None
self.tmp_cert_file = None
self.tmp_key_file = None
self.validate_certificate = None
self.extensions = {}
self.attribute = []
self.attribute_profile = []
self.requested_attribute_name_format = NAME_FORMAT_URI
self.delete_tmpfiles = True
def setattr(self, context, attr, val):
if context == "":
setattr(self, attr, val)
else:
setattr(self, "_%s_%s" % (context, attr), val)
def getattr(self, attr, context=None):
if context is None:
context = self.context
if context == "":
return getattr(self, attr, None)
else:
return getattr(self, "_%s_%s" % (context, attr), None)
def load_special(self, cnf, typ, metadata_construction=False):
for arg in SPEC[typ]:
try:
_val = cnf[arg]
except KeyError:
pass
else:
if _val == "true":
_val = True
elif _val == "false":
_val = False
self.setattr(typ, arg, _val)
self.context = typ
self.load_complex(cnf, typ, metadata_construction=metadata_construction)
self.context = self.def_context
def load_complex(self, cnf, typ="", metadata_construction=False):
try:
self.setattr(typ, "policy", Policy(cnf["policy"]))
except KeyError:
pass
# for srv, spec in cnf["service"].items():
# try:
# self.setattr(srv, "policy",
# Policy(cnf["service"][srv]["policy"]))
# except KeyError:
# pass
try:
try:
acs = ac_factory(cnf["attribute_map_dir"])
except KeyError:
acs = ac_factory()
if not acs:
raise ConfigurationError(
"No attribute converters, something is wrong!!")
_acs = self.getattr("attribute_converters", typ)
if _acs:
_acs.extend(acs)
else:
self.setattr(typ, "attribute_converters", acs)
except KeyError:
pass
if not metadata_construction:
try:
self.setattr(typ, "metadata",
self.load_metadata(cnf["metadata"]))
except KeyError:
pass
def unicode_convert(self, item):
try:
return six.text_type(item, "utf-8")
except TypeError:
_uc = self.unicode_convert
if isinstance(item, dict):
return dict([(key, _uc(val)) for key, val in item.items()])
elif isinstance(item, list):
return [_uc(v) for v in item]
elif isinstance(item, tuple):
return tuple([_uc(v) for v in item])
else:
return item
def load(self, cnf, metadata_construction=False):
""" The base load method, loads the configuration
:param cnf: The configuration as a dictionary
:param metadata_construction: Is this only to be able to construct
metadata. If so some things can be left out.
:return: The Configuration instance
"""
_uc = self.unicode_convert
for arg in COMMON_ARGS:
if arg == "virtual_organization":
if "virtual_organization" in cnf:
for key, val in cnf["virtual_organization"].items():
self.vorg[key] = VirtualOrg(None, key, val)
continue
elif arg == "extension_schemas":
# List of filename of modules representing the schemas
if "extension_schemas" in cnf:
for mod_file in cnf["extension_schemas"]:
_mod = self._load(mod_file)
self.extension_schema[_mod.NAMESPACE] = _mod
try:
setattr(self, arg, _uc(cnf[arg]))
except KeyError:
pass
except TypeError: # Something that can't be a string
setattr(self, arg, cnf[arg])
if not self.delete_tmpfiles:
logger.warning(
"delete_tmpfiles is set to False; "
"temporary files will not be deleted."
)
if "service" in cnf:
for typ in ["aa", "idp", "sp", "pdp", "aq"]:
try:
self.load_special(
cnf["service"][typ], typ,
metadata_construction=metadata_construction)
self.serves.append(typ)
except KeyError:
pass
if "extensions" in cnf:
self.do_extensions(cnf["extensions"])
self.load_complex(cnf, metadata_construction=metadata_construction)
self.context = self.def_context
return self
def _load(self, fil):
head, tail = os.path.split(fil)
if head == "":
if sys.path[0] != ".":
sys.path.insert(0, ".")
else:
sys.path.insert(0, head)
return importlib.import_module(tail)
def load_file(self, config_filename, metadata_construction=False):
if config_filename.endswith(".py"):
config_filename = config_filename[:-3]
mod = self._load(config_filename)
return self.load(copy.deepcopy(mod.CONFIG), metadata_construction)
def load_metadata(self, metadata_conf):
""" Loads metadata into an internal structure """
acs = self.attribute_converters
if acs is None:
raise ConfigurationError(
"Missing attribute converter specification")
        try:
            ca_certs = self.ca_certs
        except AttributeError:
            ca_certs = None
        try:
            disable_validation = self.disable_ssl_certificate_validation
        except AttributeError:
            disable_validation = False
mds = MetadataStore(acs, self, ca_certs,
disable_ssl_certificate_validation=disable_validation)
mds.imp(metadata_conf)
return mds
def endpoint(self, service, binding=None, context=None):
""" Goes through the list of endpoint specifications for the
given type of service and returns a list of endpoint that matches
the given binding. If no binding is given all endpoints available for
that service will be returned.
:param service: The service the endpoint should support
:param binding: The expected binding
:return: All the endpoints that matches the given restrictions
"""
spec = []
unspec = []
endps = self.getattr("endpoints", context)
if endps and service in endps:
for endpspec in endps[service]:
try:
endp, bind = endpspec
if binding is None or bind == binding:
spec.append(endp)
except ValueError:
unspec.append(endpspec)
if spec:
return spec
else:
return unspec
def log_handler(self):
try:
_logconf = self.logger
except KeyError:
return None
handler = None
for htyp in LOG_HANDLER:
if htyp in _logconf:
if htyp == "syslog":
args = _logconf[htyp]
if "socktype" in args:
import socket
if args["socktype"] == "dgram":
args["socktype"] = socket.SOCK_DGRAM
elif args["socktype"] == "stream":
args["socktype"] = socket.SOCK_STREAM
else:
raise ConfigurationError("Unknown socktype!")
try:
handler = LOG_HANDLER[htyp](**args)
except TypeError: # difference between 2.6 and 2.7
del args["socktype"]
handler = LOG_HANDLER[htyp](**args)
else:
handler = LOG_HANDLER[htyp](**_logconf[htyp])
break
if handler is None:
# default if rotating logger
handler = LOG_HANDLER["rotating"]()
if "format" in _logconf:
formatter = logging.Formatter(_logconf["format"])
else:
formatter = logging.Formatter(LOG_FORMAT)
handler.setFormatter(formatter)
return handler
def setup_logger(self):
if root_logger.level != logging.NOTSET: # Someone got there before me
return root_logger
_logconf = self.logger
if _logconf is None:
return root_logger
try:
root_logger.setLevel(LOG_LEVEL[_logconf["loglevel"].lower()])
except KeyError: # reasonable default
root_logger.setLevel(logging.INFO)
root_logger.addHandler(self.log_handler())
root_logger.info("Logging started")
return root_logger
def endpoint2service(self, endpoint, context=None):
endps = self.getattr("endpoints", context)
for service, specs in endps.items():
for endp, binding in specs:
if endp == endpoint:
return service, binding
return None, None
def do_extensions(self, extensions):
for key, val in extensions.items():
self.extensions[key] = val
def service_per_endpoint(self, context=None):
"""
List all endpoint this entity publishes and which service and binding
that are behind the endpoint
:param context: Type of entity
:return: Dictionary with endpoint url as key and a tuple of
service and binding as value
"""
endps = self.getattr("endpoints", context)
res = {}
for service, specs in endps.items():
for endp, binding in specs:
res[endp] = (service, binding)
return res
class SPConfig(Config):
def_context = "sp"
def __init__(self):
Config.__init__(self)
def vo_conf(self, vo_name):
try:
return self.virtual_organization[vo_name]
except KeyError:
return None
def ecp_endpoint(self, ipaddress):
"""
Returns the entity ID of the IdP which the ECP client should talk to
:param ipaddress: The IP address of the user client
:return: IdP entity ID or None
"""
_ecp = self.getattr("ecp")
if _ecp:
for key, eid in _ecp.items():
if re.match(key, ipaddress):
return eid
return None
class IdPConfig(Config):
def_context = "idp"
def __init__(self):
Config.__init__(self)
def config_factory(_type, config):
"""
:type _type: str
:param _type:
:type config: str or dict
:param config: Name of file with pysaml2 config or CONFIG dict
:return:
"""
if _type == "sp":
conf = SPConfig()
elif _type in ["aa", "idp", "pdp", "aq"]:
conf = IdPConfig()
else:
conf = Config()
if isinstance(config, dict):
conf.load(copy.deepcopy(config))
elif isinstance(config, str):
conf.load_file(config)
else:
raise ValueError('Unknown type of config')
conf.context = _type
return conf
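# A minimal usage sketch (hypothetical entity id and endpoint URL):
#   conf = config_factory("sp", {
#       "entityid": "https://sp.example.org/sp.xml",
#       "service": {"sp": {"endpoints": {
#           "assertion_consumer_service": [
#               ("https://sp.example.org/acs", BINDING_HTTP_POST)]}}},
#   })
#   conf.endpoint("assertion_consumer_service", binding=BINDING_HTTP_POST)
#   # -> ['https://sp.example.org/acs']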
|
{
"content_hash": "7685e6b9936253b299bf3c60cc067187",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 80,
"avg_line_length": 29.864462809917356,
"alnum_prop": 0.5589993358423733,
"repo_name": "cloudera/hue",
"id": "9f394872858214f810ca3bd8d3e6528175271235",
"size": "18091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py3/pysaml2-5.0.0/src/saml2/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
"""This code example runs a report that an upgraded publisher would use to
include statistics before the upgrade. To download the report run
download_report.py."""
__author__ = ('Jeff Sham',
'Vincent Tsao')
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
import time
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
report_service = client.GetService('ReportService', version='v201302')
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['ORDER_ID', 'ORDER_NAME'],
'columns': ['MERGED_AD_SERVER_IMPRESSIONS', 'MERGED_AD_SERVER_CLICKS',
'MERGED_AD_SERVER_CTR',
'MERGED_AD_SERVER_CPM_AND_CPC_REVENUE',
'MERGED_AD_SERVER_WITHOUT_CPD_AVERAGE_ECPM'],
'dateRangeType': 'LAST_MONTH'
}
}
# Run report.
report_job = report_service.RunReportJob(report_job)[0]
# Wait for report to complete.
status = report_job['reportJobStatus']
while status != 'COMPLETED' and status != 'FAILED':
print 'Report job with \'%s\' id is still running.' % report_job['id']
time.sleep(30)
status = report_service.GetReportJob(report_job['id'])[0]['reportJobStatus']
if status == 'FAILED':
print ('Report job with id \'%s\' failed to complete successfully.'
% report_job['id'])
else:
print 'Report job with id \'%s\' completed successfully.' % report_job['id']
|
{
"content_hash": "71ecc5aa2a8d81df251d5b7deb43b79d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 33.509803921568626,
"alnum_prop": 0.6512580456407255,
"repo_name": "caioserra/apiAdwords",
"id": "f992cb99aa6b4cc421c56e62ee152e3207555edb",
"size": "2327",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201302/report_service/run_merged_delivery_report.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
}
|
"""
Layout dimensions are used to give the minimum, maximum and preferred
dimensions for containers and controls.
"""
from __future__ import unicode_literals
from prompt_toolkit.utils import test_callable_args
__all__ = [
'Dimension',
'D',
'sum_layout_dimensions',
'max_layout_dimensions',
'to_dimension',
'is_dimension',
]
class Dimension(object):
"""
Specified dimension (width/height) of a user control or window.
The layout engine tries to honor the preferred size. If that is not
possible, because the terminal is larger or smaller, it tries to keep in
between min and max.
:param min: Minimum size.
:param max: Maximum size.
:param weight: For a VSplit/HSplit, the actual size will be determined
by taking the proportion of weights from all the children.
                   E.g. When there are two children, one with a weight of 1,
and the other with a weight of 2. The second will always be
twice as big as the first, if the min/max values allow it.
:param preferred: Preferred size.
"""
def __init__(self, min=None, max=None, weight=None, preferred=None):
assert weight is None or (isinstance(weight, int) and weight >= 0) # Cannot be a float.
assert min is None or min >= 0
assert max is None or max >= 0
assert preferred is None or preferred >= 0
self.min_specified = min is not None
self.max_specified = max is not None
self.preferred_specified = preferred is not None
self.weight_specified = weight is not None
if min is None:
min = 0 # Smallest possible value.
if max is None: # 0-values are allowed, so use "is None"
max = 1000 ** 10 # Something huge.
if preferred is None:
preferred = min
if weight is None:
weight = 1
self.min = min
self.max = max
self.preferred = preferred
self.weight = weight
# Don't allow situations where max < min. (This would be a bug.)
if max < min:
raise ValueError('Invalid Dimension: max < min.')
# Make sure that the 'preferred' size is always in the min..max range.
if self.preferred < self.min:
self.preferred = self.min
if self.preferred > self.max:
self.preferred = self.max
@classmethod
def exact(cls, amount):
"""
Return a :class:`.Dimension` with an exact size. (min, max and
preferred set to ``amount``).
"""
return cls(min=amount, max=amount, preferred=amount)
@classmethod
def zero(cls):
"""
Create a dimension that represents a zero size. (Used for 'invisible'
controls.)
"""
return cls.exact(amount=0)
def is_zero(self):
" True if this `Dimension` represents a zero size. "
return self.preferred == 0 or self.max == 0
def __repr__(self):
fields = []
if self.min_specified:
fields.append('min=%r' % self.min)
if self.max_specified:
fields.append('max=%r' % self.max)
if self.preferred_specified:
fields.append('preferred=%r' % self.preferred)
if self.weight_specified:
fields.append('weight=%r' % self.weight)
return 'Dimension(%s)' % ', '.join(fields)
def sum_layout_dimensions(dimensions):
"""
Sum a list of :class:`.Dimension` instances.
"""
min = sum(d.min for d in dimensions)
max = sum(d.max for d in dimensions)
preferred = sum(d.preferred for d in dimensions)
return Dimension(min=min, max=max, preferred=preferred)
def max_layout_dimensions(dimensions):
"""
Take the maximum of a list of :class:`.Dimension` instances.
    Used when we have an HSplit/VSplit and we want to get the best width/height.
"""
if not len(dimensions):
return Dimension.zero()
    # If all dimensions are size zero, return zero.
# (This is important for HSplit/VSplit, to report the right values to their
# parent when all children are invisible.)
if all(d.is_zero() for d in dimensions):
return dimensions[0]
# Ignore empty dimensions. (They should not reduce the size of others.)
dimensions = [d for d in dimensions if not d.is_zero()]
if dimensions:
# Take the highest minimum dimension.
min_ = max(d.min for d in dimensions)
        # For the maximum, we would prefer not to go larger than the smallest
# 'max' value, unless other dimensions have a bigger preferred value.
# This seems to work best:
        #   - We don't want a widget with a small height in a VSplit to
        #     shrink other widgets in the split.
# If it doesn't work well enough, then it's up to the UI designer to
# explicitly pass dimensions.
max_ = min(d.max for d in dimensions)
max_ = max(max_, max(d.preferred for d in dimensions))
        # Make sure that max >= min. In some scenarios, when certain min..max
        # ranges don't have any overlap, we can end up in such an impossible
        # situation. In that case, give priority to the min value.
# E.g. taking (1..5) and (8..9) would return (8..5). Instead take (8..8).
if min_ > max_:
max_ = min_
preferred = max(d.preferred for d in dimensions)
return Dimension(min=min_, max=max_, preferred=preferred)
else:
return Dimension()
def to_dimension(value):
"""
Turn the given object into a `Dimension` object.
"""
if value is None:
return Dimension()
if isinstance(value, int):
return Dimension.exact(value)
if isinstance(value, Dimension):
return value
if callable(value):
return to_dimension(value())
raise ValueError('Not an integer or Dimension object.')
def is_dimension(value):
"""
Test whether the given value could be a valid dimension.
(For usage in an assertion. It's not guaranteed in case of a callable.)
"""
if value is None:
return True
if callable(value):
return test_callable_args(value, [])
if isinstance(value, (int, Dimension)):
return True
return False
# Common alias.
D = Dimension
# For backward-compatibility.
LayoutDimension = Dimension
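# A minimal usage sketch (not part of the original module; it only relies on
# the classes and helpers defined above): combining dimensions the way an
# HSplit/VSplit would when sizing its children.
if __name__ == '__main__':
    children = [Dimension(min=10, preferred=20), D.exact(5), to_dimension(30)]
    # Total size needed when the children are laid out along the same axis.
    print(sum_layout_dimensions(children))
    # Best common size when the children share the other axis.
    print(max_layout_dimensions(children))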
|
{
"content_hash": "429633f1f1adeadf72a0ebcdf1a11135",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 97,
"avg_line_length": 32.85641025641026,
"alnum_prop": 0.6166692679881379,
"repo_name": "lmregus/Portfolio",
"id": "d8e831588796a3cc81dfc1c5578e26e023dd3b03",
"size": "6407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/layout/dimension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27682"
},
{
"name": "C++",
"bytes": "25458"
},
{
"name": "CSS",
"bytes": "12842"
},
{
"name": "HTML",
"bytes": "49171"
},
{
"name": "Java",
"bytes": "99711"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Python",
"bytes": "42857"
},
{
"name": "Shell",
"bytes": "5710"
}
],
"symlink_target": ""
}
|
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('lipame')
class CeleryConfig(AppConfig):
name = 'lipame.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
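# A minimal usage sketch (hypothetical tasks.py in one of the installed apps,
# not part of this module): tasks picked up by autodiscover_tasks() above can
# be defined and queued like this.
#
#     from lipame.taskapp.celery import app
#
#     @app.task
#     def add(x, y):
#         return x + y
#
#     add.delay(2, 3)  # runs asynchronously on a worker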
|
{
"content_hash": "e209fd82091446ecbb466578024d5022",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 96,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.688470066518847,
"repo_name": "savioabuga/lipame",
"id": "5b2272a17a11d1b80c0ad199ce68a8b7c74f2543",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lipame/taskapp/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "220432"
},
{
"name": "HTML",
"bytes": "40241"
},
{
"name": "JavaScript",
"bytes": "62200"
},
{
"name": "PHP",
"bytes": "38584"
},
{
"name": "Python",
"bytes": "57172"
},
{
"name": "Shell",
"bytes": "4240"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from reversion import revisions as reversion
@reversion.register()
class Type(models.Model):
name = models.CharField(_('Name'), max_length=200, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Type')
verbose_name_plural = _('Types')
permissions = (
("read_type", _("Can read Type")),
)
def get_absolute_url(self):
return reverse('type-detail', kwargs={'pk': self.pk})
def get_edit_url(self):
return reverse('type-edit', kwargs={'pk': self.pk})
@reversion.register()
class TypeAttribute(models.Model):
devicetype = models.ForeignKey(Type, on_delete=models.CASCADE)
name = models.CharField(max_length=200)
regex = models.CharField(max_length=500, blank=True, null=True)
class Meta:
verbose_name = _("Type-attribute")
verbose_name_plural = _("Type-attributes")
def get_absolute_url(self):
return reverse('type-detail', kwargs={'pk': self.devicetype.pk})
def __str__(self):
return self.name
@reversion.register()
class TypeAttributeValue(models.Model):
typeattribute = models.ForeignKey(TypeAttribute, on_delete=models.CASCADE)
value = models.CharField(max_length=400)
device = models.ForeignKey("devices.Device", on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type-attribute value")
verbose_name_plural = _("Type-attribute values")
def __str__(self):
return self.value
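# A minimal usage sketch (assuming django-reversion is configured as above):
# saving registered models inside a revision block records versions that can
# later be inspected or rolled back.
#
#     with reversion.create_revision():
#         laptop = Type.objects.create(name='Laptop')
#         TypeAttribute.objects.create(devicetype=laptop, name='RAM')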
|
{
"content_hash": "2cf978f15ecd9bcb3b95795381e42e3f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.6531365313653137,
"repo_name": "vIiRuS/Lagerregal",
"id": "b0fd4919280bbd97191701c89fa775176ce18e1b",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devicetypes/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42486"
},
{
"name": "HTML",
"bytes": "264180"
},
{
"name": "JavaScript",
"bytes": "179557"
},
{
"name": "Python",
"bytes": "396530"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib import admin
from django.template import RequestContext
from django.http import QueryDict
from cms.api import add_plugin, create_page
from cms.utils.plugins import build_plugin_tree
from cmsplugin_cascade.models import CascadeElement
from cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,
BootstrapRowForm, BootstrapColumnPlugin)
from cmsplugin_cascade.bootstrap3.settings import CASCADE_BREAKPOINTS_LIST
from cms.test_utils.testcases import CMSTestCase
from .utils import get_request_context
class ContainerPluginTest(CMSTestCase):
def setUp(self):
page = create_page('HOME', 'testing.html', 'en', published=True, in_navigation=True)
self.placeholder = page.placeholders.get(slot='Main Content')
self.request = self.get_request(language='en', page=page)
self.admin_site = admin.sites.AdminSite()
def test_container_context(self):
# add a Bootstrap Container Plugin
container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',
glossary={'breakpoints': CASCADE_BREAKPOINTS_LIST})
self.assertIsInstance(container_model, CascadeElement)
container_plugin = container_model.get_plugin_class_instance(self.admin_site)
self.assertIsInstance(container_plugin, BootstrapContainerPlugin)
ModelForm = container_plugin.get_form(self.request, container_model)
post_data = QueryDict('', mutable=True)
post_data.setlist('breakpoints', ['sm', 'md'])
form = ModelForm(post_data, None, instance=container_model)
html = form.as_p()
self.assertInHTML('<input id="id_glossary_breakpoints_0" name="breakpoints" type="checkbox" value="xs" />', html)
self.assertInHTML('<input checked="checked" id="id_glossary_breakpoints_2" name="breakpoints" type="checkbox" value="md" />', html)
self.assertInHTML('<input id="id_glossary_fluid" name="fluid" type="checkbox" />', html)
container_plugin.save_model(self.request, container_model, form, False)
self.assertListEqual(container_model.glossary['breakpoints'], ['sm', 'md'])
self.assertTrue('fluid' in container_model.glossary)
self.assertEqual(str(container_model), 'for tablets, laptops')
# add a RowPlugin with 3 Columns
row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model)
row_plugin = row_model.get_plugin_class_instance()
row_change_form = BootstrapRowForm({'num_children': 3})
row_change_form.full_clean()
row_plugin.save_model(self.request, row_model, row_change_form, False)
self.assertDictEqual(row_model.glossary, {})
self.assertIsInstance(row_model, CascadeElement)
self.assertEqual(str(row_model), 'with 3 columns')
plugin_list = [container_model, row_model]
columns_qs = CascadeElement.objects.filter(parent_id=row_model.id)
self.assertEqual(columns_qs.count(), 3)
for column_model in columns_qs:
self.assertIsInstance(column_model, CascadeElement)
column_plugin = column_model.get_plugin_class_instance()
self.assertIsInstance(column_plugin, BootstrapColumnPlugin)
self.assertEqual(column_model.parent.id, row_model.id)
self.assertEqual(str(column_model), 'default width: 4 units')
plugin_list.append(column_model)
# Render the Container Plugin with all of its children
build_plugin_tree(plugin_list)
context = get_request_context(self.request)
html = container_model.render_plugin(context)
self.assertHTMLEqual(html, '<div class="container"><div class="row">' +
'<div class="col-sm-4"></div><div class="col-sm-4"></div><div class="col-sm-4"></div>' +
'</div></div>')
# change data inside the first column
column_model = columns_qs[0]
delattr(column_model, '_inst')
column_plugin = column_model.get_plugin_class_instance(self.admin_site)
column_plugin.cms_plugin_instance = column_model
post_data = QueryDict('', mutable=True)
post_data.update({'sm-column-offset': 'col-sm-offset-1', 'sm-column-width': 'col-sm-3'})
ModelForm = column_plugin.get_form(self.request, column_model)
form = ModelForm(post_data, None, instance=column_model)
self.assertTrue(form.is_valid())
column_plugin.save_model(self.request, column_model, form, True)
# change data inside the second column
column_model = columns_qs[1]
delattr(column_model, '_inst')
column_plugin = column_model.get_plugin_class_instance(self.admin_site)
column_plugin.cms_plugin_instance = column_model
post_data = QueryDict('', mutable=True)
post_data.update({'sm-responsive-utils': 'hidden-sm', 'sm-column-width': 'col-sm-4'})
ModelForm = column_plugin.get_form(self.request, column_model)
form = ModelForm(post_data, None, instance=column_model)
self.assertTrue(form.is_valid())
column_plugin.save_model(self.request, column_model, form, False)
html = container_model.render_plugin(context)
self.assertHTMLEqual(html, '<div class="container"><div class="row">' +
'<div class="col-sm-3 col-sm-offset-1"></div><div class="col-sm-4 hidden-sm"></div><div class="col-sm-4"></div>' +
'</div></div>')
|
{
"content_hash": "f795074a4b149ee00f5428f534517604",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 139,
"avg_line_length": 57.322916666666664,
"alnum_prop": 0.678902416863529,
"repo_name": "schacki/djangocms-cascade",
"id": "3a818418cf71e5b765bc501b6a5bb17118d3869a",
"size": "5527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_container.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2562"
},
{
"name": "HTML",
"bytes": "12812"
},
{
"name": "JavaScript",
"bytes": "88398"
},
{
"name": "Python",
"bytes": "281313"
}
],
"symlink_target": ""
}
|
"""Tests for the Velbus config flow."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import serial.tools.list_ports
from velbusaio.exceptions import VelbusConnectionFailed
from homeassistant import data_entry_flow
from homeassistant.components import usb
from homeassistant.components.velbus import config_flow
from homeassistant.components.velbus.const import DOMAIN
from homeassistant.config_entries import SOURCE_USB
from homeassistant.const import CONF_NAME, CONF_PORT, CONF_SOURCE
from homeassistant.core import HomeAssistant
from .const import PORT_SERIAL, PORT_TCP
from tests.common import MockConfigEntry
DISCOVERY_INFO = usb.UsbServiceInfo(
device=PORT_SERIAL,
pid="10CF",
vid="0B1B",
serial_number="1234",
description="Velbus VMB1USB",
manufacturer="Velleman",
)
def com_port():
"""Mock of a serial port."""
port = serial.tools.list_ports_common.ListPortInfo(PORT_SERIAL)
port.serial_number = "1234"
port.manufacturer = "Virtual serial port"
port.device = PORT_SERIAL
port.description = "Some serial port"
return port
@pytest.fixture(autouse=True)
def override_async_setup_entry() -> AsyncMock:
"""Override async_setup_entry."""
with patch(
"homeassistant.components.velbus.async_setup_entry", return_value=True
) as mock_setup_entry:
yield mock_setup_entry
@pytest.fixture(name="controller_connection_failed")
def mock_controller_connection_failed():
"""Mock the velbus controller with an assert."""
with patch("velbusaio.controller.Velbus", side_effect=VelbusConnectionFailed()):
yield
def init_config_flow(hass: HomeAssistant):
"""Init a configuration flow."""
flow = config_flow.VelbusConfigFlow()
flow.hass = hass
return flow
@pytest.mark.usefixtures("controller")
async def test_user(hass: HomeAssistant):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await flow.async_step_user(
{CONF_NAME: "Velbus Test Serial", CONF_PORT: PORT_SERIAL}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "velbus_test_serial"
assert result["data"][CONF_PORT] == PORT_SERIAL
result = await flow.async_step_user(
{CONF_NAME: "Velbus Test TCP", CONF_PORT: PORT_TCP}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "velbus_test_tcp"
assert result["data"][CONF_PORT] == PORT_TCP
@pytest.mark.usefixtures("controller_connection_failed")
async def test_user_fail(hass: HomeAssistant):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{CONF_NAME: "Velbus Test Serial", CONF_PORT: PORT_SERIAL}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_PORT: "cannot_connect"}
result = await flow.async_step_user(
{CONF_NAME: "Velbus Test TCP", CONF_PORT: PORT_TCP}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_PORT: "cannot_connect"}
@pytest.mark.usefixtures("config_entry")
async def test_abort_if_already_setup(hass: HomeAssistant):
"""Test we abort if Velbus is already setup."""
flow = init_config_flow(hass)
result = await flow.async_step_user({CONF_PORT: PORT_TCP, CONF_NAME: "velbus test"})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"port": "already_configured"}
@pytest.mark.usefixtures("controller")
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
async def test_flow_usb(hass: HomeAssistant):
"""Test usb discovery flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USB},
data=DISCOVERY_INFO,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
# test an already configured discovery
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_PORT: PORT_SERIAL},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USB},
data=DISCOVERY_INFO,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
@pytest.mark.usefixtures("controller_connection_failed")
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
async def test_flow_usb_failed(hass: HomeAssistant):
"""Test usb discovery flow with a failed velbus test."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USB},
data=DISCOVERY_INFO,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
|
{
"content_hash": "bf8600b7cd87d08455052b25087cb1c3",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 88,
"avg_line_length": 33.436708860759495,
"alnum_prop": 0.6880560287715314,
"repo_name": "GenericStudent/home-assistant",
"id": "960eedcbd011b18989b113bbfc1f7c5fc7b3304a",
"size": "5283",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/velbus/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/misc/shared_recording_rod_s01.iff"
result.attribute_template_id = -1
result.stfName("item_n","recording_rod_s01")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "946ded42a66502dd40a162ae87b2f50d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 23.846153846153847,
"alnum_prop": 0.6935483870967742,
"repo_name": "obi-two/Rebelion",
"id": "1011a2f61bf3f818c0d5fa902ff24bbd0294834f",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/loot/misc/shared_recording_rod_s01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
Option parser for all tests
Needs to be a separate module to avoid circular imports
"""
import sys, optparse
def option_parser():
"""
Returns the option parser for tests.
This parser needs to be able to handle all flags that may be passed
    to any test.
    Due to the optparse design we cannot create a 'partial' option parser
    that would ignore extra parameters while still allowing it to be
    extended later. So either every flag goes into the main option parser,
    or each module will have to implement an almost identical parser.
Having one large option parser seemed the lesser of two bad choices.
"""
parser = optparse.OptionParser()
    # passing -n will disable the pathfix; use it to test global pygr distributions
parser.add_option(
'-n', '--nopath', action="store_true", dest="no_pathfix", default=False,
help="do not alter the python import path"
)
# add the regular build directory rather than the in place directory
parser.add_option(
'-b', '--buildpath', action="store_true", dest="builddir", default=False,
help="use the platform specific build directory",
)
# stops testing immediately after a test suite fails
parser.add_option(
'-s', '--strict', action="store_true",
dest="strict", default=False,
help="stops testing after a test suite fails"
)
# exclude the modules listed in arguments from all the tests
parser.add_option(
'-x', '--exclude', action="store_true",
dest="exclude", default=False,
help="excludes the files that are listed"
)
# verbosity can be 0,1 and 2 (increasing verbosity)
parser.add_option(
'-v', '--verbosity', action="store",
dest="verbosity", type="int", default=0,
help="sets the verbosity (0, 1, or 2)",
)
# long options are typically used only within individual tests
# executes figleaf to collect the coverage data
parser.add_option(
'--coverage', action="store_true", dest="coverage", default=False,
help="runs figleaf and collects the coverage information into the html directory"
)
# adds the clean option to the testrunner
parser.add_option(
'--no-clean', action="store_false", dest="clean", default=True,
help="does not reset the temporary directory and temp files"
)
# runs the performance tests
parser.add_option(
'--performance', action="store_true", dest="performance",
default=False,
help="runs the performance tests (not implemented)"
)
# port information for the pygrdata_test.py test; default is random
parser.add_option(
'--port', action="store", type="int",
dest="port", default=0,
help="sets the port information for the XMLRPC server"
)
# where to write out the port information, for communication to test
# runner.
parser.add_option(
'--port-file', action="store", type="string",
dest="port_file",
help="where to write the port information for the XMLRPC server"
)
# set the pygraphdata path from command line
parser.add_option(
'--pygrdatapath', action="store", type="string",
dest="pygrdatapath", default='',
help="sets the pygraphdata path for the XMLRPC server"
)
# add resources to the path colon separated
# --downloadDB=database1
parser.add_option(
'--downloadDB', action="store", type="string",
dest="downloadDB", default=None,
help="sets the downloadDB shelve for the XMLRPC server"
)
# add resources to the path colon separated
# --resources=database1
parser.add_option(
'--resources', action="store", type="string",
dest="resources", default='',
help="sets the downloadable resources, separate multiple ones with a : symbol"
)
return parser
if __name__ == '__main__':
# list flags here
flags = " --downloadDB=1234 "
sys.argv.extend(flags.split())
parser = option_parser()
options, args = parser.parse_args()
print options
|
{
"content_hash": "e9ae4039fb69a750770699ec39ccbf21",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 89,
"avg_line_length": 32.25581395348837,
"alnum_prop": 0.6411920211487623,
"repo_name": "ctb/pygr",
"id": "7d6779979c8a67be7b5d28accbaad23b74a05ca9",
"size": "4161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testlib/testoptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "44404"
},
{
"name": "C++",
"bytes": "12655"
},
{
"name": "Emacs Lisp",
"bytes": "31735"
},
{
"name": "Perl",
"bytes": "163476"
},
{
"name": "Python",
"bytes": "1159395"
},
{
"name": "Shell",
"bytes": "7191"
}
],
"symlink_target": ""
}
|
from kivy.uix.screenmanager import Screen
from gravur.wallet.transactionscrollview import TransactionScrollView # NOQA
from gravur.wallet.invoicescrollview import InvoiceScrollView # NOQA
from gravur.common.labelbox import LabelBox # NOQA
from gravur.common.navbar import NavBar # NOQA
from gravur.common.normalbutton import NormalButton # NOQA
from gravur.common.fadingtabedpanel import FadingTabbedPanel # NOQA
from gravur.utils import load_widget
@load_widget
class WalletMenu(Screen):
pass
|
{
"content_hash": "31091b2ef5b1d3e70a73050a0c4abe50",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 38.92307692307692,
"alnum_prop": 0.8280632411067194,
"repo_name": "F483/gravur",
"id": "27a6e24b33e4ab9315184c9cb4adfd53654242db",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gravur/wallet/walletmenu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1254"
},
{
"name": "Python",
"bytes": "30027"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0066_auto_20150314_2135'),
]
operations = [
migrations.RenameField(
model_name='taxsaveinputs',
old_name='casualty_floor',
new_name='_ID_Casualty_frt',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='charity_ceiling_assets',
new_name='_ID_Charity_crt_Asset',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='charity_ceiling_cash',
new_name='_ID_Charity_crt_Cash',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='misc_floor',
new_name='_ID_Miscellaneous_frt',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='medical_floor',
new_name='_ID_medical_frt',
),
]
|
{
"content_hash": "b3a6dea2253c1e4595fb46dcec1cc79f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 48,
"avg_line_length": 27.973684210526315,
"alnum_prop": 0.5465663217309501,
"repo_name": "PeterDSteinberg/webapp-public",
"id": "0a41c497811faf1df6c892f6be063b0c048e6b4b",
"size": "1087",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0067_auto_20150314_2136.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61933"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "381167"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
}
|
import os, sys
import inspect
import textwrap
class PCSEFileReader(dict):
"""Reader for parameter files in the PCSE format.
    This class is a replacement for the `CABOFileReader`. The latter can be
    used for reading parameter files in the CABO format; however, this format
    has rather severe limitations: it only supports string, integer, float
    and array parameters. There is no support for specifying parameters with
    dates, for example (other than specifying them as a string).
The `PCSEFileReader` is a much more versatile tool for creating parameter
files because it leverages the power of the python interpreter for
processing parameter files through the `execfile` functionality in python.
This means that anything that can be done in a python script can also be
done in a PCSE parameter file.
:param fname: parameter file to read and parse
:returns: dictionary object with parameter key/value pairs.
*Example*
Below is an example of a parameter file 'parfile.pcse'. Parameters can
be defined the 'CABO'-way, but also advanced functionality can be used by
importing modules, defining parameters as dates or numpy arrays and even
applying function on arrays (in this case `np.sin`)::
\"\"\"This is the header of my parameter file.
This file is derived from the following sources
* dummy file for demonstrating the PCSEFileReader
* contains examples how to leverage dates, arrays and functions, etc.
\"\"\"
import numpy as np
import datetime as dt
TSUM1 = 1100
TSUM2 = 900
DTSMTB = [ 0., 0.,
5., 5.,
20., 25.,
30., 25.]
AMAXTB = np.sin(np.arange(12))
cropname = "alfalfa"
CROP_START_DATE = dt.date(2010,5,14)
Can be read with the following statements::
    >>> fileparameters = PCSEFileReader('parfile.pcse')
    >>> print fileparameters['TSUM1']
    1100
    >>> print fileparameters['CROP_START_DATE']
    2010-05-14
    >>> print fileparameters
PCSE parameter file contents loaded from:
D:\UserData\pcse_examples\parfile.pw
This is the header of my parameter file.
This file is derived from the following sources
* dummy file for demonstrating the PCSEFileReader
* contains examples how to leverage dates, arrays and functions, etc.
DTSMTB: [0.0, 0.0, 5.0, 5.0, 20.0, 25.0, 30.0, 25.0] (<type 'list'>)
CROP_START_DATE: 2010-05-14 (<type 'datetime.date'>)
TSUM2: 900 (<type 'int'>)
cropname: alfalfa (<type 'str'>)
AMAXTB: [ 0. 0.84147098 0.90929743 0.14112001 -0.7568025
-0.95892427 -0.2794155 0.6569866 0.98935825 0.41211849
-0.54402111 -0.99999021] (<type 'numpy.ndarray'>)
TSUM1: 1100 (<type 'int'>)
"""
def __init__(self, fname):
dict.__init__(self)
# Construct full path to parameter file and check file existence
cwd = os.getcwd()
self.fname_fp = os.path.normpath(os.path.join(cwd, fname))
if not os.path.exists(self.fname_fp):
msg = "Could not find parameter file '%s'" % self.fname_fp
raise RuntimeError(msg)
# compile and execute the contents of the file
bytecode = compile(open(self.fname_fp).read(), self.fname_fp, 'exec')
exec(bytecode, {}, self)
# Remove any members in self that are python modules
keys = list(self.keys())
for k in keys:
if inspect.ismodule(self[k]):
self.pop(k)
# If the file has a header (e.g. __doc__) store it.
if "__doc__" in self:
header = self.pop("__doc__")
if len(header) > 0:
self.header = header
if self.header[-1] != "\n":
self.header += "\n"
else:
self.header = None
def __str__(self):
printstr = "PCSE parameter file contents loaded from:\n"
printstr += "%s\n\n" % self.fname_fp
if self.header is not None:
printstr += self.header
for k in self:
r = "%s: %s (%s)" % (k, self[k], type(self[k]))
printstr += (textwrap.fill(r, subsequent_indent=" ") + "\n")
return printstr
|
{
"content_hash": "8796ebc7d5cae41b5bf0c01d92328253",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 38.973684210526315,
"alnum_prop": 0.5932928201665542,
"repo_name": "jajberni/pcse_web",
"id": "6aecafe58f7dff3ee665744743221322bc52ccb8",
"size": "4566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/pcse/fileinput/pcsefilereader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8300"
},
{
"name": "HTML",
"bytes": "68573"
},
{
"name": "JavaScript",
"bytes": "97839"
},
{
"name": "Python",
"bytes": "915616"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
}
|
"""
EC2api API Server
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from ec2api import config
from ec2api import service
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup(CONF, 'ec2api')
server = service.WSGIService('ec2api', max_url_len=16384)
service.serve(server, workers=server.workers)
service.wait()
if __name__ == '__main__':
main()
|
{
"content_hash": "8037813692dc23b3f68c59660e4f8e91",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 61,
"avg_line_length": 16.384615384615383,
"alnum_prop": 0.6807511737089202,
"repo_name": "hayderimran7/ec2-api",
"id": "4ad067fbf8d5b53b9cefb0d6fe8582b0b73ae8f0",
"size": "1015",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ec2api/cmd/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1701144"
},
{
"name": "Shell",
"bytes": "29607"
}
],
"symlink_target": ""
}
|
from extraction.runnables import Extractor, Filter
class PlainTextExtractor(Extractor):
    # Extractors extending this extractor should return an ExtractorResult
    # such that:
    #   - xml_result is a node named 'file' with content 'plain_text.txt'
    #   - files is a dict with the key 'plain_text.txt' mapped to the plain
    #     text of the pdf
    # The plain text should be a normal string, so extractors working with
    # the text in the future should first decode it to utf-8.
def extract(self, data, dependency_results):
raise NotImplementedError('Extend me!')
class HeaderTEIExtractor(Extractor):
def extract(self, data, dependency_results):
raise NotImplementedError('Extend me!')
class FullTextTEIExtractor(HeaderTEIExtractor):
    # Extractors extending this extractor should return an ExtractorResult
    # object such that xml_result is the root node of a TEI xml document.
    # The TEI document should contain header, text, and reference information.
def extract(self, data, dependency_results):
raise NotImplementedError('Extend me!')
class CSXHeaderExtractor(Extractor):
# Returns an ExtractorResult object such that
# xml_result is an xml document containing header info in the CSX ingestion format
def extract(self, data, dependency_results):
raise NotImplementedError('Extend me!')
class CSXCitationExtractor(Extractor):
# Returns an ExtractorResult object such that
# xml_result is an xml document containing citation info in the CSX ingestion format
def extract(self, data, dependency_results):
raise NotImplementedError('Extend me!')
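# A minimal sketch of a concrete subclass (hypothetical, not part of this
# module; the ExtractorResult constructor arguments are an assumption based
# on the comments above):
#
#     import xml.etree.ElementTree as ET
#     from extraction.runnables import ExtractorResult
#
#     class EchoPlainTextExtractor(PlainTextExtractor):
#         def extract(self, data, dependency_results):
#             node = ET.Element('file')
#             node.text = 'plain_text.txt'
#             files = {'plain_text.txt': data}  # 'data' assumed to be raw text
#             return ExtractorResult(xml_result=node, files=files)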
|
{
"content_hash": "fe02e8e47367fcff07254a3d95e7bfd8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 108,
"avg_line_length": 45.416666666666664,
"alnum_prop": 0.7467889908256881,
"repo_name": "SeerLabs/PDFMEF",
"id": "2958be27a173b41c7ed8d9bda5833f05551fdf13",
"size": "1635",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/extractor/csxextract/interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "48467"
},
{
"name": "Perl",
"bytes": "163073"
},
{
"name": "Prolog",
"bytes": "25174"
},
{
"name": "Python",
"bytes": "97500"
},
{
"name": "Shell",
"bytes": "384"
},
{
"name": "XS",
"bytes": "14593"
}
],
"symlink_target": ""
}
|
import json
from datetime import datetime, timedelta
from unittest import mock
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.test.testcases import TransactionTestCase
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.activity.models import ActivityLog
from olympia.addons.models import AddonApprovalsCounter, AddonReviewerFlags
from olympia.amo.tests import (
TestCase,
addon_factory,
user_factory,
version_factory,
version_review_flags_factory,
)
from olympia.amo.utils import days_ago
from olympia.constants.promoted import RECOMMENDED
from olympia.constants.scanners import DELAY_AUTO_APPROVAL, MAD, YARA
from olympia.files.models import FileValidation
from olympia.files.utils import lock
from olympia.lib.crypto.signing import SigningError
from olympia.ratings.models import Rating
from olympia.reviewers.management.commands import (
auto_approve,
auto_reject,
notify_about_auto_approve_delay,
)
from olympia.reviewers.models import (
AutoApprovalNoValidationResultError,
AutoApprovalSummary,
get_reviewing_cache,
set_reviewing_cache,
)
from olympia.scanners.models import ScannerResult, ScannerRule
from olympia.versions.models import Version, VersionReviewerFlags
class AutoApproveTestsMixin:
def setUp(self):
user_factory(
id=settings.TASK_USER_ID, username='taskuser', email='taskuser@mozilla.com'
)
# Always mock log_final_summary() method so we can look at the stats
# easily.
patcher = mock.patch.object(auto_approve.Command, 'log_final_summary')
self.log_final_summary_mock = patcher.start()
self.addCleanup(patcher.stop)
def _check_stats(self, expected_stats):
        # We abuse the fact that log_final_summary receives stats as a
        # positional argument to check what happened. Depends on setUp()
        # patching auto_approve.Command.log_final_summary.
assert self.log_final_summary_mock.call_count == 1
stats = self.log_final_summary_mock.call_args[0][0]
assert stats == expected_stats
def create_base_test_addon(self):
self.addon = addon_factory(name='Basic Addøn', average_daily_users=666)
self.version = version_factory(
addon=self.addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
self.file = self.version.file
self.file_validation = FileValidation.objects.create(
file=self.version.file, validation='{}'
)
AddonApprovalsCounter.objects.create(addon=self.addon, counter=1)
def create_candidates(self):
# We already have an add-on with a version awaiting review that should
        # be considered. Make sure its nomination and creation dates are in
        # the past to test ordering.
self.version.update(created=self.days_ago(1), nomination=self.days_ago(1))
# Add reviewer flags disabling auto-approval for this add-on. It would
# still be fetched as a candidate, just rejected later on when
# calculating the verdict.
AddonReviewerFlags.objects.create(addon=self.addon, auto_approval_disabled=True)
# Add nominated add-on: it should be considered.
new_addon = addon_factory(
name='New Addon',
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
new_addon_version = new_addon.versions.all()[0]
new_addon_version.update(created=self.days_ago(2), nomination=self.days_ago(2))
# Even add an empty reviewer flags instance, that should not matter.
AddonReviewerFlags.objects.create(addon=new_addon)
# Add langpack: it should be considered.
langpack = addon_factory(
name='Langpack',
type=amo.ADDON_LPAPP,
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
langpack_version = langpack.versions.all()[0]
langpack_version.update(created=self.days_ago(3), nomination=self.days_ago(3))
# Add a dictionary: it should also be considered.
dictionary = addon_factory(
name='Dictionary',
type=amo.ADDON_DICT,
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
dictionary_version = dictionary.versions.all()[0]
dictionary_version.update(created=self.days_ago(4), nomination=self.days_ago(4))
# Some recommended add-ons - one nominated and one update.
# They should be considered by fetch_candidates(), so that they get a
# weight assigned etc - they will not be auto-approved but that's
# handled at a later stage, when calculating the verdict.
recommendable_addon_nominated = addon_factory(
name='Recommendable Addon',
status=amo.STATUS_NOMINATED,
promoted=RECOMMENDED,
version_kw={
'nomination': self.days_ago(6),
'created': self.days_ago(6),
},
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
recommended_addon = addon_factory(
name='Recommended Addon',
promoted=RECOMMENDED,
version_kw={'promotion_approved': False},
)
recommended_addon_version = version_factory(
addon=recommended_addon,
promotion_approved=True,
nomination=self.days_ago(7),
created=self.days_ago(7),
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
# Add-on with 2 versions:
# - one listed, public.
# - one non-listed version awaiting review.
complex_addon = addon_factory(name='Complex Addon')
complex_addon_version = version_factory(
nomination=self.days_ago(8),
created=self.days_ago(8),
addon=complex_addon,
channel=amo.CHANNEL_UNLISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
        # Disabled version with a file waiting review (still has to be
        # considered because unlisted doesn't care about the disabled-by-user
        # state).
user_disabled_addon = addon_factory(
name='Disabled by user waiting review', disabled_by_user=True
)
user_disabled_addon_version = version_factory(
nomination=self.days_ago(11),
created=self.days_ago(11),
channel=amo.CHANNEL_UNLISTED,
addon=user_disabled_addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
# Pure unlisted upload. Addon status is "incomplete" as a result, but
# it should still be considered because unlisted versions don't care
# about that.
pure_unlisted = addon_factory(
name='Pure unlisted',
version_kw={
'channel': amo.CHANNEL_UNLISTED,
'nomination': self.days_ago(12),
'created': self.days_ago(12),
},
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
status=amo.STATUS_NULL,
)
pure_unlisted_version = pure_unlisted.versions.get()
# Unlisted static theme.
unlisted_theme = addon_factory(
name='Unlisted theme',
version_kw={
'channel': amo.CHANNEL_UNLISTED,
'nomination': self.days_ago(13),
'created': self.days_ago(13),
},
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
status=amo.STATUS_NULL,
type=amo.ADDON_STATICTHEME,
)
unlisted_theme_version = unlisted_theme.versions.get()
# ---------------------------------------------------------------------
# Add a bunch of add-ons in various states that should not be returned.
# Public add-on with no updates.
addon_factory(name='Already Public')
# Mozilla Disabled add-on with updates.
disabled_addon = addon_factory(
name='Mozilla Disabled',
status=amo.STATUS_DISABLED,
)
version_factory(
addon=disabled_addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
# Add-on with deleted version.
addon_with_deleted_version = addon_factory(
name='With deleted version awaiting review'
)
deleted_version = version_factory(
addon=addon_with_deleted_version,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
deleted_version.delete()
# Somehow deleted add-on with a file still waiting for review.
deleted_addon = addon_factory(
name='Deleted Awaiting Review Somehow',
status=amo.STATUS_DELETED,
)
version_factory(
addon=deleted_addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
# listed version belonging to an add-on disabled by user
addon_factory(
name='Listed Disabled by user',
disabled_by_user=True,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
# Incomplete listed addon
addon_factory(
name='Incomplete listed',
status=amo.STATUS_NULL,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
# Listed static theme
addon_factory(
name='Listed theme',
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
)
return [
(version.addon, version)
for version in [
unlisted_theme_version,
pure_unlisted_version,
user_disabled_addon_version,
complex_addon_version,
recommended_addon_version,
recommendable_addon_nominated.current_version,
dictionary.current_version,
langpack.current_version,
new_addon.current_version,
self.version,
]
]
class TestAutoApproveCommand(AutoApproveTestsMixin, TestCase):
def setUp(self):
self.create_base_test_addon()
super().setUp()
def test_fetch_candidates(self):
# Create the candidates and extra addons & versions that should not be
# considered for auto-approval.
candidates = self.create_candidates()
expected = [version.id for addon, version in candidates]
# Gather the candidates.
command = auto_approve.Command()
qs = command.fetch_candidates()
# Test that they are all present.
assert list(qs) == expected
@mock.patch('olympia.reviewers.management.commands.auto_approve.statsd.incr')
@mock.patch('olympia.reviewers.management.commands.auto_approve.ReviewHelper')
def test_approve(self, review_helper_mock, statsd_incr_mock):
review_helper_mock.return_value.actions = {'public': mock.MagicMock()}
command = auto_approve.Command()
command.approve(self.version)
assert review_helper_mock.call_count == 1
assert review_helper_mock.call_args == (
(),
{'addon': self.addon, 'version': self.version, 'human_review': False},
)
assert review_helper_mock().actions['public']['method'].call_count == 1
assert statsd_incr_mock.call_count == 1
assert statsd_incr_mock.call_args == (
('reviewers.auto_approve.approve.success',),
{},
)
@mock.patch('olympia.reviewers.utils.sign_file')
def test_full(self, sign_file_mock):
# Simple integration test with as few mocks as possible.
assert not AutoApprovalSummary.objects.exists()
assert not self.file.reviewed
ActivityLog.objects.all().delete()
self.author = user_factory()
self.addon.addonuser_set.create(user=self.author)
# Delete the add-on current version and approval info, leaving it
# nominated. Set its nomination date in the past and it should be
# picked up and auto-approved.
AddonApprovalsCounter.objects.filter(addon=self.addon).get().delete()
self.addon.current_version.delete()
self.version.update(nomination=self.days_ago(2))
self.addon.update_status()
call_command('auto_approve', '--dry-run')
call_command('auto_approve')
self.addon.reload()
self.file.reload()
assert AutoApprovalSummary.objects.count() == 1
summary = AutoApprovalSummary.objects.get(version=self.version)
assert summary
assert get_reviewing_cache(self.addon.pk) is None
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
assert self.file.reviewed
assert ActivityLog.objects.count()
activity_log = ActivityLog.objects.latest('pk')
assert activity_log.action == amo.LOG.APPROVE_VERSION.id
assert sign_file_mock.call_count == 1
assert sign_file_mock.call_args[0][0] == self.file
# Can't test sending the mail here because TestCase doesn't handle
# transactions so on_commit never fires. It's tested in
# TestAutoApproveCommandTransactions below.
return summary
def test_full_with_weights_and_score(self):
ScannerResult.objects.create(score=0.314, scanner=MAD, version=self.version)
AbuseReport.objects.create(guid=self.addon.guid)
Rating.objects.create(
addon=self.addon, version=self.version, user=user_factory(), rating=2
)
self.file_validation.update(
validation=json.dumps(
{
'messages': [
{'id': ['DANGEROUS_EVAL']},
]
}
)
)
summary = self.test_full()
assert summary.weight == 65
assert summary.metadata_weight == 15
assert summary.code_weight == 50
assert summary.score == 31
@mock.patch.object(auto_approve, 'set_reviewing_cache')
@mock.patch.object(auto_approve, 'clear_reviewing_cache')
@mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
def test_locking(
self,
create_summary_for_version_mock,
clear_reviewing_cache_mock,
set_reviewing_cache_mock,
):
create_summary_for_version_mock.return_value = (AutoApprovalSummary(), {})
call_command('auto_approve')
assert create_summary_for_version_mock.call_count == 1
assert set_reviewing_cache_mock.call_count == 1
assert set_reviewing_cache_mock.call_args == (
(self.addon.pk, settings.TASK_USER_ID),
{},
)
assert clear_reviewing_cache_mock.call_count == 1
assert clear_reviewing_cache_mock.call_args == ((self.addon.pk,), {})
@mock.patch.object(auto_approve, 'set_reviewing_cache')
@mock.patch.object(auto_approve, 'clear_reviewing_cache')
@mock.patch.object(AutoApprovalSummary, 'check_is_locked')
@mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
def test_no_locking_if_already_locked(
self,
create_summary_for_version_mock,
check_is_locked_mock,
clear_reviewing_cache_mock,
set_reviewing_cache_mock,
):
check_is_locked_mock.return_value = True
create_summary_for_version_mock.return_value = (AutoApprovalSummary(), {})
call_command('auto_approve')
assert create_summary_for_version_mock.call_count == 1
assert set_reviewing_cache_mock.call_count == 0
assert clear_reviewing_cache_mock.call_count == 0
@mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
def test_no_validation_result(self, create_summary_for_version_mock):
create_summary_for_version_mock.side_effect = (
AutoApprovalNoValidationResultError
)
call_command('auto_approve')
assert get_reviewing_cache(self.addon.pk) is None
assert create_summary_for_version_mock.call_count == 1
self._check_stats({'total': 1, 'error': 1})
@mock.patch('olympia.reviewers.utils.sign_file')
def test_signing_error(self, sign_file_mock):
sign_file_mock.side_effect = SigningError
call_command('auto_approve')
assert sign_file_mock.call_count == 1
assert get_reviewing_cache(self.addon.pk) is None
self._check_stats(
{
'total': 1,
'error': 1,
'has_auto_approval_disabled': 0,
'is_locked': 0,
'is_promoted_prereview': 0,
'should_be_delayed': 0,
'is_blocked': 0,
}
)
@mock.patch.object(auto_approve.Command, 'approve')
@mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
def test_successful_verdict_dry_run(
self, create_summary_for_version_mock, approve_mock
):
create_summary_for_version_mock.return_value = (
AutoApprovalSummary(verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED),
{},
)
call_command('auto_approve', '--dry-run')
assert approve_mock.call_count == 0
assert create_summary_for_version_mock.call_args == (
(self.version,),
{'dry_run': True},
)
assert get_reviewing_cache(self.addon.pk) is None
self._check_stats({'total': 1, 'auto_approved': 1})
@mock.patch.object(auto_approve.Command, 'approve')
@mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
def test_successful_verdict(self, create_summary_for_version_mock, approve_mock):
create_summary_for_version_mock.return_value = (
AutoApprovalSummary(verdict=amo.AUTO_APPROVED),
{},
)
call_command('auto_approve')
assert create_summary_for_version_mock.call_count == 1
assert create_summary_for_version_mock.call_args == (
(self.version,),
{'dry_run': False},
)
assert get_reviewing_cache(self.addon.pk) is None
assert approve_mock.call_count == 1
assert approve_mock.call_args == ((self.version,), {})
self._check_stats({'total': 1, 'auto_approved': 1})
@mock.patch.object(auto_approve.Command, 'approve')
@mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
def test_failed_verdict(self, create_summary_for_version_mock, approve_mock):
fake_verdict_info = {'is_locked': True}
create_summary_for_version_mock.return_value = (
AutoApprovalSummary(verdict=amo.NOT_AUTO_APPROVED),
fake_verdict_info,
)
call_command('auto_approve')
assert approve_mock.call_count == 0
assert create_summary_for_version_mock.call_args == (
(self.version,),
{'dry_run': False},
)
assert get_reviewing_cache(self.addon.pk) is None
self._check_stats(
{
'total': 1,
'is_locked': 1,
}
)
def test_prevent_multiple_runs_in_parallel(self):
        # Create a lock manually; the command should exit immediately without
        # doing anything.
with lock(settings.TMP_PATH, auto_approve.LOCK_NAME):
call_command('auto_approve')
assert self.log_final_summary_mock.call_count == 0
assert self.file.reload().status == amo.STATUS_AWAITING_REVIEW
@mock.patch.object(ScannerResult, 'run_action')
def test_does_not_execute_run_action_when_switch_is_inactive(self, run_action_mock):
call_command('auto_approve')
assert not run_action_mock.called
@mock.patch.object(ScannerResult, 'run_action')
def test_executes_run_action_when_switch_is_active(self, run_action_mock):
self.create_switch('run-action-in-auto-approve', active=True)
call_command('auto_approve')
assert run_action_mock.called
run_action_mock.assert_called_with(self.version)
@mock.patch.object(ScannerResult, 'run_action')
@mock.patch('olympia.reviewers.utils.sign_file')
def test_only_executes_run_action_once(self, sign_file_mock, run_action_mock):
self.create_switch('run-action-in-auto-approve', active=True)
call_command('auto_approve')
assert run_action_mock.called
run_action_mock.assert_called_with(self.version)
run_action_mock.reset_mock()
call_command('auto_approve')
assert not run_action_mock.called
@mock.patch('olympia.reviewers.utils.sign_file')
def test_run_action_delay_approval(self, sign_file_mock):
        # Functional test making sure that the scanners' _delay_auto_approval()
        # action properly delays auto-approval on the version it's applied to.
def check_assertions():
aps = self.version.autoapprovalsummary
assert aps.has_auto_approval_disabled
flags = self.addon.reviewerflags
assert flags.auto_approval_delayed_until
assert not sign_file_mock.called
self.create_switch('run-action-in-auto-approve', active=True)
ScannerRule.objects.create(
is_active=True, name='foo', action=DELAY_AUTO_APPROVAL, scanner=YARA
)
result = ScannerResult.objects.create(
scanner=YARA,
version=self.version,
results=[{'rule': 'foo', 'tags': [], 'meta': {}}],
)
assert result.has_matches
call_command('auto_approve')
check_assertions()
call_command('auto_approve') # Shouldn't matter if it's called twice.
check_assertions()
def test_run_action_delay_approval_unlisted(self):
self.version.update(channel=amo.CHANNEL_UNLISTED)
self.test_run_action_delay_approval()
class TestAutoApproveCommandTransactions(AutoApproveTestsMixin, TransactionTestCase):
def setUp(self):
self.addons = [
addon_factory(average_daily_users=666, users=[user_factory()]),
addon_factory(average_daily_users=999, users=[user_factory()]),
]
self.versions = [
version_factory(
addon=self.addons[0],
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
),
version_factory(
addon=self.addons[1],
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
),
]
self.files = [
self.versions[0].file,
self.versions[1].file,
]
self.versions[0].update(nomination=days_ago(1))
FileValidation.objects.create(file=self.versions[0].file, validation='{}')
FileValidation.objects.create(file=self.versions[1].file, validation='{}')
super().setUp()
@mock.patch('olympia.reviewers.utils.sign_file')
def test_signing_error_roll_back(self, sign_file_mock):
sign_file_mock.side_effect = [SigningError, None]
call_command('auto_approve')
# Make sure that the AutoApprovalSummary created for the first add-on
# was rolled back because of the signing error, and that it didn't
# affect the approval of the second one.
assert sign_file_mock.call_count == 2
for file_ in self.files:
file_.reload()
for addon in self.addons:
addon.reload()
assert not AutoApprovalSummary.objects.filter(version=self.versions[0]).exists()
assert self.addons[0].status == amo.STATUS_APPROVED # It already was.
assert self.files[0].status == amo.STATUS_AWAITING_REVIEW
assert not self.files[0].reviewed
assert AutoApprovalSummary.objects.get(version=self.versions[1])
assert self.addons[1].status == amo.STATUS_APPROVED
assert self.files[1].status == amo.STATUS_APPROVED
assert self.files[1].reviewed
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.to == [self.addons[1].authors.all()[0].email]
assert msg.from_email == settings.ADDONS_EMAIL
assert msg.subject == 'Mozilla Add-ons: {} {} Updated'.format(
str(self.addons[1].name),
self.versions[1].version,
)
assert get_reviewing_cache(self.addons[0].pk) is None
assert get_reviewing_cache(self.addons[1].pk) is None
self._check_stats(
{
'total': 2,
'error': 1,
'auto_approved': 1,
'has_auto_approval_disabled': 0,
'is_locked': 0,
'is_promoted_prereview': 0,
'should_be_delayed': 0,
'is_blocked': 0,
}
)
class TestSendPendingRejectionLastWarningNotification(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = user_factory(pk=settings.TASK_USER_ID)
def test_not_pending_rejection(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
# Add some activity logs, but no pending_rejection flag.
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_not_close_to_deadline(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(days=2)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_addon_already_not_public(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
# Disabled by user: we don't notify.
addon.update(disabled_by_user=True)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
# Disabled by mozilla: we don't notify.
addon.update(disabled_by_user=False, status=amo.STATUS_DISABLED)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
# Deleted: we don't notify.
addon.update(status=amo.STATUS_DELETED)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_versions_already_disabled(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
# Disable file: we should be left with no versions to notify the
# developers about, since they have already been disabled.
version.file.update(status=amo.STATUS_DISABLED)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_more_recent_version_unreviewed_not_pending_rejection(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon, file_kw={'status': amo.STATUS_DISABLED})
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
# Add another version not pending rejection but unreviewed: we should
# not notify developers in that case.
version_factory(addon=addon, file_kw={'status': amo.STATUS_AWAITING_REVIEW})
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_more_recent_version_public_not_pending_rejection(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon, file_kw={'status': amo.STATUS_DISABLED})
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
# Add another version public and not pending rejection: we should
# not notify developers in that case.
version_factory(addon=addon)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_notification_already_sent_for_this_addon(self):
author = user_factory()
addon = addon_factory(users=[author])
# Developers were already notified for this add-on, so we don't do it
# again.
AddonReviewerFlags.objects.create(
addon=addon, notified_about_expiring_delayed_rejections=True
)
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_pending_rejection_close_to_deadline(self):
author = user_factory()
addon = addon_factory(users=[author], version_kw={'version': '42.0'})
version_factory(addon=addon, version='42.1')
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'Some cômments'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 1
assert addon.reviewerflags.notified_about_expiring_delayed_rejections
message = mail.outbox[0]
assert message.subject == (
'Reminder - Mozilla Add-ons: %s will be disabled on addons.mozilla.org'
% str(addon.name)
)
assert message.to == [author.email]
assert 'Some cômments' in message.body
for version in addon.versions.all():
assert version.version in message.body
def test_pending_rejection_one_version_already_disabled(self):
author = user_factory()
addon = addon_factory(users=[author], version_kw={'version': '42.0'})
current_version = addon.current_version
disabled_version = version_factory(
addon=addon, version='42.1', file_kw={'status': amo.STATUS_DISABLED}
)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 1
assert addon.reviewerflags.notified_about_expiring_delayed_rejections
message = mail.outbox[0]
assert message.to == [author.email]
assert 'fôo' in message.body
assert current_version.version in message.body
assert disabled_version.version not in message.body
def test_more_recent_version_disabled(self):
author = user_factory()
addon = addon_factory(users=[author], version_kw={'version': '42.0'})
version1 = addon.current_version
version2 = version_factory(addon=addon, version='42.1')
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
more_recent_version = version_factory(
addon=addon, file_kw={'status': amo.STATUS_DISABLED}, version='42.2'
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 1
assert addon.reviewerflags.notified_about_expiring_delayed_rejections
message = mail.outbox[0]
assert message.to == [author.email]
assert 'fôo' in message.body
assert version1.version in message.body
assert version2.version in message.body
assert more_recent_version.version not in message.body
def test_more_recent_version_deleted(self):
author = user_factory()
addon = addon_factory(users=[author], version_kw={'version': '42.0'})
version1 = addon.current_version
version2 = version_factory(addon=addon, version='42.1')
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
more_recent_version = version_factory(addon=addon, version='43.0')
more_recent_version.delete()
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 1
assert addon.reviewerflags.notified_about_expiring_delayed_rejections
message = mail.outbox[0]
assert message.to == [author.email]
assert 'fôo' in message.body
assert version1.version in message.body
assert version2.version in message.body
assert more_recent_version.version not in message.body
def test_more_recent_version_pending_rejection_as_well(self):
author = user_factory()
addon = addon_factory(users=[author], version_kw={'version': '42.0'})
version1 = addon.current_version
version2 = version_factory(addon=addon, version='42.1')
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
more_recent_version = version_factory(addon=addon, version='43.0')
version_review_flags_factory(
version=more_recent_version,
pending_rejection=datetime.now() + timedelta(days=3),
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 1
assert addon.reviewerflags.notified_about_expiring_delayed_rejections
message = mail.outbox[0]
assert message.to == [author.email]
assert 'fôo' in message.body
assert version1.version in message.body
assert version2.version in message.body
assert more_recent_version.version not in message.body
def test_multiple_addons_pending_rejection_close_to_deadline(self):
author1 = user_factory()
addon1 = addon_factory(users=[author1], version_kw={'version': '42.0'})
version11 = addon1.current_version
version12 = version_factory(addon=addon1, version='42.1')
author2 = user_factory()
addon2 = addon_factory(users=[author2], version_kw={'version': '22.0'})
version21 = addon2.current_version
version22 = version_factory(addon=addon2, version='22.1')
for version in Version.objects.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_CONTENT_DELAYED,
version.addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 2
assert addon1.reviewerflags.notified_about_expiring_delayed_rejections
assert addon2.reviewerflags.notified_about_expiring_delayed_rejections
# Addons are processed in order of their pks.
message = mail.outbox[0]
assert message.to == [author1.email]
assert str(addon1.name) in message.subject
assert 'fôo' in message.body
assert version11.version in message.body
assert version12.version in message.body
message = mail.outbox[1]
assert message.to == [author2.email]
assert str(addon2.name) in message.subject
assert 'fôo' in message.body
assert version21.version in message.body
assert version22.version in message.body
def test_somehow_no_activity_log_skip(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
# The ActivityLog doesn't match a pending rejection, so we should
# not send the notification here.
ActivityLog.create(
amo.LOG.REJECT_VERSION,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_somehow_no_activity_log_details_skip(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
# The ActivityLog doesn't have details, so we should
# not send the notification here.
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED, addon, version, user=self.user
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_somehow_no_activity_log_comments_skip(self):
author = user_factory()
addon = addon_factory(users=[author])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
# The ActivityLog doesn't have comments, so we should
# not send the notification here.
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
user=self.user,
details={'foo': 'bar'},
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 0
def test_multiple_developers_are_notified(self):
author1 = user_factory()
author2 = user_factory()
addon = addon_factory(users=[author1, author2])
version_factory(addon=addon)
for version in addon.versions.all():
version_review_flags_factory(
version=version, pending_rejection=datetime.now() + timedelta(hours=23)
)
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED,
addon,
version,
details={'comments': 'fôo'},
user=self.user,
)
more_recent_version = version_factory(addon=addon)
version_review_flags_factory(
version=more_recent_version,
pending_rejection=datetime.now() + timedelta(days=3),
)
call_command('send_pending_rejection_last_warning_notifications')
assert len(mail.outbox) == 2
message1 = mail.outbox[0]
message2 = mail.outbox[1]
assert message1.body == message2.body
assert message1.subject == message2.subject
assert message1.to != message2.to
assert set(message1.to + message2.to) == {author1.email, author2.email}
class TestNotifyAboutAutoApproveDelay(AutoApproveTestsMixin, TestCase):
def test_fetch_versions_waiting_for_approval_for_too_long(self):
self.create_base_test_addon()
expected = self.create_candidates()
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
        # Test that they are all present (all versions created by
        # create_candidates() have a created date far enough in the past).
assert [(version.addon, version) for version in qs] == expected
# Reset created for a few selected add-ons to be more recent and
# they should no longer be present (remove them from expected and
# re-test)
addon, version = expected.pop(0)
version.update(created=datetime.now())
addon, version = expected.pop(0)
version.update(
created=datetime.now()
- timedelta(hours=command.WAITING_PERIOD_HOURS)
+ timedelta(seconds=30)
)
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert [(version.addon, version) for version in qs] == expected
# Set notified_about_auto_approval_delay=True for an add-on and
# it should no longer be present (remove it from expected and re-test)
addon, version = expected.pop(0)
AddonReviewerFlags.objects.create(
addon=addon, notified_about_auto_approval_delay=True
)
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert [(version.addon, version) for version in qs] == expected
def test_fetch_versions_waiting_for_approval_for_too_long_reset(self):
"""Ensure we only consider the latest auto-approvable version for each
add-on."""
self.create_base_test_addon()
old_version = self.version
old_version.update(
channel=amo.CHANNEL_UNLISTED,
created=self.days_ago(2),
nomination=self.days_ago(2),
)
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert qs.count() == 1
assert qs[0] == self.version
# When we submit a new version, if it's waiting for approval as well,
# it "resets" the waiting period.
new_version = version_factory(
addon=self.addon,
channel=amo.CHANNEL_UNLISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert qs.count() == 0
# If the new version is old enough, then it's returned (and only this
# version).
new_version.update(created=self.days_ago(1))
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert qs.count() == 1
assert qs[0] == new_version
# If the new version is approved but not the old one, then the old one
# is returned, the new version no longer prevents the old one from
# being considered.
new_version.file.update(status=amo.STATUS_APPROVED)
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert qs.count() == 1
assert qs[0] == old_version
def test_notify_nothing(self):
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert not qs.exists()
call_command('notify_about_auto_approve_delay')
assert len(mail.outbox) == 0
def test_notify_authors(self):
# Not awaiting review.
addon_factory(version_kw={'created': self.days_ago(1)}).authors.add(
user_factory()
)
# Not awaiting review for long enough.
addon_factory(
file_kw={
'status': amo.STATUS_AWAITING_REVIEW,
}
).authors.add(user_factory())
# Valid.
addon = addon_factory(
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
version_kw={'created': self.days_ago(1)},
)
users = [user_factory(), user_factory()]
        for user in users:
            addon.authors.add(user)
command = notify_about_auto_approve_delay.Command()
qs = command.fetch_versions_waiting_for_approval_for_too_long()
assert qs.exists()
assert not AddonReviewerFlags.objects.filter(addon=addon).exists()
# Set up is done, let's call the command!
call_command('notify_about_auto_approve_delay')
addon.reload()
assert len(mail.outbox) == 2
assert mail.outbox[0].body == mail.outbox[1].body
assert mail.outbox[0].subject == mail.outbox[1].subject
subject = mail.outbox[0].subject
assert subject == (
'Mozilla Add-ons: %s %s is pending review'
% (addon.name, addon.current_version.version)
)
body = mail.outbox[0].body
assert 'Thank you for submitting your add-on' in body
assert str(addon.name) in body
assert str(addon.current_version.version) in body
for message in mail.outbox:
assert len(message.to) == 1
assert {message.to[0] for message in mail.outbox} == {
user.email for user in users
}
assert addon.reviewerflags.notified_about_auto_approval_delay
class TestAutoReject(TestCase):
def setUp(self):
self.task_user = user_factory(
id=settings.TASK_USER_ID, username='taskuser', email='taskuser@mozilla.com'
)
self.addon = addon_factory(
version_kw={'version': '1.0', 'created': self.days_ago(2)}
)
self.version = self.addon.current_version
self.file = self.version.file
self.yesterday = self.days_ago(1)
self.user = user_factory()
version_review_flags_factory(
version=self.version,
pending_rejection=self.yesterday,
pending_rejection_by=self.user,
pending_content_rejection=True,
)
def test_prevent_multiple_runs_in_parallel(self):
# Create a lock manually, the command should exit immediately without
# doing anything.
with lock(settings.TMP_PATH, auto_reject.LOCK_NAME):
call_command('auto_reject')
self.addon.refresh_from_db()
self.version.refresh_from_db()
assert self.version.reviewerflags.pending_rejection
assert self.version.is_public()
assert self.addon.is_public()
def test_fetch_addon_candidates_distinct(self):
version = version_factory(
addon=self.addon, version='0.9', created=self.days_ago(42)
)
version_review_flags_factory(version=version, pending_rejection=self.yesterday)
qs = auto_reject.Command().fetch_addon_candidates(now=datetime.now())
assert list(qs) == [self.addon]
def test_fetch_addon_candidates(self):
pending_future_rejection = addon_factory()
version_review_flags_factory(
version=pending_future_rejection.current_version,
pending_rejection=datetime.now() + timedelta(days=7),
)
addon_factory()
other_addon_with_pending_rejection = addon_factory(
version_kw={'version': '10.0'}
)
version_factory(addon=other_addon_with_pending_rejection, version='11.0')
version_review_flags_factory(
version=other_addon_with_pending_rejection.current_version,
pending_rejection=self.yesterday,
)
qs = auto_reject.Command().fetch_addon_candidates(now=datetime.now())
assert list(qs) == [self.addon, other_addon_with_pending_rejection]
    def test_fetch_version_candidates_for_addon(self):
# self.version is already pending rejection, let's add more versions:
# One that is also pending rejection.
awaiting_review_pending_rejection = version_factory(
addon=self.addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
version='2.0',
)
version_review_flags_factory(
version=awaiting_review_pending_rejection, pending_rejection=self.yesterday
)
# One that is pending rejection in the future (it shouldn't be picked
# up).
future_pending_rejection = version_factory(addon=self.addon, version='3.0')
version_review_flags_factory(
version=future_pending_rejection,
pending_rejection=datetime.now() + timedelta(days=7),
)
# One that is just approved (it shouldn't be picked up).
version_factory(addon=self.addon, version='4.0')
qs = auto_reject.Command().fetch_version_candidates_for_addon(
addon=self.addon, now=datetime.now()
)
assert list(qs) == [self.version, awaiting_review_pending_rejection]
def test_deleted_addon(self):
self.addon.delete()
call_command('auto_reject')
# Add-on stays deleted, version is rejected
self.addon.refresh_from_db()
self.file.refresh_from_db()
assert self.addon.is_deleted
assert self.file.status == amo.STATUS_DISABLED
assert not VersionReviewerFlags.objects.filter(
pending_rejection__isnull=False
).exists()
def test_deleted_version(self):
self.version.delete()
call_command('auto_reject')
# Version stays deleted & disabled
self.addon.refresh_from_db()
self.file.refresh_from_db()
assert self.addon.status == amo.STATUS_NULL
assert self.version.deleted
assert self.file.status == amo.STATUS_DISABLED
assert not VersionReviewerFlags.objects.filter(
pending_rejection__isnull=False
).exists()
def test_unlisted_version(self):
self.make_addon_unlisted(self.addon)
call_command('auto_reject')
        # Version stays unlisted and is disabled (even though delaying the
        # rejection of an unlisted version doesn't make much sense).
self.addon.refresh_from_db()
self.version.refresh_from_db()
self.file.refresh_from_db()
assert self.addon.status == amo.STATUS_NULL
assert self.file.status == amo.STATUS_DISABLED
assert not VersionReviewerFlags.objects.filter(
pending_rejection__isnull=False
).exists()
def test_reject_versions(self):
another_pending_rejection = version_factory(addon=self.addon, version='2.0')
version_review_flags_factory(
version=another_pending_rejection,
pending_rejection=self.yesterday,
pending_rejection_by=self.user,
pending_content_rejection=False,
)
ActivityLog.objects.for_addons(self.addon).delete()
command = auto_reject.Command()
command.dry_run = False
command.reject_versions(
addon=self.addon,
versions=[self.version, another_pending_rejection],
latest_version=another_pending_rejection,
)
# The versions should be rejected now.
self.version.refresh_from_db()
assert not self.version.is_public()
        another_pending_rejection.refresh_from_db()
        assert not another_pending_rejection.is_public()
# There should be an activity log for each version with the rejection
# and one because the add-on is changing status as a result.
logs = ActivityLog.objects.for_addons(self.addon)
assert len(logs) == 3
assert logs[0].action == amo.LOG.CHANGE_STATUS.id
assert logs[0].arguments == [self.addon, amo.STATUS_NULL]
assert logs[0].user == self.task_user
assert logs[1].action == amo.LOG.REJECT_CONTENT.id
assert logs[1].arguments == [self.addon, self.version]
assert logs[2].action == amo.LOG.REJECT_VERSION.id
assert logs[2].arguments == [self.addon, another_pending_rejection]
# All pending rejections flags in the past should have been dropped
# when the rejection was applied (there are no other pending rejections
# in this test).
assert not VersionReviewerFlags.objects.filter(
pending_rejection__isnull=False
).exists()
# The pending_rejection_by should also have been cleared.
assert not VersionReviewerFlags.objects.filter(
pending_rejection_by__isnull=False
).exists()
# And pending_content_rejection too
assert not VersionReviewerFlags.objects.filter(
pending_content_rejection__isnull=False
).exists()
# No mail should have gone out.
assert len(mail.outbox) == 0
def test_addon_locked(self):
set_reviewing_cache(self.addon.pk, 42)
call_command('auto_reject')
self.addon.refresh_from_db()
self.version.refresh_from_db()
assert self.version.reviewerflags.pending_rejection
assert self.version.is_public()
assert self.addon.is_public()
def test_addon_has_latest_version_unreviewed(self):
version_factory(
addon=self.addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
version='2.0',
)
call_command('auto_reject')
        # Nothing should have been done: since there is a new version awaiting
        # review, we assume the developer has fixed the issues from the past
        # version(s) and we are waiting on a reviewer decision before
        # proceeding, so the old pending rejection is on hold.
self.addon.refresh_from_db()
self.version.refresh_from_db()
assert self.version.reviewerflags.pending_rejection
assert self.version.is_public()
assert self.addon.is_public()
def test_full_dry_run(self):
call_command('auto_reject', '--dry-run')
self.addon.refresh_from_db()
self.version.refresh_from_db()
assert self.version.reviewerflags.pending_rejection
assert self.version.is_public()
assert self.addon.is_public()
def test_full_run(self):
        # Add-on with a couple of versions, including its current_version,
        # pending rejection: the add-on should be rejected along with its
        # versions.
all_pending_rejection = self.addon
version = version_factory(
addon=all_pending_rejection, version='0.9', created=self.days_ago(42)
)
version_review_flags_factory(version=version, pending_rejection=self.yesterday)
# Add-on with an old version pending rejection, but a newer one
# approved: only the old one should be rejected.
old_pending_rejection = addon_factory(
version_kw={'version': '10.0', 'created': self.days_ago(2)}
)
version_review_flags_factory(
version=old_pending_rejection.current_version,
pending_rejection=self.yesterday,
)
new_version_old_pending_rejection = version_factory(
addon=old_pending_rejection, version='11.0'
)
# One with an old version approved, but a newer one pending
# rejection: only the newer one should be rejected.
new_pending_rejection = addon_factory(
version_kw={'version': '20.0', 'created': self.days_ago(3)}
)
new_pending_rejection_new_version = version_factory(
addon=new_pending_rejection, version='21.0', created=self.days_ago(2)
)
version_review_flags_factory(
version=new_pending_rejection_new_version, pending_rejection=self.yesterday
)
# Add-on with a version pending rejection in the future, it shouldn't
# be touched yet.
future_pending_rejection = addon_factory()
version_review_flags_factory(
version=future_pending_rejection.current_version,
pending_rejection=datetime.now() + timedelta(days=2),
)
# Add-on not pending rejection, shouldn't be affected
regular_addon = addon_factory()
# Trigger the command!
now = datetime.now()
call_command('auto_reject')
# First add-on and all its versions should have been rejected.
all_pending_rejection.refresh_from_db()
assert not all_pending_rejection.is_public()
for version in all_pending_rejection.versions.all():
assert not version.is_public()
# Second one should still be public, only its old version rejected.
old_pending_rejection.refresh_from_db()
new_version_old_pending_rejection.refresh_from_db()
assert old_pending_rejection.is_public()
assert (
old_pending_rejection.current_version == new_version_old_pending_rejection
)
assert new_version_old_pending_rejection.is_public()
assert (
not old_pending_rejection.versions.filter(version='10.0').get().is_public()
)
# Third one should still be public, only its newer version rejected.
new_pending_rejection.refresh_from_db()
new_pending_rejection_new_version.refresh_from_db()
assert new_pending_rejection.is_public()
assert (
new_pending_rejection.current_version != new_pending_rejection_new_version
)
assert not new_pending_rejection_new_version.is_public()
assert new_pending_rejection.versions.filter(version='20.0').get().is_public()
# Fourth one shouldn't have been touched because the pending rejection
# for its version is in the future.
future_pending_rejection.refresh_from_db()
assert future_pending_rejection.is_public()
assert future_pending_rejection.current_version
assert future_pending_rejection.current_version.is_public()
# Fifth one shouldn't have been touched.
regular_addon.refresh_from_db()
assert regular_addon.is_public()
assert regular_addon.current_version
assert regular_addon.current_version.is_public()
# All pending rejections flags in the past should have been dropped
# when the rejection was applied.
assert not VersionReviewerFlags.objects.filter(
pending_rejection__lt=now
).exists()
# No mail should have gone out.
assert len(mail.outbox) == 0
|
{
"content_hash": "adf7aff03c784d9a94e624278c31a66b",
"timestamp": "",
"source": "github",
"line_count": 1512,
"max_line_length": 88,
"avg_line_length": 40.70304232804233,
"alnum_prop": 0.6124823294282047,
"repo_name": "mozilla/addons-server",
"id": "b3099c76c236f5c237a52b64335727769f2576b8",
"size": "61566",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/reviewers/tests/test_commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245459"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290496"
},
{
"name": "JavaScript",
"bytes": "750827"
},
{
"name": "Less",
"bytes": "212819"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6811560"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from . import settings
from ec2stack.core import DB
from ec2stack import create_app
from .factories import UserFactory
from .utils import FlaskTestCaseMixin
class Ec2StackTestCase(TestCase):
pass
class Ec2StackAppTestCase(FlaskTestCaseMixin, Ec2StackTestCase):
def _create_app(self):
return create_app(settings=settings)
def _create_fixtures(self):
self.user = UserFactory()
def setUp(self):
super(Ec2StackAppTestCase, self).setUp()
self.app = self._create_app()
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
DB.create_all()
self._create_fixtures()
def tearDown(self):
super(Ec2StackAppTestCase, self).tearDown()
DB.drop_all()
self.app_context.pop()
|
{
"content_hash": "cced137c6f1dca91e2f80e7f13265375",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 64,
"avg_line_length": 25.352941176470587,
"alnum_prop": 0.6763341067285383,
"repo_name": "apache/cloudstack-ec2stack",
"id": "55758ace2742ebca544ed0bf0c1cb84d8c8ab253",
"size": "1706",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "239301"
},
{
"name": "Shell",
"bytes": "5226"
}
],
"symlink_target": ""
}
|
"""Dependency Resolution
The dependency resolution in pip is performed as follows:
for top-level requirements:
a. only one spec allowed per project, regardless of conflicts or not.
otherwise a "double requirement" exception is raised
b. they override sub-dependency requirements.
for sub-dependencies
a. "first found, wins" (where the order is breadth first)
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
import logging
import sys
from collections import defaultdict
from itertools import chain
from pip._vendor.packaging import specifiers
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
HashError,
HashErrors,
UnsupportedPythonVersion,
)
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import dist_in_usersite, normalize_version_info
from pip._internal.utils.packaging import check_requires_python, get_requires_python
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict, List, Optional, Set, Tuple
from pip._vendor.pkg_resources import Distribution
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]]
logger = logging.getLogger(__name__)
def _check_dist_requires_python(
dist, # type: Distribution
version_info, # type: Tuple[int, int, int]
ignore_requires_python=False, # type: bool
):
# type: (...) -> None
"""
Check whether the given Python version is compatible with a distribution's
"Requires-Python" value.
:param version_info: A 3-tuple of ints representing the Python
major-minor-micro version to check.
:param ignore_requires_python: Whether to ignore the "Requires-Python"
value if the given Python version isn't compatible.
:raises UnsupportedPythonVersion: When the given Python version isn't
compatible.
"""
requires_python = get_requires_python(dist)
try:
is_compatible = check_requires_python(
requires_python, version_info=version_info,
)
except specifiers.InvalidSpecifier as exc:
logger.warning(
"Package %r has an invalid Requires-Python: %s",
dist.project_name, exc,
)
return
if is_compatible:
return
version = '.'.join(map(str, version_info))
if ignore_requires_python:
logger.debug(
'Ignoring failed Requires-Python check for package %r: '
'%s not in %r',
dist.project_name, version, requires_python,
)
return
raise UnsupportedPythonVersion(
'Package {!r} requires a different Python: {} not in {!r}'.format(
dist.project_name, version, requires_python,
))
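# Example behavior of the check above (hypothetical values): a distribution
# declaring Requires-Python ">=3.7" checked against version_info=(3, 6, 9)
# raises UnsupportedPythonVersion, unless ignore_requires_python is True, in
# which case only a debug message is logged. An invalid specifier is merely
# logged as a warning and the check is skipped.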
class Resolver(BaseResolver):
"""Resolves which packages need to be installed/uninstalled to perform \
the requested operation without breaking the requirements of any package.
"""
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
wheel_cache, # type: Optional[WheelCache]
make_install_req, # type: InstallRequirementProvider
use_user_site, # type: bool
ignore_dependencies, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
force_reinstall, # type: bool
upgrade_strategy, # type: str
py_version_info=None, # type: Optional[Tuple[int, ...]]
):
# type: (...) -> None
super().__init__()
assert upgrade_strategy in self._allowed_strategies
if py_version_info is None:
py_version_info = sys.version_info[:3]
else:
py_version_info = normalize_version_info(py_version_info)
self._py_version_info = py_version_info
self.preparer = preparer
self.finder = finder
self.wheel_cache = wheel_cache
self.upgrade_strategy = upgrade_strategy
self.force_reinstall = force_reinstall
self.ignore_dependencies = ignore_dependencies
self.ignore_installed = ignore_installed
self.ignore_requires_python = ignore_requires_python
self.use_user_site = use_user_site
self._make_install_req = make_install_req
self._discovered_dependencies = \
defaultdict(list) # type: DiscoveredDependencies
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
"""Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
"""
requirement_set = RequirementSet(
check_supported_wheels=check_supported_wheels
)
for req in root_reqs:
if req.constraint:
check_invalid_constraint_type(req)
requirement_set.add_requirement(req)
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# _populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = [] # type: List[InstallRequirement]
hash_errors = HashErrors()
for req in chain(requirement_set.all_requirements, discovered_reqs):
try:
discovered_reqs.extend(self._resolve_one(requirement_set, req))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
return requirement_set
def _is_upgrade_allowed(self, req):
# type: (InstallRequirement) -> bool
if self.upgrade_strategy == "to-satisfy-only":
return False
elif self.upgrade_strategy == "eager":
return True
else:
assert self.upgrade_strategy == "only-if-needed"
return req.user_supplied or req.constraint
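    # Strategy summary, derived from the branches above: "to-satisfy-only"
    # never upgrades, "eager" always attempts an upgrade, and
    # "only-if-needed" upgrades only user-supplied requirements and
    # constraints.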
def _set_req_to_reinstall(self, req):
# type: (InstallRequirement) -> None
"""
Set a requirement to be installed.
"""
# Don't uninstall the conflict if doing a user install and the
# conflict is not a user install.
if not self.use_user_site or dist_in_usersite(req.satisfied_by):
req.should_reinstall = True
req.satisfied_by = None
def _check_skip_installed(self, req_to_install):
# type: (InstallRequirement) -> Optional[str]
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
if self.ignore_installed:
return None
req_to_install.check_if_exists(self.use_user_site)
if not req_to_install.satisfied_by:
return None
if self.force_reinstall:
self._set_req_to_reinstall(req_to_install)
return None
if not self._is_upgrade_allowed(req_to_install):
if self.upgrade_strategy == "only-if-needed":
return 'already satisfied, skipping upgrade'
return 'already satisfied'
# Check for the possibility of an upgrade. For link-based
# requirements we have to pull the tree down and inspect to assess
# the version #, so it's handled way down.
if not req_to_install.link:
try:
self.finder.find_requirement(req_to_install, upgrade=True)
except BestVersionAlreadyInstalled:
# Then the best version is installed.
return 'already up-to-date'
except DistributionNotFound:
# No distribution found, so we squash the error. It will
# be raised later when we re-try later to do the install.
# Why don't we just raise here?
pass
self._set_req_to_reinstall(req_to_install)
return None
def _find_requirement_link(self, req):
# type: (InstallRequirement) -> Optional[Link]
upgrade = self._is_upgrade_allowed(req)
best_candidate = self.finder.find_requirement(req, upgrade)
if not best_candidate:
return None
# Log a warning per PEP 592 if necessary before returning.
link = best_candidate.link
if link.is_yanked:
reason = link.yanked_reason or '<none given>'
msg = (
# Mark this as a unicode string to prevent
# "UnicodeEncodeError: 'ascii' codec can't encode character"
# in Python 2 when the reason contains non-ascii characters.
'The candidate selected for download or install is a '
'yanked version: {candidate}\n'
'Reason for being yanked: {reason}'
).format(candidate=best_candidate, reason=reason)
logger.warning(msg)
return link
def _populate_link(self, req):
# type: (InstallRequirement) -> None
"""Ensure that if a link can be found for this, that it is found.
Note that req.link may still be None - if the requirement is already
installed and not needed to be upgraded based on the return value of
_is_upgrade_allowed().
If preparer.require_hashes is True, don't use the wheel cache, because
cached wheels, always built locally, have different hashes than the
files downloaded from the index server and thus throw false hash
        mismatches. Furthermore, cached wheels at present have nondeterministic
contents due to file modification times.
"""
if req.link is None:
req.link = self._find_requirement_link(req)
if self.wheel_cache is None or self.preparer.require_hashes:
return
cache_entry = self.wheel_cache.get_cache_entry(
link=req.link,
package_name=req.name,
supported_tags=get_supported(),
)
if cache_entry is not None:
logger.debug('Using cached wheel link: %s', cache_entry.link)
if req.link is req.original_link and cache_entry.persistent:
req.original_link_is_in_wheel_cache = True
req.link = cache_entry.link
def _get_dist_for(self, req):
# type: (InstallRequirement) -> Distribution
"""Takes a InstallRequirement and returns a single AbstractDist \
representing a prepared variant of the same.
"""
if req.editable:
return self.preparer.prepare_editable_requirement(req)
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req.satisfied_by is None
skip_reason = self._check_skip_installed(req)
if req.satisfied_by:
return self.preparer.prepare_installed_requirement(
req, skip_reason
)
# We eagerly populate the link, since that's our "legacy" behavior.
self._populate_link(req)
dist = self.preparer.prepare_linked_requirement(req)
        # NOTE
        # The following portion is for determining if a certain package is
        # going to be re-installed/upgraded or not and reporting to the user.
        # This should probably get cleaned up in a future refactor.
        # req.req is only available after unpack for URL packages; we repeat
        # check_if_exists to support uninstall-on-upgrade (#14).
if not self.ignore_installed:
req.check_if_exists(self.use_user_site)
if req.satisfied_by:
should_modify = (
self.upgrade_strategy != "to-satisfy-only" or
self.force_reinstall or
self.ignore_installed or
req.link.scheme == 'file'
)
if should_modify:
self._set_req_to_reinstall(req)
else:
logger.info(
'Requirement already satisfied (use --upgrade to upgrade):'
' %s', req,
)
return dist
def _resolve_one(
self,
requirement_set, # type: RequirementSet
req_to_install, # type: InstallRequirement
):
# type: (...) -> List[InstallRequirement]
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# Parse and return dependencies
dist = self._get_dist_for(req_to_install)
# This will raise UnsupportedPythonVersion if the given Python
# version isn't compatible with the distribution's Requires-Python.
_check_dist_requires_python(
dist, version_info=self._py_version_info,
ignore_requires_python=self.ignore_requires_python,
)
more_reqs = [] # type: List[InstallRequirement]
def add_req(subreq, extras_requested):
sub_install_req = self._make_install_req(
str(subreq),
req_to_install,
)
parent_req_name = req_to_install.name
to_scan_again, add_to_parent = requirement_set.add_requirement(
sub_install_req,
parent_req_name=parent_req_name,
extras_requested=extras_requested,
)
if parent_req_name and add_to_parent:
self._discovered_dependencies[parent_req_name].append(
add_to_parent
)
more_reqs.extend(to_scan_again)
with indent_log():
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not requirement_set.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
# 'unnamed' requirements can only come from being directly
# provided by the user.
assert req_to_install.user_supplied
requirement_set.add_requirement(
req_to_install, parent_req_name=None,
)
if not self.ignore_dependencies:
if req_to_install.extras:
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
"%s does not provide the extra '%s'",
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
return more_reqs
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set() # type: Set[InstallRequirement]
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._discovered_dependencies[req.name]:
schedule(dep)
order.append(req)
for install_req in req_set.requirements.values():
schedule(install_req)
return order
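# Worked example for get_installation_order() (hypothetical graph): if the
# user requests A, A depends on B, and B depends on C, schedule() recurses
# through _discovered_dependencies and appends in post-order, yielding
# [C, B, A]; dependencies land before the things that require them. A cycle
# such as A -> B -> A terminates because ordered_reqs already contains the
# revisited requirement, breaking the cycle at an arbitrary point.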
|
{
"content_hash": "e132701037fa490fc11663307a5dbe4f",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 84,
"avg_line_length": 38.51797040169133,
"alnum_prop": 0.617542126351611,
"repo_name": "nataddrho/DigiCue-USB",
"id": "665dba128a9776d3c2c58a1da9f48c1d11d5d60a",
"size": "18219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python3/src/venv/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "Python",
"bytes": "205084"
}
],
"symlink_target": ""
}
|
"""spatial index on links
Revision ID: 4c2d727186
Revises: 8fc9e3cc0a
Create Date: 2014-09-26 15:01:43.779140
"""
# revision identifiers, used by Alembic.
revision = '4c2d727186'
down_revision = '8fc9e3cc0a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_link_geom', 'links', ['geom'], unique=False, postgresql_using='gist')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_link_geom', table_name='links')
### end Alembic commands ###
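# A GiST index is the usual choice for PostGIS geometry columns: it lets
# bounding-box operators (e.g. ``geom && some_envelope``) and functions such
# as ST_Intersects use the index instead of scanning every row in ``links``.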
|
{
"content_hash": "b3cd6f6eb45a5c80b86e763a32c2c4b1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 93,
"avg_line_length": 24.5,
"alnum_prop": 0.6844583987441131,
"repo_name": "rjw57/trafficdb",
"id": "f23af721fefe7c367a2f6d4ada00fdd33d41c4d9",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/4c2d727186_spatial_index_on_links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "92163"
}
],
"symlink_target": ""
}
|
import Adafruit_DHT
sensor = Adafruit_DHT.DHT11
pin = 4
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
    print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
    print('Failed to get reading. Try again!')
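# Note: read_retry() polls the sensor repeatedly (by default up to 15
# attempts, roughly 2 seconds apart) because individual DHT11 reads often
# fail; a failed attempt yields None for humidity and temperature, which is
# why the None check above is needed.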
|
{
"content_hash": "3bba8fa44b3c877b8928e61a600ddfe7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.7348993288590604,
"repo_name": "valeriodelsarto/valecasa_bot",
"id": "d293cd448fdda5d6f33ca5b01431a7aaa71b0e9d",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hum_temp_sensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317"
},
{
"name": "Ruby",
"bytes": "103082"
},
{
"name": "Shell",
"bytes": "1474"
}
],
"symlink_target": ""
}
|
from steamweb.models import *
from steamweb.database import db_session
from steamweb.actions.actions_init import command_mapping
resources = ['water', 'ore', 'quartz', 'energy']
def resolve_action(player_command, gameid, playerid):
player = Player.query.filter(Player.id == playerid).first()
game = Game.query.filter(Game.id == gameid).first()
command_name = player_command['command']
command_name_exists(command_name)
command_arguments = player_command['arguments']
command = command_mapping[command_name]
triggered = command.resolve(game, player, command_arguments)
assert triggered is not None
db_session.commit()
all_triggered_events = list(triggered)
while triggered:
event = triggered.pop()
new_events = event.resolve(game, player)
triggered.extend(new_events)
all_triggered_events.extend(new_events)
db_session.commit()
triggered_events_names = [event.get_name() for event in all_triggered_events]
return game, triggered_events_names
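# The while-loop above is a simple event cascade: each resolved event may
# trigger further events, which are appended to the queue and processed until
# it drains. Hypothetical trace: a "build" command triggers a
# "spend_resources" event; resolving that event triggers nothing further, so
# the loop ends after one pass and "spend_resources" is among the returned
# triggered_events_names.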
def get_domain(gameid, playerid, actionname):
player = Player.query.filter(Player.id == playerid).first()
game = Game.query.filter(Game.id == gameid).first()
command_name_exists(actionname)
command = command_mapping[actionname]
return command.domain(game, player)
def command_name_exists(cmd_name):
if cmd_name not in command_mapping:
raise ValueError("There is no player action like: {}".format(cmd_name))
|
{
"content_hash": "e2a70cc5c75f7da92b54811471a5c54b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 81,
"avg_line_length": 38.89473684210526,
"alnum_prop": 0.706359945872801,
"repo_name": "PiotrZakrzewski/steamweb",
"id": "5d39f65b51e5d1554d2e278034d80d5e4b176a14",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steamweb/resolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "208"
},
{
"name": "HTML",
"bytes": "11691"
},
{
"name": "JavaScript",
"bytes": "64132"
},
{
"name": "Jupyter Notebook",
"bytes": "178970"
},
{
"name": "Python",
"bytes": "141058"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import logging
import numpy as np
from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch
from ray.rllib.policy import Policy
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.offline.io_context import IOContext
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.typing import TensorType, SampleBatchType
from typing import List
logger = logging.getLogger(__name__)
OffPolicyEstimate = namedtuple("OffPolicyEstimate",
["estimator_name", "metrics"])
@DeveloperAPI
class OffPolicyEstimator:
"""Interface for an off policy reward estimator."""
@DeveloperAPI
def __init__(self, policy: Policy, gamma: float):
"""Creates an off-policy estimator.
Args:
policy (Policy): Policy to evaluate.
gamma (float): Discount of the MDP.
"""
self.policy = policy
self.gamma = gamma
self.new_estimates = []
@classmethod
def create(cls, ioctx: IOContext) -> "OffPolicyEstimator":
"""Create an off-policy estimator from a IOContext."""
gamma = ioctx.worker.policy_config["gamma"]
# Grab a reference to the current model
keys = list(ioctx.worker.policy_map.keys())
if len(keys) > 1:
raise NotImplementedError(
"Off-policy estimation is not implemented for multi-agent. "
"You can set `input_evaluation: []` to resolve this.")
policy = ioctx.worker.get_policy(keys[0])
return cls(policy, gamma)
@DeveloperAPI
def estimate(self, batch: SampleBatchType):
"""Returns an estimate for the given batch of experiences.
The batch will only contain data from one episode, but it may only be
a fragment of an episode.
"""
raise NotImplementedError
@DeveloperAPI
def action_prob(self, batch: SampleBatchType) -> np.ndarray:
"""Returns the probs for the batch actions for the current policy."""
num_state_inputs = 0
for k in batch.keys():
if k.startswith("state_in_"):
num_state_inputs += 1
state_keys = ["state_in_{}".format(i) for i in range(num_state_inputs)]
log_likelihoods: TensorType = self.policy.compute_log_likelihoods(
actions=batch[SampleBatch.ACTIONS],
obs_batch=batch[SampleBatch.CUR_OBS],
state_batches=[batch[k] for k in state_keys],
prev_action_batch=batch.data.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=batch.data.get(SampleBatch.PREV_REWARDS))
log_likelihoods = convert_to_numpy(log_likelihoods)
return np.exp(log_likelihoods)
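    # Hedged usage note: importance-sampling style estimators typically
    # divide these probabilities by the behavior policy's recorded
    # "action_prob" values to form per-step IS ratios, e.g.
    #     ratios = self.action_prob(batch) / batch["action_prob"]
    # ("ratios" is an illustrative name; check_can_estimate_for() raises if
    # the "action_prob" key is missing from the batch).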
@DeveloperAPI
def process(self, batch: SampleBatchType):
self.new_estimates.append(self.estimate(batch))
@DeveloperAPI
def check_can_estimate_for(self, batch: SampleBatchType):
"""Returns whether we can support OPE for this batch."""
if isinstance(batch, MultiAgentBatch):
raise ValueError(
"IS-estimation is not implemented for multi-agent batches. "
"You can set `input_evaluation: []` to resolve this.")
if "action_prob" not in batch:
raise ValueError(
"Off-policy estimation is not possible unless the inputs "
"include action probabilities (i.e., the policy is stochastic "
"and emits the 'action_prob' key). For DQN this means using "
"`exploration_config: {type: 'SoftQ'}`. You can also set "
"`input_evaluation: []` to disable estimation.")
@DeveloperAPI
def get_metrics(self) -> List[OffPolicyEstimate]:
"""Return a list of new episode metric estimates since the last call.
Returns:
list of OffPolicyEstimate objects.
"""
out = self.new_estimates
self.new_estimates = []
return out
|
{
"content_hash": "9dd6d60db89eeed7e568371c90098378",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 37.660377358490564,
"alnum_prop": 0.6347695390781564,
"repo_name": "richardliaw/ray",
"id": "43aa2cc208a7c7d886738818513d9de9bfae8d7a",
"size": "3992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/offline/off_policy_estimator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
}
|
from aiohttp import web
import socketio
sio = socketio.AsyncServer(async_mode='aiohttp')
app = web.Application()
sio.attach(app)
async def index(request):
with open('fiddle.html') as f:
return web.Response(text=f.read(), content_type='text/html')
@sio.event
async def connect(sid, environ, auth):
print(f'connected auth={auth} sid={sid}')
await sio.emit('hello', (1, 2, {'hello': 'you'}), to=sid)
@sio.event
def disconnect(sid):
print('disconnected', sid)
app.router.add_static('/static', 'static')
app.router.add_get('/', index)
if __name__ == '__main__':
web.run_app(app)
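# A minimal client-side sketch (assumes the server above is reachable on
# port 8080, aiohttp's default for web.run_app; names are illustrative):
#
#     import asyncio
#     import socketio
#
#     async def main():
#         client = socketio.AsyncClient()
#
#         @client.on('hello')
#         async def on_hello(*args):
#             print('server said hello:', args)
#
#         await client.connect('http://localhost:8080', auth={'token': 'demo'})
#         await client.wait()
#
#     asyncio.run(main())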
|
{
"content_hash": "37130e515544864c2483d52e60792390",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 19.806451612903224,
"alnum_prop": 0.6530944625407166,
"repo_name": "miguelgrinberg/python-socketio",
"id": "dfde8e10c2f193d4b483815be4bf5601ef1d0bfb",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/server/aiohttp/fiddle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "460138"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
}
|
"""
__graph_MT_post__PortRef.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
_____________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__PortRef(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([58.0, 37.0, 58.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__PortRef', width = '0', justify= 'left', stipple='' )
self.gf8 = GraphicalForm(drawing, h, 'gf8', fontObject=font)
self.graphForms.append(self.gf8)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__PortRef
|
{
"content_hash": "bf899bd4e004ec9c27e1d726f6952745",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 210,
"avg_line_length": 41.476190476190474,
"alnum_prop": 0.5755836203597398,
"repo_name": "levilucio/SyVOLT",
"id": "6023597b3b0366dbfec7e10bf71c9c8681531224",
"size": "2613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/graph_MT_post__PortRef.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
import base64
from upcloud_api.api import API
from upcloud_api.cloud_manager.firewall_mixin import FirewallManager
from upcloud_api.cloud_manager.host_mixin import HostManager
from upcloud_api.cloud_manager.ip_address_mixin import IPManager
from upcloud_api.cloud_manager.network_mixin import NetworkManager
from upcloud_api.cloud_manager.object_storage_mixin import ObjectStorageManager
from upcloud_api.cloud_manager.server_mixin import ServerManager
from upcloud_api.cloud_manager.storage_mixin import StorageManager
from upcloud_api.cloud_manager.tag_mixin import TagManager
class CloudManager(
ServerManager,
IPManager,
StorageManager,
FirewallManager,
TagManager,
NetworkManager,
HostManager,
ObjectStorageManager,
):
"""
    CloudManager contains the core functionality of the UpCloud API library.
    All other managers are mixed in so the code can be organized into
    corresponding sub-manager classes.
"""
api: API
def __init__(self, username: str, password: str, timeout: int = 60) -> None:
"""
        Initializes a CloudManager that handles all HTTP connections with UpCloud's API.
        Optionally set a timeout for API connections (in seconds); a timeout of
        `None` means that there is no timeout.
"""
if not username or not password:
raise Exception('Invalid credentials, please provide a username and password')
credentials = f'{username}:{password}'.encode()
encoded_credentials = base64.b64encode(credentials).decode()
self.api = API(
token=f'Basic {encoded_credentials}',
timeout=timeout,
)
def authenticate(self):
"""
        Authenticate by requesting the account information.
"""
return self.get_account()
def get_account(self):
"""
Returns information on the user's account and resource limits.
"""
return self.api.get_request('/account')
def get_zones(self):
"""
Returns a list of available zones.
"""
return self.api.get_request('/zone')
def get_timezones(self):
"""
Returns a list of available timezones.
"""
return self.api.get_request('/timezone')
def get_prices(self):
"""
Returns a list of resource prices.
"""
return self.api.get_request('/price')
def get_server_sizes(self):
"""
Returns a list of available server configurations.
"""
return self.api.get_request('/server_size')
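# Illustrative usage sketch (an editorial addition, not part of the upstream
# module): the credentials are placeholders, while CloudManager and the
# accessor methods used below are the real definitions above. The helper is
# never invoked on import.
def _example_usage() -> None:
    # Minimal round trip, assuming valid UpCloud API credentials.
    manager = CloudManager('demo-user', 'demo-password', timeout=30)
    manager.authenticate()         # requests /account to verify credentials
    zones = manager.get_zones()    # parsed JSON describing available zones
    prices = manager.get_prices()  # parsed JSON describing resource prices
    print(zones, prices)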
|
{
"content_hash": "e147145fc349393afd0f640b06c4ea15",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 98,
"avg_line_length": 30.36904761904762,
"alnum_prop": 0.6589572716581733,
"repo_name": "UpCloudLtd/upcloud-python-api",
"id": "c525d0e96d713fdd9b0b322af3969012ab5be22e",
"size": "2551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "upcloud_api/cloud_manager/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140582"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import os
import random
import shutil
import tempfile
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if context.in_graph_mode():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if context.in_graph_mode():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
        # TODO(xpan): Why doesn't _mutable_hash_table_v2 create an empty
        # table as it claims to in eager mode?
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes()
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
sess.run(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
sess.run(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
@test_util.run_in_graph_and_eager_modes()
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.test_session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.in_graph_mode():
self.evaluate(variables.global_variables_initializer())
else:
sess = None
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
      self.assertEqual(self.evaluate(v), [1])
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(saver_module.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(saver_module.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session() as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
def testFilenameTensor(self):
v0 = variables.Variable(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.test_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(sess.run(tensor), filename)
def testInvalidPath(self):
v0 = variables.Variable(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.test_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegexp(errors.NotFoundError,
"Failed to find any matching files for"):
save.restore(sess, "invalid path")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.test_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = variables.Variable(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
variables.global_variables_initializer().run()
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session() as sess:
v = variables.Variable(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
sess.run(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # By default the name used for "v2" will be "v1", which raises an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
variables.global_variables_initializer().run()
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.Variable(1000.0, name="v0")
v1_2 = variables.Variable(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
variables.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
self.assertEqual(b"k1000", v2_2.keys().eval())
self.assertEqual(3000.0, v2_2.values().eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
self.assertEqual(b"k1", v2_2.keys().eval())
self.assertEqual(30.0, v2_2.values().eval())
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if context.in_graph_mode():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
with self.test_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with self.test_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1})
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2})
variables.global_variables_initializer().run()
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
twos = variables.Variable([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(3.0, v2.values().eval())
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.Variable(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.Variable(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegexp(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
save = saver_module.Saver(defer_build=True)
      # If the build were not deferred, the saver could not save `twos`.
twos = variables.Variable([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
@test_util.run_in_graph_and_eager_modes()
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.in_graph_mode():
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
else:
sess = None
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
      # When the parent directory doesn't exist, whether the save succeeds or
      # fails is implementation-dependent, so we allow both outcomes.
try:
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the graph.
save.save(sess, save_path)
with self.test_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO(jhseu): Remove this check when it works.
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
save.save(sess, save_path)
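# Editorial sketch (not part of the upstream test suite): the minimal
# save/restore round trip the SaverTest cases above exercise, written with
# the same imports used at the top of this file. The helper is illustrative
# and is not invoked by any test.
def _example_save_restore_round_trip(save_path):
  """Saves a variable, then restores it into a fresh graph."""
  with session.Session(graph=ops_lib.Graph()) as sess:
    v = variables.Variable(42.0, name="v")
    saver = saver_module.Saver({"v": v})
    sess.run(variables.global_variables_initializer())
    saver.save(sess, save_path)  # writes the checkpoint and meta graph
  with session.Session(graph=ops_lib.Graph()) as sess:
    v = variables.Variable(-1.0, name="v")  # deliberately different value
    saver = saver_module.Saver({"v": v})
    saver.restore(sess, save_path)  # overwrites v with the saved 42.0
    return sess.run(v)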
class SaveRestoreShardedTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver({"v0": v0, "t0": t0.saveable}, sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.Variable(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver({"v1": v1, "t1": t1.saveable}, sharded=True)
variables.global_variables_initializer().run()
t1.insert("k22", 44.0).run()
self.assertEqual(222, v1.eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
with self.test_session():
v0 = variables.Variable(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
    # Allows the save/restore mechanism to work with different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(slices=None, partitioner=None):
with self.test_session(graph=ops_lib.Graph()) as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if slices:
assert not partitioner
# TODO(apassos): make create_partitioned_variables take use_resource
# option to make this test passable without creating a named
# variable_scope.
vs = partitioned_variables.create_partitioned_variables(
var_full_shape, slices, rnd, name=var_name)
elif partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.Variable(rnd, name=var_name)]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({var_name: (vs if slices else vs[0])})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(slices=None, partitioner=None):
with self.test_session(graph=ops_lib.Graph()) as sess:
if slices:
assert not partitioner
new_vs = partitioned_variables.create_partitioned_variables(
var_full_shape,
slices,
array_ops.zeros(var_full_shape), # != original contents.
name=var_name)
elif partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.Variable(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: (new_vs if slices else new_vs[0])
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
elif slices and slices[0] != 1:
return array_ops.concat(new_vs, 0).eval()
elif slices and slices[1] != 1:
return array_ops.concat(new_vs, 1).eval()
else: # Non-sliced.
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Saves 10 horizontal parts of a partitioned variable.
# Restores into a full variable, non-sliced.
saved_full = _save(slices=[10, 1])
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number/orientation of slices.
      restored_full = _restore(slices=[2, 1])  # 2 horizontal parts.
self.assertAllEqual(saved_full, restored_full)
restored_full = _restore(slices=[1, 3]) # 3 vertical parts.
self.assertAllEqual(saved_full, restored_full)
# Restores into a PartitionedVariable
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Now, saves a full variable and restores in slices.
saved_full = _save()
restored_full = _restore(slices=[1, 3])
self.assertAllEqual(saved_full, restored_full)
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
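# Editorial note (not upstream code): the slicing arithmetic exercised by
# _testPartitionedVariables above, with var_full_shape = [10, 3]:
#   slices=[10, 1] saves 10 horizontal parts of shape [1, 3] each,
#     reassembled with concat along axis 0;
#   slices=[1, 3] saves 3 vertical parts of shape [10, 1] each,
#     reassembled with concat along axis 1;
#   fixed_size_partitioner(num_shards=2) splits along the first axis into
#     two shards of shape [5, 3] each.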
class MaxToKeepTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
      # Adding s1 (this helper does not delete s3, of which it is unaware;
      # s3 was already deleted by the first helper)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
variables.global_variables_initializer().run()
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
def testNoMetaGraph(self):
save_dir = self._get_test_dir("no_meta_graph")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v})
variables.global_variables_initializer().run()
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
class KeepCheckpointEveryNHoursTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.test_session() as sess:
v = variables.Variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
variables.global_variables_initializer().run()
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
      # Wait until 1 second has elapsed so s1 will be old enough to keep.
      # sleep may return early; don't trust it.
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
# We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(),
# would normally delete s1, because max_to_keep is 2. However, s1 is
# older than 0.7s so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here; we are not checking now to reduce time
      # variance in the test.
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
# call to Save(), will delete s2, because max_to_keep is 2, and because
# we already kept the old s1. s2 is very close in time to s1 so it gets
# deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(saver_module.checkpoint_exists(s4))
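# Editorial note (not upstream code): the retention behavior the assertions
# above verify, with max_to_keep=2 and keep_checkpoint_every_n_hours set to
# 0.7 seconds (every save happens at mocked time start + 1.0):
#   save s1 -> last_checkpoints == [s1]
#   save s2 -> last_checkpoints == [s1, s2]
#   save s3 -> s1 falls off the list, but it is older than the 0.7s keep
#     interval, so its files are preserved on disk; list becomes [s2, s3]
#   save s4 -> s2 falls off the list; an old checkpoint (s1) was already
#     preserved close in time, so s2 is deleted; list becomes [s3, s4]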
class SaveRestoreWithVariableNameMap(test.TestCase):
def _testNonReshape(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
      # Verify that the original names are not in the saved file
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
    # Verify that the mapped names are present in the saved file and can be
    # restored using remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if context.in_graph_mode():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if context.in_graph_mode():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if context.in_graph_mode():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes()
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
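# Editorial note (not upstream code): the name-map pattern exercised above
# stores each tensor under its dictionary key rather than its graph name.
# For example, saver_module.Saver({"save_prefix/v0": v0}) writes v0 under
# the checkpoint key "save_prefix/v0", so a graph whose variable is named
# "restore_prefix/v0" can restore it by building its Saver with that same
# key.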
class LatestCheckpointWithRelativePaths(test.TestCase):
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
def testNameCollision(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
# Collides with the default name of the checkpoint state file.
filepath = os.path.join(traindir, "checkpoint")
with self.test_session() as sess:
unused_a = variables.Variable(0.0) # So that Saver saves something.
variables.global_variables_initializer().run()
# Should fail.
saver = saver_module.Saver(sharded=False)
with self.assertRaisesRegexp(ValueError, "collides with"):
saver.save(sess, filepath)
# Succeeds: the file will be named "checkpoint-<step>".
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.test_session() as sess:
# Build a simple graph.
v0 = variables.Variable(0.0)
inc = v0.assign_add(1.0)
save = saver_module.Saver({"v0": v0})
# Record a short training history.
variables.global_variables_initializer().run()
save.save(sess, filepath, global_step=0)
inc.eval()
save.save(sess, filepath, global_step=1)
inc.eval()
save.save(sess, filepath, global_step=2)
with self.test_session() as sess:
# Build a new graph with different initialization.
v0 = variables.Variable(-1.0)
# Create a new saver.
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer().run()
# Get the most recent checkpoint name from the training history file.
name = saver_module.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEqual(v0.eval(), 2.0)
class CheckpointStateTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._get_test_dir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = saver_module.generate_checkpoint_state_proto(save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = saver_module.generate_checkpoint_state_proto(train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._get_test_dir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = saver_module.generate_checkpoint_state_proto(
save_dir, abs_path, all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
saver_module.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = saver_module.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
def testUpdateCheckpointStateSaveRelativePaths(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
abs_path2 = os.path.join(save_dir, "model-2")
rel_path2 = "model-2"
abs_path0 = os.path.join(save_dir, "model-0")
rel_path0 = "model-0"
saver_module._update_checkpoint_state( # pylint: disable=protected-access
save_dir=save_dir,
model_checkpoint_path=abs_path2,
all_model_checkpoint_paths=[rel_path0, abs_path2],
save_relative_paths=True)
# File should contain relative paths.
file_content = file_io.read_file_to_string(
os.path.join(save_dir, "checkpoint"))
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
# get_checkpoint_state should return absolute paths.
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)
def testCheckPointStateFailsWhenIncomplete(self):
save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("")
ckpt_file.close()
with self.assertRaises(ValueError):
saver_module.get_checkpoint_state(save_dir)
def testCheckPointCompletesRelativePaths(self):
save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
    with open(ckpt_path, "w") as ckpt_file:
      ckpt_file.write("""
model_checkpoint_path: "./model.ckpt-687529"
all_model_checkpoint_paths: "./model.ckpt-687500"
all_model_checkpoint_paths: "./model.ckpt-687529"
""")
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path,
os.path.join(save_dir, "./model.ckpt-687529"))
self.assertEqual(ckpt.all_model_checkpoint_paths[0],
os.path.join(save_dir, "./model.ckpt-687500"))
self.assertEqual(ckpt.all_model_checkpoint_paths[1],
os.path.join(save_dir, "./model.ckpt-687529"))
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = variables.Variable(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.Variable(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
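      # google.protobuf.Any stores the message together with its type URL, so
      # an arbitrary proto can round-trip through the collection untyped.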
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
      # Verifies that a collection with an unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
      # Verifies that a collection whose item type does not match the
      # expected type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], v0.eval())
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, v1.eval())
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
def testClearExtraneousSavers(self):
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
      # Verifies that saver0 graph nodes are omitted from the saver1 export.
self.assertEqual(29, len(meta_graph_def0.graph_def.node))
self.assertEqual(19, len(meta_graph_def1.graph_def.node))
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=ops_lib.Graph()):
# Creates a graph.
variables.Variable(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.test_session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=ops_lib.Graph()):
# Import should fail.
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "Cannot parse file" in str(e)):
saver_module.import_meta_graph(filename)
    # Deletes the file.
gfile.Remove(filename)
    with self.assertRaisesWithPredicateMatch(
        IOError, lambda e: "does not exist" in str(e)):
saver_module.import_meta_graph(filename)
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
variables.global_variables_initializer().run()
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
      # The use of control_flow_ops.cond here is purely for adding test
      # coverage for the save and restore of control flow contexts (which
      # don't make any sense here from a machine learning perspective). A
      # typical bias term is a simple Variable without the condition.
biases = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
      # The use of control_flow_ops.while_loop here is purely for adding test
      # coverage for the save and restore of control flow contexts (which
      # don't make any sense here from a machine learning perspective). A
      # typical bias term is a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0), variables.Variable(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initializes all the variables.
sess.run(init_all_op)
      # Runs the graph to compute the logits.
sess.run(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      # Creates the train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
sess.run(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
sess.run(train_op)
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(0.0)
var = variables.Variable(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2",
"SaveSlices", "Sub", "VariableV2"
])
else:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2", "SaveV2",
"Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
def testImportIntoNamescope(self):
# Test that we can import a meta graph into a namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
# Test that we import a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
# Test that we export a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def testDebugString(self):
# Builds a graph.
v0 = variables.Variable(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.Variable(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.test_session() as sess:
sess.run(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
pywrap_tensorflow.NewCheckpointReader("non-existent")
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
class SaverUtilsTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "saver_utils_test")
gfile.MakeDirs(self._base_dir)
def tearDown(self):
gfile.DeleteRecursively(self._base_dir)
def testCheckpointExists(self):
for sharded in (False, True):
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(sharded=sharded, write_version=version)
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
self.assertFalse(
saver_module.checkpoint_exists(path)) # Not saved yet.
ckpt_prefix = saver.save(sess, path)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
ckpt_prefix = saver_module.latest_checkpoint(self._base_dir)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
def testGetCheckpointMtimes(self):
prefixes = []
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(write_version=version)
prefixes.append(
saver.save(sess, os.path.join(self._base_dir, str(version))))
mtimes = saver_module.get_checkpoint_mtimes(prefixes)
self.assertEqual(2, len(mtimes))
self.assertTrue(mtimes[1] >= mtimes[0])
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage for the save and restore of control flow contexts (which
        # don't make any sense here from a machine learning perspective). A
        # typical bias term is a simple Variable without the condition.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding test
        # coverage for the save and restore of control flow contexts (which
        # don't make any sense here from a machine learning perspective). A
        # typical bias term is a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
# Adds user_defined proto in three formats: string, bytes and Any.
# Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with self.test_session(graph=graph) as sess:
sess.run(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding test
        # coverage for the save and restore of control flow contexts (which
        # don't make any sense here from a machine learning perspective). A
        # typical bias term is a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
# The rest of the variables.
rest_variables = list(
set(variables.global_variables()) - set(var_list.keys()))
init_rest_op = variables.initialize_variables(rest_variables)
with self.test_session(graph=graph) as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
sess.run([weights1, biases1])
# Initialize the rest of the variables and run logits.
sess.run(init_rest_op)
sess.run(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
  # Verifies that we can copy the subgraph under "hidden1" to a different
  # name scope in the same graph or in a different graph.
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies copy to the same graph with the same name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with self.test_session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
    # Verifies copy to a different graph.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.Variable([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.Variable([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.test_session(graph=graph) as sess:
variables.global_variables_initializer().run()
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.test_session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, var_dict1["variable1:0"].eval())
graph2 = ops_lib.Graph()
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.test_session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, var_dict2["variable2:0"].eval())
# TODO(b/64763924): Remove after Jan 1st 2018.
class LenientNamesTest(test.TestCase):
def setUp(self):
super(LenientNamesTest, self).setUp()
os.putenv("TF_SAVER_LENIENT_NAMES", "True")
def tearDown(self):
os.putenv("TF_SAVER_LENIENT_NAMES", "")
super(LenientNamesTest, self).tearDown()
def testSaveRestore(self):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
{
"v0:0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
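    # With TF_SAVER_LENIENT_NAMES set, the "v0:0" key above is accepted; the
    # restore below addresses the same variable as plain "v0".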
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.test_session() as sess:
sess.run(init_all_op)
save.save(sess, save_path)
with self.test_session() as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
if __name__ == "__main__":
test.main()
|
{
"content_hash": "4d61ff49e9472df24ed1891aac8e53ef",
"timestamp": "",
"source": "github",
"line_count": 2570,
"max_line_length": 80,
"avg_line_length": 41.71867704280156,
"alnum_prop": 0.6432188925263718,
"repo_name": "dyoung418/tensorflow",
"id": "744b17dd224297cbefedfe562ff106fe1200664f",
"size": "107905",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saver_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "155915"
},
{
"name": "C++",
"bytes": "9052366"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "763492"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "Protocol Buffer",
"bytes": "110178"
},
{
"name": "Python",
"bytes": "6032114"
},
{
"name": "Shell",
"bytes": "165125"
},
{
"name": "TypeScript",
"bytes": "403037"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
from django_fsm import signals as fsm_signals
class GeoIPConfig(AppConfig):
name = 'waldur_geo_ip'
def ready(self):
from waldur_geo_ip.mixins import IPCoordinatesMixin
from . import handlers
for index, model in enumerate(IPCoordinatesMixin.get_all_models()):
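            # dispatch_uid keeps the connection idempotent: calling ready()
            # again will not register duplicate handlers for the same model.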
fsm_signals.post_transition.connect(
handlers.detect_vm_coordinates,
sender=model,
dispatch_uid='waldur_geo_ip.handlers.detect_vm_coordinates_{}_{}'.format(
model.__name__, index
),
)
|
{
"content_hash": "ff2fc746887b74f657883352c1edfacc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 89,
"avg_line_length": 32.68421052631579,
"alnum_prop": 0.6022544283413849,
"repo_name": "opennode/nodeconductor-assembly-waldur",
"id": "2866565978e50a5aa5d26c2d33cd63880746b102",
"size": "621",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_geo_ip/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "412263"
},
{
"name": "Shell",
"bytes": "2031"
}
],
"symlink_target": ""
}
|
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import gen_clustering_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader
_clustering_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_clustering_ops.so'))
# Euclidean distance between vectors U and V is defined as ||U - V||_F, the
# square root of the sum of the squared element-wise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# 1 - (U \dot V) / (||U||_F ||V||_F)
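# For unit-norm vectors the two metrics are related by
# ||U - V||^2 = 2 * (1 - U \dot V): the squared Euclidean distance is twice
# the cosine distance. _infer_graph below relies on this equivalence.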
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
    iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
    becomes an asynchronous version of the full-batch algorithm. Note however
    that this implementation does not guarantee that each input is seen
    exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors
num_clusters: number of clusters.
initial_clusters: Specifies the clusters used during initialization. Can
be a tensor or numpy array, or a function that generates the clusters.
Can also be "random" to specify that clusters should be chosen randomly
from input data.
distance_metric: distance metric used for clustering.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy.
      random_seed: Seed for the PRNG used to initialize the cluster centers.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
"""
self._inputs = inputs if isinstance(inputs, list) else [inputs]
assert num_clusters > 0, num_clusters
self._num_clusters = num_clusters
if initial_clusters is None:
initial_clusters = RANDOM_INIT
self._initial_clusters = initial_clusters
assert distance_metric in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, ('Unsupported distance metric passed to Kmeans %s' %
str(distance_metric))
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
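        # With inp of shape [n, d] and clusters of shape [k, d], the three
        # terms have shapes [n, 1], [n, k] and [1, k]; broadcasting yields the
        # full [n, k] matrix of ||x_i - c_j||^2 values.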
squared_distance = (
math_ops.reduce_sum(math_ops.square(inp), 1, keep_dims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keep_dims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
      inputs_normalized: if True, it assumes that inp and clusters are already
        L2-normalized, so the cosine distance reduces to 1 minus the dot
        product. Else it L2-normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
      List of tuples, where each value in a tuple corresponds to a value in
      inputs. Each tuple has the following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
      # For normalized vectors x and y, the squared Euclidean distance is
      # twice the cosine distance: ||x - y||^2 = 2 * (1 - x \dot y). We use
      # this fact and reuse the nearest_neighbors op, halving its distances
      # below.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp):
(indices, distances) = gen_clustering_ops.nearest_neighbors(
inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append((score, array_ops.squeeze(distances),
array_ops.squeeze(indices)))
return zip(*output)
def _init_clusters_random(self):
"""Does random initialization of clusters.
Returns:
Tensor of randomly initialized clusters.
"""
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
# Note that for mini-batch k-means, we should ensure that the batch size of
# data used during initialization is sufficiently large to avoid duplicated
# clusters.
with ops.control_dependencies(
[check_ops.assert_less_equal(self._num_clusters, num_data)]):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_clusters, [-1]),
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
clusters_init = embedding_lookup(
self._inputs, indices, partition_strategy='div')
return clusters_init
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _initialize_clusters(self, cluster_centers, cluster_centers_initialized,
cluster_centers_updated):
"""Returns an op to initialize the cluster centers."""
init = self._initial_clusters
if init == RANDOM_INIT:
clusters_init = self._init_clusters_random()
elif init == KMEANS_PLUS_PLUS_INIT:
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
inp, self._num_clusters, self._random_seed,
self._kmeans_plus_plus_num_retries)
elif callable(init):
clusters_init = init(self._inputs, self._num_clusters)
elif not isinstance(init, str):
clusters_init = init
else:
assert False, 'Unsupported init passed to Kmeans %s' % str(init)
if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
with ops.colocate_with(cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[clusters_init], array_ops.identity(cluster_centers_initialized))
with ops.colocate_with(cluster_centers):
assign_centers = state_ops.assign(
cluster_centers, clusters_init, validate_shape=False)
if cluster_centers_updated != cluster_centers:
assign_centers = control_flow_ops.group(assign_centers,
state_ops.assign(
cluster_centers_updated,
clusters_init,
validate_shape=False))
assign_centers = control_flow_ops.with_dependencies(
[assign_centers], state_ops.assign(cluster_centers_initialized, True))
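    # Assign only if initialization has not already happened, e.g. when
    # another worker won the race to initialize the shared variables.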
return control_flow_ops.cond(initialized, control_flow_ops.no_op,
lambda: assign_centers).op
def _create_variables(self):
"""Creates variables.
Returns:
Tuple with following elements:
cluster_centers: a Tensor for storing cluster centers
cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
cluster_centers_updated: Tensor representing copy of cluster centers that
are updated every step.
      update_in_steps: number of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(
init_value, name='clusters', validate_shape=False)
cluster_centers_initialized = variable_scope.variable(
False, dtype=dtypes.bool, name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(
init_value, name='clusters_updated', validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([self._num_clusters], dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (variable_scope.variable(
array_ops.ones([self._num_clusters], dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers, cluster_centers_initialized, cluster_counts,
cluster_centers_updated, update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
        num_clusters) where the value is the distance between an input vector
        and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
inputs = self._inputs
(cluster_centers_var, cluster_centers_initialized, total_counts,
cluster_centers_updated, update_in_steps) = self._create_variables()
init_op = self._initialize_clusters(cluster_centers_var,
cluster_centers_initialized,
cluster_centers_updated)
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps, cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, cluster_idx,
cluster_centers_var)
return (all_scores, cluster_idx, scores, cluster_centers_initialized,
init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
cluster_centers_updated, total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps):
def _f():
          # Note that there is a race condition here, so we do best-effort
          # updates. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([
state_ops.assign(update_in_steps,
self._mini_batch_steps_per_iteration - 1)
]):
with ops.colocate_with(
cluster_centers_updated, ignore_existing=True):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(
cluster_centers_updated, dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies(
[state_ops.assign(cluster_centers_var, cluster_centers)]):
with ops.colocate_with(
cluster_centers_var, ignore_existing=True):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))
]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0, _f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts, ignore_existing=True):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
with ops.colocate_with(cluster_centers, ignore_existing=True):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
unique_idx, num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# x += (sum_i(d_i) - k * x) / (n + k).
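        # Worked example (hypothetical numbers): with x = 0.0, n = 3 and two
        # newly assigned points d_1 = 2.0, d_2 = 4.0, the update is
        # x += (6.0 - 2 * 0.0) / (3 + 2) = 1.2, i.e. the running mean of all
        # five points.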
# Compute sum_i(d_i), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat([
array_ops.reshape(num_unique_cluster_idx, [1]),
array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
], 0)
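        # E.g. for a rank-2 'inp', broadcast_shape is [num_unique_cluster_idx, 1].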
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
        # Scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers, unique_ids, cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, self._num_clusters))
with ops.colocate_with(cluster_centers, ignore_existing=True):
      new_cluster_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
          math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
      if self._clusters_l2_normalized():
        new_cluster_centers = nn_impl.l2_normalize(new_cluster_centers, dim=1)
      return state_ops.assign(cluster_centers, new_cluster_centers)
|
{
"content_hash": "b2ba61c4138d177ecd12a1a34b4c1c2c",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 80,
"avg_line_length": 45.88805970149254,
"alnum_prop": 0.6608391608391608,
"repo_name": "ville-k/tensorflow",
"id": "2e9b5e22c73e02dce01fe6f62ad1de5fced88dd9",
"size": "25285",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/factorization/python/ops/clustering_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7908"
},
{
"name": "C",
"bytes": "186881"
},
{
"name": "C++",
"bytes": "25385487"
},
{
"name": "CMake",
"bytes": "166479"
},
{
"name": "Go",
"bytes": "859393"
},
{
"name": "HTML",
"bytes": "593130"
},
{
"name": "Java",
"bytes": "319061"
},
{
"name": "JavaScript",
"bytes": "1399"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63700"
},
{
"name": "Protocol Buffer",
"bytes": "227623"
},
{
"name": "Python",
"bytes": "22405092"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "338633"
},
{
"name": "TypeScript",
"bytes": "801168"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EmbedPlacePlugin'
db.create_table(u'aldryn_locations_embedplaceplugin', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('query', self.gf('django.db.models.fields.CharField')(max_length=255)),
('map_type', self.gf('django.db.models.fields.CharField')(default='roadmap', max_length=300)),
('center', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('zoom', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('ui_lang', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('region', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('width', self.gf('django.db.models.fields.CharField')(default='100%', max_length=6)),
('height', self.gf('django.db.models.fields.CharField')(default='400px', max_length=6)),
))
db.send_create_signal(u'aldryn_locations', ['EmbedPlacePlugin'])
# Adding model 'EmbedViewPlugin'
db.create_table(u'aldryn_locations_embedviewplugin', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('query', self.gf('django.db.models.fields.CharField')(max_length=255)),
('map_type', self.gf('django.db.models.fields.CharField')(default='roadmap', max_length=300)),
('center', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('zoom', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('ui_lang', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('region', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('width', self.gf('django.db.models.fields.CharField')(default='100%', max_length=6)),
('height', self.gf('django.db.models.fields.CharField')(default='400px', max_length=6)),
))
db.send_create_signal(u'aldryn_locations', ['EmbedViewPlugin'])
# Adding model 'EmbedDirectionsPlugin'
db.create_table(u'aldryn_locations_embeddirectionsplugin', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('query', self.gf('django.db.models.fields.CharField')(max_length=255)),
('map_type', self.gf('django.db.models.fields.CharField')(default='roadmap', max_length=300)),
('center', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('zoom', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('ui_lang', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('region', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('width', self.gf('django.db.models.fields.CharField')(default='100%', max_length=6)),
('height', self.gf('django.db.models.fields.CharField')(default='400px', max_length=6)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255)),
('destination', self.gf('django.db.models.fields.CharField')(max_length=255)),
('waypoints', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('travel_mode', self.gf('django.db.models.fields.CharField')(default='auto', max_length=50)),
('avoid', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('units', self.gf('django.db.models.fields.CharField')(default='auto', max_length=10)),
))
db.send_create_signal(u'aldryn_locations', ['EmbedDirectionsPlugin'])
# Adding model 'EmbedSearchPlugin'
db.create_table(u'aldryn_locations_embedsearchplugin', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('query', self.gf('django.db.models.fields.CharField')(max_length=255)),
('map_type', self.gf('django.db.models.fields.CharField')(default='roadmap', max_length=300)),
('center', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('zoom', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('ui_lang', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('region', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('width', self.gf('django.db.models.fields.CharField')(default='100%', max_length=6)),
('height', self.gf('django.db.models.fields.CharField')(default='400px', max_length=6)),
))
db.send_create_signal(u'aldryn_locations', ['EmbedSearchPlugin'])
def backwards(self, orm):
# Deleting model 'EmbedPlacePlugin'
db.delete_table(u'aldryn_locations_embedplaceplugin')
# Deleting model 'EmbedViewPlugin'
db.delete_table(u'aldryn_locations_embedviewplugin')
# Deleting model 'EmbedDirectionsPlugin'
db.delete_table(u'aldryn_locations_embeddirectionsplugin')
# Deleting model 'EmbedSearchPlugin'
db.delete_table(u'aldryn_locations_embedsearchplugin')
models = {
u'aldryn_locations.embeddirectionsplugin': {
'Meta': {'object_name': 'EmbedDirectionsPlugin'},
'avoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'center': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'destination': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'height': ('django.db.models.fields.CharField', [], {'default': "'400px'", 'max_length': '6'}),
'map_type': ('django.db.models.fields.CharField', [], {'default': "'roadmap'", 'max_length': '300'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'travel_mode': ('django.db.models.fields.CharField', [], {'default': "'auto'", 'max_length': '50'}),
'ui_lang': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'default': "'auto'", 'max_length': '10'}),
'waypoints': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '6'}),
'zoom': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'aldryn_locations.embedplaceplugin': {
'Meta': {'object_name': 'EmbedPlacePlugin'},
'center': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'default': "'400px'", 'max_length': '6'}),
'map_type': ('django.db.models.fields.CharField', [], {'default': "'roadmap'", 'max_length': '300'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ui_lang': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '6'}),
'zoom': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'aldryn_locations.embedsearchplugin': {
'Meta': {'object_name': 'EmbedSearchPlugin'},
'center': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'default': "'400px'", 'max_length': '6'}),
'map_type': ('django.db.models.fields.CharField', [], {'default': "'roadmap'", 'max_length': '300'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ui_lang': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '6'}),
'zoom': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'aldryn_locations.embedviewplugin': {
'Meta': {'object_name': 'EmbedViewPlugin'},
'center': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'default': "'400px'", 'max_length': '6'}),
'map_type': ('django.db.models.fields.CharField', [], {'default': "'roadmap'", 'max_length': '300'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ui_lang': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '6'}),
'zoom': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'aldryn_locations.locationplugin': {
'Meta': {'object_name': 'LocationPlugin', '_ormbases': ['cms.CMSPlugin']},
'address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'aldryn_locations.mapplugin': {
'Meta': {'object_name': 'MapPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'double_click_zoom': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'draggable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'default': "'400px'", 'max_length': '6'}),
'keyboard_shortcuts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'map_type': ('django.db.models.fields.CharField', [], {'default': "'roadmap'", 'max_length': '300'}),
'pan_control': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'route_planner_title': ('django.db.models.fields.CharField', [], {'default': "u'Calculate your fastest way to here'", 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'scrollwheel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'street_view_control': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '6'}),
'zoom': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'zoom_control': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'aldryn_locations.routelocationplugin': {
'Meta': {'object_name': 'RouteLocationPlugin', '_ormbases': [u'aldryn_locations.LocationPlugin']},
u'locationplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['aldryn_locations.LocationPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['aldryn_locations']
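# Applied with South's migrate command, e.g. (assuming the app is installed
# as 'aldryn_locations'):
#     python manage.py migrate aldryn_locations 0003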
|
{
"content_hash": "b7f37232cc493016fb543df41d56975a",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 185,
"avg_line_length": 81.6734693877551,
"alnum_prop": 0.5834582708645677,
"repo_name": "aldryn/aldryn-locations",
"id": "9738f53cefedb75d126fa68415168d552e6391c2",
"size": "16032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aldryn_locations/south_migrations/0003_auto__add_embedplaceplugin__add_embedviewplugin__add_embeddirectionspl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10653"
},
{
"name": "JavaScript",
"bytes": "16402"
},
{
"name": "Python",
"bytes": "78694"
}
],
"symlink_target": ""
}
|
import signal
from django.test import TestCase
from bulk_sms.utils import SignalManager
class SignalManagerTest(TestCase):
def setUp(self):
self.manager = SignalManager()
def _test_handler1(self):
pass
def _test_handler2(self):
pass
def test_push_pop(self):
"""
Make sure we can add and remove signal handlers, and that they get
updated appropriately.
"""
sig = signal.SIGHUP
saved = signal.getsignal(sig)
# push a new handler and make sure it gets installed
self.manager.push(sig, self._test_handler1)
self.assertEqual(signal.getsignal(sig), self._test_handler1)
# pop our handler and make sure the original value gets restored
handler = self.manager.pop(sig)
self.assertEqual(handler, self._test_handler1)
self.assertEqual(signal.getsignal(sig), saved)
def test_nested(self):
"""
Make sure nested calls to ``push`` and ``pop`` work as expected.
"""
sig = signal.SIGHUP
saved = signal.getsignal(sig)
# push a new handler and make sure it gets installed
self.manager.push(sig, self._test_handler1)
self.assertEqual(signal.getsignal(sig), self._test_handler1)
# push a second new handler and make sure it gets installed
self.manager.push(sig, self._test_handler2)
self.assertEqual(signal.getsignal(sig), self._test_handler2)
# pop our second handler and make sure the first one we installed gets set
handler = self.manager.pop(sig)
self.assertEqual(handler, self._test_handler2)
self.assertEqual(signal.getsignal(sig), self._test_handler1)
        # pop our first handler and make sure the original value gets restored
handler = self.manager.pop(sig)
self.assertEqual(handler, self._test_handler1)
self.assertEqual(signal.getsignal(sig), saved)
def test_saved_first(self):
"""
Make sure ``pop`` raises a ``ValueError`` if ``push`` hasn't been called
first for a given signum.
"""
# fails if no handler saved
self.assertRaises(ValueError, self.manager.pop, 1)
# fails if handler saved but already popped
self.manager._saved_handlers = {1: []}
self.assertRaises(ValueError, self.manager.pop, 1)
# fails if other handlers exist, but not one for our signum
self.manager._saved_handlers = {2: [self._test_handler1]}
self.assertRaises(ValueError, self.manager.pop, 1)
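# For reference, a minimal sketch of the stack-per-signal manager these tests
# assume (the real implementation lives in bulk_sms.utils; the names below
# mirror the behaviour exercised above, not the actual source):
#
#     class SignalManager(object):
#         def __init__(self):
#             self._saved_handlers = {}
#
#         def push(self, signum, handler):
#             # remember whatever is installed now, then install ours
#             self._saved_handlers.setdefault(signum, []).append(
#                 signal.getsignal(signum))
#             signal.signal(signum, handler)
#
#         def pop(self, signum):
#             # restore the most recently saved handler; return the one removed
#             if not self._saved_handlers.get(signum):
#                 raise ValueError("no saved handler for signal %d" % signum)
#             current = signal.getsignal(signum)
#             signal.signal(signum, self._saved_handlers[signum].pop())
#             return current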
|
{
"content_hash": "62c712c8f8a535b570ae9281fb887d91",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 82,
"avg_line_length": 38.28358208955224,
"alnum_prop": 0.6456140350877193,
"repo_name": "SmartElect/SmartElect",
"id": "21a9d4b8b7cbb0fec2e0c9de1fc144c18e734c75",
"size": "2565",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bulk_sms/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43928"
},
{
"name": "HTML",
"bytes": "175822"
},
{
"name": "JavaScript",
"bytes": "475284"
},
{
"name": "Python",
"bytes": "1848271"
},
{
"name": "Shell",
"bytes": "1834"
}
],
"symlink_target": ""
}
|
from core import ContextDependent, Drawable
class TextBlock(ContextDependent, Drawable):
""" For drawing text
"""
def __init__(self, string):
        # initialize parent constructors explicitly; __bases__ lives on the
        # class, not the instance, and the unbound __init__ calls need self
        for parent in type(self).__bases__:
            parent.__init__(self)
self.contents = []
self.contents.append(string)
        self.size = 14
        self.font = "sans-serif"
        # compute the bounding box only once size and font are known
        self._set_bbox()
def draw(self, ctx):
pass
def _set_bbox(self):
"""resets the bounding box based on string contents
"""
pass
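# A minimal sketch of what _set_bbox might compute, assuming a crude
# fixed-width approximation (real metrics would come from the drawing
# context, and 'bbox' is an illustrative attribute name):
#
#     def _set_bbox(self):
#         width = max(len(line) for line in self.contents) * self.size * 0.6
#         height = len(self.contents) * self.size * 1.2
#         self.bbox = (0, 0, width, height)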
|
{
"content_hash": "b47ab8c6d8e743a91721b2e85c48f0a1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 59,
"avg_line_length": 21.653846153846153,
"alnum_prop": 0.5612788632326821,
"repo_name": "bengolder/sketcher",
"id": "e8aa036b569b545c375ae7c4673d7d6f11bbdf11",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3034"
}
],
"symlink_target": ""
}
|
import os
import sys
import hashlib
import unittest
from time import sleep
import system_test
from system_test import TestCase, Qdrouterd, QdManager, Process
from system_test import curl_available, TIMEOUT, skip_test_in_ci
from subprocess import PIPE
h2hyper_installed = True
try:
import h2.connection # noqa F401: imported but unused
except ImportError:
h2hyper_installed = False
def python_37_available():
    return sys.version_info >= (3, 7)
def quart_available():
"""
Checks if quart version is greater than 0.13
"""
popen_args = ['quart', '--version']
try:
process = Process(popen_args,
name='quart_check',
stdout=PIPE,
expect=None,
universal_newlines=True)
out = process.communicate()[0]
parts = out.split(".")
major_version = parts[0]
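        # The output is presumably something like "Quart 0.13.1", so parts is
        # ["Quart 0", "13", "1"] and major_version[-1] picks out the major
        # version digit (an assumption about the CLI's output format).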
if int(major_version[-1]) > 0 or int(parts[1]) >= 13:
return True
return False
except Exception as e:
print(e)
print("quart_not_available")
return False
def skip_test():
if python_37_available() and quart_available() and curl_available():
return False
return True
def skip_h2_test():
if python_37_available() and h2hyper_installed and curl_available():
return False
return True
def get_digest(file_path):
h = hashlib.sha256()
with open(file_path, 'rb') as file:
while True:
# Reading is buffered, so we can read smaller chunks.
chunk = file.read(h.block_size)
if not chunk:
break
h.update(chunk)
return h.hexdigest()
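# e.g. get_digest(image_file('test.jpg')) returns the file's SHA-256 digest as
# a 64-character hex string; the tests compare server- and client-side digests.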
def image_file(name):
return os.path.join(system_test.DIR, 'images', name)
class Http2TestBase(TestCase):
def run_curl(self, address, args=None, input=None, timeout=TIMEOUT):
"""
Run the curl command using the HTTP/2 protocol
"""
local_args = [str(address), "--http2-prior-knowledge"]
if args:
local_args += args
status, out, err = system_test.run_curl(local_args, input=input,
timeout=timeout)
if status != 0:
print("CURL ERROR (%s): %s %s" % (status, out, err), flush=True)
assert status == 0
return out
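    # Typical use, taken from the tests below. run_curl always adds
    # --http2-prior-knowledge, so curl speaks HTTP/2 without the h2c upgrade:
    #     self.run_curl(address + "/myinfo",
    #                   args=['-d', 'fname=John&lname=Doe', '-X', 'POST'])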
class CommonHttp2Tests:
"""
Common Base class containing all tests. These tests are run by all
topologies of routers.
"""
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
# Tests the HTTP2 head request
def test_head_request(self):
# Run curl 127.0.0.1:port --http2-prior-knowledge --head
address = self.router_qdra.http_addresses[0]
out = self.run_curl(address, args=["--head"])
self.assertIn('HTTP/2 200', out)
self.assertIn('server: hypercorn-h2', out)
self.assertIn('content-type: text/html; charset=utf-8', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_get_request(self):
# Run curl 127.0.0.1:port --http2-prior-knowledge
address = self.router_qdra.http_addresses[0]
out = self.run_curl(address)
i = 0
ret_string = ""
        while i < 1000:
ret_string += str(i) + ","
i += 1
self.assertIn(ret_string, out)
# @unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
# def test_large_get_request(self):
# Tests a large get request. Response is more than 50k which means it
# will span many qd_http2_buffer_t objects.
# Run curl 127.0.0.1:port/largeget --http2-prior-knowledge
# address = self.router_qdra.http_addresses[0] + "/largeget"
# out = self.run_curl(address)
# self.assertIn("49996,49997,49998,49999", out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_post_request(self):
# curl -d "fname=John&lname=Doe" -X POST 127.0.0.1:9000/myinfo --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/myinfo"
out = self.run_curl(address, args=['-d', 'fname=John&lname=Doe', '-X', 'POST'])
self.assertIn('Success! Your first name is John, last name is Doe', out)
skip_reason = 'Test skipped on certain Travis environments'
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
@unittest.skipIf(skip_test_in_ci('QPID_SYSTEM_TEST_SKIP_HTTP2_LARGE_IMAGE_UPLOAD_TEST'), skip_reason)
def test_post_upload_large_image_jpg(self):
# curl -X POST -H "Content-Type: multipart/form-data" -F "data=@/home/gmurthy/opensource/test.jpg"
# http://127.0.0.1:9000/upload --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/upload"
out = self.run_curl(address, args=['-X', 'POST', '-H', 'Content-Type: multipart/form-data',
'-F', 'data=@' + image_file('test.jpg')])
self.assertIn('Success', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_delete_request(self):
# curl -X DELETE "http://127.0.0.1:9000/myinfo/delete/22122" -H "accept: application/json" --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/myinfo/delete/22122"
out = self.run_curl(address, args=['-X', 'DELETE'])
self.assertIn('{"fname": "John", "lname": "Doe", "id": "22122"}', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_put_request(self):
# curl -d "fname=John&lname=Doe" -X PUT 127.0.0.1:9000/myinfo --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/myinfo"
out = self.run_curl(address, args=['-d', 'fname=John&lname=Doe', '-X', 'PUT'])
self.assertIn('Success! Your first name is John, last name is Doe', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_patch_request(self):
# curl -d "fname=John&lname=Doe" -X PATCH 127.0.0.1:9000/myinfo --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/patch"
out = self.run_curl(address, args=['--data', '{\"op\":\"add\",\"path\":\"/user\",\"value\":\"jane\"}', '-X', 'PATCH'])
self.assertIn('"op":"add"', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_404(self):
# Run curl 127.0.0.1:port/unavailable --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/unavailable"
out = self.run_curl(address=address)
self.assertIn('404 Not Found', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_500(self):
# Run curl 127.0.0.1:port/test/500 --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/test/500"
out = self.run_curl(address)
self.assertIn('500 Internal Server Error', out)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_get_image_png(self):
# Run curl 127.0.0.1:port --output images/balanced-routing.png --http2-prior-knowledge
image_file_name = '/balanced-routing.png'
address = self.router_qdra.http_addresses[0] + "/images" + image_file_name
self.run_curl(address, args=['--output', self.router_qdra.outdir + image_file_name])
digest_of_server_file = get_digest(image_file(image_file_name[1:]))
digest_of_response_file = get_digest(self.router_qdra.outdir + image_file_name)
self.assertEqual(digest_of_server_file, digest_of_response_file)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_get_image_jpg(self):
# Run curl 127.0.0.1:port --output images/apache.jpg --http2-prior-knowledge
image_file_name = '/apache.jpg'
address = self.router_qdra.http_addresses[0] + "/images" + image_file_name
self.run_curl(address, args=['--output', self.router_qdra.outdir + image_file_name])
        digest_of_server_file = get_digest(image_file(image_file_name[1:]))
digest_of_response_file = get_digest(self.router_qdra.outdir + image_file_name)
self.assertEqual(digest_of_server_file, digest_of_response_file)
def check_connector_delete(self, client_addr, server_addr):
# Run curl 127.0.0.1:port --http2-prior-knowledge
# We are first making sure that the http request goes thru successfully.
out = self.run_curl(client_addr)
# Run a qdmanage query on connections to see how many qdr_connections are
# there on the egress router
qd_manager = QdManager(self, address=server_addr)
connections = qd_manager.query('org.apache.qpid.dispatch.connection')
self.assertGreaterEqual(len(connections), 2)
server_conn_found = False
for conn in connections:
if os.environ['SERVER_LISTEN_PORT'] in conn['name']:
server_conn_found = True
break
self.assertTrue(server_conn_found)
# Run a qdmanage DELETE on the httpConnector
http_connectors = qd_manager.query('org.apache.qpid.dispatch.httpConnector')
self.assertEqual(len(http_connectors), 1)
# Delete the httpConnector
qd_manager.delete("org.apache.qpid.dispatch.httpConnector", name=self.connector_name)
# Make sure the connector is gone
http_connectors = qd_manager.query('org.apache.qpid.dispatch.httpConnector')
self.assertEqual(len(http_connectors), 0)
# Deleting the connector must have taken out the connection to the server.
connections = qd_manager.query('org.apache.qpid.dispatch.connection')
http_server_conn_found = False
for conn in connections:
if os.environ['SERVER_LISTEN_PORT'] in conn['name']:
                http_server_conn_found = True
break
self.assertFalse(http_server_conn_found)
sleep(2)
# Now, run a curl client GET request with a timeout
request_timed_out = False
try:
out = self.run_curl(client_addr, timeout=5)
print(out)
        except Exception:
request_timed_out = True
self.assertTrue(request_timed_out)
# Add back the httpConnector
# qdmanage CREATE type=httpConnector address=examples.com host=127.0.0.1 port=80 protocolVersion=HTTP2
create_result = qd_manager.create("org.apache.qpid.dispatch.httpConnector", self.connector_props)
num_tries = 2
tries = 0
conn_present = False
while tries < num_tries:
connections = qd_manager.query('org.apache.qpid.dispatch.connection')
tries += 1
            if len(connections) < 2:
                sleep(2)
            else:
                conn_present = True
                break
self.assertTrue(conn_present)
out = self.run_curl(client_addr)
ret_string = ""
i = 0
        while i < 1000:
ret_string += str(i) + ","
i += 1
self.assertIn(ret_string, out)
class Http2TestOneStandaloneRouter(Http2TestBase, CommonHttp2Tests):
@classmethod
def setUpClass(cls):
super(Http2TestOneStandaloneRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
name = "http2-test-standalone-router"
cls.connector_name = 'connectorToBeDeleted'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('httpConnector', cls.connector_props)
])
cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_zzz_http_connector_delete(self):
self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
server_addr=self.router_qdra.addresses[0])
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_000_stats(self):
# Run curl 127.0.0.1:port --http2-prior-knowledge
address = self.router_qdra.http_addresses[0]
qd_manager = QdManager(self, address=self.router_qdra.addresses[0])
# First request
out = self.run_curl(address)
# Second request
address = self.router_qdra.http_addresses[0] + "/myinfo"
out = self.run_curl(address, args=['-d', 'fname=Mickey&lname=Mouse', '-X', 'POST'])
self.assertIn('Success! Your first name is Mickey, last name is Mouse', out)
stats = qd_manager.query('org.apache.qpid.dispatch.httpRequestInfo')
self.assertEqual(len(stats), 2)
# Give time for the core thread to augment the stats.
i = 0
while i < 3:
if not stats or stats[0].get('requests') < 2:
i += 1
sleep(1)
stats = qd_manager.query('org.apache.qpid.dispatch.httpRequestInfo')
else:
break
for s in stats:
self.assertEqual(s.get('requests'), 2)
self.assertEqual(s.get('details').get('GET:200'), 1)
self.assertEqual(s.get('details').get('POST:200'), 1)
if stats[0].get('direction') == 'out':
self.assertEqual(stats[1].get('direction'), 'in')
self.assertEqual(stats[0].get('bytesOut'), 24)
self.assertEqual(stats[0].get('bytesIn'), 3944)
self.assertEqual(stats[1].get('bytesOut'), 3944)
self.assertEqual(stats[1].get('bytesIn'), 24)
else:
self.assertEqual(stats[0].get('direction'), 'in')
self.assertEqual(stats[1].get('direction'), 'out')
self.assertEqual(stats[0].get('bytesOut'), 3944)
self.assertEqual(stats[0].get('bytesIn'), 24)
self.assertEqual(stats[1].get('bytesOut'), 24)
self.assertEqual(stats[1].get('bytesIn'), 3944)
class Http2TestOneEdgeRouter(Http2TestBase, CommonHttp2Tests):
@classmethod
def setUpClass(cls):
super(Http2TestOneEdgeRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
name = "http2-test-router"
cls.connector_name = 'connectorToBeDeleted'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config = Qdrouterd.Config([
('router', {'mode': 'edge', 'id': 'QDR'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('httpConnector', cls.connector_props)
])
cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_zzz_http_connector_delete(self):
self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
server_addr=self.router_qdra.addresses[0])
class Http2TestOneInteriorRouter(Http2TestBase, CommonHttp2Tests):
@classmethod
def setUpClass(cls):
super(Http2TestOneInteriorRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
name = "http2-test-router"
cls.connector_name = 'connectorToBeDeleted'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('httpConnector', cls.connector_props)
])
cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_zzz_http_connector_delete(self):
self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
server_addr=self.router_qdra.addresses[0])
class Http2TestTwoRouter(Http2TestBase, CommonHttp2Tests):
@classmethod
def setUpClass(cls):
super(Http2TestTwoRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
name = "http2-test-router"
inter_router_port = cls.tester.get_port()
config_qdra = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('listener', {'role': 'inter-router', 'port': inter_router_port})
])
cls.connector_name = 'connectorToBeDeleted'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config_qdrb = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.B'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpConnector', cls.connector_props),
('connector', {'name': 'connectorToA', 'role': 'inter-router',
'port': inter_router_port,
'verifyHostname': 'no'})
])
cls.router_qdra = cls.tester.qdrouterd(name, config_qdra, wait=True)
cls.router_qdrb = cls.tester.qdrouterd(name, config_qdrb, wait=True)
cls.router_qdra.wait_router_connected('QDR.B')
cls.router_qdrb.wait_router_connected('QDR.A')
sleep(2)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_000_stats(self):
# Run curl 127.0.0.1:port --http2-prior-knowledge
address = self.router_qdra.http_addresses[0]
qd_manager_a = QdManager(self, address=self.router_qdra.addresses[0])
stats_a = qd_manager_a.query('org.apache.qpid.dispatch.httpRequestInfo')
# First request
self.run_curl(address)
address = self.router_qdra.http_addresses[0] + "/myinfo"
# Second request
out = self.run_curl(address, args=['-d', 'fname=Mickey&lname=Mouse', '-X', 'POST'])
self.assertIn('Success! Your first name is Mickey, last name is Mouse', out)
# Give time for the core thread to augment the stats.
i = 0
while i < 3:
if not stats_a or stats_a[0].get('requests') < 2:
sleep(1)
i += 1
stats_a = qd_manager_a.query('org.apache.qpid.dispatch.httpRequestInfo')
else:
break
self.assertEqual(len(stats_a), 1)
self.assertEqual(stats_a[0].get('requests'), 2)
self.assertEqual(stats_a[0].get('direction'), 'in')
self.assertEqual(stats_a[0].get('bytesOut'), 3944)
self.assertEqual(stats_a[0].get('bytesIn'), 24)
qd_manager_b = QdManager(self, address=self.router_qdrb.addresses[0])
stats_b = qd_manager_b.query('org.apache.qpid.dispatch.httpRequestInfo')
self.assertEqual(len(stats_b), 1)
i = 0
while i < 3:
if not stats_b or stats_b[0].get('requests') < 2:
i += 1
sleep(1)
stats_b = qd_manager_b.query('org.apache.qpid.dispatch.httpRequestInfo')
else:
break
self.assertEqual(stats_b[0].get('requests'), 2)
self.assertEqual(stats_b[0].get('direction'), 'out')
self.assertEqual(stats_b[0].get('bytesOut'), 24)
self.assertEqual(stats_b[0].get('bytesIn'), 3944)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_zzz_http_connector_delete(self):
self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
server_addr=self.router_qdrb.addresses[0])
class Http2TestEdgeInteriorRouter(Http2TestBase, CommonHttp2Tests):
"""
The interior router connects to the HTTP2 server and the curl client
connects to the edge router.
"""
@classmethod
def setUpClass(cls):
super(Http2TestEdgeInteriorRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
inter_router_port = cls.tester.get_port()
config_edgea = Qdrouterd.Config([
('router', {'mode': 'edge', 'id': 'EDGE.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('connector', {'name': 'connectorToA', 'role': 'edge',
'port': inter_router_port,
'verifyHostname': 'no'})
])
config_qdrb = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('listener', {'role': 'edge', 'port': inter_router_port}),
('httpConnector',
{'port': os.getenv('SERVER_LISTEN_PORT'), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'})
])
cls.router_qdrb = cls.tester.qdrouterd("interior-router", config_qdrb, wait=True)
cls.router_qdra = cls.tester.qdrouterd("edge-router", config_edgea)
sleep(3)
class Http2TestInteriorEdgeRouter(Http2TestBase, CommonHttp2Tests):
"""
The edge router connects to the HTTP2 server and the curl client
connects to the interior router.
"""
@classmethod
def setUpClass(cls):
super(Http2TestInteriorEdgeRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
inter_router_port = cls.tester.get_port()
config_edge = Qdrouterd.Config([
('router', {'mode': 'edge', 'id': 'EDGE.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpConnector',
{'port': os.getenv('SERVER_LISTEN_PORT'), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('connector', {'name': 'connectorToA', 'role': 'edge',
'port': inter_router_port,
'verifyHostname': 'no'})
])
config_qdra = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('listener', {'role': 'edge', 'port': inter_router_port}),
('httpListener',
{'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
])
cls.router_qdra = cls.tester.qdrouterd("interior-router", config_qdra, wait=True)
cls.router_qdrb = cls.tester.qdrouterd("edge-router", config_edge)
sleep(3)
class Http2TestEdgeToEdgeViaInteriorRouter(Http2TestBase, CommonHttp2Tests):
"""
The edge router connects to the HTTP2 server and the curl client
connects to another edge router. The two edge routers are connected
via an interior router.
"""
@classmethod
def setUpClass(cls):
super(Http2TestEdgeToEdgeViaInteriorRouter, cls).setUpClass()
if skip_test():
return
cls.http2_server_name = "http2_server"
os.environ["QUART_APP"] = "http2server:app"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_server.py")
cls.connector_name = 'connectorToBeDeleted'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
inter_router_port = cls.tester.get_port()
config_edge_b = Qdrouterd.Config([
('router', {'mode': 'edge', 'id': 'EDGE.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpConnector', cls.connector_props),
('connector', {'name': 'connectorToA', 'role': 'edge',
'port': inter_router_port,
'verifyHostname': 'no'})
])
config_qdra = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('listener', {'role': 'edge', 'port': inter_router_port}),
])
config_edge_a = Qdrouterd.Config([
('router', {'mode': 'edge', 'id': 'EDGE.B'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal',
'host': '0.0.0.0'}),
('httpListener',
{'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('connector', {'name': 'connectorToA', 'role': 'edge',
'port': inter_router_port,
'verifyHostname': 'no'})
])
cls.interior_qdr = cls.tester.qdrouterd("interior-router", config_qdra,
wait=True)
cls.router_qdra = cls.tester.qdrouterd("edge-router-a", config_edge_a)
cls.router_qdrb = cls.tester.qdrouterd("edge-router-b", config_edge_b)
sleep(5)
@unittest.skipIf(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
def test_zzz_http_connector_delete(self):
self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
server_addr=self.router_qdrb.addresses[0])
class Http2TestGoAway(Http2TestBase):
@classmethod
def setUpClass(cls):
super(Http2TestGoAway, cls).setUpClass()
if skip_h2_test():
return
cls.http2_server_name = "hyperh2_server"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="hyperh2_server.py")
name = "http2-test-router"
cls.connector_name = 'connectorToBeDeleted'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('httpConnector', cls.connector_props)
])
cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)
@unittest.skipIf(skip_h2_test(),
"Python 3.7 or greater, hyper-h2 and curl needed to run hyperhttp2 tests")
def test_goaway(self):
        # Executes a request against the router at the /goaway_test_1 URL.
        # The router in turn forwards the request to the http2 server, which
        # responds with a GOAWAY frame. The router propagates this GOAWAY
        # condition to the client by issuing an HTTP 503.
address = self.router_qdra.http_addresses[0] + "/goaway_test_1"
out = self.run_curl(address, args=["-i"])
self.assertIn("HTTP/2 503", out)
class Http2Q2OneRouterTest(Http2TestBase):
@classmethod
def setUpClass(cls):
super(Http2Q2OneRouterTest, cls).setUpClass()
if skip_h2_test():
return
cls.http2_server_name = "http2_slow_q2_server"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_slow_q2_server.py")
name = "http2-test-router"
cls.connector_name = 'connectorToServer'
cls.connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('httpConnector', cls.connector_props)
])
cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)
@unittest.skipIf(skip_h2_test(),
"Python 3.7 or greater, hyper-h2 and curl needed to run hyperhttp2 tests")
def test_q2_block_unblock(self):
# curl -X POST -H "Content-Type: multipart/form-data" -F "data=@/home/gmurthy/opensource/test.jpg"
# http://127.0.0.1:9000/upload --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/upload"
out = self.run_curl(address, args=['-X', 'POST', '-H', 'Content-Type: multipart/form-data',
'-F', 'data=@' + image_file('test.jpg')])
self.assertIn('Success', out)
num_blocked = 0
num_unblocked = 0
blocked = "q2 is blocked"
unblocked = "q2 is unblocked"
with open(self.router_qdra.logfile_path, 'r') as router_log:
log_lines = router_log.read().split("\n")
for log_line in log_lines:
if unblocked in log_line:
num_unblocked += 1
elif blocked in log_line:
num_blocked += 1
self.assertGreater(num_blocked, 0)
self.assertGreater(num_unblocked, 0)
class Http2Q2TwoRouterTest(Http2TestBase):
@classmethod
def setUpClass(cls):
super(Http2Q2TwoRouterTest, cls).setUpClass()
if skip_h2_test():
return
cls.http2_server_name = "http2_server"
os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
py_string='python3',
server_file="http2_slow_q2_server.py")
qdr_a = "QDR.A"
inter_router_port = cls.tester.get_port()
config_qdra = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.A'}),
('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
('connector', {'name': 'connectorToB', 'role': 'inter-router',
'port': inter_router_port,
'verifyHostname': 'no'})
])
qdr_b = "QDR.B"
cls.connector_name = 'serverConnector'
cls.http_connector_props = {
'port': os.getenv('SERVER_LISTEN_PORT'),
'address': 'examples',
'host': '127.0.0.1',
'protocolVersion': 'HTTP2',
'name': cls.connector_name
}
config_qdrb = Qdrouterd.Config([
('router', {'mode': 'interior', 'id': 'QDR.B'}),
('httpConnector', cls.http_connector_props),
('listener', {'role': 'inter-router', 'maxSessionFrames': '10', 'port': inter_router_port})
])
cls.router_qdrb = cls.tester.qdrouterd(qdr_b, config_qdrb, wait=True)
cls.router_qdra = cls.tester.qdrouterd(qdr_a, config_qdra, wait=True)
cls.router_qdra.wait_router_connected('QDR.B')
@unittest.skipIf(skip_h2_test(),
"Python 3.7 or greater, hyper-h2 and curl needed to run hyperhttp2 tests")
def test_q2_block_unblock(self):
# curl -X POST -H "Content-Type: multipart/form-data" -F "data=@/home/gmurthy/opensource/test.jpg"
# http://127.0.0.1:9000/upload --http2-prior-knowledge
address = self.router_qdra.http_addresses[0] + "/upload"
out = self.run_curl(address, args=['-X', 'POST', '-H', 'Content-Type: multipart/form-data',
'-F', 'data=@' + image_file('test.jpg')])
self.assertIn('Success', out)
num_blocked = 0
num_unblocked = 0
blocked = "q2 is blocked"
unblocked = "q2 is unblocked"
with open(self.router_qdra.logfile_path, 'r') as router_log:
log_lines = router_log.read().split("\n")
for log_line in log_lines:
if unblocked in log_line:
num_unblocked += 1
elif blocked in log_line:
num_blocked += 1
self.assertGreater(num_blocked, 0)
self.assertGreater(num_unblocked, 0)
|
{
"content_hash": "6a8f114d08648744ea75df3031d383c0",
"timestamp": "",
"source": "github",
"line_count": 860,
"max_line_length": 126,
"avg_line_length": 45.753488372093024,
"alnum_prop": 0.5613245908305378,
"repo_name": "mgoulish/qpid-dispatch",
"id": "a588fa47df425d31fe3848604f25f0d3de38159e",
"size": "40138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/system_tests_http2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2831990"
},
{
"name": "C++",
"bytes": "354723"
},
{
"name": "CMake",
"bytes": "57520"
},
{
"name": "CSS",
"bytes": "49129"
},
{
"name": "Dockerfile",
"bytes": "3323"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "733506"
},
{
"name": "Python",
"bytes": "2736603"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
}
|
import argparse
import argparse_parent_with_group
parser = argparse.ArgumentParser(
parents=[argparse_parent_with_group.parser],
)
parser.add_argument('--local-arg',
action="store_true",
default=False)
print(parser.parse_args())
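# Example run (hypothetical output; the full Namespace depends on what
# argparse_parent_with_group.parser defines, --local-arg is the only argument
# added here):
#
#     $ python argparse_uses_parent_with_group.py --local-arg
#     Namespace(..., local_arg=True)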
|
{
"content_hash": "89e06980b9d55683e19d5e1d312afea1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 23,
"alnum_prop": 0.6485507246376812,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "f44d6a899ad8833da38cb9e847bf405ad9d46cae",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_application_building_blocks/argparse_uses_parent_with_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
}
|
from textx.metamodel import metamodel_from_file
from textx.export import metamodel_export, model_export
from textx.exceptions import TextXSyntaxError
from pythonmodels import MLayoutGraph, MLayoutSubgraphs, MExpression, MTerm, MFactor
import os
import sys
class Interpreter():
    def __init__(self):
metamodel_dir = sys.path[0]
metamodel_path = os.path.join(metamodel_dir, "layout.tx")
metamodel = metamodel_from_file(metamodel_path)
self.metamodel = metamodel
def execute(self, model_str):
try:
model = self.metamodel.model_from_str(model_str)
except TextXSyntaxError as e:
print(e.message)
            return MLayoutGraph(exception=e.message)
if model.__class__.__name__ == 'LayoutGraph':
layoutGraph = Interpreter.execute_one(model.layoutType, 'graph')
print('graph')
return layoutGraph
else:
print('subgraphs')
subgraphs = []
for layoutSubgraph in model.layoutSubgraphs:
subgraph = layoutSubgraph.subgraph
                if subgraph is None:
graph = 'others'
else:
vertices = ''
content = True
for i,vertex in enumerate(subgraph.vertices):
if vertex.index:
vertices = vertices + str(vertex.index)
content = False
else:
vertices = vertices + vertex.content
if i < len(subgraph.vertices) - 1:
vertices = vertices + ','
graph = vertices
layout_type = layoutSubgraph.layoutType
layoutOneSubgraph = Interpreter.execute_one(layout_type, graph)
layoutOneSubgraph.attr_graphContent = content
subgraphs.append(layoutOneSubgraph)
return MLayoutSubgraphs(subgraphs)
return 'executed'
@staticmethod
def execute_one(layout, graph):
if layout.__class__.__name__ == "LayoutStyle":
layout_type = "style"
elif layout.__class__.__name__ == "LayoutAlgorithm":
layout_type = "algorithm"
elif layout.__class__.__name__ == "AestheticCriteria":
layout_type = "criteria"
else:
layout_type = "mathCriteria"
print(layout_type)
if layout_type == 'algorithm':
#a map that will contain all information about the algorithm
algorithmProperties = {}
algorithm = layout.algorithm
#the algorithm could be of numerous classes
            for attr, value in algorithm.__dict__.items():
if not (attr.startswith('_') or attr == 'parent'):
if attr == 'properties':
for property in value:
                            for propertyAttr, propertyValue in property.__dict__.items():
if not (propertyAttr.startswith('_') or propertyAttr == 'parent'):
algorithmProperties[propertyAttr] = propertyValue
else:
algorithmProperties[attr] = value
layoutGraph = MLayoutGraph(graph = graph, type = layout_type, algorithm = algorithmProperties)
return layoutGraph
elif layout_type == 'style':
style = layout.style
layoutGraph = MLayoutGraph(graph=graph, type=layout_type, style=style)
return layoutGraph
elif layout_type == 'criteria':
            criteriaList = []
criteria = layout.aestheticCriteria
for criterion in criteria:
criterionProperties = {}
                for attr, value in criterion.__dict__.items():
if not (attr.startswith('_') or attr == 'parent'):
criterionProperties[attr] = value
criteriaList.append(criterionProperties)
layoutGraph = MLayoutGraph(graph=graph, type=layout_type, aestheticCriteria=criteriaList)
return layoutGraph
elif layout_type == 'mathCriteria':
m_expression = Interpreter.form_expression(layout.expression)
layout_graph = MLayoutGraph(graph=graph, type=layout_type, criteriaExpression=m_expression )
print(layout_graph.getCriteriaExpression())
return layout_graph
@staticmethod
def form_expression(expression):
terms = [expression.expressionStartTerm]
for orTerm in expression.expressionTerms:
terms.append(orTerm.term)
m_terms = []
for term in terms:
factors = [term.termStartFactor]
for andFactor in term.termFactors:
factors.append(andFactor.factor)
m_factors = []
for factor in factors:
criterionProperties = None
m_expression = None
if factor.criterion is not None:
criterionProperties = {}
                for attr, value in factor.criterion.__dict__.items():
if not (attr.startswith('_') or attr == 'parent'):
criterionProperties[attr] = value
if factor.expression is not None:
m_expression = Interpreter.form_expression(factor.expression)
m_factor = MFactor(factor.negative, criterionProperties, m_expression)
m_factors.append(m_factor)
m_term = MTerm(m_factors)
m_terms.append(m_term)
return MExpression(m_terms)
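# Note (a sketch of the semantics implemented above, assuming the usual
# and/or precedence in the grammar): OR-separated terms build an MExpression
# and AND-separated factors build an MTerm, so "f1 and f2 or f3" becomes
# MExpression([MTerm([f1, f2]), MTerm([f3])]), where each fN stands for an
# MFactor built from a criterion or a parenthesised sub-expression.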
|
{
"content_hash": "52d85de95ba112caf27096dfdf85117a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 114,
"avg_line_length": 42.306666666666665,
"alnum_prop": 0.502363693665301,
"repo_name": "renatav/GraphDrawing",
"id": "67565b055ea7eba40522ec50d8d5c3dcbe639a0b",
"size": "6346",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "GraphLayoutDSL/target/classes/interpreter/Interpreter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "982631"
},
{
"name": "Python",
"bytes": "326667"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import extras.models
class Migration(migrations.Migration):
dependencies = [
('extras', '0006_add_imageattachments'),
]
operations = [
migrations.AlterField(
model_name='customfield',
name='default',
field=models.CharField(blank=True, help_text='Default value for the field. Use "true" or "false" for booleans. N/A for selection fields.', max_length=100),
),
migrations.AlterField(
model_name='customfield',
name='is_filterable',
field=models.BooleanField(default=True, help_text='This field can be used to filter objects.'),
),
migrations.AlterField(
model_name='customfield',
name='label',
field=models.CharField(blank=True, help_text="Name of the field as displayed to users (if not provided, the field's name will be used)", max_length=50),
),
migrations.AlterField(
model_name='customfield',
name='obj_type',
field=models.ManyToManyField(help_text='The object(s) to which this field applies.', related_name='custom_fields', to='contenttypes.ContentType', verbose_name='Object(s)'),
),
migrations.AlterField(
model_name='customfield',
name='required',
field=models.BooleanField(default=False, help_text='Determines whether this field is required when creating new objects or editing an existing object.'),
),
migrations.AlterField(
model_name='customfield',
name='type',
field=models.PositiveSmallIntegerField(choices=[(100, 'Text'), (200, 'Integer'), (300, 'Boolean (true/false)'), (400, 'Date'), (500, 'URL'), (600, 'Selection')], default=100),
),
migrations.AlterField(
model_name='customfield',
name='weight',
field=models.PositiveSmallIntegerField(default=100, help_text='Fields with higher weights appear lower in a form'),
),
migrations.AlterField(
model_name='customfieldchoice',
name='weight',
field=models.PositiveSmallIntegerField(default=100, help_text='Higher weights appear lower in the list'),
),
migrations.AlterField(
model_name='graph',
name='link',
field=models.URLField(blank=True, verbose_name='Link URL'),
),
migrations.AlterField(
model_name='graph',
name='name',
field=models.CharField(max_length=100, verbose_name='Name'),
),
migrations.AlterField(
model_name='graph',
name='source',
field=models.CharField(max_length=500, verbose_name='Source URL'),
),
migrations.AlterField(
model_name='graph',
name='type',
field=models.PositiveSmallIntegerField(choices=[(100, 'Interface'), (200, 'Provider'), (300, 'Site')]),
),
migrations.AlterField(
model_name='imageattachment',
name='image',
field=models.ImageField(height_field='image_height', upload_to=extras.models.image_upload, width_field='image_width'),
),
migrations.AlterField(
model_name='topologymap',
name='device_patterns',
field=models.TextField(help_text='Identify devices to include in the diagram using regular expressions, one per line. Each line will result in a new tier of the drawing. Separate multiple regexes within a line using semicolons. Devices will be rendered in the order they are defined.'),
),
migrations.AlterField(
model_name='useraction',
name='action',
field=models.PositiveSmallIntegerField(choices=[(1, 'created'), (7, 'bulk created'), (2, 'imported'), (3, 'modified'), (4, 'bulk edited'), (5, 'deleted'), (6, 'bulk deleted')]),
),
]
|
{
"content_hash": "918cbafcd84701e39208008c5c977bf2",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 298,
"avg_line_length": 45.325842696629216,
"alnum_prop": 0.601883986117997,
"repo_name": "snazy2000/netbox",
"id": "c9a624510188026199887927911c76f4240cfc56",
"size": "4105",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/extras/migrations/0007_unicode_literals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167999"
},
{
"name": "HTML",
"bytes": "370021"
},
{
"name": "JavaScript",
"bytes": "13570"
},
{
"name": "Python",
"bytes": "1018536"
},
{
"name": "Shell",
"bytes": "3389"
}
],
"symlink_target": ""
}
|
import contextlib
import cProfile
import datetime
import io
import os
import pstats
import signal
PYSPY_OUTPUT = os.environ.get("PYSPY_OUTPUT", "/files/pyspy/")
@contextlib.contextmanager
def pyspy():
"""
    This context manager provides deterministic profiling. It generates a
    flame graph and saves it to a file, using ``py-spy`` internally.
Running py-spy inside of a docker container will also usually bring up a permissions denied error
even when running as root.
This error is caused by docker restricting the process_vm_readv system call we are using. This can be
overridden by setting --cap-add SYS_PTRACE when starting the docker container.
Alternatively you can edit the docker-compose yaml file
.. code-block:: yaml
your_service:
cap_add:
- SYS_PTRACE
In the case of Airflow Breeze, you should modify the ``tests/utils/perf/perf_kit/python.py`` file.
"""
pid = str(os.getpid())
suffix = datetime.datetime.now().isoformat()
filename = f"{PYSPY_OUTPUT}/flame-{suffix}-{pid}.html"
pyspy_pid = os.spawnlp(
os.P_NOWAIT, "sudo", "sudo", "py-spy", "record", "--idle", "-o", filename, "-p", pid
)
try:
yield
finally:
os.kill(pyspy_pid, signal.SIGINT)
print(f"Report saved to: {filename}")
@contextlib.contextmanager
def profiled(print_callers=False):
"""
    This context manager provides deterministic profiling using ``cProfile``.
    It generates statistics and prints them to the screen.
"""
profile = cProfile.Profile()
profile.enable()
try:
yield
finally:
profile.disable()
stat = io.StringIO()
pstatistics = pstats.Stats(profile, stream=stat).sort_stats("cumulative")
if print_callers:
pstatistics.print_callers()
else:
pstatistics.print_stats()
print(stat.getvalue())
if __name__ == "__main__":
def case():
"""
Load modules.
:return:
"""
import logging
import airflow
from airflow.dag_processing.processor import DagFileProcessor
log = logging.getLogger(__name__)
processor = DagFileProcessor(dag_ids=[], log=log)
dag_file = os.path.join(os.path.dirname(airflow.__file__), "example_dags", "example_complex.py")
processor.process_file(file_path=dag_file, callback_requests=[])
# Load modules
case()
# Example:
print("PySpy:")
with pyspy():
case()
# Example:
print("cProfile")
with profiled():
case()
|
{
"content_hash": "9ef5e88659db09f6399091b8ca3126e9",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 110,
"avg_line_length": 26.9375,
"alnum_prop": 0.6295436968290796,
"repo_name": "apache/incubator-airflow",
"id": "596f4f68330c928f015805b580ffda500a34b20d",
"size": "3371",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tests/test_utils/perf/perf_kit/python.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
}
|
from .sub_resource import SubResource
class ApplicationGatewayBackendHttpSettings(SubResource):
"""Backend address pool settings of an application gateway.
:param id: Resource Identifier.
:type id: str
    :param port: Port.
:type port: int
:param protocol: Protocol. Possible values are: 'Http' and 'Https'.
Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2015_06_15.models.ApplicationGatewayProtocol
:param cookie_based_affinity: Cookie based affinity. Possible values are:
'Enabled' and 'Disabled'. Possible values include: 'Enabled', 'Disabled'
:type cookie_based_affinity: str or
~azure.mgmt.network.v2015_06_15.models.ApplicationGatewayCookieBasedAffinity
:param request_timeout: Request timeout in seconds. Application Gateway
will fail the request if response is not received within RequestTimeout.
Acceptable values are from 1 second to 86400 seconds.
:type request_timeout: int
:param probe: Probe resource of an application gateway.
:type probe: ~azure.mgmt.network.v2015_06_15.models.SubResource
:param provisioning_state: Gets or sets Provisioning state of the backend
http settings resource Updating/Deleting/Failed
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'str'},
'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
'probe': {'key': 'properties.probe', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayBackendHttpSettings, self).__init__(**kwargs)
self.port = kwargs.get('port', None)
self.protocol = kwargs.get('protocol', None)
self.cookie_based_affinity = kwargs.get('cookie_based_affinity', None)
self.request_timeout = kwargs.get('request_timeout', None)
self.probe = kwargs.get('probe', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
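# Example (a sketch with illustrative values only): the model is constructed
# from keyword arguments matching the parameters documented above.
#
#   settings = ApplicationGatewayBackendHttpSettings(
#       port=80,
#       protocol='Http',
#       cookie_based_affinity='Disabled',
#       request_timeout=30,
#   )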
|
{
"content_hash": "9393c71ba09a502a9d20ad10d049bc1b",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 90,
"avg_line_length": 48.26315789473684,
"alnum_prop": 0.6659396583060705,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "dbc04149c72111f1a61cee8a1a6804aeb9300119",
"size": "3225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/application_gateway_backend_http_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='django-stats',
version='0.1',
description='An app for keeping stats about anything within your django project',
author='Chris Drackett',
author_email='chris@shelfworthy.com',
url = "https://github.com/shelfworthy/django-stats",
packages=find_packages(),
install_requires = [
'django-celery>=2.0.2',
'celery>=2.1.1'
],
classifiers = [
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",
]
)
|
{
"content_hash": "4c757fdfd88052dadd9f432e0d8c1973",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 85,
"avg_line_length": 30,
"alnum_prop": 0.6212121212121212,
"repo_name": "shelfworthy/django-stats",
"id": "b3888eff611133624c301644a0b8c0dc456da68d",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4667"
}
],
"symlink_target": ""
}
|
from model.contact import Contact
from random import randrange
def test_modify_contact_name(app):
    if app.contact.count() == 0:
        app.contact.create(Contact(firstname="New"))
    old_contacts = app.contact.get_contact_list()
    index = randrange(len(old_contacts))
    contact = Contact(firstname="Test")
    contact.id = old_contacts[index].id
    app.contact.modify_contact_by_index(index, contact)
new_contacts = app.contact.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
#assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
|
{
"content_hash": "04e470f4745aea90643b06bc6ef3bd93",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 102,
"avg_line_length": 34.36842105263158,
"alnum_prop": 0.7029096477794793,
"repo_name": "volkodav1985/volkodavpython",
"id": "758062a96aae4a9af961de768ccc77a5d98e588a",
"size": "653",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test 2/modify_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34352"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Dict, Tuple, Any, cast
import functools
import numpy as np
import math
import types
import warnings
# trapezoid is a public function for scipy.integrate,
# even though it's actually a NumPy function.
from numpy import trapz as trapezoid
from scipy.special import roots_legendre
from scipy.special import gammaln
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
'trapezoid', 'trapz', 'simps', 'simpson',
'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
'AccuracyWarning']
# Make See Also linking for our local copy work properly
def _copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapezoid = _copy_func(trapezoid)
if trapezoid.__doc__:
trapezoid.__doc__ = trapezoid.__doc__.replace(
'sum, cumsum', 'numpy.cumsum')
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
def trapz(y, x=None, dx=1.0, axis=-1):
"""An alias of `trapezoid`.
`trapz` is kept for backwards compatibility. For new code, prefer
`trapezoid` instead.
"""
return trapezoid(y, x=x, dx=dx, axis=axis)
class AccuracyWarning(Warning):
pass
if TYPE_CHECKING:
# workaround for mypy function attributes see:
# https://github.com/python/mypy/issues/2087#issuecomment-462726600
from typing_extensions import Protocol
class CacheAttributes(Protocol):
cache: Dict[int, Tuple[Any, Any]]
else:
CacheAttributes = Callable
def cache_decorator(func: Callable) -> CacheAttributes:
return cast(CacheAttributes, func)
@cache_decorator
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simpson : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> f = lambda x: x**8
>>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
(0.1110884353741496, None)
>>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
(0.11111111111111102, None)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
(0.9999999771971152, None)
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
(1.000000000039565, None)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in range(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
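# Example (sketch): wrap a scalar-only function so it accepts array input.
#
#   vsin = vectorize1(math.sin, vec_func=False)
#   vsin(np.linspace(0.0, np.pi, 5))  # array of sin values; the dtype is
#                                     # inferred from the first evaluation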
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See Also
--------
romberg : adaptive Romberg quadrature
fixed_quad : fixed-order Gaussian quadrature
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romb : integrator for sampled data
simpson : integrator for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> f = lambda x: x**8
>>> integrate.quadrature(f, 0.0, 1.0)
(0.11111111111111106, 4.163336342344337e-17)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.quadrature(np.cos, 0.0, np.pi/2)
(0.9999999999999536, 3.9611425250996035e-11)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in range(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
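# Small helper used by the samplers below: return a copy of tuple `t` with
# element `i` replaced by `value` (handy for building per-axis slice tuples).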
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
# Note: alias kept for backwards compatibility. Rename was done
# because cumtrapz is a slur in colloquial English (see gh-12924).
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""An alias of `cumulative_trapezoid`.
`cumtrapz` is kept for backwards compatibility. For new code, prefer
`cumulative_trapezoid` instead.
"""
return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
romb : integrators for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simpson(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even-spaced Simpson's rule.
result = np.sum(y[slice0] + 4*y[slice1] + y[slice2], axis=axis)
result *= dx / 3.0
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
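        # For consecutive interval widths h0, h1, integrating the quadratic
        # through the three samples gives
        #   (h0 + h1)/6 * [(2 - h1/h0)*y0
        #                  + (h0 + h1)**2/(h0*h1)*y1
        #                  + (2 - h0/h1)*y2],
        # which is what the guarded true_divide expressions below compute.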
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = np.float64(h[sl0])
h1 = np.float64(h[sl1])
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0)
tmp = hsum/6.0 * (y[slice0] *
(2.0 - np.true_divide(1.0, h0divh1,
out=np.zeros_like(h0divh1),
where=h0divh1 != 0)) +
y[slice1] * (hsum *
np.true_divide(hsum, hprod,
out=np.zeros_like(hsum),
where=hprod != 0)) +
y[slice2] * (2.0 - h0divh1))
result = np.sum(tmp, axis=axis)
return result
# Note: alias kept for backwards compatibility. simps was renamed to simpson
# because the former is a slur in colloquial English (see gh-12924).
def simps(y, x=None, dx=1.0, axis=-1, even='avg'):
"""An alias of `simpson`.
`simps` is kept for backwards compatibility. For new code, prefer
`simpson` instead.
"""
return simpson(y, x=x, dx=dx, axis=axis, even=even)
def simpson(y, x=None, dx=1.0, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : float, optional
Spacing of integration points along axis of `x`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use Simpson's rule on the first N-2
            intervals with a trapezoidal rule on the last interval, and 2) use
            Simpson's rule on the last N-2 intervals with a trapezoidal rule
            on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
romb : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simpson(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simpson(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simpson(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simpson(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simpson(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simpson(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See Also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simpson : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in range(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
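        # Richardson extrapolation: R(i, j) = R(i, j-1)
        #   + (R(i, j-1) - R(i-1, j-1)) / (4**j - 1),
        # which successively cancels the leading error terms.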
for j in range(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in range(k+1):
for j in range(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to SciPy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e., whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simpson : Integrators for sampled data.
cumulative_trapezoid : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> import numpy as np
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in range(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in range(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
r"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+ B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Examples
--------
Compute the integral of sin(x) in [0, :math:`\pi`]:
>>> from scipy.integrate import newton_cotes
>>> import numpy as np
>>> def f(x):
... return np.sin(x)
>>> a = 0
>>> b = np.pi
>>> exact = 2
>>> for N in [2, 4, 6, 8, 10]:
... x = np.linspace(a, b, N + 1)
... an, B = newton_cotes(N, 1)
... dx = (b - a) / N
... quad = dx * np.sum(an * f(x))
... error = abs(quad - exact)
... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
...
2 2.094395102 9.43951e-02
4 1.998570732 1.42927e-03
6 2.000017814 1.78136e-05
8 1.999999835 1.64725e-07
10 2.000000001 1.14677e-09
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
    # refine the inverse with two Newton-Schulz iterations: X <- 2*X - X @ C @ X
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
|
{
"content_hash": "9f27f41d050bf85231f782a2d48fd8b2",
"timestamp": "",
"source": "github",
"line_count": 1037,
"max_line_length": 103,
"avg_line_length": 32.79267116682739,
"alnum_prop": 0.5758101511497971,
"repo_name": "perimosocordiae/scipy",
"id": "485356e6020c364a141394a78380aeeb13c7ff07",
"size": "34006",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scipy/integrate/_quadrature.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4620237"
},
{
"name": "C++",
"bytes": "959068"
},
{
"name": "Cython",
"bytes": "1059810"
},
{
"name": "Dockerfile",
"bytes": "16894"
},
{
"name": "Fortran",
"bytes": "5211680"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "143727"
},
{
"name": "Python",
"bytes": "15434780"
},
{
"name": "R",
"bytes": "3059"
},
{
"name": "Shell",
"bytes": "18009"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""
WSGI config for task_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "task_service.settings")
application = get_wsgi_application()
|
{
"content_hash": "cebd726b6eee16a3523a74c41bee3d19",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.0625,
"alnum_prop": 0.770573566084788,
"repo_name": "cognoma/task-service",
"id": "7fcb12de4e2efdf341171f924206ce85d64019a8",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_service/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "51559"
}
],
"symlink_target": ""
}
|
import os
from django.db import models
from tagging.models import Tag
from graphite.events.compat import ModelTaggedItemManager
if os.environ.get('READTHEDOCS'):
TagField = lambda *args, **kwargs: None
else:
from tagging.fields import TagField
class Event(models.Model):
when = models.DateTimeField()
what = models.CharField(max_length=255)
data = models.TextField(blank=True)
tags = TagField(default="")
def get_tags(self):
return Tag.objects.get_for_object(self)
def __str__(self):
return "%s: %s" % (self.when, self.what)
@staticmethod
def find_events(time_from=None, time_until=None, tags=None, set_operation=None):
if tags is not None:
if set_operation == 'union':
query = Event.tagged.with_any(tags)
elif set_operation == 'intersection':
query = Event.tagged.with_intersection(tags)
else:
query = Event.tagged.with_all(tags)
else:
query = Event.objects.all()
if time_from is not None:
query = query.filter(when__gte=time_from)
if time_until is not None:
query = query.filter(when__lte=time_until)
result = list(query.order_by("when"))
return result
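    # Example (sketch): events carrying either tag in a time window:
    #   Event.find_events(time_from=start, time_until=end,
    #                     tags=['deploy', 'outage'], set_operation='union')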
def as_dict(self):
return dict(
when=self.when,
what=self.what,
data=self.data,
tags=self.tags.split(),
id=self.id,
)
# We use this rather than tagging.register() so that tags can be exposed
# in the admin UI
ModelTaggedItemManager().contribute_to_class(Event, 'tagged')
|
{
"content_hash": "ee54f0b2147b36c1b2fd797b911f7342",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 84,
"avg_line_length": 27.864406779661017,
"alnum_prop": 0.6058394160583942,
"repo_name": "cosm0s/graphite-web",
"id": "8f4fd905b05b22fe04b74a9b7b021a0a654e3b8b",
"size": "1644",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/graphite/events/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149965"
},
{
"name": "HTML",
"bytes": "21170"
},
{
"name": "JavaScript",
"bytes": "1679914"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "753924"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
}
|
"""Module containing base test results classes."""
import functools
import sys
import threading
from lib.results import result_types # pylint: disable=import-error
class ResultType:
"""Class enumerating test types.
Wraps the results defined in //build/util/lib/results/.
"""
PASS = result_types.PASS
SKIP = result_types.SKIP
FAIL = result_types.FAIL
CRASH = result_types.CRASH
TIMEOUT = result_types.TIMEOUT
UNKNOWN = result_types.UNKNOWN
NOTRUN = result_types.NOTRUN
@staticmethod
def GetTypes():
"""Get a list of all test types."""
return [ResultType.PASS, ResultType.SKIP, ResultType.FAIL,
ResultType.CRASH, ResultType.TIMEOUT, ResultType.UNKNOWN,
ResultType.NOTRUN]
@functools.total_ordering
class BaseTestResult:
"""Base class for a single test result."""
def __init__(self, name, test_type, duration=0, log='', failure_reason=None):
"""Construct a BaseTestResult.
Args:
name: Name of the test which defines uniqueness.
test_type: Type of the test result as defined in ResultType.
duration: Time it took for the test to run in milliseconds.
log: An optional string listing any errors.
"""
assert name
assert test_type in ResultType.GetTypes()
self._name = name
self._test_type = test_type
self._duration = duration
self._log = log
self._failure_reason = failure_reason
self._links = {}
def __str__(self):
return self._name
def __repr__(self):
return self._name
def __eq__(self, other):
return self.GetName() == other.GetName()
def __lt__(self, other):
return self.GetName() == other.GetName()
def __hash__(self):
return hash(self._name)
def SetName(self, name):
"""Set the test name.
Because we're putting this into a set, this should only be used if moving
this test result into another set.
"""
self._name = name
def GetName(self):
"""Get the test name."""
return self._name
def SetType(self, test_type):
"""Set the test result type."""
assert test_type in ResultType.GetTypes()
self._test_type = test_type
def GetType(self):
"""Get the test result type."""
return self._test_type
def GetDuration(self):
"""Get the test duration."""
return self._duration
def SetLog(self, log):
"""Set the test log."""
self._log = log
def GetLog(self):
"""Get the test log."""
return self._log
def SetFailureReason(self, failure_reason):
"""Set the reason the test failed.
This should be the first failure the test encounters and exclude any stack
trace.
"""
self._failure_reason = failure_reason
def GetFailureReason(self):
"""Get the reason the test failed.
Returns None if the test did not fail or if the reason the test failed is
unknown.
"""
return self._failure_reason
def SetLink(self, name, link_url):
"""Set link with test result data."""
self._links[name] = link_url
def GetLinks(self):
"""Get dict containing links to test result data."""
return self._links
class TestRunResults:
"""Set of results for a test run."""
def __init__(self):
self._links = {}
self._results = set()
self._results_lock = threading.RLock()
def SetLink(self, name, link_url):
"""Add link with test run results data."""
self._links[name] = link_url
def GetLinks(self):
"""Get dict containing links to test run result data."""
return self._links
def GetLogs(self):
"""Get the string representation of all test logs."""
with self._results_lock:
s = []
for test_type in ResultType.GetTypes():
if test_type != ResultType.PASS:
for t in sorted(self._GetType(test_type)):
log = t.GetLog()
if log:
s.append('[%s] %s:' % (test_type, t))
s.append(log)
if sys.version_info.major == 2:
decoded = [u.decode(encoding='utf-8', errors='ignore') for u in s]
return '\n'.join(decoded)
return '\n'.join(s)
def GetGtestForm(self):
"""Get the gtest string representation of this object."""
with self._results_lock:
s = []
plural = lambda n, s, p: '%d %s' % (n, p if n != 1 else s)
tests = lambda n: plural(n, 'test', 'tests')
s.append('[==========] %s ran.' % (tests(len(self.GetAll()))))
s.append('[ PASSED ] %s.' % (tests(len(self.GetPass()))))
skipped = self.GetSkip()
if skipped:
s.append('[ SKIPPED ] Skipped %s, listed below:' % tests(len(skipped)))
for t in sorted(skipped):
s.append('[ SKIPPED ] %s' % str(t))
all_failures = self.GetFail().union(self.GetCrash(), self.GetTimeout(),
self.GetUnknown())
if all_failures:
s.append('[ FAILED ] %s, listed below:' % tests(len(all_failures)))
for t in sorted(self.GetFail()):
s.append('[ FAILED ] %s' % str(t))
for t in sorted(self.GetCrash()):
s.append('[ FAILED ] %s (CRASHED)' % str(t))
for t in sorted(self.GetTimeout()):
s.append('[ FAILED ] %s (TIMEOUT)' % str(t))
for t in sorted(self.GetUnknown()):
s.append('[ FAILED ] %s (UNKNOWN)' % str(t))
s.append('')
s.append(plural(len(all_failures), 'FAILED TEST', 'FAILED TESTS'))
return '\n'.join(s)
def GetShortForm(self):
"""Get the short string representation of this object."""
with self._results_lock:
s = []
s.append('ALL: %d' % len(self._results))
for test_type in ResultType.GetTypes():
s.append('%s: %d' % (test_type, len(self._GetType(test_type))))
return ''.join([x.ljust(15) for x in s])
def __str__(self):
return self.GetGtestForm()
def AddResult(self, result):
"""Add |result| to the set.
Args:
result: An instance of BaseTestResult.
"""
assert isinstance(result, BaseTestResult)
with self._results_lock:
self._results.discard(result)
self._results.add(result)
def AddResults(self, results):
"""Add |results| to the set.
Args:
results: An iterable of BaseTestResult objects.
"""
with self._results_lock:
for t in results:
self.AddResult(t)
def AddTestRunResults(self, results):
"""Add the set of test results from |results|.
Args:
results: An instance of TestRunResults.
"""
assert isinstance(results, TestRunResults), (
'Expected TestRunResult object: %s' % type(results))
with self._results_lock:
# pylint: disable=W0212
self._results.update(results._results)
def GetAll(self):
"""Get the set of all test results."""
with self._results_lock:
return self._results.copy()
def _GetType(self, test_type):
"""Get the set of test results with the given test type."""
with self._results_lock:
return set(t for t in self._results if t.GetType() == test_type)
def GetPass(self):
"""Get the set of all passed test results."""
return self._GetType(ResultType.PASS)
def GetSkip(self):
"""Get the set of all skipped test results."""
return self._GetType(ResultType.SKIP)
def GetFail(self):
"""Get the set of all failed test results."""
return self._GetType(ResultType.FAIL)
def GetCrash(self):
"""Get the set of all crashed test results."""
return self._GetType(ResultType.CRASH)
def GetTimeout(self):
"""Get the set of all timed out test results."""
return self._GetType(ResultType.TIMEOUT)
def GetUnknown(self):
"""Get the set of all unknown test results."""
return self._GetType(ResultType.UNKNOWN)
def GetNotPass(self):
"""Get the set of all non-passed test results."""
return self.GetAll() - self.GetPass()
def DidRunPass(self):
"""Return whether the test run was successful."""
return not self.GetNotPass() - self.GetSkip()
|
{
"content_hash": "fce22c8ad69571f5b51f89a6b4c324d9",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 80,
"avg_line_length": 28.68840579710145,
"alnum_prop": 0.6203586764334428,
"repo_name": "nwjs/chromium.src",
"id": "8a1610eb722c6ebf32cae9d93bded3f4d1d7414e",
"size": "8059",
"binary": false,
"copies": "11",
"ref": "refs/heads/nw70",
"path": "build/android/pylib/base/base_test_result.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from . import ObjectCreationParameters
__author__ = 'Shamal Faily'
class ClassAssociationParameters(ObjectCreationParameters.ObjectCreationParameters):
def __init__(self,envName,headName,headDim,headNav,headType,headMultiplicity,headRole,tailRole,tailMultiplicity,tailType,tailNav,tailDim,tailName,rationale = ''):
ObjectCreationParameters.ObjectCreationParameters.__init__(self)
self.theEnvironmentName = envName
self.theHeadAsset = headName
self.theHeadDim = headDim
self.theHeadNav = headNav
self.theHeadType = headType
self.theHeadMultiplicity = headMultiplicity
self.theHeadRole = headRole
self.theTailRole = tailRole
self.theTailMultiplicity = tailMultiplicity
self.theTailType = tailType
self.theTailNav = tailNav
self.theTailDim = tailDim
self.theTailAsset = tailName
self.theRationale = rationale
def environment(self): return self.theEnvironmentName
def headAsset(self): return self.theHeadAsset
def headDimension(self): return self.theHeadDim
def headNavigation(self): return self.theHeadNav
def headType(self): return self.theHeadType
def headMultiplicity(self): return self.theHeadMultiplicity
def headRole(self): return self.theHeadRole
def tailRole(self): return self.theTailRole
def tailMultiplicity(self): return self.theTailMultiplicity
def tailType(self): return self.theTailType
def tailNavigation(self): return self.theTailNav
def tailDimension(self): return self.theTailDim
def tailAsset(self): return self.theTailAsset
def rationale(self): return self.theRationale
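# Illustrative construction (all values hypothetical), following the
# argument order of __init__ above:
#
#   p = ClassAssociationParameters('Default', 'Client', 'asset', 'true',
#                                  'Association', '1', 'owns', 'owned by',
#                                  '*', 'Association', 'true', 'asset',
#                                  'Account', 'Clients own accounts')
#   p.headAsset()   # -> 'Client'
#   p.tailAsset()   # -> 'Account'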
|
{
"content_hash": "ad69f7ae320408721483f0f4e85f8e93",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 164,
"avg_line_length": 43.916666666666664,
"alnum_prop": 0.788741302972802,
"repo_name": "nathanbjenx/cairis",
"id": "846f84aee30fe7666230706763f5190b482cb285",
"size": "2380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cairis/core/ClassAssociationParameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588306"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "Gherkin",
"bytes": "1615"
},
{
"name": "HTML",
"bytes": "1664076"
},
{
"name": "JavaScript",
"bytes": "416319"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "PLpgSQL",
"bytes": "1494775"
},
{
"name": "Python",
"bytes": "4006311"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
import time
import numpy as np
import tensorflow as tf
import bigru_model
import data_util
tf.app.flags.DEFINE_float("learning_rate", 1., "Learning rate.")
tf.app.flags.DEFINE_integer("size", 400, "Size of hidden layers.")
tf.app.flags.DEFINE_integer("embsize", 200, "Size of embedding.")
tf.app.flags.DEFINE_integer("num_layers", 1, "Number of layers in the model.")
tf.app.flags.DEFINE_string("data_dir", "data", "Data directory")
tf.app.flags.DEFINE_string("test_file", "", "Test filename.")
tf.app.flags.DEFINE_string("test_output", "output.txt", "Test output.")
tf.app.flags.DEFINE_string("train_dir", "model", "Training directory.")
tf.app.flags.DEFINE_string("tfboard", "tfboard", "Tensorboard log directory.")
tf.app.flags.DEFINE_boolean("decode", False, "Set to True for testing.")
tf.app.flags.DEFINE_boolean("geneos", True, "Do not generate EOS. ")
tf.app.flags.DEFINE_float("max_gradient", 1.0, "Clip gradients l2 norm to this range.")
tf.app.flags.DEFINE_integer("batch_size", 80, "Batch size in training / beam size in testing.")
tf.app.flags.DEFINE_integer("doc_vocab_size", 30000, "Document vocabulary size.")
tf.app.flags.DEFINE_integer("sum_vocab_size", 30000, "Summary vocabulary size.")
tf.app.flags.DEFINE_integer("max_train", 0, "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("max_iter", 1000000, "Maximum training iterations.")
tf.app.flags.DEFINE_integer("steps_per_validation", 1000, "Training steps between validations.")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 10000, "Training steps between checkpoints.")
tf.app.flags.DEFINE_string("checkpoint", "", "Checkpoint to load (use up-to-date if not set)")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets for sampling
_buckets = [(30, 10), (50, 20), (70, 20), (100, 20), (200, 30)]
def create_bucket(source, target):
data_set = [[] for _ in _buckets]
for s, t in zip(source, target):
t = [data_util.ID_GO] + t + [data_util.ID_EOS]
for bucket_id, (s_size, t_size) in enumerate(_buckets):
if len(s) <= s_size and len(t) <= t_size:
data_set[bucket_id].append([s, t])
break
return data_set
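# Illustrative bucketing (hypothetical lengths): a 40-token article with a
# 15-token title lands in bucket (50, 20), since the target grows to 17
# tokens once ID_GO/ID_EOS are added and both sides must fit the bucket.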
def create_model(session, forward_only):
"""Create model and initialize or load parameters in session."""
dtype = tf.float32
model = bigru_model.BiGRUModel(
FLAGS.doc_vocab_size,
FLAGS.sum_vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.embsize,
FLAGS.max_gradient,
FLAGS.batch_size,
FLAGS.learning_rate,
forward_only=forward_only,
dtype=dtype)
if FLAGS.checkpoint != "":
ckpt = FLAGS.checkpoint
else:
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt:
ckpt = ckpt.model_checkpoint_path
if ckpt and tf.train.checkpoint_exists(ckpt):
logging.info("Reading model parameters from %s", ckpt)
model.saver.restore(session, ckpt)
else:
logging.info("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
def train():
logging.info("Preparing summarization data.")
# docid, sumid, doc_dict, sum_dict = \
# data_util.load_data(
# FLAGS.data_dir + "/train.article.txt",
# FLAGS.data_dir + "/train.title.txt",
# FLAGS.data_dir + "/doc_dict.txt",
# FLAGS.data_dir + "/sum_dict.txt",
# FLAGS.doc_vocab_size, FLAGS.sum_vocab_size)
docid, doc_dict = data_util.load_data(
"document",
FLAGS.data_dir + "/train.article.txt",
FLAGS.data_dir + "/doc_dict.txt",
FLAGS.doc_vocab_size)
sumid, sum_dict = data_util.load_data(
"summary",
FLAGS.data_dir + "/train.title.txt",
FLAGS.data_dir + "/sum_dict.txt",
FLAGS.sum_vocab_size)
val_docid = data_util.load_valid_data(FLAGS.data_dir + "/valid.article.filter.txt", doc_dict)
val_sumid = data_util.load_valid_data(FLAGS.data_dir + "/valid.title.filter.txt", sum_dict)
with tf.Session() as sess:
# Create model.
logging.info("Creating %d layers of %d units.", FLAGS.num_layers, FLAGS.size)
train_writer = tf.summary.FileWriter(FLAGS.tfboard, sess.graph)
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
logging.info("Create buckets.")
dev_set = create_bucket(val_docid, val_sumid)
train_set = create_bucket(docid, sumid)
train_bucket_sizes = [len(train_set[b]) for b in range(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
train_buckets_scale = [
sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in range(len(train_bucket_sizes))]
for (s_size, t_size), nsample in zip(_buckets, train_bucket_sizes):
logging.info("Train set bucket (%d, %d) has %d samples.", s_size, t_size, nsample)
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = sess.run(model.global_step)
while current_step <= FLAGS.max_iter:
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in range(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, encoder_len, decoder_len = model.get_batch(train_set, bucket_id)
step_loss, _ = model.step(
sess, encoder_inputs, decoder_inputs,
encoder_len, decoder_len, False, train_writer)
step_time += (time.time() - start_time) / \
FLAGS.steps_per_validation
loss += step_loss * FLAGS.batch_size / np.sum(decoder_len) \
/ FLAGS.steps_per_validation
current_step += 1
# Once in a while, we save checkpoint.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "model.ckpt")
model.saver.save(sess, checkpoint_path,
global_step=model.global_step)
# Once in a while, we print statistics and run evals.
if current_step % FLAGS.steps_per_validation == 0:
# Print statistics for the previous epoch.
perplexity = np.exp(float(loss))
logging.info("global step %d step-time %.2f ppl %.2f", model.global_step.eval(), step_time, perplexity)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
for bucket_id in range(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
logging.info(" eval: empty bucket %d", bucket_id)
continue
encoder_inputs, decoder_inputs, encoder_len, decoder_len = model.get_batch(dev_set, bucket_id)
eval_loss, _ = model.step(sess, encoder_inputs,
decoder_inputs, encoder_len,
decoder_len, True)
eval_loss = eval_loss * FLAGS.batch_size \
/ np.sum(decoder_len)
eval_ppx = np.exp(float(eval_loss))
logging.info(" eval: bucket %d ppl %.2f", bucket_id, eval_ppx)
sys.stdout.flush()
def decode():
# Load vocabularies.
doc_dict = data_util.load_dict(FLAGS.data_dir + "/doc_dict.txt")
sum_dict = data_util.load_dict(FLAGS.data_dir + "/sum_dict.txt")
    if doc_dict is None or sum_dict is None:
        logging.error("Dict not found; cannot decode without vocabularies.")
        return
data = data_util.load_test_data(FLAGS.test_file, doc_dict)
with tf.Session() as sess:
# Create model and load parameters.
logging.info("Creating %d layers of %d units", FLAGS.num_layers, FLAGS.size)
model = create_model(sess, True)
result = []
for idx, token_ids in enumerate(data):
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, encoder_len, decoder_len =\
model.get_batch(
{0: [(token_ids, [data_util.ID_GO, data_util.ID_EOS])]}, 0)
if FLAGS.batch_size == 1 and FLAGS.geneos:
loss, outputs = model.step(sess, encoder_inputs, decoder_inputs, encoder_len, decoder_len, True)
outputs = [np.argmax(item) for item in outputs[0]]
else:
outputs = model.step_beam(sess, encoder_inputs, encoder_len, geneos=FLAGS.geneos)
# If there is an EOS symbol in outputs, cut them at that point.
if data_util.ID_EOS in outputs:
outputs = outputs[:outputs.index(data_util.ID_EOS)]
gen_sum = " ".join(data_util.sen_map2tok(outputs, sum_dict[1]))
gen_sum = data_util.sen_postprocess(gen_sum)
result.append(gen_sum)
logging.info("Finish %d samples. :: %s", idx, gen_sum[:75])
with open(FLAGS.test_output, "w") as f:
for item in result:
print(item, file=f)
def main(_):
if FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
datefmt='%b %d %H:%M')
    try:
        os.makedirs(FLAGS.train_dir)
    except OSError:
        pass  # train_dir already exists
tf.app.run()
|
{
"content_hash": "fc7f93e75488fef43c6659d5a7148a48",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 119,
"avg_line_length": 42.034188034188034,
"alnum_prop": 0.5917039446929646,
"repo_name": "SebastienBoisard/Thunlp-Summarization",
"id": "5ed1d55f5de06ee580fe13d70997f329cfc60e67",
"size": "9836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/summarization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37158"
}
],
"symlink_target": ""
}
|
"""OAuth 2.0 WSGI server middleware providing MyProxy certificates as access tokens
"""
__author__ = "R B Wilkinson"
__date__ = "12/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
from ndg.oauth.server.lib.access_token.access_token_interface import \
AccessTokenInterface
from ndg.oauth.server.lib.register.access_token import AccessToken
class BearerTokenGenerator(AccessTokenInterface):
'''Class to generate bearer token'''
def __init__(self, lifetime, token_type, **kw):
"""
@type lifetime: int
@param lifetime: lifetimes of generated tokens in seconds
@type token_type: str
@param token_type: token type name
@type kw:dict
@param kw: additional keywords
"""
self.lifetime = lifetime
self.token_type = token_type
def get_access_token(self, _arg):
"""
Gets an access token with an ID that is a random UUID used as a bearer
token.
@type _arg:
ndg.oauth.server.lib.register.authorization_grant.AuthorizationGrant /
ndg.oauth.server.lib.oauth.authorize.AuthorizeRequest
@param _arg: authorization grant (authorisation code flow) or
authorisation request (implicit flow)
@rtype: ndg.oauth.server.lib.register.access_token.AccessToken
@return: access token or None if an error occurs
"""
return AccessToken.create(self.token_type, _arg, self.lifetime)
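# Minimal usage sketch (illustrative; 'grant' stands for the
# AuthorizationGrant or AuthorizeRequest passed in by the server):
#
#     generator = BearerTokenGenerator(lifetime=3600, token_type='Bearer')
#     token = generator.get_access_token(grant)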
|
{
"content_hash": "aeb476e21c3528e6513d7fa3af8f4fcf",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 83,
"avg_line_length": 36.96,
"alnum_prop": 0.6504329004329005,
"repo_name": "cedadev/ndg_oauth",
"id": "cfab1a57d1795e130cb929ddd40d3678a7af09d1",
"size": "1848",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndg_oauth_server/ndg/oauth/server/lib/access_token/bearer_token_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6368"
},
{
"name": "HTML",
"bytes": "27842"
},
{
"name": "JavaScript",
"bytes": "4044"
},
{
"name": "Python",
"bytes": "260160"
}
],
"symlink_target": ""
}
|
from rx import Observable
from rx.internal import extensionmethod
@extensionmethod(Observable, name="slice")
def slice_(self, start=None, stop=None, step=1):
"""Slices the given observable. It is basically a wrapper around the
operators skip(), skip_last(), take(), take_last() and filter().
    This marble diagram helps you remember how slicing works with streams.
    Positive numbers are relative to the start of the events, while negative
numbers are relative to the end (on_completed) of the stream.
r---e---a---c---t---i---v---e---|
0 1 2 3 4 5 6 7 8
-8 -7 -6 -5 -4 -3 -2 -1
Example:
result = source.slice(1, 10)
result = source.slice(1, -2)
result = source.slice(1, -1, 2)
Keyword arguments:
:param Observable self: Observable to slice
    :param int start: Number of elements to skip or take last
    :param int stop: Last element to take or skip last
:param int step: Takes every step element. Must be larger than zero
:returns: Returns a sliced observable sequence.
:rtype: Observable
"""
source = self
if start is not None:
if start < 0:
source = source.take_last(abs(start))
else:
source = source.skip(start)
if stop is not None:
if stop > 0:
start = start or 0
source = source.take(stop - start)
else:
source = source.skip_last(abs(stop))
if step is not None:
if step > 1:
source = source.filter(lambda x, i: i % step == 0)
elif step < 0:
# Reversing events is not supported
raise TypeError("Negative step not supported.")
return source
@extensionmethod(Observable)
def __getitem__(self, key):
"""Slices the given observable using Python slice notation. The
arguments to slice is start, stop and step given within brackets [] and
separated with the ':' character. It is basically a wrapper around the
operators skip(), skip_last(), take(), take_last() and filter().
    This marble diagram helps you remember how slicing works with streams.
    Positive numbers are relative to the start of the events, while negative
numbers are relative to the end (on_completed) of the stream.
r---e---a---c---t---i---v---e---|
0 1 2 3 4 5 6 7 8
-8 -7 -6 -5 -4 -3 -2 -1
Example:
result = source[1:10]
result = source[1:-2]
result = source[1:-1:2]
Keyword arguments:
:param Observable self: Observable to slice
:param slice key: Slice object
:returns: A sliced observable sequence.
:rtype: Observable
:raises TypeError: If key is not of type int or slice
"""
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
elif isinstance(key, int):
start, stop, step = key, key + 1, 1
else:
raise TypeError("Invalid argument type.")
return self.slice(start, stop, step)
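# Minimal usage sketch (illustrative; assumes Observable.from_ from the same
# rx distribution is available). Not called anywhere; defined only to show
# the bracket syntax in context.
def _slice_example():
    source = Observable.from_([1, 2, 3, 4, 5, 6, 7, 8])
    result = []
    # skip(1), then skip_last(1), then keep every 2nd remaining element
    source[1:-1:2].subscribe(result.append)
    return result  # -> [2, 4, 6]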
|
{
"content_hash": "a3930c93a7f045edf358e81e89522fa8",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 31.78723404255319,
"alnum_prop": 0.6198125836680054,
"repo_name": "Sprytile/Sprytile",
"id": "1097031c9de10d98ecb1c5ad130403e09df9bf0d",
"size": "2988",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rx/linq/observable/slice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "720766"
}
],
"symlink_target": ""
}
|
"""radius URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from rest_framework.urlpatterns import format_suffix_patterns
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from freeradius import views as FreeRadiusView
router = DefaultRouter()
router.register(r'radpostauths', FreeRadiusView.RadPostAuthViewSet)
router.register(r'radchecks', FreeRadiusView.RadCheckViewSet)
router.register(r'radreplys', FreeRadiusView.RadReplyViewSet)
router.register(r'userinfos', FreeRadiusView.UserInfoViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/v1/userdata/', FreeRadiusView.UserDataList.as_view()),
url(r'^api/v1/userquota/', FreeRadiusView.UserQuotaList.as_view()),
url(r'^api/v1/userinfo/', FreeRadiusView.UserInfoList.as_view()),
url(r'^api/v1/userbilling/', FreeRadiusView.UserBillingDetailList.as_view()),
url(r'^api/v1/quotausage/$', FreeRadiusView.QuotaDetail.as_view()),
url(r'^api/v1/routers/', include(router.urls)),
    url(r'^api/v1/quotausage/(?P<username>.+\@[a-zA-Z0-9\.]+)/$', FreeRadiusView.QuotaDetail.as_view()),
url(r'^api/v1/quotausage/(?P<username>.+\@.+)/(?P<period>\d+)/$', FreeRadiusView.QuotaDetail.as_view()),
url(r'^rest-auth/', include('rest_auth.urls'))
]
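# Illustrative URL resolution (hypothetical values):
#   /api/v1/quotausage/                         -> QuotaDetail (no username)
#   /api/v1/quotausage/alice@example.com/       -> QuotaDetail (username captured)
#   /api/v1/quotausage/alice@example.com/3/     -> QuotaDetail (username + period)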
|
{
"content_hash": "6aefd5b95c4b0e4f560c8749868856c5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 108,
"avg_line_length": 45.829268292682926,
"alnum_prop": 0.7232570516232039,
"repo_name": "realworldtech/radius_restserver",
"id": "38f1f7174140c7ff4fe50641b142f43787935294",
"size": "1879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/radius/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1410"
},
{
"name": "Dockerfile",
"bytes": "478"
},
{
"name": "HTML",
"bytes": "12060"
},
{
"name": "JavaScript",
"bytes": "44972"
},
{
"name": "Python",
"bytes": "53342"
},
{
"name": "Shell",
"bytes": "1181"
}
],
"symlink_target": ""
}
|
FAILEDOPERATION = 'FailedOperation'
# The device firmware upgrade task has already completed.
FAILEDOPERATION_DEVICEFIRMWARETASKALREADDONE = 'FailedOperation.DeviceFirmwareTaskAlreadDone'
# The device is currently upgrading.
FAILEDOPERATION_DEVICEISUPDATING = 'FailedOperation.DeviceIsUpdating'
# The device is already running another OTA upgrade task.
FAILEDOPERATION_DEVICERUNNINGOTHEROTATASK = 'FailedOperation.DeviceRunningOtherOtaTask'
# No permission for this operation.
FAILEDOPERATION_PERMISSIONDENIED = 'FailedOperation.PermissionDenied'
# Internal error.
INTERNALERROR = 'InternalError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# The firmware already exists.
INVALIDPARAMETERVALUE_FIRMWAREALREADYEXIST = 'InvalidParameterValue.FirmwareAlreadyExist'
# The device firmware upgrade task does not exist.
RESOURCENOTFOUND_DEVICEFIRMWARETASKNOTEXIST = 'ResourceNotFound.DeviceFirmwareTaskNotExist'
# The device has no firmware version.
RESOURCENOTFOUND_DEVICEHASNOFIRMWARE = 'ResourceNotFound.DeviceHasNoFirmware'
# The firmware does not exist.
RESOURCENOTFOUND_FIRMWARENOTEXIST = 'ResourceNotFound.FirmwareNotExist'
# The firmware upgrade task does not exist.
RESOURCENOTFOUND_FIRMWARETASKNOTEXIST = 'ResourceNotFound.FirmwareTaskNotExist'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Operation not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
{
"content_hash": "5386f0d1876aa4936b41e29f49054804",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 93,
"avg_line_length": 26.627906976744185,
"alnum_prop": 0.8445414847161572,
"repo_name": "tzpBingo/github-trending",
"id": "4932c507866eabe0f987d4e711751e02f8d9014a",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/iotvideo/v20211125/errorcodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
import collections
from neutron_lib import constants
from neutron_lib.db import constants as db_consts
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging
from neutron.agent.linux import l3_tc_lib as tc_lib
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import rpc as n_rpc
LOG = logging.getLogger(__name__)
SUPPORTED_RULES = {
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
qos_consts.MAX_KBPS: {
'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
qos_consts.MAX_BURST: {
'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
qos_consts.DIRECTION: {
'type:values': constants.VALID_DIRECTIONS}
}
}
# We use the default values as follows:
# 1. If a QoS policy has no `bandwidth_limit` rule for some direction, we
#    use the default value for that direction.
# 2. The default value 0 is treated as no limit.
# 3. If one IP's rate is changed from x to 0, the extension runs
#    a tc filter clean-up procedure.
IP_DEFAULT_RATE = 0
IP_DEFAULT_BURST = 0
class RateLimitMaps(object):
def __init__(self):
self.qos_policy_resources = collections.defaultdict(dict)
self.known_policies = {}
self.resource_policies = {}
def update_policy(self, policy):
self.known_policies[policy.id] = policy
def get_policy(self, policy_id):
return self.known_policies.get(policy_id)
def get_resources(self, policy):
return self.qos_policy_resources[policy.id].values()
def get_resource_policy(self, resource):
policy_id = self.resource_policies.get(resource)
return self.get_policy(policy_id)
def set_resource_policy(self, resource, policy):
"""Attach a resource to policy
and return any previous policy on resource.
"""
old_policy = self.get_resource_policy(resource)
self.update_policy(policy)
self.resource_policies[resource] = policy.id
self.qos_policy_resources[policy.id][resource] = resource
if old_policy and old_policy.id != policy.id:
del self.qos_policy_resources[old_policy.id][resource]
def clean_by_resource(self, resource):
"""Detach resource from policy
and cleanup data we don't need anymore.
"""
if resource in self.resource_policies:
del self.resource_policies[resource]
for qos_policy_id, res_dict in self.qos_policy_resources.items():
if resource in res_dict:
del res_dict[resource]
if not res_dict:
self._clean_policy_info(qos_policy_id)
return
LOG.debug("L3 QoS extension did not have "
"information on floating IP %s", resource)
def _clean_policy_info(self, qos_policy_id):
del self.qos_policy_resources[qos_policy_id]
del self.known_policies[qos_policy_id]
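# Illustrative lifecycle of the maps above (fip_addr is a hypothetical
# floating IP address string):
#
#     maps = RateLimitMaps()
#     maps.set_resource_policy(fip_addr, policy)   # attach and index
#     maps.get_resource_policy(fip_addr)           # -> policy
#     maps.clean_by_resource(fip_addr)             # detach; policy info is
#                                                  # dropped once unreferenced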
class L3QosAgentExtensionBase(object):
SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY]
def consume_api(self, agent_api):
self.agent_api = agent_api
def _handle_notification(self, context, resource_type,
qos_policies, event_type):
pass
def _process_update_policy(self, qos_policy):
pass
def _policy_rules_modified(self, old_policy, policy):
return not (len(old_policy.rules) == len(policy.rules) and
all(i in old_policy.rules for i in policy.rules))
def _register_rpc_consumers(self):
registry.register(self._handle_notification, resources.QOS_POLICY)
self._connection = n_rpc.Connection()
endpoints = [resources_rpc.ResourcesPushRpcCallback()]
topic = resources_rpc.resource_type_versioned_topic(
resources.QOS_POLICY)
self._connection.create_consumer(topic, endpoints, fanout=True)
self._connection.consume_in_threads()
def _get_tc_wrapper(self, device):
return tc_lib.FloatingIPTcCommand(device.name,
namespace=device.namespace)
def get_policy_rates(self, policy):
rates = {}
for rule in policy.rules:
# NOTE(liuyulong): for now, the L3 agent QoS extensions only
# use ``bandwidth_limit`` rules.
if rule.rule_type in SUPPORTED_RULES:
if rule.direction not in rates:
rates[rule.direction] = {"rate": rule.max_kbps,
"burst": rule.max_burst_kbps}
        # The returned rates dict must contain all directions. If a
        # direction has no bandwidth_limit rule, use the default values.
for direction in constants.VALID_DIRECTIONS:
if direction not in rates:
LOG.debug("Policy %(id)s does not have '%(direction)s' "
"bandwidth_limit rule, use default value instead.",
{"id": policy.id,
"direction": direction})
rates[direction] = {"rate": IP_DEFAULT_RATE,
"burst": IP_DEFAULT_BURST}
return rates
def _get_router_info(self, router_id):
router_info = self.agent_api.get_router_info(router_id)
if router_info:
return router_info
LOG.debug("Router %s is not managed by this agent. "
"It was possibly deleted concurrently.",
router_id)
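# Illustrative get_policy_rates() output for a policy carrying only an
# egress bandwidth_limit rule (hypothetical numbers):
#
#     {'egress': {'rate': 2000, 'burst': 1600},
#      'ingress': {'rate': IP_DEFAULT_RATE, 'burst': IP_DEFAULT_BURST}}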
|
{
"content_hash": "8342ea35624ea62fde852fed5e59010c",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 77,
"avg_line_length": 37.205298013245034,
"alnum_prop": 0.6167675329298683,
"repo_name": "noironetworks/neutron",
"id": "8ab50b10333859275bce70a7a420d842348d864a",
"size": "6254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/l3/extensions/qos/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
}
|
from pipedream.dispatcher import Dispatcher
from pipedream.exceptions import *
|
{
"content_hash": "7e680e4e297ee3a84ea2daab12e8e8d7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 43,
"avg_line_length": 39.5,
"alnum_prop": 0.8607594936708861,
"repo_name": "tgecho/pipedream",
"id": "ab9b32598c80a69b459edd7f94ee40e374b3e9a4",
"size": "79",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipedream/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "16848"
}
],
"symlink_target": ""
}
|
import cv2
import sys
cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)
cameraNum = 1
video_capture = cv2.VideoCapture(cameraNum)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    if not ret:
        # Stop when the camera yields no frame (wrong index or disconnected).
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=4,
minSize=(20, 20),
flags=cv2.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
print("x: " + str(x) + " y: " + str(y) + " w,h: " + str(w) + "," + str(h))
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
{
"content_hash": "0e9622ce3e57502b21cb160a5325c9b3",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 24.08108108108108,
"alnum_prop": 0.6127946127946128,
"repo_name": "ResearcherOne/RobotTakimiRepo",
"id": "6334c854f73fb63392bcc6be873e6a2d50fb7789",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3_HAAR_Training/02_haarTrainingTestCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "38234"
},
{
"name": "CMake",
"bytes": "1551"
},
{
"name": "CSS",
"bytes": "5157"
},
{
"name": "HTML",
"bytes": "13471"
},
{
"name": "Lua",
"bytes": "5620"
},
{
"name": "Python",
"bytes": "1629"
},
{
"name": "Shell",
"bytes": "1781"
}
],
"symlink_target": ""
}
|
"""
kombu.transport.virtual
=======================
Virtual transport implementation.
Emulates the AMQ API for non-AMQ transports.
"""
from __future__ import absolute_import
import base64
import socket
import warnings
from itertools import count
from time import sleep, time
from Queue import Empty
from kombu.exceptions import StdChannelError
from kombu.utils import emergency_dump_state, say, uuid
from kombu.utils.compat import OrderedDict
from kombu.utils.encoding import str_to_bytes, bytes_to_str
from kombu.utils.finalize import Finalize
from kombu.transport import base
from .scheduling import FairCycle
from .exchange import STANDARD_EXCHANGE_TYPES
UNDELIVERABLE_FMT = """\
Message could not be delivered: No queues bound to exchange %(exchange)r
using binding key %(routing_key)r
"""
class Base64(object):
def encode(self, s):
return bytes_to_str(base64.b64encode(str_to_bytes(s)))
def decode(self, s):
return base64.b64decode(str_to_bytes(s))
class NotEquivalentError(Exception):
"""Entity declaration is not equivalent to the previous declaration."""
pass
class UndeliverableWarning(UserWarning):
"""The message could not be delivered to a queue."""
pass
class BrokerState(object):
#: exchange declarations.
exchanges = None
#: active bindings.
bindings = None
def __init__(self, exchanges=None, bindings=None):
self.exchanges = {} if exchanges is None else exchanges
self.bindings = {} if bindings is None else bindings
def clear(self):
self.exchanges.clear()
self.bindings.clear()
class QoS(object):
"""Quality of Service guarantees.
Only supports `prefetch_count` at this point.
:param channel: AMQ Channel.
:keyword prefetch_count: Initial prefetch count (defaults to 0).
"""
#: current prefetch count value
prefetch_count = 0
#: :class:`~collections.OrderedDict` of active messages.
#: *NOTE*: Can only be modified by the consuming thread.
_delivered = None
#: acks can be done by other threads than the consuming thread.
#: Instead of a mutex, which doesn't perform well here, we mark
#: the delivery tags as dirty, so subsequent calls to append() can remove
#: them.
_dirty = None
#: If disabled, unacked messages won't be restored at shutdown.
restore_at_shutdown = True
def __init__(self, channel, prefetch_count=0):
self.channel = channel
self.prefetch_count = prefetch_count or 0
self._delivered = OrderedDict()
self._delivered.restored = False
self._dirty = set()
self._on_collect = Finalize(self,
self.restore_unacked_once,
exitpriority=1)
def can_consume(self):
"""Returns true if the channel can be consumed from.
        Used to ensure the client adheres to currently active
prefetch limits.
"""
pcount = self.prefetch_count
return not pcount or len(self._delivered) - len(self._dirty) < pcount
def append(self, message, delivery_tag):
"""Append message to transactional state."""
if self._dirty:
self._flush()
self._delivered[delivery_tag] = message
def get(self, delivery_tag):
return self._delivered[delivery_tag]
def _flush(self):
"""Flush dirty (acked/rejected) tags from."""
dirty = self._dirty
delivered = self._delivered
while 1:
try:
dirty_tag = dirty.pop()
except KeyError:
break
delivered.pop(dirty_tag, None)
def ack(self, delivery_tag):
"""Acknowledge message and remove from transactional state."""
self._dirty.add(delivery_tag)
def reject(self, delivery_tag, requeue=False):
"""Remove from transactional state and requeue message."""
if requeue:
self.channel._restore(self._delivered[delivery_tag])
self._dirty.add(delivery_tag)
def restore_unacked(self):
"""Restore all unacknowledged messages."""
self._flush()
delivered = self._delivered
errors = []
while delivered:
try:
_, message = delivered.popitem()
except KeyError: # pragma: no cover
break
try:
self.channel._restore(message)
except BaseException, exc:
errors.append((exc, message))
delivered.clear()
return errors
def restore_unacked_once(self):
"""Restores all unacknowledged message at shutdown/gc collect.
Will only be done once for each instance.
"""
self._on_collect.cancel()
self._flush()
state = self._delivered
if not self.restore_at_shutdown:
return
elif not self.channel.do_restore or getattr(state, 'restored', None):
assert not state
return
try:
if state:
say('Restoring %r unacknowledged message(s).',
len(self._delivered))
unrestored = self.restore_unacked()
if unrestored:
errors, messages = zip(*unrestored)
say('UNABLE TO RESTORE %s MESSAGES: %s',
len(errors), errors)
emergency_dump_state(messages)
finally:
state.restored = True
def restore_visible(self, start=0, num=10, interval=10):
pass
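# Sketch of the QoS lifecycle implemented above (illustrative only):
#
#     qos = QoS(channel, prefetch_count=10)
#     qos.append(message, tag)   # consuming thread records the delivery
#     qos.ack(tag)               # any thread may ack: the tag is marked dirty
#     qos.can_consume()          # counts delivered minus dirty against the
#                                # prefetch limit; append() flushes dirty tags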
class Message(base.Message):
def __init__(self, channel, payload, **kwargs):
self._raw = payload
properties = payload['properties']
body = payload.get('body')
if body:
body = channel.decode_body(body, properties.get('body_encoding'))
fields = {'body': body,
'delivery_tag': properties['delivery_tag'],
'content_type': payload.get('content-type'),
'content_encoding': payload.get('content-encoding'),
'headers': payload.get('headers'),
'properties': properties,
'delivery_info': properties.get('delivery_info'),
'postencode': 'utf-8'}
super(Message, self).__init__(channel, **dict(kwargs, **fields))
def serializable(self):
props = self.properties
body, _ = self.channel.encode_body(self.body,
props.get('body_encoding'))
headers = dict(self.headers)
# remove compression header
headers.pop('compression', None)
return {'body': body,
'properties': props,
'content-type': self.content_type,
'content-encoding': self.content_encoding,
'headers': headers}
class AbstractChannel(object):
"""This is an abstract class defining the channel methods
you'd usually want to implement in a virtual channel.
Do not subclass directly, but rather inherit from :class:`Channel`
instead.
"""
def _get(self, queue, timeout=None):
"""Get next message from `queue`."""
raise NotImplementedError('Virtual channels must implement _get')
def _put(self, queue, message):
"""Put `message` onto `queue`."""
raise NotImplementedError('Virtual channels must implement _put')
def _purge(self, queue):
"""Remove all messages from `queue`."""
raise NotImplementedError('Virtual channels must implement _purge')
def _size(self, queue):
"""Return the number of messages in `queue` as an :class:`int`."""
return 0
def _delete(self, queue, *args, **kwargs):
"""Delete `queue`.
        This just purges the queue; if you need to do more you can
override this method.
"""
self._purge(queue)
def _new_queue(self, queue, **kwargs):
"""Create new queue.
        Some implementations need to perform additional actions when
the queue is created. You can do so by overriding this
method.
"""
pass
def _has_queue(self, queue, **kwargs):
"""Verify that queue exists.
Should return :const:`True` if the queue exists or :const:`False`
otherwise.
"""
return True
def _poll(self, cycle, timeout=None):
"""Poll a list of queues for available messages."""
return cycle.get()
class Channel(AbstractChannel, base.StdChannel):
"""Virtual channel.
:param connection: The transport instance this channel is part of.
"""
#: message class used.
Message = Message
#: QoS class used.
QoS = QoS
#: flag to restore unacked messages when channel
#: goes out of scope.
do_restore = True
#: mapping of exchange types and corresponding classes.
exchange_types = dict(STANDARD_EXCHANGE_TYPES)
#: flag set if the channel supports fanout exchanges.
supports_fanout = False
#: Binary <-> ASCII codecs.
codecs = {'base64': Base64()}
#: Default body encoding.
#: NOTE: ``transport_options['body_encoding']`` will override this value.
body_encoding = 'base64'
#: counter used to generate delivery tags for this channel.
_next_delivery_tag = count(1).next
    #: Optional queue where messages with no route are delivered.
#: Set by ``transport_options['deadletter_queue']``.
deadletter_queue = None
# List of options to transfer from :attr:`transport_options`.
from_transport_options = ('body_encoding', 'deadletter_queue')
def __init__(self, connection, **kwargs):
self.connection = connection
self._consumers = set()
self._cycle = None
self._tag_to_queue = {}
self._active_queues = []
self._qos = None
self.closed = False
# instantiate exchange types
self.exchange_types = dict(
(typ, cls(self)) for typ, cls in self.exchange_types.items()
)
self.channel_id = self.connection._next_channel_id()
topts = self.connection.client.transport_options
for opt_name in self.from_transport_options:
try:
setattr(self, opt_name, topts[opt_name])
except KeyError:
pass
def exchange_declare(self, exchange=None, type='direct', durable=False,
auto_delete=False, arguments=None,
nowait=False, passive=False):
"""Declare exchange."""
type = type or 'direct'
exchange = exchange or 'amq.%s' % (type, )
if passive:
if exchange not in self.state.exchanges:
raise StdChannelError(
'404',
u'NOT_FOUND - no exchange %r in vhost %r' % (
exchange, self.connection.client.virtual_host or '/'),
(50, 10), 'Channel.exchange_declare')
return
try:
prev = self.state.exchanges[exchange]
if not self.typeof(exchange).equivalent(prev, exchange, type,
durable, auto_delete,
arguments):
raise NotEquivalentError(
'Cannot redeclare exchange %r in vhost %r with '
'different type, durable or autodelete value' % (
exchange, self.connection.client.virtual_host or '/'))
except KeyError:
self.state.exchanges[exchange] = {
'type': type,
'durable': durable,
'auto_delete': auto_delete,
'arguments': arguments or {},
'table': [],
}
def exchange_delete(self, exchange, if_unused=False, nowait=False):
"""Delete `exchange` and all its bindings."""
for rkey, _, queue in self.get_table(exchange):
self.queue_delete(queue, if_unused=True, if_empty=True)
self.state.exchanges.pop(exchange, None)
def queue_declare(self, queue=None, passive=False, **kwargs):
"""Declare queue."""
queue = queue or 'amq.gen-%s' % uuid()
if passive and not self._has_queue(queue, **kwargs):
raise StdChannelError(
'404',
u'NOT_FOUND - no queue %r in vhost %r' % (
queue, self.connection.client.virtual_host or '/'),
(50, 10), 'Channel.queue_declare')
else:
self._new_queue(queue, **kwargs)
return queue, self._size(queue), 0
    def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs):
"""Delete queue."""
if if_empty and self._size(queue):
return
try:
exchange, routing_key, arguments = self.state.bindings[queue]
except KeyError:
return
meta = self.typeof(exchange).prepare_bind(queue, exchange,
routing_key, arguments)
self._delete(queue, exchange, *meta)
self.state.bindings.pop(queue, None)
def after_reply_message_received(self, queue):
self.queue_delete(queue)
def exchange_bind(self, destination, source='', routing_key='',
nowait=False, arguments=None):
raise NotImplementedError('transport does not support exchange_bind')
def exchange_unbind(self, destination, source='', routing_key='',
nowait=False, arguments=None):
raise NotImplementedError('transport does not support exchange_unbind')
def queue_bind(self, queue, exchange=None, routing_key='',
arguments=None, **kwargs):
"""Bind `queue` to `exchange` with `routing key`."""
if queue in self.state.bindings:
return
exchange = exchange or 'amq.direct'
table = self.state.exchanges[exchange].setdefault('table', [])
self.state.bindings[queue] = exchange, routing_key, arguments
meta = self.typeof(exchange).prepare_bind(queue,
exchange,
routing_key,
arguments)
table.append(meta)
if self.supports_fanout:
self._queue_bind(exchange, *meta)
def queue_unbind(self, queue, exchange=None, routing_key='',
arguments=None, **kwargs):
raise NotImplementedError('transport does not support queue_unbind')
def list_bindings(self):
for exchange in self.state.exchanges:
table = self.get_table(exchange)
for routing_key, pattern, queue in table:
yield queue, exchange, routing_key
def queue_purge(self, queue, **kwargs):
"""Remove all ready messages from queue."""
return self._purge(queue)
def basic_publish(self, message, exchange, routing_key, **kwargs):
"""Publish message."""
props = message['properties']
message['body'], props['body_encoding'] = \
self.encode_body(message['body'], self.body_encoding)
props['delivery_info']['exchange'] = exchange
props['delivery_info']['routing_key'] = routing_key
props['delivery_tag'] = self._next_delivery_tag()
self.typeof(exchange).deliver(message,
exchange, routing_key, **kwargs)
def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs):
"""Consume from `queue`"""
self._tag_to_queue[consumer_tag] = queue
self._active_queues.append(queue)
def _callback(raw_message):
message = self.Message(self, raw_message)
if not no_ack:
self.qos.append(message, message.delivery_tag)
return callback(message)
self.connection._callbacks[queue] = _callback
self._consumers.add(consumer_tag)
self._reset_cycle()
def basic_cancel(self, consumer_tag):
"""Cancel consumer by consumer tag."""
if consumer_tag in self._consumers:
self._consumers.remove(consumer_tag)
self._reset_cycle()
queue = self._tag_to_queue.pop(consumer_tag, None)
try:
self._active_queues.remove(queue)
except ValueError:
pass
self.connection._callbacks.pop(queue, None)
def basic_get(self, queue, no_ack=False, **kwargs):
"""Get message by direct access (synchronous)."""
try:
message = self.Message(self, self._get(queue))
if not no_ack:
self.qos.append(message, message.delivery_tag)
return message
except Empty:
pass
def basic_ack(self, delivery_tag):
"""Acknowledge message."""
self.qos.ack(delivery_tag)
def basic_recover(self, requeue=False):
"""Recover unacked messages."""
if requeue:
return self.qos.restore_unacked()
raise NotImplementedError('Does not support recover(requeue=False)')
def basic_reject(self, delivery_tag, requeue=False):
"""Reject message."""
self.qos.reject(delivery_tag, requeue=requeue)
def basic_qos(self, prefetch_size=0, prefetch_count=0,
apply_global=False):
"""Change QoS settings for this channel.
Only `prefetch_count` is supported.
"""
self.qos.prefetch_count = prefetch_count
def get_exchanges(self):
return list(self.state.exchanges)
def get_table(self, exchange):
"""Get table of bindings for `exchange`."""
return self.state.exchanges[exchange]['table']
def typeof(self, exchange, default='direct'):
"""Get the exchange type instance for `exchange`."""
try:
type = self.state.exchanges[exchange]['type']
except KeyError:
type = default
return self.exchange_types[type]
def _lookup(self, exchange, routing_key, default=None):
"""Find all queues matching `routing_key` for the given `exchange`.
Returns `default` if no queues matched.
"""
if default is None:
default = self.deadletter_queue
try:
R = self.typeof(exchange).lookup(self.get_table(exchange),
exchange, routing_key, default)
except KeyError:
R = []
if not R and default is not None:
warnings.warn(UndeliverableWarning(UNDELIVERABLE_FMT % {
'exchange': exchange, 'routing_key': routing_key}))
self._new_queue(default)
R = [default]
return R
def _restore(self, message):
"""Redeliver message to its original destination."""
delivery_info = message.delivery_info
message = message.serializable()
message['redelivered'] = True
for queue in self._lookup(delivery_info['exchange'],
delivery_info['routing_key']):
self._put(queue, message)
def drain_events(self, timeout=None):
if self._consumers and self.qos.can_consume():
if hasattr(self, '_get_many'):
return self._get_many(self._active_queues, timeout=timeout)
return self._poll(self.cycle, timeout=timeout)
raise Empty()
def message_to_python(self, raw_message):
"""Convert raw message to :class:`Message` instance."""
if not isinstance(raw_message, self.Message):
return self.Message(self, payload=raw_message)
return raw_message
def prepare_message(self, body, priority=None, content_type=None,
content_encoding=None, headers=None, properties=None):
"""Prepare message data."""
properties = properties or {}
info = properties.setdefault('delivery_info', {})
info['priority'] = priority or 0
return {'body': body,
'content-encoding': content_encoding,
'content-type': content_type,
'headers': headers or {},
'properties': properties or {}}
def flow(self, active=True):
"""Enable/disable message flow.
:raises NotImplementedError: as flow
is not implemented by the base virtual implementation.
"""
        raise NotImplementedError('virtual channels do not support flow.')
def close(self):
"""Close channel, cancel all consumers, and requeue unacked
messages."""
if not self.closed:
self.closed = True
for consumer in list(self._consumers):
self.basic_cancel(consumer)
if self._qos:
self._qos.restore_unacked_once()
if self._cycle is not None:
self._cycle.close()
self._cycle = None
if self.connection is not None:
self.connection.close_channel(self)
self.exchange_types = None
def encode_body(self, body, encoding=None):
if encoding:
return self.codecs.get(encoding).encode(body), encoding
return body, encoding
def decode_body(self, body, encoding=None):
if encoding:
return self.codecs.get(encoding).decode(body)
return body
def _reset_cycle(self):
self._cycle = FairCycle(self._get, self._active_queues, Empty)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def state(self):
"""Broker state containing exchanges and bindings."""
return self.connection.state
@property
def qos(self):
""":class:`QoS` manager for this channel."""
if self._qos is None:
self._qos = self.QoS(self)
return self._qos
@property
def cycle(self):
if self._cycle is None:
self._reset_cycle()
return self._cycle
class Management(base.Management):
def __init__(self, transport):
super(Management, self).__init__(transport)
self.channel = transport.client.channel()
def get_bindings(self):
return [dict(destination=q, source=e, routing_key=r)
for q, e, r in self.channel.list_bindings()]
def close(self):
self.channel.close()
class Transport(base.Transport):
"""Virtual transport.
:param client: :class:`~kombu.Connection` instance
"""
Channel = Channel
Cycle = FairCycle
Management = Management
#: :class:`BrokerState` containing declared exchanges and
#: bindings (set by constructor).
state = BrokerState()
#: :class:`~kombu.transport.virtual.scheduling.FairCycle` instance
#: used to fairly drain events from channels (set by constructor).
cycle = None
#: port number used when no port is specified.
default_port = None
#: active channels.
channels = None
#: queue/callback map.
_callbacks = None
#: Time to sleep between unsuccessful polls.
polling_interval = 1.0
def __init__(self, client, **kwargs):
self.client = client
self.channels = []
self._avail_channels = []
self._callbacks = {}
self.cycle = self.Cycle(self._drain_channel, self.channels, Empty)
self._next_channel_id = count(1).next
polling_interval = client.transport_options.get('polling_interval')
if polling_interval is not None:
self.polling_interval = polling_interval
def create_channel(self, connection):
try:
return self._avail_channels.pop()
except IndexError:
channel = self.Channel(connection)
self.channels.append(channel)
return channel
def close_channel(self, channel):
try:
try:
self.channels.remove(channel)
except ValueError:
pass
finally:
channel.connection = None
def establish_connection(self):
# creates channel to verify connection.
# this channel is then used as the next requested channel.
# (returned by ``create_channel``).
self._avail_channels.append(self.create_channel(self))
return self # for drain events
def close_connection(self, connection):
self.cycle.close()
for l in self._avail_channels, self.channels:
while l:
try:
channel = l.pop()
except (IndexError, KeyError): # pragma: no cover
pass
else:
channel.close()
def drain_events(self, connection, timeout=None):
loop = 0
time_start = time()
get = self.cycle.get
polling_interval = self.polling_interval
while 1:
try:
item, channel = get(timeout=timeout)
except Empty:
if timeout and time() - time_start >= timeout:
raise socket.timeout()
loop += 1
if polling_interval is not None:
sleep(polling_interval)
else:
break
message, queue = item
if not queue or queue not in self._callbacks:
raise KeyError(
"Received message for queue '%s' without consumers: %s" % (
queue, message))
self._callbacks[queue](message)
def _drain_channel(self, channel, timeout=None):
return channel.drain_events(timeout=timeout)
@property
def default_connection_params(self):
return {'port': self.default_port, 'hostname': 'localhost'}
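# A minimal in-memory channel sketch (illustrative only, not part of kombu;
# real transports such as the memory or Redis transports subclass Channel
# the same way, providing at least _get/_put/_purge):
class _ExampleMemoryChannel(Channel):
    queues = {}

    def _new_queue(self, queue, **kwargs):
        self.queues.setdefault(queue, [])

    def _has_queue(self, queue, **kwargs):
        return queue in self.queues

    def _get(self, queue, timeout=None):
        try:
            return self.queues[queue].pop(0)
        except (KeyError, IndexError):
            raise Empty()

    def _put(self, queue, message, **kwargs):
        self.queues.setdefault(queue, []).append(message)

    def _purge(self, queue):
        size = len(self.queues.get(queue, []))
        self.queues[queue] = []
        return size

    def _size(self, queue):
        return len(self.queues.get(queue, []))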
|
{
"content_hash": "8ffad8b3250e99d7868fe3c35f034760",
"timestamp": "",
"source": "github",
"line_count": 787,
"max_line_length": 79,
"avg_line_length": 32.95552731893265,
"alnum_prop": 0.5783081431215299,
"repo_name": "neumerance/deploy",
"id": "6936b1ce738bb528cb2b561e92f5b83ca482f290",
"size": "25936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/kombu/transport/virtual/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import httplib2
import os
import argparse
from OpenSSL import SSL
from flask import Flask, jsonify
from flask_restful import Resource, Api, reqparse
from flaskext.mysql import MySQL
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
import db_info
app = Flask(__name__)
api = Api(app)
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = db_info.db_user
app.config['MYSQL_DATABASE_PASSWORD'] = db_info.db_pw
app.config['MYSQL_DATABASE_DB'] = db_info.db_name
app.config['MYSQL_DATABASE_HOST'] = db_info.db_host
app.config['MYSQL_USE_UNICODE'] = 'True'
mysql.init_app(app)
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
class CreateUser(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Gmail of user')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('createUser', args=[_username])
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
return { 'statuscode': '200', 'message': 'User creation success' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
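# Illustrative call against CreateUser (assumes the resource is registered
# on a hypothetical '/api/v1/users' route elsewhere in this file):
#
#     import requests
#     r = requests.post('http://localhost:5000/api/v1/users',
#                       data={'username': 'alice@gmail.com'})
#     r.json()  # -> {'statuscode': '200', 'message': 'User creation success'}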
class AddTask(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of task')
parser.add_argument('deadline', type=str, help='Complete by date')
parser.add_argument('description', type=str, help='Detail of task')
args = parser.parse_args()
_username = args['username']
_deadline = args['deadline']
_description = args['description']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('add_task', (_username, _deadline, _description))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
return { 'statuscode': '200', 'message': 'Successfully added task' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
        except Exception as e:
            return { 'error': str(e) }
class DeleteTask(Resource):
def delete(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('taskid', type=int, help='Which task')
args = parser.parse_args()
_taskid = args['taskid']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('delete_task', args=[_taskid])
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
return { 'statuscode': '200', 'message': 'Task deleted successfully' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class CheckTask(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('taskid', type=int, help='Task ID')
parser.add_argument('status', type=int, help='Task status')
args = parser.parse_args()
_taskid = args['taskid']
_status = args['status']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('check_task', (_taskid, _status))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
                return { 'statuscode': '200', 'message': 'Checked task ' + str(_status) }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class GetTask(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of tasks')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('get_all_tasks', args=[_username])
data = cursor.fetchall()
task_list = []
none_list = []
for task in data:
if task[1] is None:
i = { 'taskid': task[0],
'deadline': 'none',
'description': task[2],
'status': task[3] }
none_list.append(i)
else:
i = { 'taskid': task[0],
'deadline': str(task[1]),
'description': task[2],
'status': task[3] }
task_list.append(i)
return task_list + none_list
except Exception as e:
return { 'error': str(e) }
class GetAListedSite(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of listedsite')
parser.add_argument('domainName', type=str, help='Current domain of user')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getAListedSite', (_username, _domainName))
data = cursor.fetchone()
site = { 'owner': data[0],
'domainName': data[1],
'dailyTime': data[2],
'blockedTime': data[3],
'isBlocked': data[4],
'timeCap': data[5],
'idleTime': data[6] }
return site
except Exception as e:
return { 'error': str(e) }
class IncrementAListedSite(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of listedsite')
parser.add_argument('domainName', type=str, help='Current domain of user')
parser.add_argument('dailyTime', type=int, help='Time spent today')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
_dailyTime = args['dailyTime']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('incrementAListedSite', (_username, _domainName, _dailyTime))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
# Needs a new cursor to execute another procedure
cursor = conn.cursor()
cursor.callproc('getAListedSite', (_username, _domainName))
data = cursor.fetchone()
site = { 'owner': data[0],
'domainName': data[1],
'dailyTime': data[2],
'blockedTime': data[3],
'isBlocked': data[4],
'timeCap': data[5],
'idleTime': data[6] }
return site
except Exception as e:
return { 'error': str(e) }
class IncrementABlockedSite(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of listedsite')
parser.add_argument('domainName', type=str, help='Current domain of user')
parser.add_argument('dailyTime', type=int, help='Time spent today')
parser.add_argument('blockedTime', type=int, help='Time spent today when blocking')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
_dailyTime = args['dailyTime']
_blockedTime = args['blockedTime']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('incrementABlockedSite', (_username, _domainName, _dailyTime, _blockedTime))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
cursor = conn.cursor()
cursor.callproc('getAListedSite', (_username, _domainName))
data = cursor.fetchone()
site = { 'owner': data[0],
'domainName': data[1],
'dailyTime': data[2],
'blockedTime': data[3],
'isBlocked': data[4],
'timeCap': data[5],
'idleTime': data[6] }
return site
except Exception as e:
return { 'error': str(e) }
# Ignore _isBlocked, phpMyAdmin is being dumb
# IMPORTANT: This method is atomic: if createListedSite or createSiteTimeHistory fails,
# then nothing will be committed
class AddListedSite(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of listedsite')
parser.add_argument('domainName', type=str, help='Domain to block')
parser.add_argument('timeCap', type=int, help='Time before block')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
_timeCap = args['timeCap']
_isBlocked = 0
if _timeCap is None:
_timeCap = 0
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('createListedSite', (_username, _domainName, _isBlocked, _timeCap))
data = cursor.fetchall()
            if len(data) == 0:
cursor.close()
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
cursor = conn.cursor()
cursor.callproc('createSiteTimeHistory', (_username, _domainName))
data = cursor.fetchall()
            if len(data) == 0:
cursor.close()
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
cursor = conn.cursor()
cursor.callproc('createIdleTimeHistory', (_username, _domainName))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
return { 'message': 'Successfully created ListedSite, SiteTimeHistory, and IdleTimeHistory' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e), 'note': 'Possible error in ListedSite, SiteTimeHistory, IdleTimeHistory!'}
class EditListedSite(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of listedsite')
parser.add_argument('domainName', type=str, help='Domain to block')
parser.add_argument('isBlocked', type=int, help='Blocking or tracking')
parser.add_argument('timeCap', type=int, help='Time before block')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
_isBlocked = args['isBlocked']
_timeCap = args['timeCap']
if _timeCap is None:
_timeCap = 0
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('updateListedSite', (_username, _domainName, _isBlocked, _timeCap))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
return { 'message': 'Successfully updated ListedSite' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class DeleteListedSite(Resource):
def delete(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of listedsite')
parser.add_argument('domainName', type=str, help='Domain to block')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('deleteListedSite', (_username, _domainName))
data = cursor.fetchall()
            if len(data) == 0:
cursor.close()
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
cursor = conn.cursor()
cursor.callproc('deleteSiteTimeHistory', (_username, _domainName))
data = cursor.fetchall()
            if len(data) == 0:
cursor.close()
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
cursor = conn.cursor()
cursor.callproc('deleteIdleTimeHistory', (_username, _domainName))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
return { 'message': 'Successfully deleted ListedSite, SiteTimeHistory, IdleTimeHistory' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class GetASiteTimeHistory(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of SiteTimeHistory')
parser.add_argument('domainName', type=str, help='Domain to block')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getSiteTimeHistory', (_username, _domainName))
data = cursor.fetchone()
history = { 'owner': data[0],
'domainName': data[1],
'dailyTime_0': data[2],
'dailyTime_1': data[3],
'dailyTime_2': data[4],
'dailyTime_3': data[5],
'dailyTime_4': data[6],
'dailyTime_5': data[7],
'dailyTime_6': data[8] }
return history
except Exception as e:
return { 'error': str(e) }
class GetAnIdleTimeHistory(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of IdleTimeHistory')
parser.add_argument('domainName', type=str, help='Domain to block')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getIdleTimeHistory', (_username, _domainName))
data = cursor.fetchone()
history = { 'owner': data[0],
'domainName': data[1],
'idleTime_0': data[2],
'idleTime_1': data[3],
'idleTime_2': data[4],
'idleTime_3': data[5],
'idleTime_4': data[6],
'idleTime_5': data[7],
'idleTime_6': data[8] }
return history
except Exception as e:
return { 'error': str(e) }
class GetListedSites(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of ListedSite')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getListedSites', args=[_username])
data = cursor.fetchall()
site_list = []
for i in data:
site = { 'owner': i[0],
'domainName': i[1],
'dailyTime': i[2],
'blockedTime': i[3],
'isBlocked': i[4],
'timeCap': i[5],
'idleTime': i[6] }
site_list.append(site)
return site_list
except Exception as e:
return { 'error': str(e) }
class GetSiteTimeHistories(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of SiteTimeHistory')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getSiteTimeHistories', args=[_username])
data = cursor.fetchall()
site_history_list = []
for i in data:
history = { 'owner': i[0],
'domainName': i[1],
'dailyTime_0': i[2],
'dailyTime_1': i[3],
'dailyTime_2': i[4],
'dailyTime_3': i[5],
'dailyTime_4': i[6],
'dailyTime_5': i[7],
'dailyTime_6': i[8] }
site_history_list.append(history)
return site_history_list
except Exception as e:
return { 'error': str(e) }
class GetIdleTimeHistories(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of IdleTimeHistory')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getIdleTimeHistories', args=[_username])
data = cursor.fetchall()
site_history_list = []
for i in data:
history = { 'owner': i[0],
'domainName': i[1],
'idleTime_0': i[2],
'idleTime_1': i[3],
'idleTime_2': i[4],
'idleTime_3': i[5],
'idleTime_4': i[6],
'idleTime_5': i[7],
'idleTime_6': i[8] }
site_history_list.append(history)
return site_history_list
except Exception as e:
return { 'error': str(e) }
class GetATimeHistory(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of IdleTimeHistory')
parser.add_argument('domainName', type=str, help='Domain to block')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getIdleTimeHistory', (_username, _domainName))
data = cursor.fetchone()
time_history = []
idle_time_history = { 'owner': data[0],
'domainName': data[1],
'idleTime_0': data[2],
'idleTime_1': data[3],
'idleTime_2': data[4],
'idleTime_3': data[5],
'idleTime_4': data[6],
'idleTime_5': data[7],
'idleTime_6': data[8] }
cursor.close()
cursor = conn.cursor()
cursor.callproc('getSiteTimeHistory', (_username, _domainName))
data = cursor.fetchone()
site_time_history = { 'owner': data[0],
'domainName': data[1],
'dailyTime_0': data[2],
'dailyTime_1': data[3],
'dailyTime_2': data[4],
'dailyTime_3': data[5],
'dailyTime_4': data[6],
'dailyTime_5': data[7],
'dailyTime_6': data[8] }
time_history.append(site_time_history)
time_history.append(idle_time_history)
return time_history
except Exception as e:
return { 'error': str(e) }
class UpdateIdleTime(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of ListedSite')
parser.add_argument('domainName', type=str, help='Site name')
parser.add_argument('idleTime', type=int, help='Time spent idle')
args = parser.parse_args()
_username = args['username']
_domainName = args['domainName']
_idleTime = args['idleTime']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('updateIdleTime', (_username, _domainName, _idleTime))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
conn.close()
return { 'message': 'Successfully updated IdleTime!' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class CreateEvent(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of event')
parser.add_argument('date', type=str, help='Date of event')
parser.add_argument('description', type=str, help='Event description')
args = parser.parse_args()
_username = args['username']
_date = args['date']
_description = args['description']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('createCalendarEvent', (_username, _date, _description))
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
return { 'message': 'Successfully created event!', 'date': str(_date) }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class DeleteEvent(Resource):
def delete(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('event_id', type=str, help='Event id')
args = parser.parse_args()
_event_id = args['event_id']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('deleteCalendarEvent', args=[_event_id])
data = cursor.fetchall()
            if len(data) == 0:
conn.commit()
cursor.close()
                return { 'message': 'Successfully deleted event' }
else:
return { 'statuscode': '1000', 'message': str(data[0]) }
except Exception as e:
return { 'error': str(e) }
class GetAllEvents(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of event')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getEvents', args=[_username])
data = cursor.fetchall()
event_list = []
for i in data:
event = { 'event_id': i[0],
'date': str(i[1]),
'description': i[2] }
event_list.append(event)
return event_list
except Exception as e:
return { 'error': str(e) }
class GetSpecialEvents(Resource):
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, help='Owner of event')
args = parser.parse_args()
_username = args['username']
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('getSpecialEvents', args=[_username] )
data = cursor.fetchall()
event_list = []
for i in data:
event = { 'date': str(i[0]),
'description': i[1] }
event_list.append(event)
return event_list
except Exception as e:
return {'error': str(e) }
class DeskTab(Resource):
def get(self):
return {'message': 'DeskTab is up!' }
# DeskTab
api.add_resource(DeskTab, '/')
# User
api.add_resource(CreateUser, '/CreateUser')
# Tasks
api.add_resource(AddTask, '/AddTask')
api.add_resource(DeleteTask, '/DeleteTask')
api.add_resource(CheckTask, '/CheckTask')
api.add_resource(GetTask, '/GetTask')
# ListedSites
api.add_resource(GetAListedSite, '/ListedSite/GetAListedSite')
api.add_resource(IncrementAListedSite, '/ListedSite/IncrementAListedSite')
api.add_resource(IncrementABlockedSite, '/ListedSite/IncrementABlockedSite')
api.add_resource(AddListedSite, '/ListedSite/AddListedSite')
api.add_resource(EditListedSite, '/ListedSite/EditListedSite')
api.add_resource(DeleteListedSite, '/ListedSite/DeleteListedSite')
api.add_resource(GetASiteTimeHistory, '/ListedSite/GetASiteTimeHistory')
api.add_resource(GetAnIdleTimeHistory, '/ListedSite/GetAnIdleTimeHistory')
api.add_resource(GetATimeHistory, '/ListedSite/GetATimeHistory')
api.add_resource(GetListedSites, '/ListedSite/GetListedSites')
api.add_resource(GetSiteTimeHistories, '/ListedSite/GetSiteTimeHistories')
api.add_resource(GetIdleTimeHistories, '/ListedSite/GetIdleTimeHistories')
api.add_resource(UpdateIdleTime, '/ListedSite/UpdateIdleTime')
# Calendar
api.add_resource(CreateEvent, '/Calendar/CreateEvent')
api.add_resource(GetAllEvents, '/Calendar/GetAllEvents')
api.add_resource(DeleteEvent, '/Calendar/DeleteEvent')
api.add_resource(GetSpecialEvents, '/Calendar/GetSpecialEvents')
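# Example request against one of the routes above (the host, credentials, and
# values here are illustrative assumptions, not part of this file):
#   curl -X POST https://desktab.me/ListedSite/AddListedSite \
#        -d 'username=alice' -d 'domainName=example.com' -d 'timeCap=3600'
# On success this returns the JSON message emitted by AddListedSite.post().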
if __name__ == '__main__':
context = ('desktab_me.ca-bundle.crt', 'desktab.me.key')
app.run(debug=True, ssl_context=context)
|
{
"content_hash": "bc9287721277452b890aa3f52dcf0a61",
"timestamp": "",
"source": "github",
"line_count": 812,
"max_line_length": 112,
"avg_line_length": 34.48891625615764,
"alnum_prop": 0.5145509730405284,
"repo_name": "ktang012/CS180",
"id": "a45b7f8b853e7af79903c0c651a506d840ab40b7",
"size": "28005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2276"
},
{
"name": "HTML",
"bytes": "2990"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import json
import argparse
parser = argparse.ArgumentParser(description='Plot ev3dev datalogs.')
parser.add_argument('infile', help='the input file to be logged')
args = parser.parse_args()
# Note, this code is a modified version from these pages:
#
# http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
# http://matplotlib.org/examples/pylab_examples/subplots_demo.html
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), (44, 160, 44), (152, 223, 138),
(214, 39, 40), (255, 152, 150), (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), (188, 189, 34), (219, 219, 141),
(23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
plt.style.use(['dark_background'])
test = json.loads(open(args.infile).read())
values = {}
# Extract the data from the log in a format that's useful for plotting
for k, d in test['data'].items():
    values[k] = {}
    values[k]['x'] = [row[0] for row in d]
    values[k]['y'] = []
    for i, a in enumerate(test['meta']['ports'][k]['log_attributes']):
        values[k]['y'].append({'name': a, 'values': [row[1][i] for row in d]})
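# For reference, the log layout this loop expects (inferred from the accesses
# above; port names and numbers are illustrative, not from a real log):
#   {"meta": {"title": "...", "subtitle": "...", "notes": "...",
#             "ports": {"outA": {"log_attributes": ["position", "speed", "duty_cycle"]}}},
#    "data": {"outA": [[0.00, [0, 0, 0]], [0.01, [5, 42, 30]]]}}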
f, axarr = plt.subplots(3, sharex=True)
axarr[2].set_xlabel('Time (seconds)')
f.text(.95, 0, args.infile, fontsize=10, horizontalalignment='left', verticalalignment='center')
f.text(.5,
1,
"{0} - {1}".format(test['meta']['title'], k),
fontsize=14,
horizontalalignment='center',
verticalalignment='center')
f.text(.5,
.96,
"{0}".format(test['meta']['subtitle']),
fontsize=10,
horizontalalignment='center',
verticalalignment='center')
f.text(.92,
.5,
"{0}".format(test['meta']['notes']),
fontsize=10,
horizontalalignment='left',
verticalalignment='center')
# Clean up the chartjunk
for i, ax in enumerate(axarr):
print(i, ax)
# Remove the plot frame lines. They are unnecessary chartjunk.
ax.spines["top"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
    axarr[i].plot(values[k]['x'], values[k]['y'][i]['values'], lw=1.5, color=tableau20[i])
axarr[i].text(.95,
1,
"{0}".format(values['k']['y'][i]['name']),
fontsize=14,
color=tableau20[i],
horizontalalignment='right',
verticalalignment='center',
transform=axarr[i].transAxes)
plt.savefig("{0}-{1}.png".format(args.infile, k), bbox_inches="tight")
|
{
"content_hash": "d7529a1c074514f870c36b313b41016e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 113,
"avg_line_length": 36.13333333333333,
"alnum_prop": 0.567650676506765,
"repo_name": "rhempel/ev3dev-lang-python",
"id": "c7a3725867b611fe460fae4a5262287db47fbf57",
"size": "3252",
"binary": false,
"copies": "2",
"ref": "refs/heads/ev3dev-stretch",
"path": "tests/motor/plot_matplotlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296032"
}
],
"symlink_target": ""
}
|
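# Serves a stylesheet built from the 'tmpl' query parameter, substituting the
# six-digit CSS escape \0000E5 (U+00E5) and encoding the result with the
# charset named in the 'encoding' parameter.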
def main(request, response):
encoding = request.GET['encoding']
tmpl = request.GET['tmpl']
sheet = tmpl % u'\\0000E5'
return [("Content-Type", "text/css; charset=%s" % encoding)], sheet.encode(encoding)
|
{
"content_hash": "dfc7aea368afe153cc74bb9936b6085a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 88,
"avg_line_length": 43.8,
"alnum_prop": 0.6438356164383562,
"repo_name": "youtube/cobalt",
"id": "d7c9bce2786b44cbd3c4fa755ad60191032188d1",
"size": "219",
"binary": false,
"copies": "243",
"ref": "refs/heads/master",
"path": "third_party/web_platform_tests/html/infrastructure/urls/resolving-urls/query-encoding/resources/css-tmpl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import abc
class ServicePluginBase(object):
""" defines base interface for any Advanced Service plugin """
__metaclass__ = abc.ABCMeta
supported_extension_aliases = []
@abc.abstractmethod
def get_plugin_type(self):
""" returns one of predefine service types. see
quantum/plugins/common/constants.py """
pass
@abc.abstractmethod
def get_plugin_description(self):
""" returns string description of the plugin """
pass
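# A minimal sketch of a concrete plugin (the class name and return values are
# hypothetical; the real service type constants live in
# quantum/plugins/common/constants.py):
#
#   class LoadBalancerPlugin(ServicePluginBase):
#       supported_extension_aliases = ['lbaas']
#
#       def get_plugin_type(self):
#           return 'LOADBALANCER'
#
#       def get_plugin_description(self):
#           return 'Example load balancer service plugin'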
|
{
"content_hash": "062a671b827b60de02d2ff765c2030dd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6483739837398373,
"repo_name": "aristanetworks/arista-ovs-quantum",
"id": "dfa074d4ef2cbe0b857b893166763634ad7a416c",
"size": "1167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/plugins/services/service_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "2568389"
},
{
"name": "Scala",
"bytes": "4525"
},
{
"name": "Shell",
"bytes": "7843"
},
{
"name": "XML",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
import logging
import urllib.request
import base64
from threading import Thread, Event
logger = logging.getLogger('universe')
class IPCamera:
def __init__(self, url):
self.stream = urllib.request.urlopen(url)
self.b = bytearray()
def read(self):
max = 1000
while max > 0:
self.b.extend(self.stream.read(1024))
x = self.b.find(b'\xff\xd8')
y = self.b.find(b'\xff\xd9')
max -= 1
if x != -1 and y != -1:
jpg = self.b[x:y+2]
self.b = self.b[y+2:]
jpg = np.asarray(jpg, dtype=np.uint8)
                i = cv2.imdecode(jpg, cv2.IMREAD_UNCHANGED)
                return True, i
        # No complete JPEG frame was found within the read budget.
        return False, None
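# Minimal usage sketch for IPCamera (the URL is a placeholder, not a real
# endpoint): grab one decoded frame from an MJPEG stream.
#   cam = IPCamera('http://192.168.0.10/video.mjpg')
#   ok, frame = cam.read()
#   if ok:
#       cv2.imshow('frame', frame)
#       cv2.waitKey(0)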
class Camera:
def __init__(self, source, fx, fy, size=(640, 480)):
self.source = source
self.fx = fx
self.fy = fy
self.width, self.height = size
class Eye:
def __init__(self, camera):
self.frame = None
source = camera.source
self.cap = cv2.VideoCapture(source)
if not self.cap.isOpened():
raise ConnectionError('Unable to connect to video source "{}".'.format(source))
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera.width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera.height)
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if self.width != camera.width or self.height != camera.height:
raise ConnectionError('Unable to open video source at {:d} x {:d}.'
.format(source, camera.width, camera.height))
# Threading.
self.thread = Thread(target=self.run)
self.event = Event()
# Start.
self.thread.start()
def run(self):
while not self.event.is_set():
_, self.frame = self.cap.read()
def close(self):
"""
Shuts down the thread.
"""
self.event.set()
self.thread.join()
self.cap.release()
def get_base64(self):
"""
Encode the current frame into base64.
:return: The data in base64, ready for display in browser.
"""
encode_param = (cv2.IMWRITE_JPEG_QUALITY, 90)
cnt = cv2.imencode('.jpg', self.frame, encode_param)[1]
data = 'data:image/jpeg;base64,' + base64.encodebytes(cnt.flatten()).decode()
return data
def get_gray_frame(self):
"""
Get a gray frame.
:return: Frame.
"""
if self.frame is not None:
return cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
else:
return None
def get_color_frame(self):
"""
Get a color frame.
:return: Frame.
"""
if self.frame is not None:
return self.frame.copy()
else:
return None
def get_flipped_frame(self):
"""
Get a flipped frame.
:return: Frame
"""
if self.frame is not None:
return cv2.flip(self.frame, flipCode=-1)
else:
return None
def get_both_frames(self):
"""
        Get both the gray and colored frame.
Guaranteed to be the same frame.
:return: Color, Gray.
"""
if self.frame is not None:
frame = self.frame.copy()
return frame, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
else:
            return None, None
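# Example wiring of Camera and Eye (the device index and focal values are
# assumptions for illustration; frames may be None until the capture thread
# grabs its first image):
#   eye = Eye(Camera(source=0, fx=600.0, fy=600.0))
#   color, gray = eye.get_both_frames()
#   eye.close()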
|
{
"content_hash": "600c08c0f32edff03f662257980acf8c",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 91,
"avg_line_length": 25.51798561151079,
"alnum_prop": 0.5351000845785171,
"repo_name": "bobbyluig/Eclipse",
"id": "38621aabd687e7436a1c1c6db81d3b57821c0206",
"size": "3547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/theia/eye.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1401"
},
{
"name": "C",
"bytes": "18849"
},
{
"name": "C++",
"bytes": "1175923"
},
{
"name": "CMake",
"bytes": "58991"
},
{
"name": "CSS",
"bytes": "592222"
},
{
"name": "Forth",
"bytes": "329"
},
{
"name": "HTML",
"bytes": "48266"
},
{
"name": "JavaScript",
"bytes": "790348"
},
{
"name": "Jupyter Notebook",
"bytes": "84755"
},
{
"name": "Python",
"bytes": "412895"
},
{
"name": "Shell",
"bytes": "6860"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0009_abstractnode_preprint_file'),
('osf', '0010_auto_20161025_1332'),
]
operations = [
]
|
{
"content_hash": "e3730d47d7b44f14a9dcadca116d7970",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 51,
"avg_line_length": 18.928571428571427,
"alnum_prop": 0.6339622641509434,
"repo_name": "acshi/osf.io",
"id": "b762d936a650ee668ecd6bba06044b375d40e2a2",
"size": "335",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "osf/migrations/0011_merge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176516"
},
{
"name": "HTML",
"bytes": "181969"
},
{
"name": "JavaScript",
"bytes": "2017102"
},
{
"name": "Mako",
"bytes": "756427"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "8555915"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
}
|
import collections
import re
from oslo_log import log as logging
from tempest.lib.common.utils import data_utils
from tempest import config
from tempest.scenario import manager
from tempest import test
from tempest.lib import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
ICMP_HEADER_LEN = 8
class TestNetworkMultiNode(manager.NetworkScenarioTest):
"""
The Neutron ML2 driver will create a VLAN, when configured for VLAN, on the
underlying network element when the first virtual machine (VM) is attached
to a network/compute host. Conversely, the ML2 driver will delete
the VLAN associated with a network/compute host when the last VM is
removed from that network/compute host.
This test is designed for a multi-node OpenStack deployment
with the goal of creating the maximum number of network create and
delete events given the available resources.
The test does the following:
* Creates Aggregates/Zones with a one to one mapping of compute host
to zone. This allows the test to place a VM on a particular compute
host.
* Create networks based on the number of VMs and compute hosts.
* Create VMs and distribute them on networks.
* Selects one of the VMs as a ping source to send ping packets to each
of the other (east/west) VMs using both the floating IP and the fixed
IP address. The number of ping packets and the size of the ping
packets are controlled by the following tempest
configuration variables:
- test_packet_count: The number of packets to send for each packet size
- test_packet_sizes: A list of packet sizes used during testing
The packet and byte counts are verified for each ping test sequence.
* The VMs are then deleted, not as part of test cleanup but to allow the
network delete events from the ML2 driver to be captured and verified.
"""
credentials = ['primary', 'admin']
@classmethod
def resource_setup(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(TestNetworkMultiNode, cls).resource_setup()
@classmethod
def skip_checks(cls):
super(TestNetworkMultiNode, cls).skip_checks()
if not (CONF.network.project_networks_reachable
or CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise exceptions.InvalidConfiguration(msg)
for ext in ['router', 'security-group']:
if not test.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
raise exceptions.InvalidConfiguration(msg)
@classmethod
def setup_credentials(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(TestNetworkMultiNode, cls).setup_credentials()
# Use admin client by default
cls.manager = cls.admin_manager
def _delete_aggregate(self, aggregate):
self.aggregates_client.delete_aggregate(aggregate_id=aggregate['id'])
def _add_host(self, aggregate_id, host):
host_args = {
'host': host
}
aggregate_resp = self.aggregates_client.add_host(
aggregate_id=aggregate_id, **host_args)
aggregate = aggregate_resp['aggregate']
self.addCleanup(self._remove_host, aggregate['id'], host)
self.assertIn(host, aggregate['hosts'])
def _remove_host(self, aggregate_id, host):
host_args = {
'host': host
}
aggregate_resp = self.aggregates_client.remove_host(
aggregate_id=aggregate_id, **host_args)
aggregate = aggregate_resp['aggregate']
self.assertNotIn(host, aggregate['hosts'])
def _create_server(self, name, network, zone=None, image=None):
create_kwargs = self.srv_kwargs
create_kwargs['networks'] = [{'uuid': network.id}]
if zone is not None:
create_kwargs['availability_zone'] = zone
server = self.create_server(name=name, wait_until='ACTIVE', image=image, **create_kwargs)
return dict(server=server, keypair=self.keypair)
def setup_aggregates(self):
"""
Setup Aggregates/Zones - one compute host per zone so that the test
can control which compute host the VMs land on.
"""
self.aggregates_client = self.manager.aggregates_client
self.hypervisor_client = self.manager.hypervisor_client
hypervisors_resp = self.hypervisor_client.list_hypervisors()
self.hypervisors_list = hypervisors_resp['hypervisors']
# Verify the hypervisors are operational and make a list
# of them for later use
self.hypervisors = []
self.aggregates = []
i = 0
for hypervisor in self.hypervisors_list:
if hypervisor['status'] == 'enabled':
if hypervisor['state'] == 'up':
self.hypervisors.append(hypervisor)
# Create an aggregate/zone per hypervisor host
name = data_utils.rand_name('Agg')
aggregate_kwargs = {
'name': '{0}'.format(name),
'availability_zone': '{0}-Zone{1}'.format(name, i)
}
i += 1
aggregate_resp = self.aggregates_client.create_aggregate(
**aggregate_kwargs)
aggregate = aggregate_resp['aggregate']
self.addCleanup(self._delete_aggregate, aggregate)
self.aggregates.append(aggregate)
self._add_host(aggregate['id'],
hypervisor['hypervisor_hostname'])
def setUp(self):
super(TestNetworkMultiNode, self).setUp()
self.keypair = self.create_keypair()
self.floating_ip_tuples = []
self.linux_client = None
self.private_key = None
self.servers = {}
self.srv_kwargs = {'key_name': self.keypair['name']}
self.tenant_id = self.manager.identity_client.tenant_id
self.total_expected_pkts = 0
self.total_expected_bytes = 0
self.segmentation_ids = []
self.number_instances_per_compute = 1
self.number_routers_per_tenant = 1
self.network_vms = {}
self.routers = []
# Classes that inherit this class can redefine packet size/count
# based on their own needs or accept the default in the CONF
if not hasattr(self, 'test_packet_sizes'):
self.test_packet_sizes = map(int, CONF.scenario.test_packet_sizes)
if not hasattr(self, 'test_packet_count'):
self.test_packet_count = CONF.scenario.test_packet_count
if not hasattr(self, 'max_instances_per_tenant'):
self.max_instances_per_tenant = (
CONF.scenario.max_instances_per_tenant)
# Allows the ability to place VMs on specific compute nodes
self.setup_aggregates()
self.num_networks = int(self.max_instances_per_tenant /
len(self.hypervisors))
        # If the user specified max_instances_per_tenant less than the
        # number of hypervisors available, the result is zero;
        # give at least one.
if self.num_networks == 0:
self.num_networks = 1
LOG.debug("Max instances per tenant = {0}".
format(self.max_instances_per_tenant))
LOG.debug("Number of instances per Network/compute = {0}".
format(self.number_instances_per_compute))
LOG.debug("Number of Networks = {0}".format(self.num_networks))
self.security_group = self._create_security_group(
tenant_id=self.tenant_id)
my_security_groups = [{'name': self.security_group['name']}]
self.srv_kwargs['security_groups'] = my_security_groups
try:
self._create_loginable_secgroup_rule(secgroup=self.security_group)
except Exception as e:
LOG.debug("Login sec group already exists: {0}".format(e))
self.setup_networks()
self.setup_vms()
def add_network(self, client=None, tenant_id=None, router=None,
vlan_transparent=False):
if CONF.baremetal.driver_enabled:
network = self._get_network_by_name(
CONF.compute.fixed_network_name)
router = None
subnet = None
else:
if CONF.network_feature_enabled.vlan_transparent:
network = self._create_network(client=client,
tenant_id=tenant_id,
vlan_transparent=True)
else:
network = self._create_network(client=client,
tenant_id=tenant_id)
if router is None:
router = self._get_router(client=client, tenant_id=tenant_id)
subnet = self._create_subnet(network=network, client=client)
subnet.add_to_router(router.id)
return network, subnet, router
def setup_networks(self):
self.networks = []
router = None
for i in range(0, self.num_networks):
            if i % (self.num_networks / self.number_routers_per_tenant) == 0:
if router is not None:
self.routers.append(router)
router = None
self.network, self.subnet, router = self.add_network(
tenant_id=self.tenant_id, router=router)
if len(self.routers) == 0:
self.routers.append(router)
self.networks.append(self.network)
segmentation_id = self.network['provider:segmentation_id']
self.segmentation_ids.append(segmentation_id)
def setup_vms(self, image=None):
# Create a VM on a each hypervisor per network
for network in self.networks:
for aggregate in self.aggregates:
name = data_utils.rand_name('server')
for i in range(0, 2):
try:
if CONF.scenario.use_host_aggregates and \
CONF.scenario.use_host_aggregates is True:
server_dict = \
self._create_server(name,
network,
zone=aggregate['availability_zone'],
image=image)
else:
server_dict = self._create_server(name, network, image=image)
except Exception as e:
LOG.debug("Exception {0}".format(e))
LOG.debug("Failed to bring up server")
LOG.debug("Retrying")
continue
break
id = server_dict['server']['id']
self.assertIsNotNone(server_dict)
self.servers[id] = server_dict['keypair']
if network.id in self.network_vms:
self.network_vms[network.id].append(id)
else:
self.network_vms[network.id] = [id]
# Safety net for max_instances_per_tenant
if len(self.servers) == self.max_instances_per_tenant:
return
def delete_vms(self):
"""
This method is not designed for clean up at the end of the test. Some
tests will need to verify that network delete events occur when the VMs
are deleted.
:return:
"""
        for server in list(self.servers.keys()):  # copy; entries are deleted below
LOG.debug("Deleting server {0}".format(server))
self.servers_client.delete_server(server)
del self.servers[server]
def verify_network_create_events(self):
"""
Implement in network element specific test class
"""
pass
def verify_network_delete_events(self):
"""
Implement in network element specific test class
"""
pass
def verify_network_element_ready(self):
"""
Implement in network element specific test class
"""
pass
def verify_network_element_traffic_flows(self):
"""
Implement in network element specific test class
"""
pass
def _ping_east_west(self, linux_client, target_ip,
count=CONF.validation.ping_count,
size=CONF.validation.ping_size):
"""
From a remote linux host ping an IP address and return a
data structure containing the results.
:param linux_client: A remote_client object
:param target_ip: The IP Address to ping from the remote client
:param count: How many pings
:param size: The packet size for each ping
:return: A dictionary with received pkts/byts, summary, round-trip data
"""
ping_data = {}
bytes_rx = 0
pkts_rx = 0
# RegEx for data mining the ping results.
pings = re.compile(r"""
^(\d+)\sbytes\sfrom\s # Store num bytes
([\d\.]+):\s # Store the IP address
(icmp_)?seq=(\d+)\s # Account for Cirros diff and
# store seq num
ttl=(\d+)\s # Store ttl
time=([\d\.]+)\sms # Store time
""", re.VERBOSE | re.IGNORECASE)
pings_summary = re.compile(r"""
^(\d+) # Store num transmitted
\spackets\stransmitted,\s # Common to all
(\d+)\s # Store num received
(packets[ ])?received,\s # Cirros is different
(\d+)[%]\spacket\sloss # Store pkt loss
([, ]+time[ ](\d+)ms)? # Cirros is different
""", re.VERBOSE | re.IGNORECASE)
round_trip = re.compile(r"""
^(rtt|round-trip)\s
min/avg/max(/mdev)?\s=\s
([\d\.]+)[/] # Store min time
([\d\.]+)[/] # Store avg time
([\d\.]+) # Store max time
.*""", re.VERBOSE | re.IGNORECASE)
ping_result = None
for x in range(0, 3):
try:
if CONF.scenario.advanced_vm_capabilities:
ping_result = linux_client.ping_host(
target_ip,
count=count,
size=size,
interval=.2).splitlines()
else:
ping_result = linux_client.ping_host(
target_ip,
count=count,
size=size).splitlines()
break
except exceptions.SSHExecCommandFailed:
LOG.debug("SSHExecCommandFailed - retrying")
except Exception:
LOG.debug("Catch all - retrying")
self.assertIsNotNone(ping_result,
"SSHExecCommandFailed - ping failed")
if ping_result is not None and len(ping_result) >= count:
for line in ping_result:
m = pings.match(line)
if m is not None:
bytes_rx += int(m.group(1))
pkts_rx += 1
continue
m = pings_summary.match(line)
if m is not None:
ping_data['summary'] = {'pkts_tx': int(m.group(1)),
'pkts_rx': int(m.group(2)),
'loss': int(m.group(4))}
continue
m = round_trip.match(line)
if m is not None:
ping_data['round-trip'] = {'min': float(m.group(3)),
'ave': float(m.group(4)),
'max': float(m.group(5))}
continue
ping_data['data-received'] = {'packets': pkts_rx, 'bytes': bytes_rx}
return ping_data
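    # Shape of the dictionary returned by _ping_east_west() for a clean
    # 3-packet run at the default 56-byte size (numbers are illustrative;
    # 3 * (56 + ICMP_HEADER_LEN) = 192 bytes):
    #   {'summary': {'pkts_tx': 3, 'pkts_rx': 3, 'loss': 0},
    #    'round-trip': {'min': 0.4, 'ave': 0.6, 'max': 0.9},
    #    'data-received': {'packets': 3, 'bytes': 192}}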
def setup_linux_client(self):
fip_tuple = self.floating_ip_tuples[0]
self.linux_client_ip, server = fip_tuple
self.private_key = self.servers[server['id']]['private_key']
self.linux_client = self.get_remote_client(
ip_address=self.linux_client_ip.
floating_ip_address,
private_key=self.private_key)
super(TestNetworkMultiNode, self).check_vm_connectivity(
self.linux_client_ip.floating_ip_address,
username=CONF.validation.image_ssh_user,
private_key=self.private_key,
should_connect=True)
def ping_target_ip(self,
linux_client,
source_ip,
target_ip,
pkt_size=CONF.validation.ping_size):
LOG.debug("Ping from {0} to {1}".format(source_ip, target_ip))
LOG.debug("Testing with packet size {0}".format(pkt_size))
ping_result = self._ping_east_west(linux_client,
target_ip,
count=self.test_packet_count,
size=pkt_size)
self.assertIsNotNone(ping_result,
"Ping from {0} to {1} failed".
format(source_ip, target_ip))
msg = "Ping result indicates packet loss from {0} to {1}".format(
source_ip, target_ip)
self.assertEqual(0, ping_result['summary']['loss'], msg)
# Calculate expected pkts/bytes
self.total_expected_pkts += self.test_packet_count
self.total_expected_bytes += self.test_packet_count * (pkt_size +
ICMP_HEADER_LEN)
# Store actual pkts/bytes used later for test
self.total_actual_pkts += int(ping_result['data-received']['packets'])
self.total_actual_bytes += int(ping_result['data-received']['bytes'])
def verify_vm_to_vm_connectivity(self):
"""
Selects one of the VMs created and uses it as a ping source to
ping all other VMs.
:return:
"""
self.assertTrue(len(self.servers) >= 2,
"Not enough servers to check VM to VM connectivity")
self.total_actual_pkts = 0
self.total_actual_bytes = 0
self.total_expected_pkts = 0
self.total_expected_bytes = 0
if self.linux_client is None:
self.setup_linux_client()
# Cycle through the VMs pinging each one from the testing VM
# First use floating IPs and fixed IPs
if self.floating_ip_tuples is not None:
for i in range(1, len(self.floating_ip_tuples)):
fip_tuple = self.floating_ip_tuples[i]
target_ip, server = fip_tuple
for pkt_size in self.test_packet_sizes:
self.ping_target_ip(self.linux_client,
self.linux_client_ip.
floating_ip_address,
target_ip.floating_ip_address,
pkt_size)
self.ping_target_ip(self.linux_client,
self.linux_client_ip.
floating_ip_address,
target_ip.fixed_ip_address,
pkt_size)
LOG.debug("Received {0} Packets "
"containing {1} bytes".format(self.total_actual_pkts,
self.total_actual_bytes))
LOG.debug("Expected {0} Packets "
"containing {1} bytes".format(self.total_expected_pkts,
self.total_expected_bytes))
self.assertEqual(self.total_expected_pkts,
self.total_actual_pkts,
"Total packets received failed")
self.assertEqual(self.total_expected_bytes,
self.total_actual_bytes,
"Total bytes received failed")
def create_floating_ips(self):
for server_id in self.servers.keys():
server = {'id': server_id, 'tenant_id': self.tenant_id}
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
self.floating_ip_tuples.append(self.floating_ip_tuple)
def delete_floating_ips(self):
if self.floating_ip_tuples is not None:
for i in range(0, len(self.floating_ip_tuples)):
fip_tuple = self.floating_ip_tuples.pop()
floating_ip, server = fip_tuple
self._disassociate_floating_ip(floating_ip)
def verify_vm_connectivity(self):
if self.floating_ip_tuples is not None:
for i in range(1, len(self.floating_ip_tuples)):
fip_tuple = self.floating_ip_tuples[i]
target_ip, server = fip_tuple
msg = "Timeout waiting for %s" % target_ip.floating_ip_address
self.assertTrue(self.
ping_ip_address(target_ip.floating_ip_address,
should_succeed=True),
msg=msg)
@test.idempotent_id('094f246d-9800-4c79-b249-361dab5d5a0f')
@test.services('compute', 'network')
def test_network_multi_node(self):
self.verify_network_create_events()
self.create_floating_ips()
self.verify_vm_connectivity()
self.verify_network_element_ready()
self.verify_vm_to_vm_connectivity()
self.verify_network_element_traffic_flows()
self.delete_vms()
self.verify_network_delete_events()
|
{
"content_hash": "28dcd3395b79616020014959f5096e33",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 97,
"avg_line_length": 42.65607476635514,
"alnum_prop": 0.5341133166820035,
"repo_name": "cisco-openstack/tempest",
"id": "b3d4910b4040590bece0fa7871371c3d6f78568f",
"size": "23449",
"binary": false,
"copies": "1",
"ref": "refs/heads/proposed",
"path": "tempest/scenario/test_network_multi_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4431271"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
}
|
from docutils import nodes
from docutils.parsers.rst import Directive, directives
CODE = ("""<iframe width="{width}" height="{height}"
scrolling="no" frameborder="no"
src="https://w.soundcloud.com/player/?url=http://api.soundcloud.com/tracks/"""
"""{sid}">
</iframe>""")
class SoundCloud(Directive):
""" Restructured text extension for inserting SoundCloud embedded music
Usage:
.. soundcloud:: <sound id>
:height: 400
:width: 600
"""
has_content = True
required_arguments = 1
option_spec = {
'width': directives.positive_int,
'height': directives.positive_int,
}
def run(self):
""" Required by the Directive interface. Create docutils nodes """
self.check_content()
options = {
'sid': self.arguments[0],
'width': 600,
'height': 160,
}
options.update(self.options)
return [nodes.raw('', CODE.format(**options), format='html')]
def check_content(self):
""" Emit a deprecation warning if there is content """
if self.content:
raise self.warning("This directive does not accept content. The "
"'key=value' format for options is deprecated, "
"use ':key: value' instead")
directives.register_directive('soundcloud', SoundCloud)
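# For example, ".. soundcloud:: 12345" renders (with the default options) to:
#   <iframe width="600" height="160" scrolling="no" frameborder="no"
#           src="https://w.soundcloud.com/player/?url=http://api.soundcloud.com/tracks/12345">
#   </iframe>
# The track id 12345 is illustrative.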
|
{
"content_hash": "ab91ee59c77b1dd640cc357bd9e9c8b0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 29.74468085106383,
"alnum_prop": 0.5836909871244635,
"repo_name": "servalproject/nikola",
"id": "6bdd4d51749fbdd92245f087d99a4422cdbbb47a",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile_rest/soundcloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "156379"
},
{
"name": "Python",
"bytes": "429762"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
}
|
from tempest.services.volume.json.admin import volume_quotas_client
class VolumeQuotasV2Client(volume_quotas_client.BaseVolumeQuotasClientJSON):
"""
Client class to send CRUD Volume V2 API requests to a Cinder endpoint
"""
def __init__(self, auth_provider):
super(VolumeQuotasV2Client, self).__init__(auth_provider)
self.api_version = "v2"
|
{
"content_hash": "df2db543e6c469cdc0d5587c39f90c5f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7127659574468085,
"repo_name": "Lilywei123/tempest",
"id": "64f4f338abd1d1426e2ac1cf59f05bd5dbdbc678",
"size": "1012",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/services/volume/v2/json/admin/volume_quotas_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2780467"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|