text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import os
ignoredContent = [
"is not safe, destructor throws exception",
# "::initialise' can be const.", # ignore, because some are not const and we keep all non-const for similarity
# "::shutdown' can be const.", # ignore, because some are not const and we keep all non-const for similarity
"(style) The function '", # "Function is never used". False positive for library project
"MyGUI_UString", # warnings from UString, gnore because it wasn't written by the MyGUI developers
#"pugixml", # warnings from pugixml, ignore because it wasn't written by the MyGUI developers
#"sigslot", # warnings from sigslot, ignore because it wasn't written by the MyGUI developers
# "Technically the member function 'input::", # useless, because with other input system this function could be non-const
# "Cppcheck cannot find all the include files",
"is assigned in constructor body. Consider performing initialization in initialization list.", # sometimes it makes sense, when non-trivial logic involved
"(performance) Prefer prefix ++/-- operators for non-primitive types", # fine for us - leave this optimisation to compiler
# "(error) Reference to temporary returned." # only false positives (revert later)
#"does not have a copy constructor which is recommended since the class contains a pointer to allocated memory." # ignore, not important
]
def isIgnoredWarning(warning):
for ignore in ignoredContent:
if warning.find(ignore) != -1:
return True
return False
def parseOutput():
file = open("temp.cppcheck", 'r')
line = file.readline()
while line != "":
line = line[0:len(line)-1]
if (not isIgnoredWarning(line)):
print line
line = file.readline()
file.close ()
def checkFolderSources(folder, flags) :
os.system("cppcheck --enable=all -I Scripts/cppcheck -I MyGUIEngine/include -D MYGUI_COMPILER=2 " + flags + " " + folder + " 2>temp.cppcheck")
parseOutput()
checkFolderSources('MyGUIEngine', '')
checkFolderSources('Demos', '-I Common -I Common/Base/Ogre -I Common/Input/OIS -I Platforms/Ogre/OgrePlatform/include -I Plugins/Plugin_BerkeliumWidget')
checkFolderSources('Tools', '-I Common -I Common/Base/Ogre -I Common/Input/OIS -I Platforms/Ogre/OgrePlatform/include -I Tools/EditorFramework')
checkFolderSources('UnitTests/UnitTest_*', '-I Common -I Common/Base/Ogre -I Common/Input/OIS -I Platforms/Ogre/OgrePlatform/include')
checkFolderSources('Common', '-I Common -I Common/Input/OIS -I Platforms/Ogre/OgrePlatform/include')
checkFolderSources('Platforms/OpenGL/OpenGLPlatform/src', '-I Platforms/OpenGL/OpenGLPlatform/include -I Common')
checkFolderSources('Platforms/Ogre/OgrePlatform/src', '-I Platforms/Ogre/OgrePlatform/include')
checkFolderSources('Platforms/DirectX/DirectXPlatform/src', '-I Platforms/DirectX/DirectXPlatform/include -I Common')
checkFolderSources('Platforms/DirectX11/DirectX11Platform/src', '-I Platforms/DirectX11/DirectX11Platform/include -I Common')
checkFolderSources('Platforms/OpenGL3/OpenGL3Platform/src', '-I Platforms/OpenGL3/OpenGL3Platform/include -I Common')
checkFolderSources('Plugins', '')
checkFolderSources('Wrappers/MyGUI_Export', ' -I Common')
|
{
"content_hash": "832852ae1987d92f5b81da1bde108b79",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 155,
"avg_line_length": 57.98148148148148,
"alnum_prop": 0.7623762376237624,
"repo_name": "fmwviormv/mygui",
"id": "28b26384d36b0441764242360786c5711f4b004e",
"size": "3202",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Scripts/cppcheck/cppcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "768"
},
{
"name": "C",
"bytes": "3738716"
},
{
"name": "C#",
"bytes": "454399"
},
{
"name": "C++",
"bytes": "4634942"
},
{
"name": "CMake",
"bytes": "113360"
},
{
"name": "Python",
"bytes": "18418"
},
{
"name": "Tcl",
"bytes": "28555"
}
],
"symlink_target": ""
}
|
from unittest.mock import patch
import certifi
import pytest
from pyproj.network import set_ca_bundle_path
@patch.dict("os.environ", {}, clear=True)
@patch("pyproj.network._set_ca_bundle_path")
def test_ca_bundle_path__default(c_set_ca_bundle_path_mock):
set_ca_bundle_path()
c_set_ca_bundle_path_mock.assert_called_with(certifi.where())
@pytest.mark.parametrize(
"env_var", ["PROJ_CURL_CA_BUNDLE", "CURL_CA_BUNDLE", "SSL_CERT_FILE"]
)
@patch("pyproj.network._set_ca_bundle_path")
def test_ca_bundle_path__always_certifi(c_set_ca_bundle_path_mock, env_var):
with patch.dict("os.environ", {env_var: "/tmp/dummy/path/cacert.pem"}, clear=True):
set_ca_bundle_path(True)
c_set_ca_bundle_path_mock.assert_called_with(certifi.where())
@patch.dict("os.environ", {}, clear=True)
@patch("pyproj.network._set_ca_bundle_path")
def test_ca_bundle_path__skip(c_set_ca_bundle_path_mock):
set_ca_bundle_path(False)
c_set_ca_bundle_path_mock.assert_called_with("")
@pytest.mark.parametrize(
"env_var", ["PROJ_CURL_CA_BUNDLE", "CURL_CA_BUNDLE", "SSL_CERT_FILE"]
)
@patch("pyproj.network._set_ca_bundle_path")
def test_ca_bundle_path__env_var_skip(c_set_ca_bundle_path_mock, env_var):
with patch.dict("os.environ", {env_var: "/tmp/dummy/path/cacert.pem"}, clear=True):
set_ca_bundle_path()
c_set_ca_bundle_path_mock.assert_called_with("")
@pytest.mark.parametrize(
"env_var", ["PROJ_CURL_CA_BUNDLE", "CURL_CA_BUNDLE", "SSL_CERT_FILE"]
)
@patch("pyproj.network._set_ca_bundle_path")
def test_ca_bundle_path__custom_path(c_set_ca_bundle_path_mock, env_var):
with patch.dict("os.environ", {env_var: "/tmp/dummy/path/cacert.pem"}, clear=True):
set_ca_bundle_path("/my/path/to/cacert.pem")
c_set_ca_bundle_path_mock.assert_called_with("/my/path/to/cacert.pem")
|
{
"content_hash": "ba6892daa087aee2e61b5391b6f7ba13",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 87,
"avg_line_length": 36.52,
"alnum_prop": 0.6900328587075575,
"repo_name": "pyproj4/pyproj",
"id": "3c559c9a9ee1cfc9148121da981d29c8c24baf93",
"size": "1826",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "test/test_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "194750"
},
{
"name": "Makefile",
"bytes": "2671"
},
{
"name": "Python",
"bytes": "651842"
},
{
"name": "Shell",
"bytes": "10347"
}
],
"symlink_target": ""
}
|
import unittest
import os
from conans.paths import CONANFILE, CONAN_MANIFEST
from conans.util.files import save, load
from conans.model.ref import ConanFileReference
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.model.manifest import FileTreeManifest
from conans.test.tools import TestClient
class ExportSettingsTest(unittest.TestCase):
def test_basic(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
settings = {"os": ["Linux"]}
"""
files = {CONANFILE: conanfile}
client.save(files)
client.run("export lasote/stable")
self.assertIn("WARN: Conanfile doesn't have a 'license'", client.user_io.out)
client.run("install Hello/1.2@lasote/stable -s os=Windows", ignore_error=True)
self.assertIn("'Windows' is not a valid 'settings.os' value", client.user_io.out)
self.assertIn("Possible values are ['Linux']", client.user_io.out)
class ExportTest(unittest.TestCase):
def setUp(self):
self.conan = TestClient()
self.files = cpp_hello_conan_files("Hello0", "0.1")
self.conan_ref = ConanFileReference("Hello0", "0.1", "lasote", "stable")
self.conan.save(self.files)
self.conan.run("export lasote/stable")
def test_basic(self):
""" simple registration of a new conans
"""
reg_path = self.conan.paths.export(self.conan_ref)
manif = FileTreeManifest.loads(load(self.conan.paths.digestfile_conanfile(self.conan_ref)))
self.assertIn('%s: A new conanfile.py version was exported' % str(self.conan_ref),
self.conan.user_io.out)
self.assertIn('%s: Folder: %s' % (str(self.conan_ref), reg_path), self.conan.user_io.out)
self.assertTrue(os.path.exists(reg_path))
for name in list(self.files.keys()):
self.assertTrue(os.path.exists(os.path.join(reg_path, name)))
expected_sums = {'hello.cpp': '4f005274b2fdb25e6113b69774dac184',
'main.cpp': '0479f3c223c9a656a718f3148e044124',
'CMakeLists.txt': 'bc3405da4bb0b51a3b9f05aca71e58c8',
'conanfile.py': '5632cf850a7161388ab24f42b9bdb3fd',
'executable': '68b329da9893e34099c7d8ad5cb9c940',
'helloHello0.h': '9448df034392fc8781a47dd03ae71bdd'}
self.assertEqual(expected_sums, manif.file_sums)
def test_case_sensitive(self):
self.files = cpp_hello_conan_files("hello0", "0.1")
self.conan_ref = ConanFileReference("hello0", "0.1", "lasote", "stable")
self.conan.save(self.files)
error = self.conan.run("export lasote/stable", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Cannot export package with same name but different case",
self.conan.user_io.out)
def test_export_filter(self):
content = """
from conans import ConanFile
class OpenSSLConan(ConanFile):
name = "openssl"
version = "2.0.1"
"""
save(os.path.join(self.conan.current_folder, CONANFILE), content)
self.conan.run("export lasote/stable")
reg_path = self.conan.paths.export(ConanFileReference.loads('openssl/2.0.1@lasote/stable'))
self.assertEqual(sorted(os.listdir(reg_path)), [CONANFILE, CONAN_MANIFEST])
content = """
from conans import ConanFile
class OpenSSLConan(ConanFile):
name = "openssl"
version = "2.0.1"
exports = ('*.txt', '*.h')
"""
save(os.path.join(self.conan.current_folder, CONANFILE), content)
self.conan.run("export lasote/stable")
reg_path = self.conan.paths.export(ConanFileReference.loads('openssl/2.0.1@lasote/stable'))
self.assertEqual(sorted(os.listdir(reg_path)),
['CMakeLists.txt', CONANFILE, CONAN_MANIFEST, 'helloHello0.h'])
# Now exports being a list instead a tuple
content = """
from conans import ConanFile
class OpenSSLConan(ConanFile):
name = "openssl"
version = "2.0.1"
exports = ['*.txt', '*.h']
"""
save(os.path.join(self.conan.current_folder, CONANFILE), content)
self.conan.run("export lasote/stable")
reg_path = self.conan.paths.export(ConanFileReference.loads('openssl/2.0.1@lasote/stable'))
self.assertEqual(sorted(os.listdir(reg_path)),
['CMakeLists.txt', CONANFILE, CONAN_MANIFEST, 'helloHello0.h'])
def test_export_the_same_code(self):
file_list = self._create_packages_and_builds()
# Export the same conans
conan2 = TestClient(self.conan.base_folder)
files2 = cpp_hello_conan_files("Hello0", "0.1")
conan2.save(files2)
conan2.run("export lasote/stable")
reg_path2 = conan2.paths.export(self.conan_ref)
digest2 = FileTreeManifest.loads(load(conan2.paths.digestfile_conanfile(self.conan_ref)))
self.assertNotIn('A new Conan version was exported', conan2.user_io.out)
self.assertNotIn('Cleaning the old builds ...', conan2.user_io.out)
self.assertNotIn('Cleaning the old packs ...', conan2.user_io.out)
self.assertNotIn('All the previous packs were cleaned', conan2.user_io.out)
self.assertIn('%s: A new conanfile.py version was exported' % str(self.conan_ref),
self.conan.user_io.out)
self.assertIn('%s: Folder: %s' % (str(self.conan_ref), reg_path2), self.conan.user_io.out)
self.assertTrue(os.path.exists(reg_path2))
for name in list(files2.keys()):
self.assertTrue(os.path.exists(os.path.join(reg_path2, name)))
expected_sums = {'hello.cpp': '4f005274b2fdb25e6113b69774dac184',
'main.cpp': '0479f3c223c9a656a718f3148e044124',
'CMakeLists.txt': 'bc3405da4bb0b51a3b9f05aca71e58c8',
'conanfile.py': '5632cf850a7161388ab24f42b9bdb3fd',
'executable': '68b329da9893e34099c7d8ad5cb9c940',
'helloHello0.h': '9448df034392fc8781a47dd03ae71bdd'}
self.assertEqual(expected_sums, digest2.file_sums)
for f in file_list:
self.assertTrue(os.path.exists(f))
def test_export_a_new_version(self):
self._create_packages_and_builds()
# Export an update of the same conans
conan2 = TestClient(self.conan.base_folder)
files2 = cpp_hello_conan_files("Hello0", "0.1")
files2[CONANFILE] = "# insert comment\n %s" % files2[CONANFILE]
conan2.save(files2)
conan2.run("export lasote/stable")
reg_path3 = conan2.paths.export(self.conan_ref)
digest3 = FileTreeManifest.loads(load(conan2.paths.digestfile_conanfile(self.conan_ref)))
self.assertIn('%s: A new conanfile.py version was exported' % str(self.conan_ref),
self.conan.user_io.out)
self.assertIn('%s: Folder: %s' % (str(self.conan_ref), reg_path3), self.conan.user_io.out)
self.assertTrue(os.path.exists(reg_path3))
for name in list(files2.keys()):
self.assertTrue(os.path.exists(os.path.join(reg_path3, name)))
expected_sums = {'hello.cpp': '4f005274b2fdb25e6113b69774dac184',
'main.cpp': '0479f3c223c9a656a718f3148e044124',
'CMakeLists.txt': 'bc3405da4bb0b51a3b9f05aca71e58c8',
'conanfile.py': '7f7a5352e781be814b86e8f333593b4f',
'executable': '68b329da9893e34099c7d8ad5cb9c940',
'helloHello0.h': '9448df034392fc8781a47dd03ae71bdd'}
self.assertEqual(expected_sums, digest3.file_sums)
# for f in file_list:
# self.assertFalse(os.path.exists(f))
def _create_packages_and_builds(self):
reg_builds = self.conan.paths.builds(self.conan_ref)
reg_packs = self.conan.paths.packages(self.conan_ref)
folders = [os.path.join(reg_builds, '342525g4f52f35f'),
os.path.join(reg_builds, 'ew9o8asdf908asdf80'),
os.path.join(reg_packs, '342525g4f52f35f'),
os.path.join(reg_packs, 'ew9o8asdf908asdf80')]
file_list = []
for f in folders:
for name, content in {'file1.h': 'asddfasdf', 'file1.dll': 'asddfasdf'}.items():
file_path = os.path.join(f, name)
save(file_path, content)
file_list.append(file_path)
return file_list
|
{
"content_hash": "6e881f1ecfe4c1a20ae865e87fce14d9",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 99,
"avg_line_length": 44.06666666666667,
"alnum_prop": 0.6273711160246712,
"repo_name": "dragly/conan",
"id": "4325282e9d5675ca31fb769a88c10596c306897d",
"size": "8593",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/command/export_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1383611"
},
{
"name": "Shell",
"bytes": "1132"
}
],
"symlink_target": ""
}
|
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
### Protocol constants
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
### Utility functions
def recvall(s, n):
'''Receive n bytes from a socket, or fail'''
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
### Implementation classes
class Socks5Configuration(object):
'''Proxy configuration'''
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command(object):
'''Information about an incoming socks5 command'''
def __init__(self, cmd, atyp, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection(object):
def __init__(self, serv, conn, peer):
self.serv = serv
self.conn = conn
self.peer = peer
def handle(self):
'''
Handle socks5 request according to RFC1928
'''
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
(ver,cmd,rsv,atyp) = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = str(recvall(self.conn, n))
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
print('Proxy: ', cmdin)
# Fall through to disconnect
except Exception,e:
traceback.print_exc(file=sys.stderr)
self.serv.queue.put(e)
finally:
self.conn.close()
class Socks5Server(object):
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = Queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, peer) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn, peer)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert(not self.running)
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
|
{
"content_hash": "f79c8a24ea464f41b0648bd4ac918673",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 125,
"avg_line_length": 35.12738853503185,
"alnum_prop": 0.5592021758839528,
"repo_name": "marlengit/hardfork_prototype_1_mvf-bu",
"id": "844c0c64462a97cc6c0aafe5e3254951abbade93",
"size": "5760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/test_framework/socks5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "647624"
},
{
"name": "C++",
"bytes": "4698227"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3821"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "156005"
},
{
"name": "Makefile",
"bytes": "96858"
},
{
"name": "Objective-C",
"bytes": "5375"
},
{
"name": "Objective-C++",
"bytes": "7360"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "765443"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "36574"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta
from abc import abstractmethod
import six
class CommunicatorBase(six.with_metaclass(ABCMeta)):
'''Interface definition of all communicators.
All communicators that have compatible set of methods with this
class is supposed to work in ChainerMN's parallel computation
implementation. The methods are named after MPI functions, such
as ``bcast()`` came from ``MPI_Bcast()``.
There are two types of methods: one that treats Python objects
have ``_obj`` suffix. The other has methods without any suffix
and it handles ndarray and arrays filled with scaler values. So
the number of methods would be ::
[send, recv, bcast, gather, allreduce] * [ '_obj', '']
(with single exception ``alltoall``, ``allreduce_grad``, ``split``
and ``bcast_data`` so far). Also methods are supposed to be
written in this order. All those methods must be implemented in
its implementation class, or otherwise it cannot be instantiated
in runtime.
.. note:: As most implementation of ``_obj``-sufficed methods
involves Python object pickling and unpickling, there is an
implicit size limit.
TODO(kuenishi): as of now no implementation class actually has
``allreduce`` method.
'''
def __init__(self):
pass
@property
def rank(self):
'''Rank (process id in the cluster) of this process in integer.'''
raise NotImplementedError()
@property
def size(self):
'''Number of processes of the cluster.'''
raise NotImplementedError()
@property
def intra_rank(self):
'''Intra rank (process id in the machine) of this process.'''
raise NotImplementedError()
@property
def intra_size(self):
'''Number of processes in the machine of this process.'''
raise NotImplementedError()
@property
def inter_rank(self):
'''The rank of this node in the cluster.'''
raise NotImplementedError()
@property
def inter_size(self):
'''Number of nodes that participates the cluster.'''
raise NotImplementedError()
@abstractmethod
def split(self, color, key):
"""A function anologous to ``MPI_Comm_Split`` .
This method splits the inter MPI commnicator and return a wrapped
ChainerMN communicator.
Args:
color (int):
Index of new group. The process with the same color will be
assigned to the same group.
key (int):
Control of rank assignment. The process will be assigned
a rank in the new group ordered by the value of key.
If you do not care of the rank, you can just simply specify
the original rank.
Returns:
CommunicatorBase
"""
raise NotImplementedError()
@abstractmethod
def alltoall(self, xs):
'''All-to-all implementation for ndarray
Args:
xs (tuple of numpy/cupy array)
Returns:
ys (tuple of numpy/cupy array):
Received arrays. The length of tuple equals to
the communicator size.
'''
raise NotImplementedError()
# on ndarrays and such
@abstractmethod
def send(self, data, dest, tag):
'''Sends an ndarray to destination
Receiver must invoke ``recv()`` to wait for the message.
Args:
data: data to be sent (tuple, list or raw numpy/cupy array)
dest (int): Rank of the destination process
tag (int): The tag to identify the message
'''
raise NotImplementedError()
@abstractmethod
def recv(self, source, tag):
'''Receives an ndarray from source.
To receive the message, sender must send the data.
Args:
source (int): Rank of the source process
tag (int): The tag to specifically receive the message
Returns:
The data sent from source process
'''
raise NotImplementedError()
@abstractmethod
def bcast(self, data, max_buf_len=None, root=0):
'''Broadcasts an ndarray from root process to all processes
Args:
data (numpy/cupy array): for root process, the data to broadcast.
For non-root processes, this argument is ignored.
max_buf_len (int): Length of send buffer.
root (int): the process who has the data to broadcast.
Returns:
ys (numpy/cupy array) : The data sent from root process
'''
raise NotImplementedError()
@abstractmethod
def gather(self, data, root=0):
'''Gathers an ndarray from all processes to root process
Args:
data (ndarray, or scaler): for root process this is ignored. For
For non-root processes, the data to send to root process.
root (int): rank of the process who receives the data.
Returns:
For root process, the ndarray sent from non-root processes.
For non-root processes, what?
'''
raise NotImplementedError()
@abstractmethod
def allgather(self, x):
"""A primitive of inter-process all-gather communication.
This method tries to invoke all-gather communication within the
communicator. All processes in the communicator are expected to
invoke ``allgather()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
Note that this method can only handle the same shapes of data
over all processes, and cannot handle tuple data.
Args:
x (numpy/cupy array): Array to be gathered.
Returns:
ys (tuple of numpy/cupy array): Received arrays.
"""
raise NotImplementedError()
@abstractmethod
def allreduce(self, data):
'''Allreduce operation among processes
Processes one of several aggregation operations using all data from
all processes and returns the result of the aggregation to all
processes.
TODO(kuenishi): add ``op`` argument once we find a use case
for operations other than 'SUM'.
Args:
data (ndarray): the data to aggregate among all nodes.
Returns:
Sum of all data from all processes.
'''
raise NotImplementedError()
# on objects
@abstractmethod
def send_obj(self, obj, dest, tag):
'''Sends an arbitrary Python object to destination with a tag.
Args:
obj: Arbitrary object to send to receiver.
dest (int): Rank number of receiver process (destination).
tag: tag to identify the message.
'''
raise NotImplementedError()
@abstractmethod
def recv_obj(self, source, tag):
'''Receives an arbitrary Python object from source process with a tag.
Args:
source (int): Rank number of sender process, to selectively receive
the object.
tag: tag to identify the message.
Returns:
an object sent from the source by ``send_obj``.
'''
raise NotImplementedError()
@abstractmethod
def bcast_obj(self, obj, max_buf_len=None, root=0):
'''Broadcasts an arbitrary object from root to all non-root processes.
Args:
obj: arbitrary object to broadcast to all other non-root processes.
Will be ignored at all non-root processes.
max_buf_len (int): max length of the send buffer
root (int): rank of the root processes who sends an object
Returns:
an object sent from the root process.
'''
raise NotImplementedError()
@abstractmethod
def gather_obj(self, obj, root=0):
'''Gathers arbitrary objects from all non-root processes to root process.
Args:
obj: arbtrary object to send to root process. Root process will
receive this argument included in returned list.
root (int): rank of the root node who receives all objects.
Returns:
A list of objects sent from all processes.
TODO(kuenishi): make sure the ordering of objects in the returned list.
'''
raise NotImplementedError()
@abstractmethod
def allreduce_obj(self, obj):
'''Apply a reduce operation to all objects and spread the result.
For example of integers and summation, equivalent local code is::
>>> from functools import reduce
>>> reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])
15
The only operation currently supported is summation.
TODO(kuenishi): support other operations such as 'MAX', 'MIN'
and 'PROD' with ``op`` argument once we need any of them.
Args:
obj: An arbitrary object to apply reduce operation. Must have
corresponding operation method e.g. ``__plus__()``.
Returns:
The result of the operation applied to all objects.
'''
raise NotImplementedError()
# Special communication methods on grads and data of models
@abstractmethod
def bcast_data(self, model):
'''Broadcast Chainer model parameter data'''
raise NotImplementedError()
def broadcast_data(self, model):
'''Broadcast Chainer model parameter data
Left for backward compatibility, but ill be deprecated in
future version. Use ``bcast_data()`` method instad.
'''
self.bcast_data(model)
@abstractmethod
def allreduce_grad(self, model):
'''Works as same as ``allreduce_obj`` but for Chainer model gradients
.. note:: this only supports `SUM` same as ``allreduce_obj``.
'''
raise NotImplementedError()
|
{
"content_hash": "ea48ae9ceeee9857470d5b22b52ff68f",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 81,
"avg_line_length": 31.39622641509434,
"alnum_prop": 0.6171875,
"repo_name": "tkerola/chainer",
"id": "978c3e8c2073eedb80f4ee56670b0bb95876e5bb",
"size": "9984",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "chainermn/communicators/communicator_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
}
|
import argparse,json
parser = argparse.ArgumentParser(description='Traefik acme.json key and cert extractor utility.')
parser.add_argument('filepath', metavar='<filepath>', help='Path to acme.json')
parser.add_argument('fqdn', metavar='<FQDN>', help="FQDN to match in a certificates 'main' or 'sans' field")
# Only one of these options can be used at a time, `const` is the key value that will be queried:
key_or_cert = parser.add_mutually_exclusive_group(required=True)
key_or_cert.add_argument('--key', dest='requested', action='store_const', const='key', help='Output the key data to stdout')
key_or_cert.add_argument('--cert', dest='requested', action='store_const', const='certificate', help='Output the cert data to stdout')
args = parser.parse_args()
def has_fqdn(domains, fqdn):
main = domains.get('main', '')
sans = domains.get('sans', [])
return main == fqdn or fqdn in sans
# Searches the acme.json data for the target FQDN,
# upon a match returns the requested key or cert:
def retrieve_data():
with open(args.filepath) as json_file:
acme_data = json.load(json_file)
for key, value in acme_data.items():
try:
certs = value['Certificates'] or []
for cert in certs:
if has_fqdn(cert['domain'], args.fqdn):
return cert[args.requested]
# One of the expected keys is missing.. return an empty result
# Certificates: [{domain: [main, sans], key, certificate}]
except KeyError:
return None
# No match == 'None', we convert to empty string for
# existing error handling:
result = retrieve_data() or ''
print(result)
|
{
"content_hash": "fc27d34a4fd4b0ccb63277335fd350d0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 134,
"avg_line_length": 45.13157894736842,
"alnum_prop": 0.646064139941691,
"repo_name": "tomav/docker-mailserver",
"id": "e9e207df78d358f737fd241de3c0ff0220b2b424",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "target/bin/acme_extract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "198"
},
{
"name": "Dockerfile",
"bytes": "13608"
},
{
"name": "Makefile",
"bytes": "3607"
},
{
"name": "NASL",
"bytes": "1260"
},
{
"name": "Python",
"bytes": "1738"
},
{
"name": "Shell",
"bytes": "424419"
},
{
"name": "Sieve",
"bytes": "450"
}
],
"symlink_target": ""
}
|
__author__ = 'tan'
import datetime
import pytz
from boto.dynamodb2.fields import HashKey, RangeKey, GlobalKeysOnlyIndex
from boto.dynamodb2.types import STRING
from juliabox.db import JBoxDB, JBoxDBItemNotFound
class JBoxAPISpec(JBoxDB):
NAME = 'jbox_apispec'
SCHEMA = [
HashKey('api_name', data_type=STRING)
]
INDEXES = [
GlobalKeysOnlyIndex('publisher-api_name-index', parts=[
HashKey('publisher', data_type=STRING),
RangeKey('api_name', data_type=STRING)
])
]
TABLE = None
KEYS = ['api_name']
ATTRIBUTES = ['publisher', 'cmd', 'image_name', 'description', 'timeout_secs', 'create_time']
def __init__(self, api_name, cmd=None, image_name=None, description=None,
publisher=None, timeout_secs=None, create=False):
try:
self.item = self.fetch(api_name=api_name)
self.is_new = False
except JBoxDBItemNotFound:
if create:
dt = datetime.datetime.now(pytz.utc)
data = {
'api_name': api_name,
'cmd': cmd,
'description': description,
'publisher': publisher,
'create_time': JBoxAPISpec.datetime_to_epoch_secs(dt)
}
if image_name is not None:
data['image_name'] = image_name
if timeout_secs is not None:
data['timeout_secs'] = timeout_secs
self.create(data)
self.item = self.fetch(api_name=api_name)
self.is_new = True
else:
raise
def get_api_name(self):
return self.get_attrib('api_name', None)
def get_timeout_secs(self):
return int(self.get_attrib('timeout_secs', 30))
def get_description(self):
return self.get_attrib('description', None)
def get_publisher(self):
return self.get_attrib('publisher', None)
def get_image_name(self):
return self.get_attrib('image_name', 'juliabox/juliaboxapi:latest')
def get_cmd(self):
return self.get_attrib('cmd', None)
def get_create_time(self):
return int(self.get_attrib('create_time', None))
def set_cmd(self, cmd):
self.set_attrib('cmd', cmd)
def set_description(self, description):
self.set_attrib('description', description)
def set_timeout_secs(self, timeout_secs):
self.set_attrib('timeout_secs', timeout_secs)
def set_publisher(self, publisher):
self.set_attrib('publisher', publisher)
def set_image_name(self, image_name):
self.set_attrib('image_name', image_name)
def as_json(self):
def _add_not_none(d, n, v):
if v is not None:
d[n] = v
jsonval = dict()
_add_not_none(jsonval, 'api_name', self.get_api_name())
_add_not_none(jsonval, 'cmd', self.get_cmd())
_add_not_none(jsonval, 'image_name', self.get_image_name())
_add_not_none(jsonval, 'description', self.get_description())
_add_not_none(jsonval, 'publisher', self.get_publisher())
_add_not_none(jsonval, 'timeout_secs', self.get_timeout_secs())
_add_not_none(jsonval, 'create_time', self.get_create_time())
return jsonval
@staticmethod
def get_api_info(publisher, api_name):
if publisher is None and api_name is None:
raise
ret = []
if publisher is None:
ret.append(JBoxAPISpec(api_name).as_json())
else:
if api_name is None:
api_name = ' '
records = JBoxAPISpec.query(publisher__eq=publisher, api_name__gte=api_name,
index='publisher-api_name-index')
for api in records:
ret.append(JBoxAPISpec(api['api_name']).as_json())
return ret
@staticmethod
def set_api_info(api_name, cmd=None, image_name=None, description=None, publisher=None, timeout_secs=None):
try:
api = JBoxAPISpec(api_name)
if cmd is not None:
api.set_cmd(cmd)
if image_name is not None:
api.set_image_name(image_name)
if description is not None:
api.set_description(description)
if publisher is not None:
api.set_publisher(publisher)
if timeout_secs is not None:
api.set_timeout_secs(timeout_secs)
api.save()
except JBoxDBItemNotFound:
JBoxAPISpec(api_name, cmd=cmd, image_name=image_name, description=description, publisher=publisher,
timeout_secs=timeout_secs, create=True)
|
{
"content_hash": "5212a230602681315cd38e1b2f1ce46e",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 111,
"avg_line_length": 34.107142857142854,
"alnum_prop": 0.5660732984293194,
"repo_name": "EricForgy/JuliaBox",
"id": "77349cb3a598f738756eb61c25d1e1947064bef8",
"size": "4775",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "engine/src/juliabox/db/api_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6283"
},
{
"name": "HTML",
"bytes": "3261"
},
{
"name": "JavaScript",
"bytes": "54121"
},
{
"name": "Julia",
"bytes": "294"
},
{
"name": "Lua",
"bytes": "10378"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "408486"
},
{
"name": "Shell",
"bytes": "20351"
},
{
"name": "Smarty",
"bytes": "59656"
}
],
"symlink_target": ""
}
|
"""
oommfmif: A module for calling OOMMF from Python.
"""
import os
import subprocess
import sys
from textwrap import dedent
def retrieve_oommf_path():
"""
retrieve_oommf_path()
Gets value from the environment variable oommf_path.
Returns
-------
string
Path to folder containing oommf.tcl
Notes
-----
Environment variable OOMMF_PATH should point to the directory which
contains 'oommf.tcl'
"""
if 'OOMMF_PATH' not in os.environ:
msg = dedent("""\
Please set the OOMMF_PATH environment variable to point to the
directory that contains the file 'oommf.tcl'. In bash, you can
write:
export OOMMF_PATH=/yourhome/yourpath/to/oommf
This can be added to the ~/.bashrc, for example, to be executed
automatically.
Cowardly stopping here.
""")
print(msg)
sys.exit(1)
else:
oommf_path = os.environ['OOMMF_PATH']
return oommf_path
def retrieve_oommf_executable(path):
"""Given an OOMMF_PATH as path, we either expect 'oommf.tcl' to be the main
retrieve_oommf_executable or a script called 'oommf' which may call
oommf.tcl internally, and maybe sets some envinorment variables. The
conda oommf installation creates such a 'oommf' shell script."""
oommf_path = path
files = os.listdir(oommf_path)
if 'oommf' in files and 'oommf.tcl' not in files:
return 'oommf'
elif 'oommf.tcl' in files and 'oommf' not in files:
return 'oommf.tcl'
elif 'oommf' in files and 'oommf.tcl' in files:
msg = "There is 'oommf' and 'oommf.tcl' in {}. Don't now which"\
" one to use".format(oommf_path)
print(msg)
raise RuntimeError(msg)
elif 'oommf' not in files and 'oommf.tcl' not in files:
msg = "Can't find 'oommf' or 'oommf.tcl' in {}. Giving up."\
.format(oommf_path)
print(msg)
raise RuntimeError(msg)
else:
msg = "Unknown outcome - shoudn't be possible."
raise NotImplementedError(msg)
oommf_path = retrieve_oommf_path()
oommf_executable = retrieve_oommf_executable(oommf_path)
def call_oommf(argstring, workdir=None):
"""Convenience function to call OOMMF: Typical usage
p = call_oommf("+version")
p.wait()
stdout, stderr = p.stdout.read(), p.stderr.read()
the 'prefixcommand' allows to execute the command in a different directory.
"""
if workdir:
command = "cd {} && ".format(workdir)
else:
command = ""
command += os.path.join(oommf_path, oommf_executable) + ' ' + argstring
print("About to execute: '{}'".format(command))
p = subprocess.Popen(command,
shell=True, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return p
def get_version():
"""Return OOMMF version as string, something like 1.2.0.5"""
p = call_oommf('+version')
p.wait()
stderr = p.stderr.readlines() # version is returned in stderr
# output is something like "<15330> oommf.tcl 1.2.0.6 info:\noommf.tcl
# 1.2.0.6"
line = stderr[0].decode()
assert 'oommf.tcl' in line
versionstring = line.split('oommf.tcl')[1].strip()
return versionstring
def get_oommf_path():
return oommf_path
|
{
"content_hash": "689ff8c68235a98cb029234590f0f23d",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 29.42105263157895,
"alnum_prop": 0.6195587358378056,
"repo_name": "ryanpepper/oommf-python",
"id": "cd51e48c01e3107ecbb6633f433fafb607ff866b",
"size": "3354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joommf/oommfmif.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "161"
},
{
"name": "Emacs Lisp",
"bytes": "2282"
},
{
"name": "Jupyter Notebook",
"bytes": "101733"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "137229"
},
{
"name": "Ruby",
"bytes": "295"
},
{
"name": "Shell",
"bytes": "3512"
}
],
"symlink_target": ""
}
|
"""Support for WLED updates."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.components.update import (
UpdateDeviceClass,
UpdateEntity,
UpdateEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import WLEDDataUpdateCoordinator
from .helpers import wled_exception_handler
from .models import WLEDEntity
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up WLED update based on a config entry."""
coordinator: WLEDDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities([WLEDUpdateEntity(coordinator)])
class WLEDUpdateEntity(WLEDEntity, UpdateEntity):
"""Defines a WLED update entity."""
_attr_device_class = UpdateDeviceClass.FIRMWARE
_attr_supported_features = (
UpdateEntityFeature.INSTALL | UpdateEntityFeature.SPECIFIC_VERSION
)
_attr_title = "WLED"
_attr_name = "Firmware"
def __init__(self, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize the update entity."""
super().__init__(coordinator=coordinator)
self._attr_unique_id = coordinator.data.info.mac_address
@property
def installed_version(self) -> str | None:
"""Version currently installed and in use."""
if (version := self.coordinator.data.info.version) is None:
return None
return str(version)
@property
def latest_version(self) -> str | None:
"""Latest version available for install."""
# If we already run a pre-release, we consider being on the beta channel.
# Offer beta version upgrade, unless stable is newer
if (
(beta := self.coordinator.data.info.version_latest_beta) is not None
and (current := self.coordinator.data.info.version) is not None
and (current.alpha or current.beta or current.release_candidate)
and (
(stable := self.coordinator.data.info.version_latest_stable) is None
or (stable is not None and stable < beta)
)
):
return str(beta)
if (stable := self.coordinator.data.info.version_latest_stable) is not None:
return str(stable)
return None
@property
def release_url(self) -> str | None:
"""URL to the full release notes of the latest version available."""
if (version := self.latest_version) is None:
return None
return f"https://github.com/Aircoookie/WLED/releases/tag/v{version}"
@wled_exception_handler
async def async_install(
self, version: str | None, backup: bool, **kwargs: Any
) -> None:
"""Install an update."""
if version is None:
# We cast here, as we know that the latest_version is a string.
version = cast(str, self.latest_version)
await self.coordinator.wled.upgrade(version=version)
await self.coordinator.async_refresh()
|
{
"content_hash": "381c3332f317e4fd601dab72ba510a2b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 84,
"avg_line_length": 35.7,
"alnum_prop": 0.6641767818238407,
"repo_name": "nkgilley/home-assistant",
"id": "75546fdac1a64ff43bec4e87324e7e90c15fc10d",
"size": "3213",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/wled/update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import omniORB
omniORB.updateModule("CosTradingRepos__POA")
# ** 1. Stub files contributing to this module
import CosTradingRepos_idl
# ** 2. Sub-modules
# ** 3. End
|
{
"content_hash": "1e74bdac5318da1ea72689fbfca75685",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 46,
"avg_line_length": 18.77777777777778,
"alnum_prop": 0.727810650887574,
"repo_name": "amonmoce/corba_examples",
"id": "f4379d70a82ec4c9433262b9abd82839bbe63463",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omniORBpy-4.2.1/build/python/COS/CosTradingRepos__POA/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "621806"
},
{
"name": "C++",
"bytes": "9176868"
},
{
"name": "CSS",
"bytes": "1124"
},
{
"name": "Groff",
"bytes": "12904"
},
{
"name": "HTML",
"bytes": "1078062"
},
{
"name": "Java",
"bytes": "1838"
},
{
"name": "Makefile",
"bytes": "1832000"
},
{
"name": "Python",
"bytes": "4113588"
},
{
"name": "Shell",
"bytes": "124202"
},
{
"name": "TeX",
"bytes": "513468"
},
{
"name": "Yacc",
"bytes": "28999"
}
],
"symlink_target": ""
}
|
from sudo.settings import COOKIE_NAME, COOKIE_AGE
from sudo.utils import (
grant_sudo_privileges,
revoke_sudo_privileges,
has_sudo_privileges,
)
from django.core.signing import BadSignature
from django.utils.http import is_safe_url
from .base import BaseTestCase
class GrantSudoPrivilegesTestCase(BaseTestCase):
def assertRequestHasToken(self, request, max_age):
token = request.session[COOKIE_NAME]
self.assertRegexpMatches(token, r"^\w{12}$")
self.assertTrue(request._sudo)
self.assertEqual(request._sudo_token, token)
self.assertEqual(request._sudo_max_age, max_age)
def test_grant_token_not_logged_in(self):
with self.assertRaises(ValueError):
grant_sudo_privileges(self.request)
def test_grant_token_default_max_age(self):
self.login()
token = grant_sudo_privileges(self.request)
self.assertIsNotNone(token)
self.assertRequestHasToken(self.request, COOKIE_AGE)
def test_grant_token_explicit_max_age(self):
self.login()
token = grant_sudo_privileges(self.request, 60)
self.assertIsNotNone(token)
self.assertRequestHasToken(self.request, 60)
def test_without_user(self):
delattr(self.request, "user")
token = grant_sudo_privileges(self.request)
self.assertIsNone(token)
class RevokeSudoPrivilegesTestCase(BaseTestCase):
def assertRequestNotSudo(self, request):
self.assertFalse(self.request._sudo)
self.assertNotIn(COOKIE_NAME, self.request.session)
def test_revoke_sudo_privileges_noop(self):
revoke_sudo_privileges(self.request)
self.assertRequestNotSudo(self.request)
def test_revoke_sudo_privileges(self):
self.login()
grant_sudo_privileges(self.request)
revoke_sudo_privileges(self.request)
self.assertRequestNotSudo(self.request)
class HasSudoPrivilegesTestCase(BaseTestCase):
def test_untouched(self):
self.assertFalse(has_sudo_privileges(self.request))
def test_granted(self):
self.login()
grant_sudo_privileges(self.request)
self.assertTrue(has_sudo_privileges(self.request))
def test_revoked(self):
self.login()
grant_sudo_privileges(self.request)
revoke_sudo_privileges(self.request)
self.assertFalse(has_sudo_privileges(self.request))
def test_cookie_and_token_match(self):
self.login()
def get_signed_cookie(key, salt="", max_age=None):
return "abc123"
self.request.session[COOKIE_NAME] = "abc123"
self.request.get_signed_cookie = get_signed_cookie
self.assertTrue(has_sudo_privileges(self.request))
def test_cookie_and_token_mismatch(self):
self.login()
def get_signed_cookie(key, salt="", max_age=None):
return "nope"
self.request.session[COOKIE_NAME] = "abc123"
self.assertFalse(has_sudo_privileges(self.request))
def test_cookie_bad_signature(self):
self.login()
def get_signed_cookie(key, salt="", max_age=None):
raise BadSignature
self.request.session[COOKIE_NAME] = "abc123"
self.assertFalse(has_sudo_privileges(self.request))
def test_missing_keys(self):
self.login()
self.assertFalse(has_sudo_privileges(self.request))
class IsSafeUrlTestCase(BaseTestCase):
def test_success(self):
urls = (
("/", None),
("/foo/", None),
("/", "example.com"),
("http://example.com/foo", "example.com"),
)
for url in urls:
self.assertTrue(is_safe_url(*url))
def test_failure(self):
urls = (
(None, None),
("", ""),
("http://mattrobenolt.com/", "example.com"),
("///example.com/", None),
("ftp://example.com", "example.com"),
("http://example.com\@mattrobenolt.com", "example.com"), # noqa: W605
("http:///example.com", "example.com"),
("\x08//example.com", "example.com"),
)
for url in urls:
self.assertFalse(is_safe_url(*url))
|
{
"content_hash": "13963a43f504ec84f67ae351c5b8a72b",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 82,
"avg_line_length": 31.772727272727273,
"alnum_prop": 0.6306628516928946,
"repo_name": "mattrobenolt/django-sudo",
"id": "67b4c8dcfde82e3dc8ca87ad030473b204857573",
"size": "4194",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35356"
}
],
"symlink_target": ""
}
|
'''
Created on Mar 3, 2016
@author: enikher
'''
import os
def create_env():
glanceclient.Client()
if __name__ == '__main__':
create_env()
|
{
"content_hash": "02769b58b6c49954a9fe9d6550471bac",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 26,
"avg_line_length": 10.066666666666666,
"alnum_prop": 0.5761589403973509,
"repo_name": "nikolas-hermanns/flash-test",
"id": "90a70597613137432f705ac2e2c5ec22092bf417",
"size": "151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Create_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32536"
}
],
"symlink_target": ""
}
|
from kuyruk import Kuyruk, Config
config = Config()
config.from_pyfile('/tmp/kuyruk_config.py')
kuyruk = Kuyruk(config=config)
@kuyruk.task()
def print_message(m):
print(m)
if __name__ == '__main__':
print_message('asdf')
|
{
"content_hash": "836c997aa2ca659a957825cf9df5c14b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 43,
"avg_line_length": 15.733333333333333,
"alnum_prop": 0.6567796610169492,
"repo_name": "cenkalti/kuyruk",
"id": "8e1f09bb5d1ab7a6b9ab44494e001f4e32d47dc1",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/loader/onefile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "944"
},
{
"name": "Python",
"bytes": "69162"
},
{
"name": "Shell",
"bytes": "801"
}
],
"symlink_target": ""
}
|
from cms import VERSION
import platform
from setuptools import setup, find_packages
import sys
DEPENDENCIES = [
'django',
'django-suit',
'Pillow',
'django-reversion==1.8.7',
'django-historylinks',
'django-watson',
'django-extensions',
'Werkzeug',
'opbeat',
'bcrypt',
'django-compressor',
'sorl-thumbnail',
'bcrypt',
'onespacemedia-server-management',
'requests',
'python-social-auth',
'python-memcached',
'django-cachalot',
'geoip',
]
if platform.python_implementation() == "PyPy":
DEPENDENCIES.append("psycopg2cffi")
else:
DEPENDENCIES.append("psycopg2")
if sys.version_info[0] == 3:
DEPENDENCIES.remove("python-memcached")
DEPENDENCIES.append("python3-memcached")
DEPENDENCIES.remove("onespacemedia-server-management")
EXCLUDE_FROM_PACKAGES = ['cms.bin']
setup(
name="onespacemedia-cms",
version=".".join(str(n) for n in VERSION),
url="https://github.com/onespacemedia/cms",
author="Daniel Samuels",
author_email="daniel@onespacemedia.com",
license="BSD",
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['cms/bin/start_cms_project.py'],
zip_safe=False,
entry_points={
"console_scripts": [
"start_cms_project.py = cms.bin.start_cms_project:main",
],
},
description='CMS used by Onespacemedia',
install_requires=DEPENDENCIES,
extras_require={
'usertools': ["django-usertools"],
'testing': ["mock", "coverage", "coveralls", "codecov"],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
{
"content_hash": "6a827ff399a8ad7eb377ad1589a32865",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 68,
"avg_line_length": 26.367088607594937,
"alnum_prop": 0.6197791646663466,
"repo_name": "danielsamuels/cms",
"id": "aded01ad58394d1e1b204f040208b96217767d49",
"size": "2121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "81790"
},
{
"name": "HTML",
"bytes": "67746"
},
{
"name": "JavaScript",
"bytes": "277617"
},
{
"name": "Python",
"bytes": "370099"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
import os
import sys
import math
import logging
import argparse
import threading
from rpaths import PosixPath
from tej import RemoteQueue, JobNotFound, RemoteCommandFailure, JobAlreadyExists
import loading
def msg(message, *args, **kwargs):
print(message.format(*args, **kwargs), file=sys.stdout)
def set_msg(m):
global msg
msg = m
loading.set_msg(m)
def get_envs():
return loading.get_envs()
def get_servers():
return loading.get_servers()
def get_projects():
return loading.get_projects()
def set_password_reuse(reuse_pw):
loading.set_password_reuse(reuse_pw)
def init_passwords():
loading.init_passwords()
def get_connector(project):
with loading.MAIN_LOCK:
if project not in Connector._ALL_CONNECTORS:
Connector(project) # adds itself to the list
return Connector._ALL_CONNECTORS[project]
class Connector(object):
SCRIPT_FILE = "_start"
_ALL_CONNECTORS = {}
def __init__(self, p):
self._lock = threading.RLock()
self._job_number = 0
self._project = loading.get_project(p)
self._rqs = dict([ (s.name, loading.get_remote(s)) for s in self._project["servers"] ])
Connector._ALL_CONNECTORS[p] = self
def get_path(self):
return self._project.path_local
def get_commands(self):
return self._project.commands
def get_env(self):
return self._project["env"].name
def _get_env(self, rq, chk):
if len(chk) == 4:
name, cmd, regex, line = chk
else:
name, cmd, regex, line, _ = chk
output = rq.check_output(cmd)
oarr = output.split("\n")
if line >= len(oarr):
raise ValueError("line {0} not in:\n{1}".format(line, oarr))
m = regex.search(oarr[line])
if m is None:
raise ValueError("unexpected mismatch {0} not in:\n{1}".format(regex.pattern, oarr[line]))
return name, m.group(1)
def get_vital_value(self, rq, chk):
name, c = self._get_env(rq, chk)
asc = chk[4]
if c:
try:
return name, float(c), asc
except TypeError:
pass
return name, float('nan'), asc
def get_vitals(self, rq):
return [ self.get_vital_value(rq, b) for b in self._project["env"]["vital"] ]
def get_servers(self):
return [ s.name for s in self._project["servers"] ]
def get_servers_info(self):
return [ {
"server": s,
"vital": self.get_vital_value(self._rqs[s], self._project["env"]["vital"][0])[1],
} for s in self.get_servers() ]
def get_server_stats(self, s):
server = self._project.servers[s]
rq = self._rqs[s]
return {
"name": server["hostname"],
"versions": [ self._get_env(rq, chk) for chk in self._project["env"]["versions"] ],
"vitals": self.get_vitals(self._rqs[s]),
}
def get_all_vitals(self):
return [ (s, self.get_vitals(self._rqs[s])) for s in self.get_servers() ]
def get_best_server(self):
servers = self.get_servers()
if len(servers) < 2:
return servers[0] if servers else None
all_vitals = self.get_all_vitals()
cur_ix = 0
best_s = []
best_num = float('nan')
while len(best_s) < 2 and cur_ix < len(all_vitals[0][1]):
for (s, cur) in all_vitals:
_, num, asc = cur[cur_ix]
if math.isnan(best_num):
best_s = [ s ]
best_num = num
elif num == best_num:
best_s.append(s)
else:
if asc:
if num < best_num:
best_s = [ s ]
best_num = num
else:
if num > best_num:
best_s = [ s ]
best_num = num
cur_ix += 1
return best_s[0] if len(best_s) > 0 else None
_STATUS = dict([
(RemoteQueue.JOB_DONE, "done"),
(RemoteQueue.JOB_RUNNING, "running"),
(RemoteQueue.JOB_INCOMPLETE, "incomplete"),
(RemoteQueue.JOB_CREATED, "created"),
("missing", "missing"),
("error", "error"),
])
def get_all_jobs(self):
def desc(s, j, info):
if i["status"] == RemoteQueue.JOB_DONE:
if "result" not in i: # FIXME: hack for tej without result in list
return self.get_job_status(s, j)[0]
if int(i["result"]) != 0:
return Connector._STATUS["error"]
return Connector._STATUS.get(i["status"], "?")
return [ (s, j, desc(s, j, i)) for s in self.get_servers() for (j, i) in self.get_job_list(s) ]
def get_job_list(self, s):
prefix = "{0}_".format(self._project.name)
rq = self._rqs[s]
return [ ji for ji in loading.list_jobs(rq) if ji[0].startswith(prefix) ]
def get_job_status(self, s, j):
rq = self._rqs[s]
try:
status, _, result = rq.status(j)
if status == RemoteQueue.JOB_DONE and int(result) != 0:
status = "error"
except JobNotFound:
status = "missing"
result = "?"
except RemoteCommandFailure as rcf:
status = "error"
result = rcf.ret
return Connector._STATUS.get(status, "?"), result
def submit_job(self, s, cmd):
if not cmd.strip():
raise ValueError("cannot execute empty command: {0}".format(cmd))
with self._lock:
rq = self._rqs[s]
path = self._project.path_local
self._project.add_cmd(cmd)
with open(os.path.join(path, Connector.SCRIPT_FILE), 'wb') as f:
print(cmd, file=f)
while True:
try:
job_name = "{0}_{1}".format(self._project.name, self._job_number)
call = "sh -l ./{0}".format(Connector.SCRIPT_FILE)
return rq.submit(job_name, path, call)
except JobAlreadyExists:
pass
finally:
self._job_number += 1
def delete_job(self, s, j):
with self._lock:
rq = self._rqs[s]
loading.kill_job(rq, s, j)
def delete_all_jobs(self):
with self._lock:
for (s, j, _) in self.get_all_jobs():
self.delete_job(s, j)
def get_job_files(self, s, j, rel_path):
rq = self._rqs[s]
status, path, result = rq.status(j)
rel_path = PosixPath(rel_path)
if rel_path.is_absolute:
rel_path = PosixPath(".")
res = rq.check_output("ls -p1t {0}".format(str(path / rel_path))).split("\n")
if rel_path != ".":
res.insert(0, "../")
return res
def get_job_file(self, s, j, req_file):
rq = self._rqs[s]
status, path, result = rq.status(j)
path = PosixPath(loading.DIR_TEMP) / s / j
res = str(path / req_file)
path_str = os.path.dirname(res)
if not os.path.exists(path_str):
os.makedirs(path_str)
if not os.path.exists(res) or status == RemoteQueue.JOB_RUNNING:
try:
rq.download(j, [ req_file ], destination=path_str)
except JobNotFound:
return None
return res
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parcell Connector')
parser.add_argument('--reuse-pw', action='store_true', dest='reuse_pw', help="only ask for one password")
parser.add_argument('-v', '--verbose', action='count', default=1, dest='verbosity', help="augments verbosity level")
parser.add_argument('project', type=str, nargs='?', help="project file")
args = parser.parse_args()
levels = [ logging.CRITICAL, logging.WARNING, logging.INFO, logging.DEBUG ]
logging.basicConfig(level=levels[min(args.verbosity, 3)])
if not args.project:
for p in loading.get_projects():
print(p)
exit(0)
msg("{0}", " ".join(sys.argv))
msg("initializing passwords -- please type as prompted")
set_password_reuse(args.reuse_pw)
init_passwords()
msg("initializing passwords -- done")
conn = Connector(args.project)
for s in conn.get_servers():
for (k, v) in conn.get_server_stats(s).items():
if isinstance(v, (list, tuple)):
print("{0}:".format(k))
for (kk, vv) in v:
print(" {0}: {1}".format(kk, vv))
else:
print("{0}: {1}".format(k, v))
|
{
"content_hash": "6ed102bd1d83cd7151a8f0e387156883",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 120,
"avg_line_length": 33.12359550561798,
"alnum_prop": 0.5293984622342831,
"repo_name": "JosuaKrause/parcell",
"id": "ea9f1389d66ab9db6b32dbd9e3e87d53d4b5badf",
"size": "8890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parcell/connector.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135071"
},
{
"name": "HTML",
"bytes": "31503"
},
{
"name": "JavaScript",
"bytes": "7752"
},
{
"name": "Python",
"bytes": "55278"
}
],
"symlink_target": ""
}
|
from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.python import log
from txamqp.protocol import AMQClient
from txamqp.client import TwistedDelegate
from helpers import random_queue, process_config, update_or_create_repository, \
CommandFailed, read_config
from socket import gethostname
import simplejson as json
import settings
def callback_wrapper(projects_dir, git_user, git_server):
def recv_callback(msg, chan, queue):
log.msg("Received: %s" % msg.content.body)
if msg.content.body == "status":
log.msg("status: ok - %s" % gethostname())
else:
try:
data = json.loads(msg.content.body)
repository = data['repository']
except (ValueError, KeyError):
log.msg("Failed to decode json object")
else:
try:
update_or_create_repository(data['repository'],
projects_dir,
git_user,
git_server)
except CommandFailed, e:
log.msg("Failed to run command: \"%s\"" % e.value)
return (queue.get().addCallback(recv_callback, chan, queue))
return recv_callback
def echo_callback(msg, chan, queue):
log.msg('Received: %s on channel: %s' % (msg.content.body, chan.id))
return (queue.get().addCallback(echo_callback, chan, queue))
@inlineCallbacks
def gotConnection(conn, authentication):
config = process_config(read_config(settings.gitosis_config))
QUEUE = random_queue()
yield conn.start(authentication)
chan = yield conn.channel(1)
yield chan.channel_open()
# Initialize the MQ state
yield chan.queue_declare(queue=QUEUE, durable=False, exclusive=False, auto_delete=True)
yield chan.exchange_declare(exchange=config['exchange'], type="fanout",
durable=True, auto_delete=False)
yield chan.queue_bind(queue=QUEUE, exchange=config['exchange'])
yield chan.basic_consume(queue=QUEUE, no_ack=True, consumer_tag="smart")
queue = yield conn.queue("smart")
recv_callback = callback_wrapper(config['projects_dir'],
config['git_user'],
config['git_server'])
yield (queue.get().addCallback(recv_callback, chan, queue))
# This is all about closing the connection nicely
yield chan.basic_cancel("smart")
yield chan.channel_close()
chan0 = yield conn.channel(0)
yield chan0.connection_close()
reactor.stop()
|
{
"content_hash": "6461cfcfbfc3fa1c58af4cfe9b08022b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 91,
"avg_line_length": 36.8,
"alnum_prop": 0.6101449275362318,
"repo_name": "silentrob/smart-app-update",
"id": "b2897254305a34ca17903e6bdde9d801a2e7c8b2",
"size": "2760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitosis_update_listener/listener.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import mock
from flask import request
from tests.base import OsfTestCase
from website.ember_osf_web.decorators import ember_flag_is_active
from osf_tests.factories import FlagFactory, UserFactory
from django.contrib.auth.models import Group
class TestEmberFlagIsActive(OsfTestCase):
def setUp(self):
super(TestEmberFlagIsActive, self).setUp()
self.flag = FlagFactory(name='active_flag')
FlagFactory(name='inactive_flag', everyone=False).save()
self.mock_func = lambda: 'test value'
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_use_ember_app(self, mock_use_ember_app):
ember_flag_is_active('active_flag')(self.mock_func)()
mock_use_ember_app.assert_called_with()
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_dont_use_ember_app(self, mock_use_ember_app):
ember_flag_is_active('inactive_flag')(self.mock_func)()
assert not mock_use_ember_app.called
@mock.patch('website.ember_osf_web.decorators._get_current_user')
@mock.patch('website.ember_osf_web.decorators.waffle.flag_is_active', return_value=True)
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_authenticated_user(self, mock_use_ember_app, mock_flag_is_active, mock__get_current_user):
user = UserFactory()
mock__get_current_user.return_value = user
ember_flag_is_active('active_flag')(self.mock_func)()
mock_flag_is_active.assert_called_with(request, 'active_flag')
assert request.user == user
mock_use_ember_app.assert_called_with()
@mock.patch('website.ember_osf_web.decorators._get_current_user', return_value=None)
@mock.patch('website.ember_osf_web.decorators.waffle.flag_is_active', return_value=True)
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_unauthenticated_user(self, mock_use_ember_app, mock_flag_is_active, mock__get_current_user):
ember_flag_is_active('active_flag')(self.mock_func)()
group = Group.objects.create(name='foo')
self.flag.groups.add(group)
mock_flag_is_active.assert_called_with(request, 'active_flag')
assert not request.user.is_authenticated
mock_use_ember_app.assert_called_with()
|
{
"content_hash": "2ff655c538311e6d2f4cefd48a1719fd",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 126,
"avg_line_length": 42.50909090909091,
"alnum_prop": 0.7031650983746792,
"repo_name": "icereval/osf.io",
"id": "ab7f5dc56e326ec95acde2bf5bc0fc5c58c8d49c",
"size": "2339",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "tests/test_ember_osf_web.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "108526"
},
{
"name": "HTML",
"bytes": "261937"
},
{
"name": "JavaScript",
"bytes": "1856123"
},
{
"name": "Mako",
"bytes": "691640"
},
{
"name": "Python",
"bytes": "8331919"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django_webtest import WebTest
from testing.common import create_team
from testing.common import generate_fake_links
class TeamToolUsageTests(WebTest):
def test_list_top_tools_ordered(self):
# Create and log in a user
get_user_model().objects.create_user(userid='user@0001.com')
form = self.app.get(reverse('login')).form
form['userid'] = 'user0001com'
form.submit().follow()
t = create_team(
name='Two members top teams', num_members=2
)
user1 = t.user_set.all()[0]
(self.el1, self.el2,
self.el3, self.el4,
self.el5, self.el6,
self.el7, self.el8,
self.el9, self.el10) = generate_fake_links(
user1,
count=10,
is_external=True
)
for i in range(0, 8):
self.el4.register_usage(user1, force_new=True)
for i in range(0, 6):
self.el8.register_usage(user1, force_new=True)
for i in range(0, 5):
self.el3.register_usage(user1, force_new=True)
for i in range(0, 4):
self.el1.register_usage(user1, force_new=True)
for i in range(0, 2):
self.el9.register_usage(user1, force_new=True)
for i in range(0, 1):
self.el10.register_usage(user1, force_new=True)
response = self.app.get(reverse('team-detail', kwargs={"pk": t.pk}))
tools_list = response.html.find(id="top_links_list")
tools_list_items = tools_list.findChildren('a')
self.assertEqual(
len(tools_list_items),
5
)
self.assertIn(
self.el4.name,
tools_list_items[0].text
)
self.assertIn(
self.el8.name,
tools_list_items[1].text
)
self.assertIn(
self.el3.name,
tools_list_items[2].text
)
self.assertIn(
self.el1.name,
tools_list_items[3].text
)
self.assertIn(
self.el9.name,
tools_list_items[4].text
)
self.assertNotIn(
self.el10.name,
tools_list.text
)
self.assertNotIn(
self.el2.name,
tools_list.text
)
self.assertNotIn(
self.el5.name,
tools_list.text
)
self.assertNotIn(
self.el6.name,
tools_list.text
)
self.assertNotIn(
self.el7.name,
tools_list.text
)
|
{
"content_hash": "9996c33485248c30ef3eb0947193411a",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 26.65,
"alnum_prop": 0.5287054409005628,
"repo_name": "dstl/lighthouse",
"id": "40faf10f090ae49fd0e84e76b9e52659dbaf3e90",
"size": "2707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/teams/tests/test_team_details.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "171981"
},
{
"name": "HTML",
"bytes": "188493"
},
{
"name": "JavaScript",
"bytes": "175233"
},
{
"name": "Python",
"bytes": "301851"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Shell",
"bytes": "8326"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='utilkit', # pip install utilkit
description='Collection of useful helper functions for datetime, print, string formatting, directory and file handling, and more',
#long_description=open('README.md', 'rt').read(),
long_description=long_description,
# version
# third part for minor release
# second when api changes
# first when it becomes stable someday
version='0.4.1',
author='Michiel Scholten',
author_email='michiel@diginaut.net',
url='https://github.com/aquatix/python-utilkit',
license='MIT',
# as a practice no need to hard code version unless you know program wont
# work unless the specific versions are used
install_requires=['requests', 'pytz', 'future'],
packages=['utilkit'],
#py_modules=['utilkit'],
classifiers=[
"Topic :: Software Development :: Libraries",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
zip_safe=True,
)
|
{
"content_hash": "2ed2d7528d80ca78ad010d62ea1c4f05",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 134,
"avg_line_length": 34.63333333333333,
"alnum_prop": 0.6458132820019249,
"repo_name": "aquatix/python-utilkit",
"id": "b9e5d658c5432fdc149e056c16fdc83a1a22f6f2",
"size": "2079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15973"
}
],
"symlink_target": ""
}
|
from distutils.core import setup, Extension
from distutils.command.build import build
import os
import sys
import setuptools
# Replicating the methods used in the RAVEN Makefile to find CROW_DIR,
# If the Makefile changes to be more robust, so should this
# We should be doing a search for CROW, I would think, we should not force a
# directory structure
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
CROW_DIR = os.path.join(CURR_DIR,'crow')
BOOST_INCLUDE_DIR = os.path.join(CROW_DIR,'contrib','include')
RAVEN_INCLUDE_DIR = os.path.join('include','contrib')
DIST_INCLUDE_DIR = os.path.join(CROW_DIR,'include', 'distributions')
UTIL_INCLUDE_DIR = os.path.join(CROW_DIR,'include', 'utilities')
# We need a custom build order in order to ensure that amsc.py is available
# before we try to copy it to the target location
class CustomBuild(build):
sub_commands = [('build_ext', build.has_ext_modules),
('build_py', build.has_pure_modules),
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts)]
include_dirs=[RAVEN_INCLUDE_DIR,BOOST_INCLUDE_DIR, DIST_INCLUDE_DIR, UTIL_INCLUDE_DIR]
swig_opts=['-c++','-I'+RAVEN_INCLUDE_DIR, '-I'+BOOST_INCLUDE_DIR,'-I'+DIST_INCLUDE_DIR, '-I'+UTIL_INCLUDE_DIR, '-py3']
extra_compile_args=['-std=c++11']
try:
eigen_flags = subprocess.check_output(["./scripts/find_eigen.py"]).decode("ascii")
except:
eigen_flags = ""
if eigen_flags.startswith("-I"):
include_dirs.append(eigen_flags[2:].rstrip())
long_description = open("README.md", "r").read()
setup(name='raven_framework',
version='2.2',
description='RAVEN (Risk Analysis Virtual Environment) is designed to perform parametric and probabilistic analysis based on the response of complex system codes. RAVEN C++ dependenciences including a library for computing the Approximate Morse-Smale Complex (AMSC) and Crow probability tools',
long_description=long_description,
url="https://raven.inl.gov/",
package_dir={'AMSC': 'src/AMSC', 'crow_modules': 'src/crow_modules', 'ravenframework': 'ravenframework'},
classifiers=['Programming Language :: Python :: 3'],
entry_points={
'console_scripts': [
'raven_framework = ravenframework.Driver:wheelMain'
]
},
ext_modules=[
Extension('crow_modules._distribution1D',
['src/crow_modules/distribution1D.i',
'src/distributions/distribution.cxx',
'src/utilities/MDreader.cxx',
'src/utilities/inverseDistanceWeigthing.cxx',
'src/utilities/microSphere.cxx',
'src/utilities/NDspline.cxx',
'src/utilities/ND_Interpolation_Functions.cxx',
'src/distributions/distributionNDBase.cxx',
'src/distributions/distributionNDNormal.cxx',
'src/distributions/distributionFunctions.cxx',
'src/distributions/DistributionContainer.cxx',
'src/distributions/distribution_1D.cxx',
'src/distributions/randomClass.cxx',
'src/distributions/distributionNDCartesianSpline.cxx'],
include_dirs=include_dirs,
swig_opts=swig_opts,
extra_compile_args=extra_compile_args),
Extension('crow_modules._randomENG',['src/crow_modules/randomENG.i','src/distributions/randomClass.cxx'],include_dirs=include_dirs,swig_opts=swig_opts,extra_compile_args=extra_compile_args),
Extension('crow_modules._interpolationND',['src/crow_modules/interpolationND.i','src/utilities/ND_Interpolation_Functions.cxx','src/utilities/NDspline.cxx','src/utilities/microSphere.cxx','src/utilities/inverseDistanceWeigthing.cxx','src/utilities/MDreader.cxx','src/distributions/randomClass.cxx'],include_dirs=include_dirs,swig_opts=swig_opts,extra_compile_args=extra_compile_args),
Extension('AMSC._amsc',['src/AMSC/amsc.i',
'src/AMSC/UnionFind.cpp',
'src/AMSC/AMSC.cpp'],
include_dirs=include_dirs, swig_opts=swig_opts,extra_compile_args=extra_compile_args)],
py_modules=['AMSC.amsc','crow_modules.distribution1D','crow_modules.randomENG','crow_modules.interpolationND', 'AMSC.AMSC_Object'],
packages=['ravenframework.'+x for x in setuptools.find_packages('ravenframework')]+['ravenframework'],
cmdclass={'build': CustomBuild})
|
{
"content_hash": "4e219eedc25bc361ead7564dec597ccb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 394,
"avg_line_length": 58.74025974025974,
"alnum_prop": 0.6634976785319479,
"repo_name": "idaholab/raven",
"id": "a82a49c24798864ab9d413fdba62c9bc246e1f8b",
"size": "5112",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556316"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "7004752"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8622"
},
{
"name": "Shell",
"bytes": "124289"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.rbac import GroupToRoleMappingDB
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.rbac import GroupToRoleMapping
from st2tests import DbTestCase
from tests.unit.base import BaseDBModelCRUDTestCase
__all__ = [
"RoleDBModelCRUDTestCase",
"UserRoleAssignmentDBModelCRUDTestCase",
"PermissionGrantDBModelCRUDTestCase",
"GroupToRoleMappingDBModelCRUDTestCase",
]
class RoleDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = RoleDB
persistance_class = Role
model_class_kwargs = {
"name": "role_one",
"description": None,
"system": False,
"permission_grants": [],
}
update_attribute_name = "name"
class UserRoleAssignmentDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = UserRoleAssignmentDB
persistance_class = UserRoleAssignment
model_class_kwargs = {
"user": "user_one",
"role": "role_one",
"source": "source_one",
"is_remote": True,
}
update_attribute_name = "role"
class PermissionGrantDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = PermissionGrantDB
persistance_class = PermissionGrant
model_class_kwargs = {
"resource_uid": "pack:core",
"resource_type": "pack",
"permission_types": [],
}
update_attribute_name = "resource_uid"
class GroupToRoleMappingDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = GroupToRoleMappingDB
persistance_class = GroupToRoleMapping
model_class_kwargs = {
"group": "some group",
"roles": ["role_one", "role_two"],
"description": "desc",
"enabled": True,
}
update_attribute_name = "group"
|
{
"content_hash": "15bd8cea26842a002e17dd10921b5868",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 81,
"avg_line_length": 31.17910447761194,
"alnum_prop": 0.7161321206318813,
"repo_name": "Plexxi/st2",
"id": "d9c3fcc958a3d6639b169331325f42e7e02248ba",
"size": "2717",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/tests/unit/test_db_rbac.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
import unittest, sys, random, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_import as h2i, h2o_hosts
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
pass
print "Will build clouds with incrementing heap sizes and import folder/parse"
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_import_covtype_parse_loop(self):
csvFilename = "covtype.data"
importFolderPath = "standard"
trialMax = 2
for tryHeap in [4,3,2,1]:
print "\n", tryHeap,"GB heap, 4 jvms, import folder, then loop parsing 'covtype.data' to unique keys"
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=4,java_heap_GB=tryHeap)
else:
h2o_hosts.build_cloud_with_hosts(node_count=4,java_heap_GB=tryHeap)
for trial in range(trialMax):
# import each time, because h2o deletes source after parse
csvPathname = importFolderPath + "/" + csvFilename
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key, timeoutSecs=20)
# sticky ports?
h2o.tear_down_cloud()
time.sleep(5)
print "Waiting 60 secs for TIME_WAIT sockets to go away"
time.sleep(60)
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "8e275363088743eef7be8aea03dc192d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 128,
"avg_line_length": 35.28888888888889,
"alnum_prop": 0.5969773299748111,
"repo_name": "janezhango/BigDataMachineLearning",
"id": "6c2e7c3f9f606e87e06fa4b3cf64043930c23fe2",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/testdir_multi_jvm/test_import_covtype_parse_4jvm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
class HomeView(LoginRequiredMixin, TemplateView):
login_url = 'accounts/login/'
redirect_field_name = 'home'
template_name = 'pages/home.html'
|
{
"content_hash": "e3ee56f93cf85a5a0648977e9a0b54b5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 30.2,
"alnum_prop": 0.7682119205298014,
"repo_name": "mjj55409/cpq-exporter",
"id": "88e2f1e97bfed56457317a32844aa0ffc82a860c",
"size": "326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpq_exporter/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3501"
},
{
"name": "HTML",
"bytes": "19418"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "149200"
},
{
"name": "Shell",
"bytes": "4188"
}
],
"symlink_target": ""
}
|
from pprint import pprint # pylint: disable=unused-import
from test_server import Response
# from grab.error import (
# GrabInternalError,
# GrabCouldNotResolveHostError,
# GrabTimeoutError,
# GrabInvalidUrl,
# )
from tests.util import build_grab
from tests.util import BaseGrabTestCase
class GrabRequestTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_get_method(self):
self.server.add_response(Response())
grab = build_grab()
grab.go(self.server.get_url())
self.assertEqual("GET", self.server.request.method)
def test_delete_method(self):
self.server.add_response(Response())
grab = build_grab()
grab.setup(method="delete")
grab.go(self.server.get_url())
self.assertEqual("DELETE", self.server.request.method)
def test_put_method(self):
self.server.add_response(Response())
grab = build_grab()
grab.setup(method="put", post=b"abc")
grab.go(self.server.get_url())
self.assertEqual("PUT", self.server.request.method)
self.assertEqual("3", self.server.request.headers.get("content-length"))
def test_head_with_invalid_bytes(self):
def callback():
return {
"type": "response",
"status": 200,
"headers": [("Hello-Bug", b"start\xa0end")],
"data": b"",
}
self.server.add_response(Response(callback=callback))
grab = build_grab()
grab.go(self.server.get_url())
# def test_redirect_with_invalid_byte(self):
# url = self.server.get_url()
# invalid_url = "http://\xa0" + url # .encode('ascii')
# def callback():
# return {
# "type": "response",
# "status": 301,
# "headers": [("Location", invalid_url)],
# "data": b"",
# }
# self.server.add_response(Response(callback=callback))
# grab = build_grab()
# # GrabTimeoutError raised when tests are being runned on computer
# # without access to the internet (no DNS service available)
# self.assertRaises(
# (
# GrabInternalError,
# GrabCouldNotResolveHostError,
# GrabTimeoutError,
# GrabInvalidUrl,
# ),
# grab.go,
# self.server.get_url(),
# )
def test_options_method(self):
self.server.add_response(Response())
grab = build_grab()
grab.setup(method="options", post=b"abc")
grab.go(self.server.get_url())
self.assertEqual("OPTIONS", self.server.request.method)
self.assertEqual("3", self.server.request.headers.get("content-length"))
self.server.add_response(Response())
grab = build_grab()
grab.setup(method="options")
grab.go(self.server.get_url())
self.assertEqual("OPTIONS", self.server.request.method)
self.assertTrue("Content-Length" not in self.server.request.headers)
|
{
"content_hash": "f0aa9da5617788f40e75ab8903b459d4",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 32.87234042553192,
"alnum_prop": 0.580906148867314,
"repo_name": "lorien/grab",
"id": "af9a3ef6b8f0609108640c479f068d248306cb10",
"size": "3090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/grab_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "250730"
},
{
"name": "Makefile",
"bytes": "879"
},
{
"name": "Python",
"bytes": "347132"
}
],
"symlink_target": ""
}
|
import sys
import socket
import select
import time
import re
import json
import struct
import imp
import traceback
import random
import os
import io
import inspect
import types
from collections import deque
import os
import warnings
from encodings import utf_8, ascii
try:
import thread
except ImportError:
# Renamed in Python 3
import _thread as thread
# Reference material
# http://jupyter-client.readthedocs.io/en/latest/messaging.html
# http://pydoc.net/Python/magni/1.4.0/magni.tests.ipynb_examples/
# http://www.xavierdupre.fr/app/pyquickhelper/helpsphinx/_modules/pyquickhelper/ipythonhelper/notebook_runner.html
import visualstudio_py_util as _vspu
to_bytes = _vspu.to_bytes
read_bytes = _vspu.read_bytes
read_int = _vspu.read_int
read_string = _vspu.read_string
write_bytes = _vspu.write_bytes
write_int = _vspu.write_int
write_string = _vspu.write_string
try:
unicode
except NameError:
unicode = str
try:
BaseException
except NameError:
# BaseException not defined until Python 2.5
BaseException = Exception
try:
from Queue import Empty, Queue # Python 2
except ImportError:
from queue import Empty, Queue # Python 3
DEBUG = os.environ.get('DEBUG_EXTENSION_IPYTHON', '0') == '1'
TEST = os.environ.get('VSC_PYTHON_CI_TEST', '0') == '1'
def _debug_write(out):
if DEBUG:
sys.__stdout__.write(out)
sys.__stdout__.write("\n")
sys.__stdout__.flush()
class SafeSendLock(object):
"""a lock which ensures we're released if we take a KeyboardInterrupt exception acquiring it"""
def __init__(self):
self.lock = thread.allocate_lock()
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_value, tb):
self.release()
def acquire(self):
try:
self.lock.acquire()
except KeyboardInterrupt:
try:
self.lock.release()
except:
pass
raise
def release(self):
self.lock.release()
class jediSocketServer(object):
"""back end for executing code in kernel. Handles all of the communication with the remote process."""
"""Messages sent back as responses"""
_PONG = to_bytes('PONG')
_EXIT = to_bytes('EXIT')
_LSKS = to_bytes('LSKS')
_EROR = to_bytes('EROR')
_TEST = to_bytes('TEST')
_STRK = to_bytes('STRK')
_STPK = to_bytes('STPK')
_RSTK = to_bytes('RSTK')
_ITPK = to_bytes('ITPK')
_RUN = to_bytes('RUN ')
_SHEL = to_bytes('SHEL')
_IOPB = to_bytes('IOPB')
def __init__(self):
import threading
self.conn = None
self.send_lock = SafeSendLock()
self.input_event = threading.Lock()
# lock starts acquired (we use it like a manual reset event)
self.input_event.acquire()
self.input_string = None
self.exit_requested = False
self.kernelMonitor = None
self.shell_channel = None
def connect(self, port):
# start a new thread for communicating w/ the remote process
_debug_write('Connecting to socket port: ' + str(port))
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect(('127.0.0.1', port))
_debug_write('Connected to socket')
# perform the handshake
with self.send_lock:
write_string(self.conn, "Some Guid")
write_int(self.conn, os.getpid())
_debug_write('Handshake information sent')
thread.start_new_thread(self.start_processing, ())
def start_processing(self):
"""loop on created thread which processes communicates with the REPL window"""
_debug_write('Started processing thread')
try:
while True:
if self.check_for_exit_socket_loop():
break
# we receive a series of 4 byte commands. Each command then
# has its own format which we must parse before continuing to
# the next command.
self.flush()
self.conn.settimeout(10)
try:
inp = read_bytes(self.conn, 4)
self.conn.settimeout(None)
cmd = jediSocketServer._COMMANDS.get(inp)
if inp and cmd is not None:
id = ""
try:
if jediSocketServer._COMMANDS_WITH_IDS.get(inp) == True:
while True:
try:
id = read_string(self.conn)
break
except socket.timeout:
pass
cmd(self, id)
else:
cmd(self)
except:
commandName = utf_8.decode(inp)[0]
try:
commandName = ascii.Codec.encode(commandName)[
0]
except UnicodeEncodeError:
pass
self.replyWithError(commandName, id)
else:
if inp:
print ('unknown command', inp)
break
except socket.timeout:
pass
except IPythonExitException:
_debug_write('IPythonExitException')
_debug_write(traceback.format_exc())
pass
except socket.error:
_debug_write('socket error')
_debug_write(traceback.format_exc())
pass
except:
_debug_write('error in repl loop')
_debug_write(traceback.format_exc())
# try and exit gracefully, then interrupt main if necessary
time.sleep(2)
traceback.print_exc()
self.exit_process()
def check_for_exit_socket_loop(self):
return self.exit_requested
def replyWithError(self, commandName, id):
with self.send_lock:
traceMessage = traceback.format_exc()
_debug_write('Replying with error:' + traceMessage)
write_bytes(self.conn, jediSocketServer._EROR)
write_string(self.conn, commandName)
write_string(self.conn, "" if id is None else id)
write_string(self.conn, traceMessage)
def _cmd_exit(self):
"""exits the interactive process"""
self.exit_requested = True
self.exit_process()
def _cmd_ping(self, id):
"""ping"""
_debug_write('Ping received')
while True:
try:
message = read_string(self.conn)
break
except socket.timeout:
pass
with self.send_lock:
_debug_write('Pong response being sent out')
write_bytes(self.conn, jediSocketServer._PONG)
write_string(self.conn, id)
write_string(self.conn, message)
def _cmd_lstk(self, id):
"""List kernel specs"""
_debug_write('Listing kernel specs')
kernelspecs = json.dumps(listKernelSpecs())
with self.send_lock:
_debug_write('Replying with kernel Specs')
write_bytes(self.conn, jediSocketServer._LSKS)
write_string(self.conn, id)
write_string(self.conn, kernelspecs)
def _cmd_strk(self, id):
"""Start a kernel by name"""
_debug_write('Listing kernel specs')
while True:
try:
kernelName = read_string(self.conn)
break
except socket.timeout:
pass
kernelUUID = multiKernelManager.start_kernel(kernel_name=kernelName)
self._postStartKernel(kernelUUID)
# get the config and the connection FileExistsError
try:
config = kernel_manager.config
except:
config = {}
try:
connection_file = kernel_manager.connection_file
except:
connection_file = ""
with self.send_lock:
_debug_write('Replying with kernel Specs= ' + str(kernelUUID))
write_bytes(self.conn, jediSocketServer._STRK)
write_string(self.conn, id)
write_string(self.conn, str(kernelUUID))
write_string(self.conn, json.dumps(config))
write_string(self.conn, connection_file)
def _postStartKernel(self, kernelUUID):
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_client = kernel_manager.client()
kernel_client.start_channels()
try:
# IPython 3.x
kernel_client.wait_for_ready()
iopub = kernel_client
shell = kernel_client
# todo: get_stdin_msg
except AttributeError:
# Ipython 2.x
# Based on https://github.com/paulgb/runipy/pull/49/files
iopub = kernel_client.iopub_channel
shell = kernel_client.shell_channel
shell.get_shell_msg = shell.get_msg
iopub.get_iopub_msg = iopub.get_msg
# todo: get_stdin_msg
self.shell_channel = shell
self.kernelMonitor = iPythonKernelResponseMonitor(
kernelUUID, self.conn, self.send_lock, shell, iopub)
def stopKernel(self, kernelUUID):
"""Shutdown a kernel by UUID"""
try:
if self.kernelMonitor is not None:
self.kernelMonitor.stop()
finally:
pass
try:
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_client = kernel_manager.client()
kernel_client.stop_channels()
finally:
pass
try:
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_manager.shutdown_kernel()
except:
pass
finally:
self.shell_channel = None
self.kernelMonitor = None
def _cmd_stpk(self, id):
"""Shutdown a kernel by UUID"""
while True:
try:
kernelUUID = read_string(self.conn)
break
except socket.timeout:
pass
self.stopKernel(kernelUUID)
with self.send_lock:
write_bytes(self.conn, jediSocketServer._STPK)
write_string(self.conn, id)
def _cmd_kill(self, id):
"""Shutdown a kernel by UUID"""
while True:
try:
kernelUUID = read_string(self.conn)
break
except socket.timeout:
pass
try:
if self.kernelMonitor is not None:
self.kernelMonitor.stop()
finally:
pass
try:
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_client = kernel_manager.client()
kernel_client.stop_channels()
finally:
pass
try:
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_manager.shutdown_kernel()
except:
pass
def _cmd_rstk(self, id):
"""Restart a kernel by UUID"""
while True:
try:
kernelUUID = read_string(self.conn)
break
except socket.timeout:
pass
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_manager.restart_kernel(now=True)
with self.send_lock:
write_bytes(self.conn, jediSocketServer._RSTK)
write_string(self.conn, id)
def _cmd_itpk(self, id):
"""Interrupt a kernel by UUID"""
while True:
try:
kernelUUID = read_string(self.conn)
break
except socket.timeout:
pass
kernel_manager = multiKernelManager.get_kernel(kernelUUID)
kernel_manager.interrupt_kernel()
with self.send_lock:
write_bytes(self.conn, jediSocketServer._ITPK)
write_string(self.conn, id)
def _cmd_run(self, id):
"""runs the received snippet of code (kernel is expected to have been started)"""
while True:
try:
code = read_string(self.conn)
break
except socket.timeout:
pass
msg_id = self.shell_channel.execute(code)
_debug_write('msg_id = ' + msg_id)
with self.send_lock:
write_bytes(self.conn, jediSocketServer._RUN)
write_string(self.conn, id)
write_string(self.conn, msg_id)
def _cmd_abrt(self):
"""aborts the current running command"""
# abort command, interrupts execution of the main thread.
pass
def _cmd_inpl(self):
"""handles the input command which returns a string of input"""
self.input_string = read_string(self.conn)
self.input_event.release()
def send_prompt(self, ps1, ps2, update_all=True):
"""sends the current prompt to the interactive window"""
# with self.send_lock:
# write_bytes(self.conn, jediSocketServer._PRPC)
# write_string(self.conn, ps1)
# write_string(self.conn, ps2)
# write_int(self.conn, update_all)
pass
def send_error(self):
"""reports that an error occured to the interactive window"""
with self.send_lock:
write_bytes(self.conn, jediSocketServer._ERRE)
def send_exit(self):
"""reports the that the REPL process has exited to the interactive window"""
with self.send_lock:
write_bytes(self.conn, jediSocketServer._EXIT)
def send_command_executed(self):
with self.send_lock:
write_bytes(self.conn, jediSocketServer._DONE)
def read_line(self):
"""reads a line of input from standard input"""
with self.send_lock:
write_bytes(self.conn, jediSocketServer._RDLN)
self.input_event.acquire()
return self.input_string
def write_stdout(self, value):
"""writes a string to standard output in the remote console"""
with self.send_lock:
write_bytes(self.conn, jediSocketServer._STDO)
write_string(self.conn, value)
def write_stderr(self, value):
"""writes a string to standard input in the remote console"""
with self.send_lock:
write_bytes(self.conn, jediSocketServer._STDE)
write_string(self.conn, value)
################################################################
# Implementation of execution, etc...
def execution_loop(self):
"""loop on the main thread which is responsible for executing code"""
while True:
exit = self.run_one_command(cur_modules, cur_ps1, cur_ps2)
if exit:
return
def run_command(self, command):
"""runs the specified command which is a string containing code"""
pass
def interrupt_main(self):
"""aborts the current running command"""
pass
def exit_process(self):
"""exits the REPL process"""
sys.exit(0)
def flush(self):
"""flushes the stdout/stderr buffers"""
pass
_COMMANDS = {
to_bytes('run '): _cmd_run,
to_bytes('abrt'): _cmd_abrt,
to_bytes('exit'): _cmd_exit,
to_bytes('ping'): _cmd_ping,
to_bytes('inpl'): _cmd_inpl,
to_bytes('lsks'): _cmd_lstk,
to_bytes('strk'): _cmd_strk,
to_bytes('stpk'): _cmd_stpk,
to_bytes('rstk'): _cmd_rstk,
to_bytes('itpk'): _cmd_itpk,
to_bytes('kill'): _cmd_kill,
}
_COMMANDS_WITH_IDS = {
to_bytes('lsks'): True,
to_bytes('ping'): True,
to_bytes('strk'): True,
to_bytes('stpk'): True,
to_bytes('rstk'): True,
to_bytes('itpk'): True,
to_bytes('run '): True,
}
def exit_work_item():
sys.exit(0)
class jediReadLine(object):
def __init__(self):
self._input = io.open(sys.stdin.fileno(), encoding='utf-8')
def _deserialize(self, request):
"""Deserialize request from VSCode.
Args:
request: String with raw request from VSCode.
Returns:
Python dictionary with request data.
"""
return json.loads(request)
def _set_request_config(self, config):
self.use_snippets = config.get('useSnippets')
self.show_doc_strings = config.get('showDescriptions', True)
self.fuzzy_matcher = config.get('fuzzyMatcher', False)
def _process_request(self, request):
"""Accept serialized request from VSCode and write response.
"""
request = self._deserialize(request)
self._set_request_config(request.get('config', {}))
lookup = request.get('lookup', 'completions')
if lookup == 'definitions':
return self._write_response('defs')
elif lookup == 'arguments':
return self._write_response('arguments')
elif lookup == 'usages':
return self._write_response('usages')
else:
return self._write_response('Dont Know')
def _write_response(self, response):
sys.stdout.write(response + '\n')
sys.stdout.flush()
def watch(self):
port = int(sys.argv[1])
_debug_write('Socket port received: ' + str(port))
server = jediSocketServer()
server.connect(port)
sys.__stdout__.write('Started')
sys.__stdout__.write("\n")
sys.__stdout__.flush()
while True:
try:
kernelUUID = self._input.readline()
sys.__stdout__.write('about to die\n')
sys.__stdout__.flush()
if (len(kernelUUID) > 0):
try:
server.stopKernel(kernelUUID)
except:
pass
server.exit_requested = True
sys.__stdout__.write('adios\n')
sys.__stdout__.flush()
except Exception:
sys.stderr.write(traceback.format_exc() + '\n')
sys.stderr.flush()
if __name__ == '__main__':
jediReadLine().watch()
|
{
"content_hash": "37acce07c8ea01dca1afe4e6947d48f9",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 114,
"avg_line_length": 31.638225255972696,
"alnum_prop": 0.5481121898597626,
"repo_name": "lgeiger/ide-python",
"id": "0f5c360ee0dfba815eeed0fbd55b4de553e6e599",
"size": "18541",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dist/debugger/VendorLib/vs-py-debugger/pythonFiles/completionServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10247"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016, Marcelo Leal
Description: Simple Azure Media Services Python library
License: MIT (see LICENSE.txt file for details)
"""
import os
import json
import azurerm
import time
#import pytz
import logging
import datetime
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR
# AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR
# PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS
# ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE
# DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED!
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# Load Azure app defaults
try:
with open('config.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print("ERROR: Expecting config.json in current folder")
sys.exit()
account_name = configData['accountName']
account_key = configData['accountKey']
# Get the access token...
response = azurerm.get_ams_access_token(account_name, account_key)
resjson = response.json()
access_token = resjson["access_token"]
#Initialization...
print ("\n-----------------------= AMS Py =----------------------");
print ("Simple Python Library for Azure Media Services REST API");
print ("-------------------------------------------------------\n");
### list contentkey authorization policies
print ("\n001 >>> Listing Media Asset Delivery Policies")
response = azurerm.list_asset_delivery_policy(access_token)
if (response.status_code == 200):
resjson = response.json()
print("GET Status......................: " + str(response.status_code))
for ap in resjson['d']['results']:
print("Media Asset Delivery Policy Id..............: " + str(ap['Id']))
print("Media Asset Delivery Policy Name............: " + str(ap['Name']))
else:
print("GET Status: " + str(response.status_code) + " - Media Asset Delivery Policy Listing ERROR." + str(response.content))
|
{
"content_hash": "94fb80646ff10f36eb292ea31d38909c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 124,
"avg_line_length": 42.847457627118644,
"alnum_prop": 0.5628955696202531,
"repo_name": "gbowerman/azurerm",
"id": "eda08c0797e5814543b8eabc9a0d071d32fdb831",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/media_services/list_asset_delivery_policies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "Python",
"bytes": "235742"
}
],
"symlink_target": ""
}
|
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or a serialized string of Summary.', key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already saved.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise'
'OutOfRangeError`, the evaluation will never stop.'
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
core_summary.FileWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
to configure Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode, metrics=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec((tag_constants.SERVING,), ()),)):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session, untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()]
output_names = [tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
|
{
"content_hash": "cea3b3c57d4c7ffc72fba3ac8f334b2e",
"timestamp": "",
"source": "github",
"line_count": 1457,
"max_line_length": 85,
"avg_line_length": 40.29993136582018,
"alnum_prop": 0.649164637157893,
"repo_name": "mavenlin/tensorflow",
"id": "0816844df8ee6a0ebbf493a4e39ada5ad6f733b7",
"size": "59407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/estimator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "193501"
},
{
"name": "C++",
"bytes": "28519915"
},
{
"name": "CMake",
"bytes": "636307"
},
{
"name": "Go",
"bytes": "946452"
},
{
"name": "Java",
"bytes": "403360"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38060"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "261095"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25109562"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "371205"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from .base import BaseExtensionTests
class BaseDtypeTests(BaseExtensionTests):
"""Base class for ExtensionDtype classes"""
def test_name(self, dtype):
assert isinstance(dtype.name, str)
def test_kind(self, dtype):
valid = set('biufcmMOSUV')
if dtype.kind is not None:
assert dtype.kind in valid
def test_construct_from_string_own_name(self, dtype):
result = dtype.construct_from_string(dtype.name)
assert type(result) is type(dtype)
# check OK as classmethod
result = type(dtype).construct_from_string(dtype.name)
assert type(result) is type(dtype)
def test_is_dtype_from_name(self, dtype):
result = type(dtype).is_dtype(dtype.name)
assert result is True
def test_is_dtype_unboxes_dtype(self, data, dtype):
assert dtype.is_dtype(data) is True
def test_is_dtype_from_self(self, dtype):
result = type(dtype).is_dtype(dtype)
assert result is True
def test_is_not_string_type(self, dtype):
return not pd.api.types.is_string_dtype(dtype)
def test_is_not_object_type(self, dtype):
return not pd.api.types.is_object_dtype(dtype)
def test_eq_with_str(self, dtype):
assert dtype == dtype.name
assert dtype != dtype.name + '-suffix'
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
|
{
"content_hash": "2d9f53266826363e7dae70e4dcae1902",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 62,
"avg_line_length": 30.3125,
"alnum_prop": 0.6508591065292096,
"repo_name": "louispotok/pandas",
"id": "63d3d807c270c8c5d3716af9aecfa24d5d466c97",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/extension/base/dtype.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "432930"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13452425"
},
{
"name": "Shell",
"bytes": "25056"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
import time
import os
import random
import json
import locale
import stories
def clear():
os.system(['clear','cls'][os.name == 'nt'])
class Player(object):
def __init__(self):
self.health = 100
self.armor = 0
self.weapon = 0
self.enchantment = 0
self.level = 0
self.progress = 0
self.name = "Unknown"
self.pronoun = "He"
self.status = 0
self.gold = 0
self.story = 0
def save_to_file(self):
save_data = {}
save_data['health'] = self.health
save_data['armor'] = self.armor
save_data['weapon'] = self.weapon
save_data['enchantment'] = self.enchantment
save_data['level'] = self.level
save_data['progress'] = self.progress
save_data['name'] = self.name
save_data['pronoun'] = self.pronoun
save_data['status'] = self.status
save_data['gold'] = self.gold
save_data['story'] = self.story
with open("dragonsAndHeroes.save","w+") as openFile:
json.dump(save_data, openFile)
return True
def load_from_file(self):
if os.path.isfile("dragonsAndHeroes.save"):
with open("dragonsAndHeroes.save","r") as openFile:
save_data = json.load(openFile)
try:
self.health = int(save_data['health'])
except KeyError:
self.health = 100
try:
self.armor = int(save_data['armor'])
except KeyError:
self.armor = 0
try:
self.weapon = int(save_data['weapon'])
except KeyError:
self.weapon = 0
try:
self.enchantment = int(save_data['enchantment'])
except KeyError:
self.enchantment = 0
try:
self.level = int(save_data['level'])
except KeyError:
self.level = 0
try:
self.progress = int(save_data['progress'])
except KeyError:
self.progress = 0
try:
self.name = save_data['name']
except KeyError:
self.name = "Samuel the Corrupted"
try:
self.pronoun = save_data['pronoun']
except KeyError:
self.pronoun = "Xir"
try:
self.status = int(save_data['status'])
except KeyError:
self.status = 0
try:
self.gold = int(save_data['gold'])
except KeyError:
self.gold = 0
try:
self.story = int(save_data['story'])
except KeyError:
self.story = 0
return True
else:
return False
def start_game(self):
clear()
print("""
DRAGONS
&
Heroes
""")
time.sleep(2)
clear()
print("In a far away land, in a time long since forgotten...")
print("...")
time.sleep(5)
clear()
print("DRAGONS roamed the land, and harrowed the people into terror.")
print("Yet, one HOPE remained.")
print("...")
time.sleep(5)
clear()
print("Your village was attacked, and everyone you know was killed before your eyes!")
print("Yet, somehow, you survived.")
print("...")
time.sleep(5)
clear()
print("You stumble upon an old hermit, as you search for the nearest town.")
self.name = input("Hermit: What is your name? ")
print("Hermit: HA! " + str(self.name) + " is a terrible name! Well, at least for you.\nYou are a HERO!")
print(str(self.name) + ": What?")
print("...")
time.sleep(5)
clear()
print("Hermit: Well... Maybe not a hero... What are you?")
print("0: Hero")
print("1: Lord")
print("2: Lady")
print("3: Eunuch")
print("4: Mistress")
print("5: Deadpool")
print("6: Villain")
print("7: Bandit")
print("8: Imperial Sympathiser")
print("9: Samurai")
print("10: Mage")
print("11: One of them, I guess.")
print("12: None of those. Go to hell.")
title = input("Enter a number between 0 and 10: ")
if title == "0":
self.name = self.name + " the Hero"
elif title == "1":
self.name = self.name + " the Lord"
elif title == "2":
self.name = self.name + " the Lady"
elif title == "3":
self.name = self.name + " the Eunuch"
elif title == "4":
self.name = self.name + " the Mistress"
elif title == "5":
self.name = "Deadpool"
elif title == "6":
self.name = self.name + " the Villain"
elif title == "7":
self.name = self.name + " the Bandit"
elif title == "8":
self.name = self.name + " the Imperial"
elif title == "9":
self.name = self.name + " the Samurai"
elif title == "10":
self.name = self.name + " the Mage"
elif title == "11":
self.name = self.name + random.choice([" the Hero", " the Lord", " the Lady", " the Eunuch", " the Mistress", " the Villain", " the Bandit", " the Imperial", " the Samurai", " the Mage"])
else:
pass
time.sleep(5)
clear()
if self.name != "Deadpool":
print("Hermit: Wait... Are you a boy... Or?")
print("0: Male")
print("1: Female")
print("2: Indeterminate")
print("3: Faerie")
print("4: Halfling")
print("5: Demonspawn")
pronoun = input("Enter a number between 0 and 5: ")
if pronoun == "0":
self.pronoun = "He"
elif pronoun == "1":
self.pronoun = "Her"
elif pronoun == "2":
self.pronoun = random.choice(["Schkler", "Schklee"])
elif pronoun == "3":
self.pronoun = "Fae"
elif pronoun == "4":
self.pronoun = "Hobbitses"
elif pronoun == "5":
self.pronoun = random.choice(["Xe", "Xir"])
else:
self.pronoun = "Idiot"
time.sleep(5)
clear()
else:
self.pronoun = "He"
print("Hermit: It is your FATE to save this world, " + str(self.name) + "!")
print("...")
time.sleep(10)
clear()
print("The hermit dissolved into pixie dust.")
print("That was kinda weird.")
print("Maybe it might have some relevance later.")
print(str(self.name) + " sets off on their journey... Not knowing if they will save the world... Or burn it.")
print("...")
clear()
def print_stats(self):
locale.setlocale(locale.LC_ALL, 'en_US')
gold = locale.format("%d", self.gold, grouping=True)
print(str(self.name) + " is a " + str(self.level_string()) + ".\n" + str(self.pronoun) + " is wearing " + str(self.armor_string()) + " armor.\n" + str(self.pronoun) + " is using a " + str(self.weapon_string()) + " to fight.\n" + str(self.pronoun) + " have the enchantment of " + str(self.enchantment_string()) + ".\n" + str(self.pronoun) + " have " + str(self.health) + "% health remaining.\n" + str(self.pronoun) + " has " + str(gold) + " gold coins.")
def get_bad_guy(self):
bad_guys = []
bad_guys.append("Vampire")
bad_guys.append("Werewolf")
bad_guys.append("Gnome")
bad_guys.append("Troll")
bad_guys.append("Dwarf")
bad_guys.append("Hermit")
bad_guys.append("Wolf")
bad_guys.append("Cat")
bad_guys.append("Bear")
bad_guys.append("Deadpool")
bad_guys.append("Abandon")
bad_guys.append("Angel")
bad_guys.append("Ant-Lion")
bad_guys.append("Arch Demon")
bad_guys.append("Bahamas")
bad_guys.append("Behemoth")
bad_guys.append("Belial")
bad_guys.append("Samuel")
bad_guys.append("Snake")
bad_guys.append("Cursed Snake")
bad_guys.append("Baal")
bad_guys.append("Leviathan")
bad_guys.append("Draco")
bad_guys.append("Nephilim")
bad_guys.append("Goat")
bad_guys.append("Golem")
bad_guys.append("Gibborim")
bad_guys.append("Wooden Pallet")
bad_guys.append("Oceanus")
bad_guys.append("Reaper")
bad_guys.append("Assassin")
bad_guys.append("Bandit")
bad_guys.append("Imperial")
bad_guys.append("Mage")
bad_guys.append("Samurai")
bad_guys.append("Rogue")
bad_guys.append("Warlock")
bad_guys.append("Necromancer")
bad_guys.append("Skeleton")
bad_guys.append("Talking Cat")
bad_guys.append("Hideous Abyss")
return bad_guys
def get_sentence_end(self):
sentence_ends = []
sentence_ends.append(" by beating it senseless.")
sentence_ends.append(" with extreme sensitivity.")
sentence_ends.append(" effortlessly.")
sentence_ends.append(" after greatly struggling.")
sentence_ends.append(" in a timely manner.")
sentence_ends.append(" whilst yawning.")
sentence_ends.append(" as if " + str(self.pronoun).lower() + " were still training.")
sentence_ends.append(" with the skill of Deadpool.")
return sentence_ends
def verify(self):
if self.health > 100:
self.health = 100
elif self.health < 0:
self.health = 10
if self.gold > 1000000:
self.gold = self.gold - 1000000
self.armor += 1
self.weapon += 1
self.enchantment += 1
self.level += 1
self.progress = 0
print("Using 1 million gold to pay for new armor, weaponry, and enchantments!")
time.sleep(1)
if self.story == 0 and self.level > 10:
stories.storyOne(self)
elif self.story == 1 and self.level > 15:
stories.storyTwo(self)
elif self.story == 2 and self.level > 20:
stories.storyThree(self)
elif self.story == 3 and self.level > 25:
stories.storyFour(self)
elif self.story == 4 and self.level > 30:
stories.storyFive(self)
elif self.story == 5 and self.level > 35:
stories.storySix(self)
elif self.story == 6 and self.level > 40:
stories.storySeven(self)
elif self.story == 7 and self.level > 45:
stories.storyEight(self)
elif self.story == 8 and self.level > 50:
stories.storyNine(self)
elif self.story == 9 and self.level > 55:
stories.storyTen(self)
elif self.armor > 99 and self.level > 99 and self.weapon > 99 and self.story > 9:
stories.storyOver(self)
return True
def benefit(self):
choice = random.choice([0, 1, 2])
if choice == 0:
self.armor += 1
elif choice == 1:
self.weapon += 1
elif choice == 2:
self.enchantment += 1
self.progress += 1
if self.progress > 10:
self.level += 1
self.progress = 0
return True
def found_string(self):
found_strings = []
found_strings.append("stumbled upon a")
found_strings.append("discovered a")
found_strings.append("found a")
found_strings.append("spotted a")
return random.choice(found_strings)
def defeat_string(self):
defeat_strings = []
defeat_strings.append("defeated the")
defeat_strings.append("butchered the")
defeat_strings.append("murdered the")
defeat_strings.append("decimated the")
return random.choice(defeat_strings)
def lost_string(self):
lost_strings = []
lost_strings.append("lost to the")
lost_strings.append("was beaten by the")
lost_strings.append("was defeated by the")
return random.choice(lost_strings)
def event(self):
if self.status % 5:
print(str(self.name) + " is selling loot.")
self.gold += random.choice(range(0, 100 * (self.progress + 1)))
self.health += random.choice(range(0, 20))
self.status += 1
return True
elif self.status % 10 or self.status % 3:
bad_guy = random.choice(self.get_bad_guy())
sentence_end = random.choice(self.get_sentence_end())
print(str(self.name) + " " + str(self.found_string()) + " " + str(bad_guy) + "!")
if random.choice([0, 1]) == 1:
print(str(self.name) + " " + str(self.defeat_string()) + " " + str(bad_guy) + str(sentence_end))
self.benefit()
self.status += 1
self.health = self.health - random.choice(range(0, 25))
time.sleep(2)
return True
else:
print(str(self.name) + " " + str(self.lost_string()) + " " + str(bad_guy) + "...")
self.health = self.health - random.choice(range(0, 50))
self.status += 1
time.sleep(2)
return True
else:
self.status += 1
self.health += random.choice(range(0, 40))
print(str(self.name) + " is sleeping.")
return True
def get_status(self):
if self.status == 0:
return "sleeping"
elif self.status == 1:
return "hunting"
elif self.status == 2:
return "brawling"
elif self.status == 3:
return "fighting"
elif self.status == 4:
return "kicking ass"
elif self.status == 5:
self.gold += random.choice(range(0,20))
return "selling loot..."
elif self.status == 6:
return "killing"
elif self.status == 7:
return "butchering"
elif self.status == 8:
return "assassinating"
elif self.status == 9:
return "executing"
elif self.status == 10:
self.gold += random.choice(range(0,20))
return "selling loot..."
else:
return "deleting enemies from existence..."
def enchantment_string(self):
if self.enchantment == 0:
return "Weak Apathy"
elif self.enchantment == 1:
return "Apathy"
elif self.enchantment == 2:
return "Strong Apathy"
elif self.enchantment == 3:
return "Strength"
elif self.enchantment == 4:
return "Ironhide"
elif self.enchantment == 5:
return "Steelhide"
elif self.enchantment == 6:
return "Titaniumhide"
elif self.enchantment == 7:
return "Minor Deflect Spell"
elif self.enchantment == 8:
return "Deflect Spell"
elif self.enchantment == 9:
return "Major Deflect Spell"
elif self.enchantment == 10:
return "Minor Absorb Spell"
elif self.enchantment == 11:
return "Absorb Spell"
elif self.enchantment == 12:
return "Major Absorb Spell"
elif self.enchantment == 13:
return "Magical Drain"
elif self.enchantment == 14:
return "Vampiric Magical Drain"
elif self.enchantment == 15:
return "Howling Vampiric Magical Drain"
elif self.enchantment == 16:
return "Zombification"
elif self.enchantment == 17:
return "Resurrection"
elif self.enchantment == 18:
return "Holy Grail"
elif self.enchantment == 19:
return "Telepathy"
elif self.enchantment == 20:
return "Telekinesis"
elif self.enchantment == 21:
return "Clairvoyance"
elif self.enchantment == 22:
return "Pyrokenesis"
elif self.enchantment == 23:
return "Retrocognition"
elif self.enchantment == 24:
return "Psychic Link"
elif self.enchantment == 25:
return "Demonic Rage"
elif self.enchantment == 26:
return "Demonic Hunger"
elif self.enchantment == 27:
return "Soul Sucker"
elif self.enchantment == 28:
return "Soul Summoner"
elif self.enchantment == 29:
return "We are Legion"
elif self.enchantment == 30:
return "Weak Prescience"
elif self.enchantment == 31:
return "Prescience"
elif self.enchantment == 32:
return "Great Prescience"
else:
return "Magnificent Prescience"
def weapon_string(self):
if self.weapon == 0:
return "Stick"
elif self.weapon == 1:
return "Sharpened Stick"
elif self.weapon == 2:
return "Really Sharp Stick"
elif self.weapon == 3:
return "Blunt Spear"
elif self.weapon == 4:
return "Spear"
elif self.weapon == 5:
return "Sharp Spear"
elif self.weapon == 6:
return "Light Club"
elif self.weapon == 7:
return "Club"
elif self.weapon == 8:
return "Heavy Club"
elif self.weapon == 9:
return "Blunt and Light Axe"
elif self.weapon == 10:
return "Blunt Axe"
elif self.weapon == 11:
return "Heavy Blunt Axe"
elif self.weapon == 12:
return "Heavy Axe"
elif self.weapon == 13:
return "Heavy and Sharp Axe"
elif self.weapon == 14:
return "Light Dagger"
elif self.weapon == 15:
return "Dagger"
elif self.weapon == 16:
return "Sharp Dagger"
elif self.weapon == 17:
return "Light Sword"
elif self.weapon == 18:
return "Sword"
elif self.weapon == 19:
return "Sharp Sword"
elif self.weapon == 20:
return "Sharp Longsword"
elif self.weapon == 21:
return "Small Wand"
elif self.weapon == 22:
return "Wand"
elif self.weapon == 23:
return "Long Wand"
elif self.weapon == 24:
return "Short Magical Staff"
elif self.weapon == 25:
return "Magical Staff"
elif self.weapon == 26:
return "Large Magical Staff"
elif self.weapon == 27:
return "Short Mage's Staff"
elif self.weapon == 28:
return "Mage's Staff"
elif self.weapon == 29:
return "Large Mage's Staff"
elif self.weapon == 30:
return "Short Wizard's Staff"
elif self.weapon == 31:
return "Wizard's Staff"
elif self.weapon == 32:
return "Large Wizard's Staff"
elif self.weapon == 33:
return "Short Warlock's Staff"
elif self.weapon == 34:
return "Warlock's Staff"
elif self.weapon == 35:
return "Large Warlock's Staff"
elif self.weapon == 36:
return "Scroll of Fireball"
elif self.weapon == 37:
return "Scroll of Hurricane"
elif self.weapon == 38:
return "Scroll of Tornado"
elif self.weapon == 39:
return "Scroll of Boulder"
elif self.weapon == 40:
return "Scroll of Burning Tornado"
elif self.weapon == 41:
return "Scroll of Burning Tsunami"
elif self.weapon == 42:
return "Scroll of Earthquake"
elif self.weapon == 43:
return "Scroll of Volcano"
elif self.weapon == 44:
return "Scroll of Torture"
elif self.weapon == 45:
return "Scroll of Mind Control"
elif self.weapon == 46:
return "Scroll of Death"
elif self.weapon == 47:
return "Scroll of Zomibification"
elif self.weapon == 48:
return "Scroll of Summon Ghouls"
elif self.weapon == 49:
return "Scroll of Summon Deadpool"
elif self.weapon == 50:
return "Scroll of Summon Wyrm"
elif self.weapon == 51:
return "Scroll of Summon Fire Wyrm"
elif self.weapon == 52:
return "Scroll of Summon Imperial Paladin"
elif self.weapon == 53:
return "Scroll of Summon Necromancer"
elif self.weapon == 54:
return "Scroll of Summon Bat Swarm"
elif self.weapon == 55:
return "Scroll of Summon Death"
elif self.weapon == 56:
return "Shard of Infinity"
elif self.weapon == 57:
return "Infinity Stone"
elif self.weapon == 58:
return "Magic Ring"
elif self.weapon == 59:
return "One Ring"
elif self.weapon == 60:
return "Infinity Dagger"
elif self.weapon == 61:
return "Sands of Time"
elif self.weapon == 62:
return "Poseidon's Trident"
elif self.weapon == 63:
return "Hades' Fork"
elif self.weapon == 64:
return "Zeus' Lightning"
elif self.weapon == 65:
return "Deadpool's Fist"
elif self.weapon == 66:
return "Dragon Talon"
elif self.weapon == 67:
return "Insane Hobbit"
elif self.weapon == 68:
return "Ghostly Army"
elif self.weapon == 69:
return "Dead Army"
elif self.weapon == 70:
return "Damned Army"
elif self.weapon == 71:
return "Robot Damned Army"
elif self.weapon == 72:
return "Living Sword"
elif self.weapon == 73:
return "Infinity Gauntlet"
elif self.weapon == 74:
return "Hypercube"
elif self.weapon == 75:
return "Tesseract"
elif self.weapon == 76:
return "Penteract"
elif self.weapon == 77:
return "Hexeract"
elif self.weapon == 78:
return "Hepteract"
elif self.weapon == 79:
return "Octeract"
elif self.weapon == 80:
return "Enneract"
elif self.weapon == 81:
return "Dekeract"
elif self.weapon == 82:
return "N-Dimensional Cube"
elif self.weapon == 83:
return "N-Dimensional Space"
elif self.weapon == 84:
return "N-Dimensional Time"
elif self.weapon == 85:
return "2n-gonal Space-Time"
elif self.weapon == 86:
return "Dwarf Star"
elif self.weapon == 87:
return "White Dwarf Star"
elif self.weapon == 88:
return "Red Dwarf Star"
elif self.weapon == 89:
return "Black Hole"
elif self.weapon == 90:
return "Super Black Hole"
elif self.weapon == 91:
return "Super Massive Black Hole"
elif self.weapon == 92:
return "White Hole"
elif self.weapon == 93:
return "Hawking Radiation"
elif self.weapon == 94:
return "Singularity"
elif self.weapon == 95:
return "Mathematical Distortion Field"
elif self.weapon == 96:
return "Quantum Entanglement Field"
elif self.weapon == 97:
return "Ignore Physics Field"
elif self.weapon == 98:
return "Hyperdimensional Convolution Field"
elif self.weapon == 99:
return "Deadpool Science"
else:
return "Antiparticle Accelerator"
def level_string(self):
if self.level == 0:
return "Homeless Wanderer"
elif self.level == 1:
return "Hermit"
elif self.level == 2:
return "Pub Brawler"
elif self.level == 3:
return "Fighter"
elif self.level == 4:
return "Boxer"
elif self.level == 5:
return "Martial Artist"
elif self.level == 6:
return "Homeowner"
elif self.level == 7:
return "Homewrecker"
elif self.level == 8:
return "Bandit"
elif self.level == 9:
return "Scout"
elif self.level == 10:
return "Squire"
elif self.level == 11:
return "Archer"
elif self.level == 12:
return "Crossbowman"
elif self.level == 13:
return "Knight"
elif self.level == 14:
return "Paladin"
elif self.level == 15:
return "Imperial Scout"
elif self.level == 16:
return "Imperial Squire"
elif self.level == 17:
return "Imperial Archer"
elif self.level == 18:
return "Imperial Crossbowman"
elif self.level == 19:
return "Imperial Knight"
elif self.level == 20:
return "Imperial Paladin"
elif self.level == 21:
return "Samurai Scout"
elif self.level == 22:
return "Samurai Archer"
elif self.level == 23:
return "Samurai Squire"
elif self.level == 24:
return "Samurai Knight"
elif self.level == 25:
return "Samurai Paladin"
elif self.level == 26:
return "Manslayer Scout"
elif self.level == 27:
return "Manslayer Archer"
elif self.level == 28:
return "Manslayer Squire"
elif self.level == 29:
return "Manslayer Knight"
elif self.level == 30:
return "Manslayer Paladin"
elif self.level == 31:
return "True Manslayer"
elif self.level == 32:
return "Apprentice Mage"
elif self.level == 33:
return "Mage"
elif self.level == 34:
return "Wizard"
elif self.level == 35:
return "Warlock"
elif self.level == 36:
return "Imperial Mage"
elif self.level == 37:
return "Imperial Wizard"
elif self.level == 38:
return "Imperial Warlock"
elif self.level == 39:
return "Imperial Arch Mage"
elif self.level == 40:
return "Samurai Mage"
elif self.level == 41:
return "Samurai Wizard"
elif self.level == 42:
return "Samurai Warlock"
elif self.level == 43:
return "Samurai Arch Mage"
elif self.level == 44:
return "Vampire"
elif self.level == 45:
return "Werewolf"
elif self.level == 46:
return "Gnome"
elif self.level == 47:
return "Necromancer"
elif self.level == 48:
return "Blood Mancer"
elif self.level == 49:
return "Wyrm Hunter"
elif self.level == 50:
return "Adept Wyrm Hunter"
elif self.level == 51:
return "Intermediate Wyrm Hunter"
elif self.level == 52:
return "Expert Wyrm Hunter"
elif self.level == 53:
return "Dragonling Hunter"
elif self.level == 54:
return "Adept Dragonling Hunter"
elif self.level == 55:
return "Intermediate Dragonling Hunter"
elif self.level == 56:
return "Expert Dragonling Hunter"
elif self.level == 57:
return "Grimm"
elif self.level == 58:
return "Hansel"
elif self.level == 59:
return "Witchling"
elif self.level == 60:
return "Swamp Hag"
elif self.level == 61:
return "Witch"
elif self.level == 62:
return "Undead"
elif self.level == 63:
return "Resurrected"
elif self.level == 64:
return "Demon"
elif self.level == 65:
return "Angel"
elif self.level == 66:
return "Dampyre"
elif self.level == 67:
return "Alien"
elif self.level == 68:
return "Rich guy dressing up like a nightmare"
elif self.level == 69:
return "Deadpool"
elif self.level == 70:
return "Time Traveller"
elif self.level == 71:
return "Crazy man, with a box"
elif self.level == 72:
return "Crazy man, with a box, with a new face"
elif self.level == 73:
return "Universal Acolyte"
elif self.level == 74:
return "Universal Monk"
elif self.level == 75:
return "Universal Preacher"
elif self.level == 76:
return "Miracle Worker"
elif self.level == 77:
return "Saint"
elif self.level == 78:
return "Ascendant Saint"
elif self.level == 79:
return "Transcendant Saint"
elif self.level == 80:
return "Demigod"
elif self.level == 81:
return "Deified Demigod"
elif self.level == 82:
return "Deity"
elif self.level == 83:
return "Demiurge"
elif self.level == 84:
return "God"
elif self.level == 85:
return "Forbidden God"
elif self.level == 86:
return "Snake God"
elif self.level == 87:
return "Cursed God"
elif self.level == 88:
return "Vengeful God"
elif self.level == 89:
return "Merciful God"
elif self.level == 90:
return "Righteous God"
elif self.level == 91:
return "Massless Particle"
elif self.level == 92:
return "Antiparticle"
elif self.level == 93:
return "Antimatter"
elif self.level == 94:
return "Metamatter"
elif self.level == 95:
return "Black Hole"
elif self.level == 96:
return "Collapsing Black Hole"
elif self.level == 97:
return "Small Singularity"
elif self.level == 98:
return "Expanding Singularity"
elif self.level == 99:
return "Existential Singularity"
else:
return "Beyond the Existential Singularity"
def armor_string(self):
if self.armor == 0:
return "Sackcloth"
elif self.armor == 1:
return "Hide"
elif self.armor == 2:
return "Leather"
elif self.armor == 3:
return "Crab Leather"
elif self.armor == 4:
return "Studded Leather"
elif self.armor == 5:
return "Bull Leather"
elif self.armor == 6:
return "Notched Leather"
elif self.armor == 7:
return "Fur Armor"
elif self.armor == 8:
return "Bandit"
elif self.armor == 9:
return "Squire"
elif self.armor == 10:
return "Knight"
elif self.armor == 11:
return "Imperial Scout"
elif self.armor == 12:
return "Imperial Archer"
elif self.armor == 13:
return "Imperial Squire"
elif self.armor == 14:
return "Imperial Knight"
elif self.armor == 15:
return "Paladin"
elif self.armor == 16:
return "Imperial Paladin"
elif self.armor == 17:
return "Samurai Scout"
elif self.armor == 18:
return "Samurai Archer"
elif self.armor == 19:
return "Samurai Squire"
elif self.armor == 20:
return "Samurai Knight"
elif self.armor == 21:
return "Samurai Paladin"
elif self.armor == 22:
return "Manslayer Scout"
elif self.armor == 23:
return "Manslayer Archer"
elif self.armor == 24:
return "Manslayer Squire"
elif self.armor == 25:
return "Manslayer Knight"
elif self.armor == 26:
return "Manslayer Paladin"
elif self.armor == 27:
return "True Manslayer"
elif self.armor == 28:
return "Platinum"
elif self.armor == 29:
return "Studded Platinum"
elif self.armor == 30:
return "Platinum Scout"
elif self.armor == 31:
return "Platinum Archer"
elif self.armor == 32:
return "Platinum Squire"
elif self.armor == 33:
return "Platinum Knight"
elif self.armor == 34:
return "Platinum Paladin"
elif self.armor == 35:
return "Assassin"
elif self.armor == 36:
return "Adept Assassin"
elif self.armor == 37:
return "Intermediate Assassin"
elif self.armor == 38:
return "Expert Assassin"
elif self.armor == 39:
return "Invisibility"
elif self.armor == 40:
return "Hardened Invisibility"
elif self.armor == 41:
return "Crystalised Invisibility"
elif self.armor == 42:
return "Fire-proof Invisibility"
elif self.armor == 43:
return "Spell-proof Invisibility"
elif self.armor == 44:
return "Apprentice Mage"
elif self.armor == 45:
return "Mage"
elif self.armor == 46:
return "Wizard"
elif self.armor == 47:
return "Warlock"
elif self.armor == 48:
return "Imperial Mage"
elif self.armor == 49:
return "Imperial Wizard"
elif self.armor == 50:
return "Imperial Warlock"
elif self.armor == 51:
return "Imperial Arch Mage"
elif self.armor == 52:
return "Samurai Mage"
elif self.armor == 53:
return "Samurai Wizard"
elif self.armor == 54:
return "Samurai Warlock"
elif self.armor == 55:
return "Samurai Arch Mage"
elif self.armor == 56:
return "Forbidden"
elif self.armor == 57:
return "Human Skin"
elif self.armor == 58:
return "Werewolf Skin"
elif self.armor == 59:
return "Vampire Skin"
elif self.armor == 60:
return "Gnome Skin"
elif self.armor == 61:
return "Necromantic"
elif self.armor == 62:
return "Uncursed Necromantic"
elif self.armor == 63:
return "Blood Mancer's"
elif self.armor == 64:
return "Werewolf Blood"
elif self.armor == 65:
return "Vampire Blood"
elif self.armor == 66:
return "Gnome Blood"
elif self.armor == 67:
return "Wyrm Skin"
elif self.armor == 68:
return "Wyrm Scale"
elif self.armor == 69:
return "Water Wyrm Skin"
elif self.armor == 70:
return "Water Wyrm Scale"
elif self.armor == 71:
return "Wind Wyrm Skin"
elif self.armor == 72:
return "Wind Wyrm Scale"
elif self.armor == 73:
return "Earth Wyrm Skin"
elif self.armor == 74:
return "Earth Wyrm Scale"
elif self.armor == 75:
return "Fire Wyrm Skin"
elif self.armor == 76:
return "Fire Wyrm Scale"
elif self.armor == 77:
return "Elemental Wyrm Skin"
elif self.armor == 78:
return "Elemental Wyrm Scale"
elif self.armor == 79:
return "Dragon Tongue"
elif self.armor == 80:
return "Dragon Skin"
elif self.armor == 81:
return "Dragon Scale"
elif self.armor == 82:
return "Imperial Dragon Tongue"
elif self.armor == 83:
return "Imperial Dragon Skin"
elif self.armor == 84:
return "Imperial Dragon Scale"
elif self.armor == 85:
return "Samurai Dragon Tongue"
elif self.armor == 86:
return "Samurai Dragon Skin"
elif self.armor == 87:
return "Samurai Dragon Scale"
elif self.armor == 88:
return "Dragonslayer"
elif self.armor == 89:
return "Dragonslayer Scout"
elif self.armor == 90:
return "Dragonslayer Archer"
elif self.armor == 91:
return "Dragonslayer Squire"
elif self.armor == 92:
return "Dragonslayer Knight"
elif self.armor == 93:
return "Dragonslayer Mage"
elif self.armor == 94:
return "Dragonslayer Wizard"
elif self.armor == 95:
return "Dragonslayer Warlock"
elif self.armor == 96:
return "Dragonslayer Arch Mage"
elif self.armor == 97:
return "Dragonslayer Arch Wizard"
elif self.armor == 98:
return "Dragonslayer Arch Warlock"
elif self.armor == 99:
return "Dragonslayer Paladin"
else:
return "Dragonslayer Arch Paladin"
if __name__ == "__main__":
clear()
print("""
DRAGONS
&
Heroes
""")
time.sleep(2)
clear()
player = Player()
if player.load_from_file():
pass
else:
player.start_game()
while True:
player.verify()
player.print_stats()
player.event()
player.save_to_file()
time.sleep(2)
clear()
|
{
"content_hash": "f0fcb11ec2dc019ce651be8c08845885",
"timestamp": "",
"source": "github",
"line_count": 1105,
"max_line_length": 461,
"avg_line_length": 34.335746606334844,
"alnum_prop": 0.51334967449461,
"repo_name": "shakna-israel/dragonsandheroes",
"id": "6d0ee8a4c860e769fbd82ac61bb3578189685195",
"size": "37941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfPlayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40875"
}
],
"symlink_target": ""
}
|
import os
import time
import struct
import time
from datetime import datetime, date, timedelta
from kbutils import KBCapabilities, makeFCS
from GoodFETCCSPI import GoodFETCCSPI
class TELOSB:
def __init__(self, dev):
'''
Instantiates the KillerBee class for our TelosB/TmoteSky running GoodFET firmware.
@type dev: String
@param dev: Serial device identifier (ex /dev/ttyUSB0)
@return: None
@rtype: None
'''
self._channel = None
self.handle = None
self.dev = dev
os.environ["platform"] = "telosb" #set enviroment variable for GoodFET code to use
self.handle = GoodFETCCSPI()
self.handle.serInit(port=self.dev)
self.handle.setup()
self.__stream_open = False
self.capabilities = KBCapabilities()
self.__set_capabilities()
def close(self):
self.handle.serClose()
self.handle = None
def check_capability(self, capab):
return self.capabilities.check(capab)
def get_capabilities(self):
return self.capabilities.getlist()
def __set_capabilities(self):
'''
Sets the capability information appropriate for GoodFETCCSPI client and firmware.
@rtype: None
@return: None
'''
self.capabilities.setcapab(KBCapabilities.SNIFF, True)
self.capabilities.setcapab(KBCapabilities.SETCHAN, True)
self.capabilities.setcapab(KBCapabilities.INJECT, True)
self.capabilities.setcapab(KBCapabilities.PHYJAM_REFLEX, True)
return
# KillerBee expects the driver to implement this function
#def get_dev_info(self, dev, bus):
def get_dev_info(self):
'''
Returns device information in a list identifying the device.
@rtype: List
@return: List of 3 strings identifying device.
'''
return [self.dev, "TelosB/Tmote", ""]
# KillerBee expects the driver to implement this function
def sniffer_on(self, channel=None):
'''
Turns the sniffer on such that pnext() will start returning observed
data. Will set the command mode to Air Capture if it is not already
set.
@type channel: Integer
@param channel: Sets the channel, optional
@rtype: None
'''
self.capabilities.require(KBCapabilities.SNIFF)
self.handle.RF_promiscuity(1);
self.handle.RF_autocrc(0);
if channel != None:
self.set_channel(channel)
self.handle.CC_RFST_RX();
#print "Sniffer started (listening as %010x on %i MHz)" % (self.handle.RF_getsmac(), self.handle.RF_getfreq()/10**6);
self.__stream_open = True
# KillerBee expects the driver to implement this function
def sniffer_off(self):
'''
Turns the sniffer off, freeing the hardware for other functions. It is
not necessary to call this function before closing the interface with
close().
@rtype: None
'''
#TODO actually have firmware stop sending us packets!
self.__stream_open = False
# KillerBee expects the driver to implement this function
def set_channel(self, channel):
'''
Sets the radio interface to the specifid channel (limited to 2.4 GHz channels 11-26)
@type channel: Integer
@param channel: Sets the channel, optional
@rtype: None
'''
self.capabilities.require(KBCapabilities.SETCHAN)
if channel >= 11 or channel <= 26:
self._channel = channel
self.handle.RF_setchan(channel)
else:
raise Exception('Invalid channel')
# KillerBee expects the driver to implement this function
def inject(self, packet, channel=None, count=1, delay=0):
'''
Injects the specified packet contents.
@type packet: String
@param packet: Packet contents to transmit, without FCS.
@type channel: Integer
@param channel: Sets the channel, optional
@type count: Integer
@param count: Transmits a specified number of frames, def=1
@type delay: Float
@param delay: Delay between each frame, def=1
@rtype: None
'''
self.capabilities.require(KBCapabilities.INJECT)
if len(packet) < 1:
raise Exception('Empty packet')
if len(packet) > 125: # 127 - 2 to accommodate FCS
raise Exception('Packet too long')
if channel != None:
self.set_channel(channel)
self.handle.RF_autocrc(1) #let radio add the CRC
for pnum in range(0, count):
gfready = [ord(x) for x in packet] #convert packet string to GoodFET expected integer format
gfready.insert(0, len(gfready)+2) #add a length that leaves room for CRC
self.handle.RF_txpacket(gfready)
time.sleep(1)
# KillerBee expects the driver to implement this function
def pnext(self, timeout=100):
'''
Returns packet data as a string, else None.
@type timeout: Integer
@param timeout: Timeout to wait for packet reception in usec
@rtype: List
@return: Returns None is timeout expires and no packet received. When a packet is received, a list is returned, in the form [ String: packet contents | Bool: Valid CRC | Int: Unscaled RSSI ]
'''
if self.__stream_open == False:
self.sniffer_on() #start sniffing
packet = None;
start = datetime.now()
while (packet is None and (start + timedelta(microseconds=timeout) > datetime.now())):
packet = self.handle.RF_rxpacket()
rssi = self.handle.RF_getrssi() #TODO calibrate
if packet is None:
return None
frame = packet[1:]
if frame[-2:] == makeFCS(frame[:-2]): validcrc = True
else: validcrc = False
#Return in a nicer dictionary format, so we don't have to reference by number indicies.
#Note that 0,1,2 indicies inserted twice for backwards compatibility.
result = {0:frame, 1:validcrc, 2:rssi, 'bytes':frame, 'validcrc':validcrc, 'rssi':rssi, 'location':None}
result['dbm'] = rssi - 45 #TODO tune specifically to the Tmote platform (does ext antenna need to different?)
result['datetime'] = datetime.combine(date.today(), (datetime.now()).time()) #TODO address timezones by going to UTC everywhere
return result
def ping(self, da, panid, sa, channel=None):
'''
Not yet implemented.
@return: None
@rtype: None
'''
raise Exception('Not yet implemented')
def jammer_on(self, channel=None):
'''
Not yet implemented.
@type channel: Integer
@param channel: Sets the channel, optional
@rtype: None
'''
self.capabilities.require(KBCapabilities.PHYJAM_REFLEX)
self.handle.RF_promiscuity(1)
self.handle.RF_autocrc(0)
if channel != None:
self.set_channel(channel)
self.handle.CC_RFST_RX()
self.handle.RF_reflexjam()
def jammer_off(self, channel=None):
'''
Not yet implemented.
@return: None
@rtype: None
'''
#TODO implement
raise Exception('Not yet implemented')
|
{
"content_hash": "5b8b078b6cd2fecd43870116e13239d2",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 199,
"avg_line_length": 35.87378640776699,
"alnum_prop": 0.6148849797023004,
"repo_name": "silicrax/killerbee",
"id": "e13069b632fc4264efab8ba2174794a5612cf6e6",
"size": "7390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "killerbee/dev_telosb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "16765"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "165791"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
}
|
"""The MIT License
Copyright (c) 2007 Nigel Brady
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. """
"""
This is script is a tool that can be used by servers to verify oAuth tokens.
To speed login flow, many mobile apps allow users to log in with their
Facebook/Google/Twitter accounts. The results of such logins are stable
User IDs that can be added to the database of your backend server.
However, how can your server know if the request is really coming from
a given User ID? You need to check the oAuth token coming from the mobile
application, and that's what this script does.
Facebook/Google:
facebook_token = "abc"
facebook_id = "123"
facebook_verifier = FacebookVerifier(facebook_token, facebook_id)
google_token = "def"
google_id = "456"
google_verifier = GoogleVerifier(google_token, google_id)
try:
facebook_verifier.verify()
google_verifier.verify()
except OAuthException as e:
#An exception is thrown if the oAuth token is invalid or doesn't belong to
#the provided user ID.
---
Twitter:
Twitter uses the oAuth 1.0 API which makes things more complicated. You'll need:
Your application's consumer key (keep it secret!)
Your application's consumer secret (keep it secret!)
Your user's oAuth token (get this from the mobile Twitter SDK)
your user's oAuth token secret (get this from the mobile Twitter SDK)
tw_token = "abc"
tw_token_secret = "def"
tw_id = "789"
tw_consumer_key = "foo"
tw_consumer_secret = "bar"
tw_verifier = verifier.TwitterVerifier(tw_token,
tw_id,
tw_consumer_key,
tw_consumer_secret,
tw_token_secret)
try:
tw_verifier.verify()
except OAuthException as e:
#An exception is thrown if the oAuth token is invalid or doesn't belong to
#the provided user ID.
"""
__author__ = 'Nigel Brady'
import urllib
import urllib2
import json
import oauth
class OAuthVerifier:
token = None
user_id = None
url = None
user_id_field = None
request = None
debug = False
def __init__(self, token, user_id, url, user_id_field="id", debug=False):
self.token = token
self.user_id = user_id
self.url = url
self.user_id_field = user_id_field
self.debug = debug
def verify(self):
if not self.token or not self.user_id:
raise Exception("You must provide a user ID and oAuth access token to proceed.")
params = {"access_token": self.token}
query_string = urllib.urlencode(params)
self.request = self.url + "?" + query_string
return self.execute_request()
def execute_request(self):
try:
result = urllib2.urlopen(self.request)
response = result.read()
result_dict = json.loads(response)
if self.debug:
print response
if self.user_id_field in result_dict and result_dict[self.user_id_field] == self.user_id:
return result_dict[self.user_id_field]
else:
raise OAuthException()
except urllib2.HTTPError as e:
if e.code == 401:
raise OAuthException("Authorization failed.")
elif e.code == 400:
raise OAuthException("Bad request. Auth token is likely invalid.")
else:
raise e
class OAuthException(Exception):
def __init__(self):
Exception.__init__(self, "Access token invalid or does not belong to the current user.")
def __init__(self, message):
Exception.__init__(self, message)
class FacebookVerifier(OAuthVerifier):
def __init__(self, token, user_id, debug=False):
OAuthVerifier.__init__(self,
token,
user_id,
"https://graph.facebook.com/me",
debug=debug)
class GoogleVerifier(OAuthVerifier):
def __init__(self, token, user_id, debug=False):
OAuthVerifier.__init__(self,
token,
user_id,
"https://www.googleapis.com/oauth2/v1/tokeninfo",
"user_id", debug=debug)
class TwitterVerifier(OAuthVerifier):
consumer_key = None
consumer_secret = None
token_secret = None
def __init__(self, token, user_id, consumer_key, consumer_secret, token_secret, debug=False):
OAuthVerifier.__init__(self,
token,
user_id,
"https://api.twitter.com/1.1/account/verify_credentials.json",
"id_str",
debug=debug)
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.token_secret = token_secret
def verify(self):
consumer = oauth.OAuthConsumer(self.consumer_key, self.consumer_secret)
signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
oauth_token = oauth.OAuthToken(self.token, self.token_secret)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
token=oauth_token,
http_method='GET',
http_url=self.url)
oauth_request.sign_request(signature_method_hmac_sha1, consumer, oauth_token)
headers = oauth_request.to_header()
self.request = urllib2.Request(self.url, headers=headers)
return self.execute_request()
|
{
"content_hash": "510a2f7ff9bece0eb4c2a4f395ecedc9",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 101,
"avg_line_length": 33.50990099009901,
"alnum_prop": 0.6142709410548087,
"repo_name": "nigelbrady/OAuthVerifier",
"id": "71544bc4533dc78684dbdc4111613e48ec844a0d",
"size": "6769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "verifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37019"
}
],
"symlink_target": ""
}
|
from .device import Device
from .device_manager import DeviceManager
from .group import DeviceGroup
|
{
"content_hash": "af73ed3a64b6202b22a7e31aa07f1656",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 33,
"alnum_prop": 0.8484848484848485,
"repo_name": "ttaanngg/petal",
"id": "e8acc9aeec34163a79b50758bcbd060a44e70906",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uniform_model/devices/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60368"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import uuid
from datetime import datetime, timedelta
from unittest.mock import patch, Mock
import yaml
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import GEOSGeometry, GeometryCollection, Polygon, Point, LineString
from django.core import serializers
from django.utils import timezone
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.serializers import ValidationError
from rest_framework.test import APITestCase
from eventkit_cloud.api.pagination import LinkHeaderPagination
from eventkit_cloud.api.views import get_models, get_provider_task, ExportRunViewSet
from eventkit_cloud.core.models import GroupPermission, GroupPermissionLevel, AttributeClass
from eventkit_cloud.jobs.admin import get_example_from_file
from eventkit_cloud.jobs.models import (
ExportFormat,
Job,
DataProvider,
DataProviderType,
DataProviderTask,
Region,
RegionalJustification,
RegionalPolicy,
bbox_to_geojson,
DatamodelPreset,
License,
VisibilityState,
UserJobActivity,
)
from eventkit_cloud.tasks.enumerations import TaskState
from eventkit_cloud.tasks.models import (
DataProviderTaskRecord,
ExportRun,
ExportTaskRecord,
FileProducingTaskResult,
RunZipFile,
)
from eventkit_cloud.tasks.task_factory import InvalidLicense
from eventkit_cloud.user_requests.models import DataProviderRequest, SizeIncreaseRequest
logger = logging.getLogger(__name__)
def add_max_data_size(provider, max_data_size):
config = provider.config
config = yaml.load(config)
config["max_data_size"] = max_data_size
provider.config = yaml.dump(config)
provider.save()
class TestJobViewSet(APITestCase):
fixtures = (
"osm_provider.json",
"datamodel_presets.json",
)
def __init__(self, *args, **kwargs):
super(TestJobViewSet, self).__init__(*args, **kwargs)
self.path = None
self.group = None
self.user = None
self.job = None
self.client = None
self.config = None
self.tags = None
@patch("eventkit_cloud.api.views.get_estimate_cache_key")
@patch("eventkit_cloud.api.views.cache")
def setUp(self, cache_mock, get_estimate_cache_key_mock):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.attribute_class = AttributeClass.objects.create(name="test", slug="test", filter="username=demo")
self.attribute_class.users.add(self.user)
self.attribute_class.save()
extents = (-3.9, 16.1, 7.0, 27.6)
bbox = Polygon.from_bbox(extents)
original_selection = GeometryCollection(Point(1, 1), LineString((5.625, 48.458), (0.878, 44.339)))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(
name="TestJob",
event="Test Activation",
description="Test description",
user=self.user,
the_geom=the_geom,
original_selection=original_selection,
)
formats = ExportFormat.objects.all()
self.provider = DataProvider.objects.first()
self.provider.attribute_class = self.attribute_class
self.provider.save()
provider_task = DataProviderTask.objects.create(provider=self.provider, job=self.job)
provider_task.formats.add(*formats)
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
hdm_presets = DatamodelPreset.objects.get(name="hdm")
self.job.preset = hdm_presets
self.job.save()
self.tags = [
{
"name": "Telecommunication office",
"key": "office",
"value": "telecommunication",
"geom": ["point", "polygon"],
},
{"name": "Radio or TV Studio", "key": "amenity", "value": "studio", "geom": ["point", "polygon"]},
{"name": "Telecommunication antenna", "key": "man_made", "value": "tower", "geom": ["point", "polygon"]},
{
"name": "Telecommunication company retail office",
"key": "office",
"value": "telecommunication",
"geom": ["point", "polygon"],
},
]
self.estimate_size = 110
self.estimate_time = 200
cache_mock.get.return_value = [self.estimate_size, self.estimate_time]
self.cache_key = "222222222222222"
get_estimate_cache_key_mock.return_value = self.cache_key
def test_list(self):
expected = "/api/jobs"
url = reverse("api:jobs-list")
self.assertEqual(expected, url)
@patch("eventkit_cloud.api.views.pick_up_run_task")
@patch("eventkit_cloud.api.validators.get_area_in_sqkm")
def test_make_job_with_export_providers(self, mock_get_area, mock_pickup):
"""tests job creation with export providers"""
mock_get_area.return_value = 16
export_providers = DataProvider.objects.all()
export_providers_start_len = len(export_providers)
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"provider_tasks": [{"provider": "test", "formats": formats, "min_zoom": 3, "max_zoom": 6}],
"export_providers": [
{
"name": "test",
"level_from": 0,
"level_to": 1,
"url": "http://coolproviderurl.test",
"preview_url": "http://coolproviderurl.test",
}
],
"user": serializers.serialize("json", [self.user]),
"preset": self.job.preset.id,
}
url = reverse("api:jobs-list")
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
response = response.json()
export_providers = DataProvider.objects.all()
# Test that the provider task was created with custom zoom levels.
provider_task = DataProviderTask.objects.last()
self.assertEqual(provider_task.min_zoom, request_data["provider_tasks"][0]["min_zoom"])
self.assertEqual(provider_task.max_zoom, request_data["provider_tasks"][0]["max_zoom"])
self.assertEqual(len(export_providers), export_providers_start_len + 1)
self.assertEqual(response["exports"][0]["provider"], "test")
mock_get_area.assert_called_once()
request_data["export_providers"][0]["name"] = "test 2"
# should be idempotent
self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
export_providers = DataProvider.objects.all()
self.assertEqual(len(export_providers), export_providers_start_len + 1)
with self.settings(CELERY_SCALE_BY_RUN=False):
self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
mock_pickup.assert_called_once()
def test_get_job_detail(self):
expected = "/api/jobs/{0}".format(self.job.uid)
url = reverse("api:jobs-detail", args=[self.job.uid])
self.assertEqual(expected, url)
data = {
"uid": str(self.job.uid),
"name": "TestJob",
"url": "http://testserver{0}".format(url),
"description": "Test Description",
"exports": [
{
"provider": "osm-generic",
"formats": [
{
"uid": "8611792d-3d99-4c8f-a213-787bc7f3066",
"url": "http://testserver/api/formats/gpkg",
"name": "Geopackage",
"description": "Geopackage",
}
],
}
],
"created_at": "2015-05-21T19:46:37.163749Z",
"updated_at": "2015-05-21T19:46:47.207111Z",
"status": "SUCCESS",
}
response = self.client.get(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
# test significant content
self.assertEqual(response.data["uid"], data["uid"])
self.assertEqual(response.data["url"], data["url"])
self.assertEqual(response.data["exports"][0]["formats"][0]["url"], data["exports"][0]["formats"][0]["url"])
def test_get_job_detail_no_attribute_class(self):
self.attribute_class.users.remove(self.user)
expected = "/api/jobs/{0}".format(self.job.uid)
url = reverse("api:jobs-detail", args=[self.job.uid])
self.assertEqual(expected, url)
data = {"uid": str(self.job.uid), "provider_task_list_status": "EMPTY", "provider_tasks": []}
response = self.client.get(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_200_OK)
# test significant content
self.assertEqual(response.data["provider_task_list_status"], data["provider_task_list_status"])
self.assertEqual(response.data["provider_tasks"], data["provider_tasks"])
def test_get_job_detail_no_permissions(self):
user = User.objects.create_user(username="demo2", email="demo2@demo.com", password="demo")
token = Token.objects.create(user=user)
# reset the client credentials to the new user
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
expected = "/api/jobs/{0}".format(self.job.uid)
url = reverse("api:jobs-detail", args=[self.job.uid])
self.assertEqual(expected, url)
response = self.client.get(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
# test significant content
self.assertIsNotNone(response.data["errors"][0]["detail"])
def test_delete_job(self):
url = reverse("api:jobs-detail", args=[self.job.uid])
response = self.client.delete(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response["Content-Length"], "0")
self.assertEqual(response["Content-Language"], "en")
def test_delete_job_no_permission(self):
user = User.objects.create_user(username="demo2", email="demo2@demo.com", password="demo")
token = Token.objects.create(user=user)
# reset the client credentials to the new user
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
url = reverse("api:jobs-detail", args=[self.job.uid])
response = self.client.delete(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data["errors"][0]["detail"], "ADMIN permission is required to delete this job.")
@patch("eventkit_cloud.api.views.pick_up_run_task")
@patch("eventkit_cloud.api.views.create_run")
def test_create_zipfile(self, create_run_mock, mock_pickup):
bbox = (5, 16, 5.1, 16.1)
max_zoom = 17
min_zoom = 0
create_run_mock.return_value = "some_run_uid"
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson(bbox),
"provider_tasks": [
{"provider": self.provider.slug, "formats": formats, "min_zoom": min_zoom, "max_zoom": max_zoom}
],
"preset": self.job.preset.id,
"published": True,
"tags": self.tags,
"include_zipfile": True,
}
url = reverse("api:jobs-list")
response = self.client.post(url, request_data, format="json")
msg = "status_code {} != {}: {}".format(200, response.status_code, response.content)
self.assertEqual(202, response.status_code, msg)
job_uid = response.data["uid"]
job = Job.objects.get(uid=job_uid)
self.assertEqual(job.include_zipfile, True)
with self.settings(CELERY_SCALE_BY_RUN=False):
self.client.post(url, request_data, format="json")
expected_user_details = {"username": "demo", "is_superuser": False, "is_staff": False}
mock_pickup.assert_called_with(
run_uid="some_run_uid", user_details=expected_user_details, session_token=None
)
@patch("eventkit_cloud.api.views.pick_up_run_task")
@patch("eventkit_cloud.api.views.create_run")
def test_create_job_success(self, create_run_mock, mock_pickup):
bbox = (5, 16, 5.1, 16.1)
max_zoom = 17
min_zoom = 0
create_run_mock.return_value = "some_run_uid"
url = reverse("api:jobs-list")
logger.debug(url)
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson(bbox),
"provider_tasks": [
{"provider": self.provider.slug, "formats": formats, "min_zoom": min_zoom, "max_zoom": max_zoom}
],
"preset": self.job.preset.id,
"published": True,
"tags": self.tags,
"original_selection": {
"type": "FeatureCollection",
"features": [
{"type": "Feature", "geometry": {"type": "Point", "coordinates": [1, 1]}},
{
"type": "Feature",
"geometry": {"type": "LineString", "coordinates": [[5.625, 48.458], [0.878, 44.339]]},
},
],
},
}
response = self.client.post(url, request_data, format="json")
job_uid = response.data["uid"]
job = Job.objects.get(uid=job_uid)
# test that the mock methods get called.
create_run_mock.assert_called_once_with(job=job, user=self.user)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
# test significant response content
self.assertEqual(
response.data["exports"][0]["formats"][0]["slug"], request_data["provider_tasks"][0]["formats"][0]
)
self.assertEqual(
response.data["exports"][0]["formats"][1]["slug"], request_data["provider_tasks"][0]["formats"][1]
)
self.assertEqual(response.data["name"], request_data["name"])
self.assertEqual(response.data["description"], request_data["description"])
self.assertEqual(response.data["original_selection"], request_data["original_selection"])
self.assertTrue(response.data["published"])
# check we have the correct tags
job = Job.objects.get(uid=job_uid)
self.assertIsNotNone(job.preset.json_tags)
self.assertEqual(259, len(job.preset.json_tags))
with self.settings(CELERY_SCALE_BY_RUN=False):
self.client.post(url, request_data, format="json")
expected_user_details = {"username": "demo", "is_superuser": False, "is_staff": False}
mock_pickup.assert_called_once_with(
run_uid="some_run_uid", user_details=expected_user_details, session_token=None
)
@patch("eventkit_cloud.api.views.pick_up_run_task")
@patch("eventkit_cloud.api.views.create_run")
def test_create_job_with_config_success(self, create_run_mock, mock_pickup):
bbox = (5, 16, 5.1, 16.1)
max_zoom = 17
min_zoom = 0
create_run_mock.return_value = "some_run_uid"
url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson(bbox),
"provider_tasks": [
{"provider": self.provider.slug, "formats": formats, "min_zoom": min_zoom, "max_zoom": max_zoom}
],
"preset": self.job.preset.id,
"transform": "",
"translation": "",
}
response = self.client.post(url, request_data, format="json")
job_uid = response.data["uid"]
job = Job.objects.get(uid=job_uid)
# test that the mock methods get called.
create_run_mock.assert_called_once_with(job=job, user=self.user)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
# test significant response content
self.assertEqual(
response.data["exports"][0]["formats"][0]["slug"], request_data["provider_tasks"][0]["formats"][0]
)
self.assertEqual(
response.data["exports"][0]["formats"][1]["slug"], request_data["provider_tasks"][0]["formats"][1]
)
self.assertEqual(response.data["name"], request_data["name"])
self.assertEqual(response.data["description"], request_data["description"])
self.assertFalse(response.data["published"])
self.assertEqual(259, len(self.job.preset.json_tags))
with self.settings(CELERY_SCALE_BY_RUN=False):
self.client.post(url, request_data, format="json")
expected_user_details = {"username": "demo", "is_superuser": False, "is_staff": False}
mock_pickup.assert_called_once_with(
run_uid="some_run_uid", user_details=expected_user_details, session_token=None
)
@patch("eventkit_cloud.api.views.pick_up_run_task")
@patch("eventkit_cloud.api.views.create_run")
def test_create_job_with_tags(self, create_run_mock, mock_pickup):
bbox = (5, 16, 5.1, 16.1)
max_zoom = 17
min_zoom = 0
create_run_mock.return_value = "some_run_uid"
url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson(bbox),
"provider_tasks": [
{"provider": "osm-generic", "formats": formats, "min_zoom": min_zoom, "max_zoom": max_zoom}
],
"preset": self.job.preset.id,
"transform": "",
"translate": "",
"tags": self.tags,
}
response = self.client.post(url, request_data, format="json")
job_uid = response.data["uid"]
job = Job.objects.get(uid=job_uid)
# test that the mock methods get called.
create_run_mock.assert_called_once_with(job=job, user=self.user)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
# test significant response content
self.assertEqual(
response.data["exports"][0]["formats"][0]["slug"], request_data["provider_tasks"][0]["formats"][0]
)
self.assertEqual(
response.data["exports"][0]["formats"][1]["slug"], request_data["provider_tasks"][0]["formats"][1]
)
self.assertEqual(response.data["name"], request_data["name"])
self.assertEqual(response.data["description"], request_data["description"])
with self.settings(CELERY_SCALE_BY_RUN=False):
self.client.post(url, request_data, format="json")
expected_user_details = {"username": "demo", "is_superuser": False, "is_staff": False}
mock_pickup.assert_called_once_with(
run_uid="some_run_uid", user_details=expected_user_details, session_token=None
)
def test_invalid_selection(self):
url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": {},
"preset": self.job.preset.id,
"provider_tasks": [{"provider": "osm-generic", "formats": formats}],
}
response = self.client.post(url, request_data, format="json")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual("No Geometry", response.data["errors"][0]["title"])
def test_empty_string_param(self):
url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "", # empty
"event": "Test Activation",
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"formats": formats,
}
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertIsNotNone(response.data["errors"][0]["title"])
def test_string_too_long_param(self):
url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
name = "x" * 300
request_data = {
"name": name,
"description": "Test description",
"event": "Test event",
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"provider_tasks": [{"provider": "osm-generic", "formats": formats}],
}
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual("Max Length", response.data["errors"][0]["title"])
self.assertEqual(
"name: Ensure this field has no more than 100 characters.", response.data["errors"][0]["detail"]
)
def test_missing_format_param(self):
url = reverse("api:jobs-list")
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"provider_tasks": [{"provider": "osm-generic"}], # 'formats': formats}]# missing
}
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertIsNotNone(response.data["errors"][0]["title"])
def test_invalid_format_param(self):
url = reverse("api:jobs-list")
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"provider_tasks": [{"provider": "osm-generic", "formats": ""}], # invalid
}
response = self.client.post(url, request_data, format="json")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertIsNotNone(response.data.get("errors")[0]["title"])
def test_no_matching_format_slug(self):
url = reverse("api:jobs-list")
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"provider_tasks": [{"provider": "osm-generic", "formats": ["broken-format-one", "broken-format-two"]}],
}
response = self.client.post(url, request_data, format="json")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertIsNotNone(response.data["errors"][0]["detail"])
def test_extents_too_large(self):
url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
# job outside any region
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
# Full world (WGS84) bounds are invalid in 3857 -- subtract 10 from lat as we simply need
# a very large bounding box.
"selection": bbox_to_geojson([-180, -80, 180, 80]),
"provider_tasks": [{"provider": "osm-generic", "formats": formats}],
}
with self.settings(JOB_MAX_EXTENT=100000):
response = self.client.post(
url, data=json.dumps(request_data), content_type="application/json; version=1.0"
)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual("Invalid Extents", response.data["errors"][0]["title"])
@patch("eventkit_cloud.api.views.get_estimate_cache_key")
@patch("eventkit_cloud.api.views.cache")
def test_extents_too_large_for_max_data_size(self, cache_mock, get_estimate_cache_key_mock):
self.estimate_size = 210
self.estimate_time = 20
cache_mock.get.return_value = [self.estimate_size, self.estimate_time]
cache_key = "1.22222222222222"
get_estimate_cache_key_mock.return_value = cache_key
bbox = (5, 16, 5.1, 16.1)
srs = "4326"
max_zoom = 17
min_zoom = 0
slug = "osm"
excessive_data_size = 200
add_max_data_size(self.provider, excessive_data_size)
job_url = reverse("api:jobs-list")
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson(bbox),
"provider_tasks": [
{"provider": self.provider.slug, "formats": formats, "min_zoom": min_zoom, "max_zoom": max_zoom}
],
}
response = self.client.post(
job_url, data=json.dumps(request_data), content_type="application/json; version=1.0"
)
cache_mock.get.assert_called_with(cache_key, (None, None))
get_estimate_cache_key_mock.called_once_with(bbox, srs, min_zoom, max_zoom, slug)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual("Estimated size too large", response.data["errors"][0]["title"])
def test_patch(self):
expected = "/api/jobs/{0}".format(self.job.uid)
url = reverse("api:jobs-detail", args=[self.job.uid])
self.assertEqual(expected, url)
request_data = {"published": False}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertIsNotNone(response.data["published"])
self.assertTrue(response.data["success"])
request_data = {"featured": True}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertIsNotNone(response.data["featured"])
self.assertTrue(response.data["success"])
request_data = {"featured": True, "published": False, "visibility": VisibilityState.SHARED.value}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertIsNotNone(response.data["featured"])
self.assertIsNotNone(response.data["published"])
self.assertIsNotNone(response.data["visibility"])
self.assertTrue(response.data["success"])
class TestBBoxSearch(APITestCase):
"""
Test cases for testing bounding box searches.
"""
fixtures = ("osm_provider.json", "datamodel_presets.json")
def __init__(self, *args, **kwargs):
super(TestBBoxSearch, self).__init__(*args, **kwargs)
self.user = None
self.client = None
@patch("eventkit_cloud.api.views.pick_up_run_task")
@patch("eventkit_cloud.api.views.create_run")
def setUp(self, create_run_mock, mock_pickup):
create_run_mock.return_value = "some_run_uid"
url = reverse("api:jobs-list")
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo", is_superuser=True)
# setup token authentication
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
# pull out the formats
formats = [export_format.slug for export_format in ExportFormat.objects.all()]
# create test jobs
extents = [
(-3.9, 16.1, 7.0, 27.6),
(36.90, 13.54, 48.52, 20.24),
(-71.79, -49.57, -67.14, -46.16),
(-61.27, -6.49, -56.20, -2.25),
(-11.61, 32.07, -6.42, 36.31),
(-10.66, 5.81, -2.45, 11.83),
(47.26, 34.58, 52.92, 39.15),
(90.00, 11.28, 95.74, 17.02),
]
for extent in extents:
request_data = {
"name": "TestJob",
"description": "Test description",
"event": "Test Activation",
"selection": bbox_to_geojson(extent),
"provider_tasks": [{"provider": "osm-generic", "formats": formats}],
}
response = self.client.post(url, request_data, format="json")
self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code, response.content)
with self.settings(CELERY_SCALE_BY_RUN=False):
self.client.post(url, request_data, format="json")
expected_user_details = {"username": "demo", "is_superuser": True, "is_staff": False}
mock_pickup.assert_called_with(
run_uid="some_run_uid", user_details=expected_user_details, session_token=None
)
self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code, response.content)
self.assertEqual(16, len(Job.objects.all()))
LinkHeaderPagination.page_size = 2
def test_bbox_search_success(self):
url = reverse("api:jobs-list")
extent = (-79.5, -16.16, 7.40, 52.44)
param = "bbox={0},{1},{2},{3}".format(extent[0], extent[1], extent[2], extent[3])
response = self.client.get("{0}?{1}".format(url, param))
self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
self.assertEqual(2, len(response.data)) # 8 jobs in total but response is paginated
def test_list_jobs_no_bbox(self):
url = reverse("api:jobs-list")
response = self.client.get(url)
self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual(response["Link"], '<http://testserver/api/jobs?page=2>; rel="next"')
self.assertEqual(2, len(response.data)) # 8 jobs in total but response is paginated
def test_bbox_search_missing_params(self):
url = reverse("api:jobs-list")
param = "bbox=" # missing params
response = self.client.get("{0}?{1}".format(url, param))
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual("Missing Bbox Parameter", response.data["errors"][0]["title"])
def test_bbox_missing_coord(self):
url = reverse("api:jobs-list")
extent = (-79.5, -16.16, 7.40) # one missing
param = "bbox={0},{1},{2}".format(extent[0], extent[1], extent[2])
response = self.client.get("{0}?{1}".format(url, param))
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual("Missing Bbox Parameter", response.data["errors"][0]["title"])
class TestPagination(APITestCase):
pass
class TestExportRunViewSet(APITestCase):
"""
Test cases for ExportRunViewSet
"""
fixtures = (
"osm_provider.json",
"datamodel_presets.json",
)
def __init__(self, *args, **kwargs):
super(TestExportRunViewSet, self).__init__(*args, **kwargs)
self.user = None
self.client = None
self.job = None
self.job_uid = None
self.export_run = None
self.run_uid = None
def setUp(self):
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.attribute_class = AttributeClass.objects.create(name="test", slug="test", filter="username=demo")
self.attribute_class.users.add(self.user)
self.attribute_class.save()
self.token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + self.token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
extents = (-3.9, 16.1, 7.0, 27.6)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name="TestJob", description="Test description", user=self.user, the_geom=the_geom)
formats = ExportFormat.objects.all()
self.provider = DataProvider.objects.first()
self.provider.attribute_class = self.attribute_class
self.provider.save()
provider_task = DataProviderTask.objects.create(job=self.job, provider=self.provider)
provider_task.formats.add(*formats)
self.job.visibility = VisibilityState.PUBLIC.value
self.job.save()
self.job_uid = str(self.job.uid)
self.export_run = ExportRun.objects.create(job=self.job, user=self.user)
self.run_uid = str(self.export_run.uid)
def test_patch(self):
url = reverse("api:runs-detail", args=[self.export_run.uid])
today = datetime.today()
max_days = int(getattr(settings, "MAX_DATAPACK_EXPIRATION_DAYS", 30))
ok_expiration = today + timedelta(max_days - 1)
request_data = {"expiration": ok_expiration.isoformat()}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertIsNotNone(response.data["expiration"])
self.assertTrue(response.data["success"])
not_ok_expiration = ok_expiration - timedelta(1)
request_data = {"expiration": not_ok_expiration.isoformat()}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertFalse(response.data["success"])
not_ok_expiration = today + timedelta(max_days + 1)
request_data = {"expiration": not_ok_expiration.isoformat()}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertFalse(response.data["success"])
request_data = {"exploration": ok_expiration.isoformat()}
response = self.client.patch(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
run = ExportRun.objects.get(uid=self.export_run.uid)
self.assertEqual(ok_expiration, run.expiration.replace(tzinfo=None))
def test_delete_run(self):
url = reverse("api:runs-detail", args=[self.export_run.uid])
response = self.client.delete(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response["Content-Length"], "0")
self.assertEqual(response["Content-Language"], "en")
def test_retrieve_run(self):
expected = "/api/runs/{0}".format(self.run_uid)
url = reverse("api:runs-detail", args=[self.run_uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
result = response.data
# make sure we get the correct uid back out
self.assertEqual(self.run_uid, result[0].get("uid"))
self.assertEqual(response["content-type"], "application/json")
# Test geojson response via accept header.
self.client.credentials(
HTTP_AUTHORIZATION="Token " + self.token.key,
HTTP_ACCEPT="application/geo+json",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
response = self.client.get(url)
self.assertEqual(response["content-type"], "application/geo+json")
# Test geojson response via format parameter.
# Adding format as a kwarg here results in a url /api/runs/uid.geojson which eventkit isn't supporting.
url = f"{reverse('api:runs-detail', args=[self.run_uid])}?format=geojson"
expected = f"/api/runs/{self.run_uid}?format=geojson"
self.assertEqual(expected, url)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + self.token.key,
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
response = self.client.get(url)
self.assertEqual(response["content-type"], "application/geo+json")
def test_retrieve_run_no_attribute_class(self):
expected = "/api/runs/{0}".format(self.run_uid)
self.attribute_class.users.remove(self.user)
url = reverse("api:runs-detail", args=[self.run_uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
expected_result = {"provider_tasks": [], "provider_task_list_status": "EMPTY"}
result = response.data
# make sure we get the correct uid back out
self.assertEqual(self.run_uid, result[0].get("uid"))
self.assertEqual(expected_result["provider_tasks"], result[0]["provider_tasks"])
self.assertEqual(expected_result["provider_task_list_status"], result[0]["provider_task_list_status"])
@patch("eventkit_cloud.api.views.ExportRunViewSet.validate_licenses")
def test_retrieve_run_invalid_license(self, mock_validate_licenses):
expected = "/api/runs/{0}".format(self.run_uid)
mock_validate_licenses.side_effect = (InvalidLicense("no license"),)
url = reverse("api:runs-detail", args=[self.run_uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
result = response.data
self.assertTrue("InvalidLicense" in result["errors"][0].get("detail"))
self.assertEqual(response.status_code, 400)
@patch("eventkit_cloud.api.views.get_invalid_licenses")
def test_validate_licenses(self, mock_invalid_licenses):
queryset = Mock()
run = Mock(job=Mock(user=Mock(username="username")))
queryset.all.return_value = [run]
mock_invalid_licenses.return_value = ["license"]
with self.assertRaises(InvalidLicense):
ExportRunViewSet.validate_licenses(queryset)
mock_invalid_licenses.return_value = []
self.assertTrue(ExportRunViewSet.validate_licenses(queryset))
def test_list_runs(self):
expected = "/api/runs"
url = reverse("api:runs-list")
self.assertEqual(expected, url)
query = "{0}?job_uid={1}".format(url, self.job.uid)
response = self.client.get(query)
self.assertIsNotNone(response)
result = response.data
# make sure we get the correct uid back out
self.assertEqual(1, len(result))
self.assertEqual(self.run_uid, result[0].get("uid"))
@patch("eventkit_cloud.api.views.ExportRunViewSet.validate_licenses")
def test_list_runs_invalid_license(self, mock_validate_licenses):
from eventkit_cloud.tasks.task_factory import InvalidLicense
expected = "/api/runs"
url = reverse("api:runs-list")
mock_validate_licenses.side_effect = (InvalidLicense("no license"),)
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
result = response.data
self.assertTrue("InvalidLicense" in result["errors"][0].get("detail"))
self.assertEqual(response.status_code, 400)
def test_filter_runs(self):
expected = "/api/runs/filter"
url = reverse("api:runs-filter")
self.assertEqual(expected, url)
query = "{0}".format(url)
extents = (-4, 15, 8.0, 28)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
geojson = the_geom.geojson
response = self.client.post(query, {"geojson": "{}".format(geojson)})
self.assertIsNotNone(response)
result = response.data
# make sure we get a single run back
self.assertEqual(1, len(result))
self.assertEqual(self.run_uid, result[0].get("uid"))
extents = (-3, 16, 7.0, 27)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
geojson = the_geom.geojson
response = self.client.post(query, {"geojson": "{}".format(geojson)})
self.assertIsNotNone(response)
result = response.data
# make sure 1 run is returned as it should be completely contained
self.assertEqual(1, len(result))
extents = (4, 17, 9.0, 28)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
geojson = the_geom.geojson
response = self.client.post(query, {"geojson": "{}".format(geojson)})
self.assertIsNotNone(response)
result = response.data
# make sure 1 run is returned as it should intersect
self.assertEqual(1, len(result))
extents = (-40, -5, -30, 5)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
geojson = the_geom.geojson
response = self.client.post(query, {"geojson": "{}".format(geojson)})
self.assertIsNotNone(response)
result = response.data
# make sure no runs are returned since it should not intersect
self.assertEqual(0, len(result))
name = "TestJob"
query = "{0}?search_term={1}".format(url, name)
response = self.client.get(query)
self.assertIsNotNone(response)
result = response.data
# make sure we get a single run back
self.assertEqual(1, len(result))
self.assertEqual(name, result[0].get("job").get("name"))
name = "NotFound"
query = "{0}?search_term={1}".format(url, name)
response = self.client.get(query)
self.assertIsNotNone(response)
result = response.data
# make sure no runs are returned as they should have been filtered out
self.assertEqual(0, len(result))
@patch("eventkit_cloud.api.views.rerun_data_provider_records")
@patch("eventkit_cloud.api.views.check_job_permissions")
def test_rerun_providers(self, mock_check_job_permissions, mock_rerun_records):
run = ExportRun.objects.create(job=self.job, user=self.user)
data_provider_task_record = DataProviderTaskRecord.objects.create(
run=run, name="Shapefile Export", provider=self.provider, status=TaskState.PENDING.value
)
run.data_provider_task_records.add(data_provider_task_record)
url = f"/api/runs/{run.uid}/rerun_providers"
expected_slugs = ["osm-generic"]
request_data = {"data_provider_slugs": expected_slugs}
response = self.client.post(url, request_data, format="json")
self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
self.assertEqual("PENDING", response.data["zipfile"]["status"])
mock_check_job_permissions.assert_called_once_with(run.job)
class TestRunZipFileViewSet(APITestCase):
"""
Test cases for RunZipFileViewSet
"""
fixtures = ("osm_provider.json",)
list_url = reverse("api:zipfiles-list")
def setUp(self):
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + self.token.key,
HTTP_ACCEPT="application/json, text/plain; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(
name="TestJob",
description="Test description",
user=self.user,
the_geom=the_geom,
visibility=VisibilityState.PUBLIC.value,
)
self.run = ExportRun.objects.create(job=self.job, user=self.user)
self.provider = DataProvider.objects.first()
self.celery_uid = str(uuid.uuid4())
self.data_provider_task_record = DataProviderTaskRecord.objects.create(
run=self.run, name="Shapefile Export", provider=self.provider, status=TaskState.PENDING.value
)
filename = "test.zip"
self.downloadable_file = FileProducingTaskResult.objects.create(filename=filename, size=10)
self.task = ExportTaskRecord.objects.create(
export_provider_task=self.data_provider_task_record,
name="Shapefile Export",
celery_uid=self.celery_uid,
status="SUCCESS",
result=self.downloadable_file,
)
self.run_zip_file = RunZipFile.objects.create(run=self.run, downloadable_file=self.downloadable_file)
self.data_provider_task_records = [self.data_provider_task_record]
self.run_zip_file.data_provider_task_records.set(self.data_provider_task_records)
def test_zipfiles_list_authenticated(self):
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_zipfiles_list_unauthenticated(self):
self.client.force_authenticate(user=None)
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_zipfiles_detail_retrieve(self):
url = reverse("api:zipfiles-detail", args=[self.run_zip_file.uid])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(str(response.data.get("uid")), str(self.run_zip_file.uid))
self.assertEqual(str(response.data.get("run")), str(self.run_zip_file.run))
dptr_uids = [dptr.uid for dptr in self.run_zip_file.data_provider_task_records.all()]
self.assertEqual(str(response.data.get("data_provider_task_records")), str(dptr_uids))
self.assertEqual(response.data.get("downloadable_file"), self.run_zip_file.downloadable_file.id)
expected_url = f"http://testserver/download?uid={self.run_zip_file.downloadable_file.uid}"
self.assertEqual(response.data.get("url"), expected_url)
class TestDataProviderTaskRecordViewSet(APITestCase):
"""
Test cases for ExportTaskViewSet
"""
fixtures = (
"osm_provider.json",
"datamodel_presets.json",
)
def __init__(self, *args, **kwargs):
super(TestDataProviderTaskRecordViewSet, self).__init__(*args, **kwargs)
self.user = None
self.path = None
self.job = None
self.celery_uid = None
self.client = None
self.export_run = None
self.data_provider_task_record = None
self.task = None
self.task_uid = None
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.attribute_class = AttributeClass.objects.create(name="test", slug="test", filter="username=demo")
self.attribute_class.users.add(self.user)
self.attribute_class.save()
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(
name="TestJob",
description="Test description",
user=self.user,
the_geom=the_geom,
visibility=VisibilityState.PUBLIC.value,
)
formats = ExportFormat.objects.all()
self.provider = DataProvider.objects.first()
self.provider.attribute_class = self.attribute_class
self.provider.save()
provider_task = DataProviderTask.objects.create(job=self.job, provider=self.provider)
provider_task.formats.add(*formats)
self.job.save()
# setup token authentication
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
self.export_run = ExportRun.objects.create(job=self.job, user=self.user)
self.celery_uid = str(uuid.uuid4())
self.data_provider_task_record = DataProviderTaskRecord.objects.create(
run=self.export_run, name="Shapefile Export", provider=self.provider, status=TaskState.PENDING.value
)
self.task = ExportTaskRecord.objects.create(
export_provider_task=self.data_provider_task_record,
name="Shapefile Export",
celery_uid=self.celery_uid,
status="SUCCESS",
)
self.task_uid = str(self.task.uid)
def test_retrieve(self):
url = reverse("api:provider_tasks-detail", args=[self.data_provider_task_record.uid])
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
# make sure we get the correct uid back out
expected_response = {"hidden": False, "display": False}
self.assertEqual(str(response.data.get("uid")), str(self.data_provider_task_record.uid))
self.assertEqual(response.data["hidden"], expected_response["hidden"])
self.assertEqual(response.data["display"], expected_response["display"])
self.attribute_class.users.remove(self.user)
response = self.client.get(url)
expected_response = {"hidden": True, "display": False}
self.assertEqual(str(response.data.get("uid")), str(self.data_provider_task_record.uid))
self.assertEqual(response.data["hidden"], expected_response["hidden"])
self.assertEqual(response.data["display"], expected_response["display"])
def test_list(self):
url = reverse("api:provider_tasks-list")
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
# make sure we get the correct uid back out
expected_response = {"hidden": False, "display": False}
self.assertEqual(str(response.data[0].get("uid")), str(self.data_provider_task_record.uid))
self.assertEqual(response.data[0]["hidden"], expected_response["hidden"])
self.assertEqual(response.data[0]["display"], expected_response["display"])
self.attribute_class.users.remove(self.user)
response = self.client.get(url)
expected_response = {"hidden": True, "display": False}
self.assertEqual(str(response.data[0].get("uid")), str(self.data_provider_task_record.uid))
self.assertEqual(response.data[0]["hidden"], expected_response["hidden"])
self.assertEqual(response.data[0]["display"], expected_response["display"])
def test_patch_cancel_task(self):
expected = "/api/provider_tasks/{0}".format(self.data_provider_task_record.uid)
url = reverse("api:provider_tasks-list") + "/%s" % (self.data_provider_task_record.uid,)
self.assertEqual(expected, url)
response = self.client.patch(url)
# test significant content
self.assertEqual(response.data, {"success": True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
pt = DataProviderTaskRecord.objects.get(uid=self.data_provider_task_record.uid)
et = pt.tasks.last()
self.assertEqual(pt.status, TaskState.CANCELED.value)
self.assertEqual(et.status, TaskState.SUCCESS.value)
def test_patch_cancel_task_no_permissions(self):
user = User.objects.create_user(username="demo2", email="demo2@demo.com", password="demo")
self.attribute_class.users.add(user)
token = Token.objects.create(user=user)
# reset the client credentials to the new user
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
expected = "/api/provider_tasks/{0}".format(self.data_provider_task_record.uid)
url = reverse("api:provider_tasks-list") + "/%s" % (self.data_provider_task_record.uid,)
self.assertEqual(expected, url)
response = self.client.patch(url)
# test the response headers
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, {"success": False})
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.attribute_class.users.remove(user)
response = self.client.patch(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data, {"success": False})
def test_export_provider_task_get(self):
url = reverse("api:provider_tasks-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class TestExportTaskViewSet(APITestCase):
"""
Test cases for ExportTaskViewSet
"""
fixtures = (
"osm_provider.json",
"datamodel_presets.json",
)
def __init__(self, *args, **kwargs):
super(TestExportTaskViewSet, self).__init__(*args, **kwargs)
self.user = None
self.path = None
self.job = None
self.celery_uid = None
self.client = None
self.export_run = None
self.export_provider_task = None
self.task = None
self.task_uid = None
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.attribute_class = AttributeClass.objects.create(name="test", slug="test", filter="username=demo")
self.attribute_class.users.add(self.user)
self.attribute_class.save()
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(
name="TestJob",
description="Test description",
user=self.user,
the_geom=the_geom,
visibility=VisibilityState.PUBLIC.value,
)
formats = ExportFormat.objects.all()
self.provider = DataProvider.objects.first()
provider_task = DataProviderTask.objects.create(job=self.job, provider=self.provider)
provider_task.formats.add(*formats)
self.job.save()
# setup token authentication
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
self.export_run = ExportRun.objects.create(job=self.job, user=self.user)
self.celery_uid = str(uuid.uuid4())
self.export_provider_task = DataProviderTaskRecord.objects.create(
run=self.export_run, name="Shapefile Export", provider=self.provider, status=TaskState.PENDING.value
)
self.task = ExportTaskRecord.objects.create(
export_provider_task=self.export_provider_task,
name="Shapefile Export",
celery_uid=self.celery_uid,
status="SUCCESS",
)
self.task_uid = str(self.task.uid)
def test_retrieve(self):
expected = "/api/tasks/{0}".format(self.task_uid)
url = reverse("api:tasks-detail", args=[self.task_uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
result = json.dumps(response.data)
data = json.loads(result)
# make sure we get the correct uid back out
self.assertEqual(self.task_uid, data[0].get("uid"))
response = self.client.get(url)
def test_list(self):
expected = "/api/tasks"
url = reverse("api:tasks-list")
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
result = json.dumps(response.data)
data = json.loads(result)
# should only be one task in the list
self.assertEqual(1, len(data))
# make sure we get the correct uid back out
self.assertEqual(self.task_uid, data[0].get("uid"))
class TestStaticFunctions(APITestCase):
def test_get_models(self):
ExportFormat.objects.create(name="Test1", slug="Test1")
ExportFormat.objects.create(name="Test2", slug="Test2")
sample_models = ["Test1", "Test2"]
models = get_models(sample_models, ExportFormat, "name")
assert len(models) == 2
def test_get_provider_tasks(self):
# Arbitrary "Formats"
format_test1 = ExportFormat.objects.create(name="Test1", slug="Test1")
format_test2 = ExportFormat.objects.create(name="Test2", slug="Test2")
format_test3 = ExportFormat.objects.create(name="Test3", slug="Test3")
# Formats we want to process
requested_types = (format_test1, format_test2)
# An arbitrary provider type...
provider_type = DataProviderType.objects.create(type_name="test")
# ... and the formats it actually supports.
supported_formats = [format_test2, format_test3]
provider_type.supported_formats.add(*supported_formats)
provider_type.save()
# Assign the type to an arbitrary provider.
export_provider = DataProvider.objects.create(name="provider1", export_provider_type=provider_type)
# Get a DataProviderTask object to ensure that it is only trying to process
# what it actually supports (1).
provider_task = get_provider_task(export_provider, requested_types)
assert len(provider_task.formats.all()) == 2
class TestLicenseViewSet(APITestCase):
def setUp(self):
group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.licenses = [License.objects.create(slug="test1", name="name1", text="text1")]
self.licenses += [License.objects.create(slug="test0", name="name0", text="text0")]
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json, text/plain; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
def test_get_licenses_list(self):
expected_url = "/api/licenses"
expected_data = [
{"slug": "test0", "name": "name0", "text": "text0"},
{"slug": "test1", "name": "name1", "text": "text1"},
]
url = reverse("api:licenses-list")
self.assertEqual(expected_url, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
data = response.json()
self.assertEqual(expected_data, data)
def test_get_licenses_detail(self):
expected_url = "/api/licenses/test1"
expected_data = {"slug": "test1", "name": "name1", "text": "text1"}
url = reverse("api:licenses-detail", args=["test1"])
self.assertEqual(expected_url, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
data = response.json()
self.assertEqual(expected_data, data)
def test_get_licenses_download(self):
expected_url = "/api/licenses/test1/download"
url = reverse("api:licenses-download", args=["test1"])
self.assertEqual(expected_url, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
self.assertEqual(self.licenses[0].text, response.content.decode())
expected_bad_url = "/api/licenses/test22/download"
bad_url = reverse("api:licenses-download", args=["test22"])
self.assertEqual(expected_bad_url, bad_url)
bad_response = self.client.get(bad_url)
self.assertIsNotNone(bad_response)
self.assertEqual(404, bad_response.status_code)
self.assertEqual("Not Found", bad_response.data["errors"][0]["title"])
class TestUserDataViewSet(APITestCase):
fixtures = ("osm_provider.json",)
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.licenses = [License.objects.create(slug="test1", name="Test1", text="text")]
self.licenses += [License.objects.create(slug="test2", name="Test2", text="text")]
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
ds = DataSource(os.path.dirname(os.path.realpath(__file__)) + "/../../jobs/migrations/africa.geojson")
layer = ds[0]
geom = layer.get_geoms(geos=True)[0]
the_geom = GEOSGeometry(geom.wkt, srid=4326)
the_geog = GEOSGeometry(geom.wkt)
the_geom_webmercator = the_geom.transform(ct=3857, clone=True)
region = Region.objects.create(
name="Africa",
description="African export region",
the_geom=the_geom,
the_geog=the_geog,
the_geom_webmercator=the_geom_webmercator,
)
self.provider = DataProvider.objects.first()
policies_example = json.loads(get_example_from_file("examples/policies_example.json"))
justification_options_example = json.loads(get_example_from_file("examples/justification_options_example.json"))
self.regional_policy = RegionalPolicy.objects.create(
name="Test Policy",
region=region,
policies=policies_example,
justification_options=justification_options_example,
policy_title_text="Policy Title",
policy_cancel_button_text="Cancel Button",
)
self.regional_policy.providers.set([self.provider])
self.regional_policy.save()
def test_get_userdata_list(self):
expected = "/api/users"
url = reverse("api:users-list")
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
data = response.json()
self.assertEqual(self.user.username, data[0].get("user").get("username"))
self.assertIsNotNone(data[0].get("accepted_licenses").get(self.licenses[0].slug))
def test_get_userdata(self):
expected = "/api/users/{0}".format(self.user)
url = reverse("api:users-detail", args=[self.user])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(200, response.status_code)
data = response.json()
self.assertEqual(self.user.username, data.get("user").get("username"))
self.assertIsNotNone(data.get("accepted_licenses").get(self.licenses[0].slug))
def test_set_licenses(self):
url = reverse("api:users-detail", args=[self.user])
response = self.client.get(url)
data = response.json()
# check both licenses are NOT accepted.
self.assertEqual(data.get("accepted_licenses").get(self.licenses[0].slug), False)
self.assertEqual(data.get("accepted_licenses").get(self.licenses[1].slug), False)
# update single license.
request_data = data
request_data["accepted_licenses"][self.licenses[0].slug] = True
patch_response = self.client.patch(
url, data=json.dumps(request_data), content_type="application/json; version=1.0"
)
response = self.client.get(url)
data = response.json()
# check that the response body matches a new request
self.assertEqual(patch_response.data, response.data)
# check single licenses is accepted.
self.assertEqual(data.get("accepted_licenses").get(self.licenses[0].slug), True)
self.assertEqual(data.get("accepted_licenses").get(self.licenses[1].slug), False)
request_data = data
request_data["accepted_licenses"][self.licenses[1].slug] = True
patch_response = self.client.patch(
url, data=json.dumps(request_data), content_type="application/json; version=1.0"
)
data = patch_response.json()
self.assertEqual(data.get("accepted_licenses").get(self.licenses[0].slug), True)
self.assertEqual(data.get("accepted_licenses").get(self.licenses[1].slug), True)
request_data = data
request_data["accepted_licenses"][self.licenses[0].slug] = False
patch_response = self.client.patch(
url, data=json.dumps(request_data), content_type="application/json; version=1.0"
)
data = patch_response.json()
self.assertEqual(data.get("accepted_licenses").get(self.licenses[0].slug), False)
self.assertEqual(data.get("accepted_licenses").get(self.licenses[1].slug), True)
def test_get_policies(self):
self.user.last_login = timezone.now()
self.user.save()
url = reverse("api:users-detail", args=[self.user])
response = self.client.get(url)
data = response.json()
# check both licenses are NOT accepted.
self.assertEqual(data.get("accepted_policies"), {str(RegionalPolicy.objects.first().uid): False})
request_data = {
"justification_id": 1,
"justification_suboption_value": "Option 1",
"regional_policy_uid": str(self.regional_policy.uid),
}
url = reverse("api:regional_justifications-list")
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
url = reverse("api:users-detail", args=[self.user])
response = self.client.get(url)
data = response.json()
self.assertEqual(data.get("accepted_policies"), {str(RegionalPolicy.objects.first().uid): True})
class TestGroupDataViewSet(APITestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
self.user1 = User.objects.create_user(username="user_1", email="groupuser@demo.com", password="demo")
self.user2 = User.objects.create_user(username="user_2", email="groupuser@demo.com", password="demo")
self.token = Token.objects.create(user=self.user1)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + self.token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
self.testName = "Omaha 319"
group, created = Group.objects.get_or_create(name=self.testName)
self.groupid = group.id
GroupPermission.objects.create(group=group, user=self.user1, permission=GroupPermissionLevel.ADMIN.value)
def test_insert_group(self):
expected = "/api/groups"
url = reverse("api:groups-list")
self.assertEqual(expected, url)
payload = {"name": "Any group"}
response = self.client.post(url, data=json.dumps(payload), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_duplicate_group(self):
url = reverse("api:groups-list")
payload = {"name": "oMaHa 319"}
response = self.client.post(url, data=json.dumps(payload), content_type="application/json; version=1.0")
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_get_list(self):
url = reverse("api:groups-list")
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.json()
self.assertEqual(len(data), 2)
def test_get_group(self):
url = reverse("api:groups-detail", args=[self.groupid])
response = self.client.get(url, content_type="application/json; version=1.0")
data = response.json()
self.groupid = data["id"]
self.assertEqual(data["name"], self.testName)
self.assertEqual(len(data["members"]), 0)
self.assertEqual(len(data["administrators"]), 1)
def test_set_membership(self):
url = reverse("api:groups-detail", args=[self.groupid])
response = self.client.get(url, content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_200_OK)
groupdata = response.json()
# add a user to group members and to group administrators
groupdata["members"].append("user_1")
groupdata["administrators"].append("user_1")
groupdata["members"].append("user_2")
groupdata["administrators"].append("user_2")
response = self.client.patch(url, data=json.dumps(groupdata), content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.get(url, content_type="application/json; version=1.0")
groupdata = response.json()
self.assertEqual(len(groupdata["members"]), 2)
self.assertEqual(len(groupdata["administrators"]), 2)
# remove user_2 from members
groupdata["members"] = ["user_1"]
groupdata["administrators"] = ["user_1"]
response = self.client.patch(url, data=json.dumps(groupdata), content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.get(url, content_type="application/json; version=1.0")
groupdata = response.json()
self.assertEqual(len(groupdata["members"]), 1)
self.assertEqual(groupdata["members"][0], "user_1")
# check for a 403 if we remove all administrators
groupdata["administrators"] = []
response = self.client.patch(url, data=json.dumps(groupdata), content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_leave_group(self):
# ensure the group is created
url = reverse("api:groups-detail", args=[self.groupid])
response = self.client.get(url, content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_200_OK)
# check add user_2 as member and only one admin
group_data = response.json()
group_data["members"] = ["user_1", "user_2"]
group_data["administrators"] = ["user_2"]
response = self.client.patch(url, data=json.dumps(group_data), content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.get(url, content_type="application/json; version=1.0")
group_data = response.json()
self.assertEqual(len(group_data["members"]), 2)
self.assertEqual(len(group_data["administrators"]), 1)
self.assertEqual(group_data["administrators"][0], "user_2")
# empty patch request should remove user_1 from members
response = self.client.patch(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# verify the results
response = self.client.get(url, content_type="application/json; verison=1.0")
group_data = response.json()
self.assertEqual(len(group_data["members"]), 1)
self.assertEqual(group_data["members"][0], "user_2")
class TestUserJobActivityViewSet(APITestCase):
fixtures = ("osm_provider.json",)
def __init__(self, *args, **kwargs):
super(TestUserJobActivityViewSet, self).__init__(*args, **kwargs)
self.group = None
self.user = None
self.viewed_jobs = []
def setUp(self):
self.group, created = Group.objects.get_or_create(name="TestDefaultExportExtentGroup")
with patch("eventkit_cloud.jobs.signals.Group") as mock_group:
mock_group.objects.get.return_value = self.group
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
for i in range(3):
job = self.create_job("ViewedJob%s" % str(i))
self.viewed_jobs.append(job)
UserJobActivity.objects.create(user=self.user, job=job, type=UserJobActivity.VIEWED)
def test_get_viewed(self):
expected = "/api/user/activity/jobs"
url = reverse("api:user_job_activity-list")
self.assertEqual(expected, url)
response = self.client.get(url + "?activity=viewed&page_size=10")
self.assertIsNotNone(response)
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.json()
self.assertEqual(len(data), len(self.viewed_jobs))
def test_get_viewed_pagination(self):
for i in range(len(self.viewed_jobs), 15):
job = self.create_job("ViewedJob%s" % str(i))
self.viewed_jobs.append(job)
UserJobActivity.objects.create(user=self.user, job=job, type=UserJobActivity.VIEWED)
url = reverse("api:user_job_activity-list")
page_size = 10
response = self.client.get(url + "?activity=viewed&page_size=%s" % page_size)
self.assertIsNotNone(response)
self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
data = response.json()
self.assertEqual(len(data), page_size)
def test_create_viewed(self):
# Get our current number of viewed jobs to compare against.
url = reverse("api:user_job_activity-list")
response = self.client.get(url + "?activity=viewed&page_size=10")
prev_viewed_jobs_count = len(response.json())
# Post a new job view.
job = self.create_job("UnviewedJob")
response = self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job.uid)}),
content_type="application/json; version=1.0",
)
self.assertIsNotNone(response)
self.assertEqual(status.HTTP_200_OK, response.status_code)
# Get our new number of viewed jobs and compare.
response = self.client.get(url + "?activity=viewed&page_size=10")
viewed_jobs = response.json()
self.assertEqual(len(viewed_jobs), prev_viewed_jobs_count + 1)
# Make sure the first returned viewed job matches what we just viewed.
self.assertEqual(viewed_jobs[0]["last_export_run"]["job"]["uid"], str(job.uid))
self.assertEqual(viewed_jobs[0]["type"], UserJobActivity.VIEWED)
def test_create_viewed_consecutive(self):
# Add one viewed job first.
url = reverse("api:user_job_activity-list")
job_a = self.create_job("UnviewedJobA")
self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job_a.uid)}),
content_type="application/json; version=1.0",
)
# Post a new job view twice. It should be ignored the second time.
job_b = self.create_job("UnviewedJobB")
response = self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job_b.uid)}),
content_type="application/json; version=1.0",
)
self.assertEqual(response.json().get("ignored"), None)
response = self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job_b.uid)}),
content_type="application/json; version=1.0",
)
self.assertEqual(response.json().get("ignored"), True)
# Make sure we don't see the same job twice in our viewed jobs.
response = self.client.get(url + "?activity=viewed")
viewed_jobs = response.json()
self.assertNotEqual(viewed_jobs[0], viewed_jobs[1])
def test_create_viewed_existing(self):
# View job A.
url = reverse("api:user_job_activity-list")
job_a = self.create_job("UnviewedJobA")
self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job_a.uid)}),
content_type="application/json; version=1.0",
)
# View job B.
job_b = self.create_job("UnviewedJobB")
self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job_b.uid)}),
content_type="application/json; version=1.0",
)
# View Job A.
self.client.post(
url + "?activity=viewed",
data=json.dumps({"job_uid": str(job_a.uid)}),
content_type="application/json; version=1.0",
)
# Make sure that job A only shows up once in our viewed jobs.
response = self.client.get(url + "?activity=viewed")
viewed_jobs = response.json()
job_a_count = 0
for viewed_job in viewed_jobs:
if viewed_job["last_export_run"]["job"]["uid"] == str(job_a.uid):
job_a_count += 1
self.assertEqual(job_a_count, 1)
self.assertEqual(viewed_jobs[-1]["last_export_run"]["job"]["uid"], str(job_a.uid))
def create_job(self, name):
extents = (-3.9, 16.1, 7.0, 27.6)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
job = Job.objects.create(name=name, description="Test description", user=self.user, the_geom=the_geom)
formats = ExportFormat.objects.all()
provider = DataProvider.objects.first()
provider_task = DataProviderTask.objects.create(job=job, provider=provider)
provider_task.formats.add(*formats)
run = ExportRun.objects.create(
job=job, user=self.user, status="COMPLETED", expiration=(timezone.now() + timezone.timedelta(days=14))
)
job.last_export_run = run
job.save()
return job
class TestDataProviderRequestViewSet(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.provider_request = DataProviderRequest(
name="Test Data Provider Request",
url="http://www.test.com",
service_description="Test Service Description",
layer_names="[Test1, Test2, Test3]",
comment="Test Comment",
user=self.user,
)
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
self.provider_request.save()
def test_list(self):
expected = "/api/providers/requests"
url = reverse("api:provider_requests-list")
self.assertEqual(expected, url)
def test_create_provider_request_success(self):
request_data = {
"name": "Test Name",
"url": "http://www.test.com",
"service_description": "Test Description",
"layer_names": "[Test1, Test2, Test3]",
}
url = reverse("api:provider_requests-list")
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
response = response.json()
provider_request = DataProviderRequest.objects.last()
self.assertEqual(provider_request.name, request_data["name"])
self.assertEqual(provider_request.url, request_data["url"])
self.assertEqual(provider_request.service_description, request_data["service_description"])
self.assertEqual(provider_request.layer_names, request_data["layer_names"])
self.assertEqual(provider_request.status, "pending")
self.assertEqual(provider_request.user, self.user)
def test_get_provider_request_detail(self):
expected = f"/api/providers/requests/{self.provider_request.uid}"
url = reverse("api:provider_requests-detail", args=[self.provider_request.uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual(response.data["uid"], str(self.provider_request.uid))
def test_get_provider_request_detail_no_permissions(self):
user = User.objects.create_user(username="demo2", email="demo2@demo.com", password="demo")
token = Token.objects.create(user=user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
expected = f"/api/providers/requests/{self.provider_request.uid}"
url = reverse("api:provider_requests-detail", args=[self.provider_request.uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertIsNotNone(response.data["errors"][0]["detail"])
def test_delete_job(self):
url = reverse("api:provider_requests-detail", args=[self.provider_request.uid])
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response["Content-Length"], "0")
self.assertEqual(response["Content-Language"], "en")
class TestSizeIncreaseRequestViewSet(APITestCase):
fixtures = ("osm_provider.json",)
def setUp(self):
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
provider = DataProvider.objects.get(slug="osm-generic")
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
self.provider = DataProvider.objects.first()
self.size_request = SizeIncreaseRequest(
provider=provider, the_geom=the_geom, requested_aoi_size=5000, requested_data_size=1000, user=self.user
)
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
self.size_request.save()
def test_list(self):
expected = "/api/providers/requests/size"
url = reverse("api:size_requests-list")
self.assertEqual(expected, url)
def test_create_size_request_success(self):
request_data = {
"provider": self.provider.id,
"selection": bbox_to_geojson([5, 16, 5.1, 16.1]),
"requested_aoi_size": 5000,
"requested_data_size": 1000,
}
url = reverse("api:size_requests-list")
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
response = response.json()
size_request = SizeIncreaseRequest.objects.last()
self.assertEqual(size_request.provider.id, request_data["provider"])
self.assertEqual(size_request.requested_aoi_size, request_data["requested_aoi_size"])
self.assertEqual(size_request.requested_data_size, request_data["requested_data_size"])
self.assertEqual(size_request.status, "pending")
self.assertEqual(size_request.user, self.user)
def test_get_size_request_detail(self):
expected = f"/api/providers/requests/size/{self.size_request.uid}"
url = reverse("api:size_requests-detail", args=[self.size_request.uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertEqual(response.data["uid"], str(self.size_request.uid))
def test_get_size_request_detail_no_permissions(self):
user = User.objects.create_user(username="demo2", email="demo2@demo.com", password="demo")
token = Token.objects.create(user=user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
expected = f"/api/providers/requests/size/{self.size_request.uid}"
url = reverse("api:size_requests-detail", args=[self.size_request.uid])
self.assertEqual(expected, url)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
self.assertIsNotNone(response.data["errors"][0]["detail"])
def test_delete_job(self):
url = reverse("api:size_requests-detail", args=[self.size_request.uid])
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response["Content-Length"], "0")
self.assertEqual(response["Content-Language"], "en")
class TestRegionalJustification(APITestCase):
fixtures = ("osm_provider.json",)
def setUp(self):
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo")
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
ds = DataSource(os.path.dirname(os.path.realpath(__file__)) + "/../../jobs/migrations/africa.geojson")
layer = ds[0]
geom = layer.get_geoms(geos=True)[0]
the_geom = GEOSGeometry(geom.wkt, srid=4326)
the_geog = GEOSGeometry(geom.wkt)
the_geom_webmercator = the_geom.transform(ct=3857, clone=True)
region = Region.objects.create(
name="Africa",
description="African export region",
the_geom=the_geom,
the_geog=the_geog,
the_geom_webmercator=the_geom_webmercator,
)
self.provider = DataProvider.objects.first()
policies_example = json.loads(get_example_from_file("examples/policies_example.json"))
justification_options_example = json.loads(get_example_from_file("examples/justification_options_example.json"))
self.regional_policy = RegionalPolicy.objects.create(
name="Test Policy",
region=region,
policies=policies_example,
justification_options=justification_options_example,
policy_title_text="Policy Title",
policy_cancel_button_text="Cancel Button",
)
self.regional_policy.providers.set([self.provider])
self.regional_policy.save()
def test_create_regional_justification(self):
request_data = {
"justification_id": 1,
"justification_suboption_value": "Option 1",
"regional_policy_uid": str(self.regional_policy.uid),
}
url = reverse("api:regional_justifications-list")
response = self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response["Content-Type"], "application/json")
self.assertEqual(response["Content-Language"], "en")
response = response.json()
regional_justification = RegionalJustification.objects.last()
self.assertEqual(regional_justification.justification_id, request_data["justification_id"])
self.assertEqual(regional_justification.justification_name, "Justification Option with Dropdown Suboption")
self.assertEqual(regional_justification.justification_suboption_value, "Option 1")
self.assertEqual(regional_justification.regional_policy, self.regional_policy)
def test_invalid_regional_policy(self):
request_data = {
"justification_id": 1,
"justification_suboption_value": "Option 1",
"regional_policy_uid": "invalid_uid",
}
url = reverse("api:regional_justifications-list")
self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertRaisesMessage(
Exception, "The Regional Policy for UID {request_data['regional_policy_uid']} does not exist."
)
def test_invalid_dropdown_suboption(self):
request_data = {
"justification_id": 1,
"justification_suboption_value": "Invalid Option",
"regional_policy_uid": str(self.regional_policy.uid),
}
url = reverse("api:regional_justifications-list")
self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertRaisesMessage(ValidationError, "Invalid suboption selected.")
def test_no_suboption_invalid_description(self):
request_data = {
"justification_id": 3,
"justification_suboption_value": "Invalid Option",
"regional_policy_uid": str(self.regional_policy.uid),
}
url = reverse("api:regional_justifications-list")
self.client.post(url, data=json.dumps(request_data), content_type="application/json; version=1.0")
self.assertRaisesMessage(
ValidationError, "No suboption was available, so justification_suboption_value cannot be used."
)
class TestMetricsViewSet(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username="demo", email="demo@demo.com", password="demo", is_staff=True)
token = Token.objects.create(user=self.user)
self.client.credentials(
HTTP_AUTHORIZATION="Token " + token.key,
HTTP_ACCEPT="application/json; version=1.0",
HTTP_ACCEPT_LANGUAGE="en",
HTTP_HOST="testserver",
)
def test_metrics(self):
url = reverse("api:metrics")
response = self.client.get(url, content_type="application/json; version=1.0")
response_data = response.json()
expected_keys = ["Total Users", "Average Users Per Day", "Top User Groups", "Downloads by Area"]
self.assertEqual(expected_keys, list(response_data.keys()))
# TODO: Add example users, groups, and UserDownloads to ensure filters work correctly.
|
{
"content_hash": "234b43759e2b6c6d40442d978ac44560",
"timestamp": "",
"source": "github",
"line_count": 2185,
"max_line_length": 120,
"avg_line_length": 44.60228832951945,
"alnum_prop": 0.624261205056641,
"repo_name": "venicegeo/eventkit-cloud",
"id": "cbea13985878e6ebb9dd77c872425f33f56c330d",
"size": "97480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventkit_cloud/api/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90420"
},
{
"name": "Dockerfile",
"bytes": "2466"
},
{
"name": "HTML",
"bytes": "85741"
},
{
"name": "Java",
"bytes": "123740"
},
{
"name": "JavaScript",
"bytes": "597810"
},
{
"name": "Python",
"bytes": "1145801"
},
{
"name": "Shell",
"bytes": "6127"
},
{
"name": "TypeScript",
"bytes": "1456680"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import o3d3xx
import sys
import time
if len(sys.argv) > 1:
address=sys.argv[1]
else:
address='192.168.0.69'
pcic = o3d3xx.ImageClient(address, 50010)
pcic.debug = True
# repeatedly read frames from the process interface
lasttimestamp = time.time()
startTimestamp = lasttimestamp
frameCounter = 0
while True:
result = pcic.readNextFrame()
if 'diagnostic' in result:
print(result['diagnostic'])
frameCounter = frameCounter + 1
# timing
timestamp = time.time()
timediff = timestamp - lasttimestamp
print('Current frame time: %f (%f fps), bandwidth %f MBit/s' % (timediff, 1.0/timediff, 8 * pcic.recvCounter / (1e6 * timediff)))
print('Overall run time: %f for %d frames (%f fps)' % (timestamp - startTimestamp, frameCounter, (frameCounter * 1.0)/(timestamp - startTimestamp)))
lasttimestamp = timestamp
pcic.recvCounter = 0
|
{
"content_hash": "53a1f2e42269c62c589c3fb86b026e1d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 149,
"avg_line_length": 27.17142857142857,
"alnum_prop": 0.729758149316509,
"repo_name": "cfreundl/o3d3xx-python",
"id": "eae98bf2bd5e50a06a52f95242a8dd459ed0cc60",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/image_grabber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15240"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from datetime import datetime
import gc
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_float_dtype,
is_integer_dtype,
is_unsigned_integer_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
NumericIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
isna,
)
import pandas._testing as tm
from pandas.core.api import ( # noqa:F401
Float64Index,
Int64Index,
UInt64Index,
)
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
class Base:
"""
Base class for index sub-class tests.
"""
_index_cls: type[Index]
@pytest.fixture
def simple_index(self):
raise NotImplementedError("Method not implemented")
def create_index(self) -> Index:
raise NotImplementedError("Method not implemented")
def test_pickle_compat_construction(self):
# need an object to create with
msg = "|".join(
[
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed",
r"DatetimeIndex\(\) must be called with a collection of some "
r"kind, None was passed",
r"TimedeltaIndex\(\) must be called with a collection of some "
r"kind, None was passed",
r"__new__\(\) missing 1 required positional argument: 'data'",
r"__new__\(\) takes at least 2 arguments \(1 given\)",
]
)
with pytest.raises(TypeError, match=msg):
self._index_cls()
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name, simple_index):
# see GH-15230, GH-22580
idx = simple_index
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_shift(self, simple_index):
# GH8083 test the base class for shift
idx = simple_index
msg = (
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(idx).__name__}"
)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_constructor_name_unhashable(self, simple_index):
# GH#29069 check that name is hashable
# See also same-named test in tests.series.test_constructors
idx = simple_index
with pytest.raises(TypeError, match="Index.name must be a hashable type"):
type(idx)(idx, name=[])
def test_create_index_existing_name(self, simple_index):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = simple_index
if not isinstance(expected, MultiIndex):
expected.name = "foo"
result = Index(expected)
tm.assert_index_equal(result, expected)
result = Index(expected, name="bar")
expected.name = "bar"
tm.assert_index_equal(result, expected)
else:
expected.names = ["foo", "bar"]
result = Index(expected)
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["foo", "bar"],
),
)
result = Index(expected, names=["A", "B"])
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["A", "B"],
),
)
def test_numeric_compat(self, simple_index):
idx = simple_index
# Check that this doesn't cover MultiIndex case, if/when it does,
# we can remove multi.test_compat.test_numeric_compat
assert not isinstance(idx, MultiIndex)
if type(idx) is Index:
return
typ = type(idx._data).__name__
cls = type(idx).__name__
lmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",
"cannot perform (__mul__|__truediv__|__floordiv__) with "
f"this index type: ({cls}|{typ})",
]
)
with pytest.raises(TypeError, match=lmsg):
idx * 1
rmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",
"cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "
f"this index type: ({cls}|{typ})",
]
)
with pytest.raises(TypeError, match=rmsg):
1 * idx
div_err = lmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = rmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
1 / idx
floordiv_err = lmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
idx // 1
floordiv_err = rmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
1 // idx
def test_logical_compat(self, simple_index):
idx = simple_index
with pytest.raises(TypeError, match="cannot perform all"):
idx.all()
with pytest.raises(TypeError, match="cannot perform any"):
idx.any()
def test_repr_roundtrip(self, simple_index):
idx = simple_index
tm.assert_index_equal(eval(repr(idx)), idx)
def test_repr_max_seq_item_setting(self, simple_index):
# GH10182
idx = simple_index
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
def test_copy_name(self, index):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
if isinstance(index, MultiIndex):
return
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self, index):
# gh-35592
if isinstance(index, MultiIndex):
return
assert index.copy(name="mario").name == "mario"
with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
index.copy(name=["mario", "luigi"])
msg = f"{type(index).__name__}.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
index.copy(name=[["mario"]])
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs["freq"] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
return
index_type = type(index)
result = index_type(index.values, copy=True, **init_kwargs)
if is_datetime64tz_dtype(index.dtype):
result = result.tz_localize("UTC").tz_convert(index.tz)
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
index = index._with_freq(None)
tm.assert_index_equal(index, result)
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values, check_same="same")
def test_memory_usage(self, index):
index._engine.clear_mapping()
result = index.memory_usage()
if index.empty:
# we report 0 for no-length
assert result == 0
return
# non-zero length
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == "object":
assert result3 > result2
def test_argsort(self, request, index):
# separately tested
if isinstance(index, CategoricalIndex):
return
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self, index):
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(index), (CategoricalIndex, RangeIndex)):
# TODO: why type(index)?
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, kind="mergesort")
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
def test_repeat(self, simple_index):
rep = 2
idx = simple_index.copy()
new_index_cls = Int64Index if isinstance(idx, RangeIndex) else idx._constructor
expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
idx = simple_index
rep = np.arange(len(idx))
expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
def test_numpy_repeat(self, simple_index):
rep = 2
idx = simple_index
expected = idx.repeat(rep)
tm.assert_index_equal(np.repeat(idx, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(idx, rep, axis=0)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass, simple_index):
idx = simple_index
if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
# where does not preserve freq
idx = idx._with_freq(None)
cond = [True] * len(idx)
result = idx.where(klass(cond))
expected = idx
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = Index([idx._na_value] + idx[1:].tolist(), dtype=idx.dtype)
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert_base(self, index):
result = index[1:4]
if not len(index):
return
# test 0th element
assert index[0:4].equals(result.insert(0, index[0]))
def test_delete_base(self, index):
if not len(index):
return
if isinstance(index, RangeIndex):
# tested in class
return
expected = index[1:]
result = index.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = index[:-1]
result = index.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
length = len(index)
msg = f"index {length} is out of bounds for axis 0 with size {length}"
with pytest.raises(IndexError, match=msg):
index.delete(length)
def test_equals(self, index):
if isinstance(index, IntervalIndex):
# IntervalIndex tested separately, the index.equals(index.astype(object))
# fails for IntervalIndex
return
assert index.equals(index)
assert index.equals(index.copy())
assert index.equals(index.astype(object))
assert not index.equals(list(index))
assert not index.equals(np.array(index))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(index, RangeIndex):
same_values = Index(index, dtype=object)
assert index.equals(same_values)
assert same_values.equals(index)
if index.nlevels == 1:
# do not test MultiIndex
assert not index.equals(Series(index))
def test_equals_op(self, simple_index):
# GH9947, GH10637
index_a = simple_index
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
# For RangeIndex we can convert to Int64Index
tm.assert_series_equal(series_a == item, Series(expected3))
def test_format(self, simple_index):
# GH35439
idx = simple_index
expected = [str(x) for x in idx]
assert idx.format() == expected
def test_format_empty(self):
# GH35712
empty_idx = self._index_cls([])
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
def test_hasnans_isnans(self, index_flat):
# GH 11343, added tests for hasnans / isnans
index = index_flat
# cases in indices doesn't include NaN
idx = index.copy(deep=True)
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if len(index) == 0:
return
elif isinstance(index, NumericIndex) and is_integer_dtype(index.dtype):
return
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
return
elif isinstance(index, NumericIndex) and is_integer_dtype(index.dtype):
return
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy(deep=True)
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self, index):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
if len(index) == 0:
tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
elif not index.hasnans:
tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self, simple_index):
# GH 15270
idx = simple_index
assert not idx.empty
assert idx[:0].empty
def test_join_self_unique(self, join_type, simple_index):
idx = simple_index
if idx.is_unique:
joined = idx.join(idx, how=join_type)
assert (idx == joined).all()
def test_map(self, simple_index):
# callable
idx = simple_index
# we don't infer UInt64
if is_integer_dtype(idx.dtype):
expected = idx.astype("int64")
elif is_float_dtype(idx.dtype):
expected = idx.astype("float64")
else:
expected = idx
result = idx.map(lambda x: x)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, mapper, simple_index):
idx = simple_index
if isinstance(idx, CategoricalIndex):
pytest.skip(f"skipping tests for {type(idx)}")
identity = mapper(idx.values, idx)
# we don't infer to UInt64 for a dict
if is_unsigned_integer_dtype(idx.dtype) and isinstance(identity, dict):
expected = idx.astype("int64")
else:
expected = idx
result = idx.map(identity)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
# empty mappable
if idx._is_backward_compat_public_numeric_index:
new_index_cls = NumericIndex
else:
new_index_cls = Float64Index
expected = new_index_cls([np.nan] * len(idx))
result = idx.map(mapper(expected, idx))
tm.assert_index_equal(result, expected)
def test_map_str(self, simple_index):
# GH 31202
idx = simple_index
result = idx.map(str)
expected = Index([str(x) for x in idx], dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(self, copy, name, ordered, simple_index):
# GH 18630
idx = simple_index
if name:
idx = idx.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = idx.astype(dtype, copy=copy)
expected = CategoricalIndex(idx, name=name, ordered=ordered)
tm.assert_index_equal(result, expected, exact=True)
# non-standard categories
dtype = CategoricalDtype(idx.unique().tolist()[:-1], ordered)
result = idx.astype(dtype, copy=copy)
expected = CategoricalIndex(idx, name=name, dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = idx.astype("category", copy=copy)
expected = CategoricalIndex(idx, name=name)
tm.assert_index_equal(result, expected, exact=True)
def test_is_unique(self, simple_index):
# initialize a unique index
index = simple_index.drop_duplicates()
assert index.is_unique is True
# empty index should be unique
index_empty = index[:0]
assert index_empty.is_unique is True
# test basic dupes
index_dup = index.insert(0, index[0])
assert index_dup.is_unique is False
# single NA should be unique
index_na = index.insert(0, np.nan)
assert index_na.is_unique is True
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
@pytest.mark.arm_slow
def test_engine_reference_cycle(self, simple_index):
# GH27585
index = simple_index
nrefs_pre = len(gc.get_referrers(index))
index._engine
assert len(gc.get_referrers(index)) == nrefs_pre
def test_getitem_2d_deprecated(self, simple_index):
# GH#30588, GH#31479
idx = simple_index
msg = "Support for multi-dimensional indexing"
with tm.assert_produces_warning(FutureWarning, match=msg):
res = idx[:, None]
assert isinstance(res, np.ndarray), type(res)
def test_copy_shares_cache(self, simple_index):
# GH32898, GH36840
idx = simple_index
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
assert copy._cache is idx._cache
def test_shallow_copy_shares_cache(self, simple_index):
# GH32669, GH36840
idx = simple_index
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._view()
assert shallow_copy._cache is idx._cache
shallow_copy = idx._shallow_copy(idx._data)
assert shallow_copy._cache is not idx._cache
assert shallow_copy._cache == {}
def test_index_groupby(self, simple_index):
idx = simple_index[:5]
to_groupby = np.array([1, 2, np.nan, 2, 1])
tm.assert_dict_equal(
idx.groupby(to_groupby), {1.0: idx[[0, 4]], 2.0: idx[[1, 3]]}
)
to_groupby = DatetimeIndex(
[
datetime(2011, 11, 1),
datetime(2011, 12, 1),
pd.NaT,
datetime(2011, 12, 1),
datetime(2011, 11, 1),
],
tz="UTC",
).values
ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")]
expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
class NumericBase(Base):
"""
Base class for numeric index (incl. RangeIndex) sub-class tests.
"""
def test_constructor_unwraps_index(self, dtype):
index_cls = self._index_cls
idx = Index([1, 2], dtype=dtype)
result = index_cls(idx)
expected = np.array([1, 2], dtype=idx.dtype)
tm.assert_numpy_array_equal(result._data, expected)
def test_where(self):
# Tested in numeric.test_indexing
pass
def test_can_hold_identifiers(self, simple_index):
idx = simple_index
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_format(self, simple_index):
# GH35439
idx = simple_index
max_width = max(len(str(x)) for x in idx)
expected = [str(x).ljust(max_width) for x in idx]
assert idx.format() == expected
def test_numeric_compat(self):
pass # override Base method
def test_insert_na(self, nulls_fixture, simple_index):
# GH 18295 (test missing)
index = simple_index
na_val = nulls_fixture
if na_val is pd.NaT:
expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object)
else:
expected = Float64Index([index[0], np.nan] + list(index[1:]))
result = index.insert(1, na_val)
tm.assert_index_equal(result, expected)
def test_arithmetic_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
index_cls = self._index_cls
if index_cls is RangeIndex:
idx = RangeIndex(5)
else:
idx = index_cls(np.arange(5, dtype="int64"))
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype="float64")
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype="float64")
result = a - fidx
tm.assert_index_equal(result, expected)
def test_invalid_dtype(self, invalid_dtype):
# GH 29539
dtype = invalid_dtype
msg = fr"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}"
with pytest.raises(ValueError, match=msg):
self._index_cls([1, 2, 3], dtype=dtype)
|
{
"content_hash": "b1ca01c2e7585eef4b1981727c938625",
"timestamp": "",
"source": "github",
"line_count": 873,
"max_line_length": 87,
"avg_line_length": 34.376861397479956,
"alnum_prop": 0.5653926893472393,
"repo_name": "rs2/pandas",
"id": "a8684ca4d3c25ee6191544a9a26744ec2446e06f",
"size": "30011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360253"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1081551"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17393243"
},
{
"name": "Shell",
"bytes": "10872"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
from urlparse import urlparse
from django import http
from django.views.decorators.csrf import csrf_exempt
from webalyzer.collector.models import Page, Stylesheet
@csrf_exempt
def collect(request):
if request.method in ('GET', 'HEAD'):
response = http.HttpResponse('Works')
response['Access-Control-Allow-Origin'] = '*'
return response
url = request.POST['url']
domain = request.POST.get('domain', urlparse(url).netloc)
source_hash = request.POST['source_hash']
# print (url, domain)
if request.POST.get('css'):
# it's a stylesheet!
css = request.POST['css']
for page in Stylesheet.objects.filter(
url=url,
domain=domain,
source_hash=source_hash):
if page.css == css:
created = False
break
else:
page = Stylesheet.objects.create(
url=url,
domain=domain,
source_hash=source_hash,
css=css,
)
created = True
if created:
print "New CSS", domain, url
print len(page), "bytes"
else:
print "Not new CSS", domain, url
else:
html = request.POST['html']
for page in Page.objects.filter(
url=url,
domain=domain,
source_hash=source_hash):
if page.html == html:
created = False
break
else:
page = Page.objects.create(
url=url,
domain=domain,
source_hash=source_hash,
html=html,
)
created = True
if created:
print "New HTML", domain, url
print len(page), "bytes"
else:
print "Not new HTML", domain, url
response = http.HttpResponse(
'OK', status=created and 201 or 200)
response['Access-Control-Allow-Origin'] = '*'
return response
def collect_check(request, source_hash, source_type, domain):
source_hash = int(source_hash)
matches = 0
# do we have a counter to increment?
# do we want to update the date?
if source_type == 'css':
matches = Stylesheet.objects.filter(source_hash=source_hash,
domain=domain)
elif source_type == 'html':
matches = Page.objects.filter(source_hash=source_hash, domain=domain)
if matches.exists():
response = http.HttpResponse('OK', status=200)
else:
response = http.HttpResponse('Not Found', status=404)
response['Access-Control-Allow-Origin'] = '*'
return response
|
{
"content_hash": "44b182f98f4b8a91918e5c384140e49e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 28.447916666666668,
"alnum_prop": 0.5382643720248993,
"repo_name": "mdn/webalyzer",
"id": "bb7da0c3ae7cf18ae091d8e3e45b3356da23c6c3",
"size": "2731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webalyzer/collector/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11452"
},
{
"name": "HTML",
"bytes": "15439"
},
{
"name": "JavaScript",
"bytes": "19433"
},
{
"name": "Python",
"bytes": "76017"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
}
|
import re
import nova.conf
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import fs
from nova.virt.libvirt.volume import remotefs
CONF = nova.conf.CONF
USERNAME_REGEX = re.compile(r"(user(?:name)?)=(?:[^ ,]+\\)?([^ ,]+)")
class LibvirtSMBFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for SMBFS."""
def _get_mount_point_base(self):
return CONF.libvirt.smbfs_mount_point_base
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtSMBFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.driver_cache = 'writethrough'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info, instance):
"""Connect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
if not libvirt_utils.is_mounted(mount_path, smbfs_share):
mount_options = self._parse_mount_options(connection_info)
remotefs.mount_share(mount_path, smbfs_share,
export_type='cifs', options=mount_options)
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
def disconnect_volume(self, connection_info, disk_dev, instance):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
remotefs.unmount_share(mount_path, smbfs_share)
def _parse_mount_options(self, connection_info):
mount_options = " ".join(
[connection_info['data'].get('options') or '',
CONF.libvirt.smbfs_mount_options])
if not USERNAME_REGEX.findall(mount_options):
mount_options = mount_options + ' -o username=guest'
else:
# Remove the Domain Name from user name
mount_options = USERNAME_REGEX.sub(r'\1=\2', mount_options)
return mount_options.strip(", ").split(' ')
|
{
"content_hash": "66f0a289cea4e12750f0ed0b811f6503",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 39.52542372881356,
"alnum_prop": 0.6355060034305318,
"repo_name": "Juniper/nova",
"id": "8841bcb86eee1cbc952c568e87591c3c2ebf12e7",
"size": "2905",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/volume/smbfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "23962"
},
{
"name": "Python",
"bytes": "19816434"
},
{
"name": "Shell",
"bytes": "27717"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
import copy
import numpy as np
import os
import pandas as pd
from datetime import datetime, timedelta
class WodProfile(object):
""" Main class to parse a WOD ASCII file
Input:
fid: File object of an open WOD ASCII file.
Output:
Each time this class is initialised it reads a
single profile from the input file. The data are
parsed into a set of dictionaries and lists.
Functions are defined to extract some of the
commonly used information from these.
Example:
fid = open("XBTO1966")
profile = WodProfile(fid) # Reads a single profile.
profile.latitude() # Return the latitude of the profile.
profile.z() # Return the depths of the observations.
profile2 = WodProfile(fid) # Read the next profile.
profile2.is_last_profile_in_file() # Is this the last profile?
fid.close()
"""
def __init__(self, fid, load_profile_data=True):
# Record of where the profile occurs.
self.file_name = fid.name
self.file_position = fid.tell()
# Check we are not at the end of the file.
assert self.file_position < os.fstat(fid.fileno()).st_size, 'At end of data file.'
# Record if CR+LF characters are being used at the end of lines.
firstline = fid.readline()
if(fid.tell() == self.file_position + 82):
self.cr = True
else:
self.cr = False
# also catch if profile begins with 'Q' == IQuOD formatting
if firstline[0] == 'Q':
self.IQuOD = True
else:
self.IQuOD = False
fid.seek(self.file_position)
# Read the various sections of the profile record.
self._read_primary_header(fid)
self._read_character_data_and_principal_investigator(fid)
self._read_secondary_or_biological_header(fid)
self._read_secondary_or_biological_header(fid, bio=True)
if self.biological_header['Total bytes'] > 0:
self._read_taxonomic_data(fid)
else:
self.taxa = {}
if load_profile_data:
self._read_profile_data(fid)
else:
self.profile_data = []
# Wind forward to the next profile in the file.
self.advance_file_position_to_next_profile(fid)
# ROUTINES THAT READ AND INTERPRET INFORMATION FROM THE FILE
def _read_chars(self, fid, nChars):
# Read characters from the file. If the section
# includes a line feed then an extra character
# is read and the line feed is deleted. Has to cope
# with files with Windows or Linux line endings.
chars = fid.read(nChars)
lfError = True
while lfError:
lfError = False
for iChar, char in enumerate(chars):
if char == '\r' or char == '\n':
iError = iChar
lfError = True
break
if lfError: chars = chars[0:iError] + chars[iError+1:] + fid.read(1)
return chars
def _interpret_data(self, fid, format, dest):
# This routine extracts the information from the ASCII file
# using the formatting information supplied to it and
# stores the result in the dest dictionary.
sigDigits = None
precision = None
for i, item in enumerate(format):
if item[1] == 0:
continue # Skip if not reading anything.
chars = self._read_chars(fid, item[1])
# Check if we need to skip the next few items.
if item[0] == 'Significant digits' and chars == '-':
format[i+1][1] = 0
format[i+2][1] = 0
dest[format[i+3][0]] = None
continue
# Cast to the required data type.
value = item[2](chars)
# Check for special items otherwise store the data
# in the destination dictionary.
if item[0] == 'Bytes in next field':
format[i+1][1] = value
elif item[0] == 'Significant digits':
sigDigits = value
elif item[0] == 'Total digits':
format[i+2][1] = value
elif item[0] == 'Precision':
precision = value
else:
dest[item[0]] = value
if item[0] != 'Precision' and precision is not None:
dest[item[0]] /= 10**precision
dest[item[0] + ' precision'] = precision
dest[item[0] + ' significant digits'] = sigDigits
sigDigits = None
precision = None
return None
def _read_primary_header(self, fid):
# Reads the primary header from the WOD ASCII profile.
prhFormat = [['WOD Version identifier', 1, str],
['Bytes in next field', 1, int],
['Bytes in profile', 0, int],
['Bytes in next field', 1, int],
['WOD unique cast number', 0, int],
['Country code', 2, str],
['Bytes in next field', 1, int],
['Cruise number', 0, int],
['Year', 4, int],
['Month', 2, int],
['Day', 2, int],
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Time', 0, float],
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Latitude', 0, float],
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Longitude', 0, float],
['Bytes in next field', 1, int],
['Number of levels', 0, int],
['Profile type', 1, str],
['Number of variables', 2, int]]
# insert additional data from IQuOD format if appropriate:
if self.IQuOD:
n = prhFormat.index(['Latitude', 0, float]) + 1
prhFormat[n:n] = [
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Latitude_unc', 0, float],
]
n = prhFormat.index(['Longitude', 0, float]) + 1
prhFormat[n:n] = [
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Longitude_unc', 0, float],
]
varFormat = [['Bytes in next field', 1, int],
['Variable code', 0, int],
['Quality control flag for variable', 1, int],
['Bytes in next field', 1, int],
['Number of variable-specific metadata', 0, int]]
metFormat = [['Bytes in next field', 1, int],
['Variable-specific code', 0, int],
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Value', 0, float]]
if self.IQuOD:
metFormat.append(['iMeta', 1, int])
primary_header = {}
self._interpret_data(fid, prhFormat, primary_header)
# Now read variable specific metadata.
primary_header['variables'] = []
for iVar in range(primary_header['Number of variables']):
primary_header['variables'] += [{}]
self._interpret_data(fid, copy.deepcopy(varFormat), primary_header['variables'][iVar])
primary_header['variables'][iVar]['metadata'] = []
for iMetadata in range(primary_header['variables'][iVar]['Number of variable-specific metadata']):
primary_header['variables'][iVar]['metadata'] += [{}]
self._interpret_data(fid, copy.deepcopy(metFormat), primary_header['variables'][iVar]['metadata'][iMetadata])
self.primary_header = primary_header
return None
def _read_character_data_and_principal_investigator(self, fid):
# Reads the character data and principal investigator section
# of the file.
character_data_and_principal_investigator = {}
charFormat1 = [['Bytes in next field', 1, int],
['Total bytes', 0, int]]
charFormat2 = [['Number of entries', 1, int]]
charFormat3 = [['Type of data', 1, int]]
charFormat4 = [['Bytes in next field', 2, int],
['Character data', 0, str]]
charFormat5 = [['Number of PI names', 2, int]]
charFormat6 = [['Bytes in next field', 1, int],
['Variable code', 0, int],
['Bytes in next field', 1, int],
['P.I. code', 0, int]]
self._interpret_data(fid, charFormat1, character_data_and_principal_investigator)
if 'Total bytes' in character_data_and_principal_investigator:
self._interpret_data(fid, charFormat2, character_data_and_principal_investigator)
character_data_and_principal_investigator['entries'] = []
for i in range(character_data_and_principal_investigator['Number of entries']):
character_data_and_principal_investigator['entries'] += [{}]
self._interpret_data(fid, charFormat3, character_data_and_principal_investigator['entries'][i])
if character_data_and_principal_investigator['entries'][i]['Type of data'] < 3:
self._interpret_data(fid, copy.deepcopy(charFormat4), character_data_and_principal_investigator['entries'][i])
else:
self._interpret_data(fid, copy.deepcopy(charFormat5), character_data_and_principal_investigator['entries'][i])
character_data_and_principal_investigator['entries'][i]['PIs'] = []
for j in range(character_data_and_principal_investigator['entries'][i]['Number of PI names']):
character_data_and_principal_investigator['entries'][i]['PIs'] += [{}]
self._interpret_data(fid, copy.deepcopy(charFormat6), character_data_and_principal_investigator['entries'][i]['PIs'][j])
else:
character_data_and_principal_investigator['Total bytes'] = 0
character_data_and_principal_investigator['Number of entries'] = 0
self.character_data_and_principal_investigator = character_data_and_principal_investigator
return None
def _read_secondary_or_biological_header(self, fid, bio=False):
# Reads either the secondary header or the biological
# header. The format of the two are almost identical.
header = {}
format1 = [['Bytes in next field', 1, int],
['Total bytes', 0, int]]
format2 = [['Bytes in next field', 1, int],
['Number of entries', 0, int]]
format3 = [['Bytes in next field', 1, int],
['Code', 0, int],
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Value', 0, float]]
if not bio and self.IQuOD:
format3.append(['iMeta', 1, int])
self._interpret_data(fid, format1, header)
if 'Total bytes' in header:
self._interpret_data(fid, format2, header)
header['entries'] = []
for i in range(header['Number of entries']):
header['entries'] += [{}]
self._interpret_data(fid, copy.deepcopy(format3), header['entries'][i])
else:
header['Total bytes'] = 0
header['Number of entries'] = 0
if bio:
self.biological_header = header
else:
self.secondary_header = header
return None
def _read_taxonomic_data(self, fid):
# Placeholder for a reader for taxa data.
taxa = {}
format1 = [['Bytes in next field', 1, int],
['Number of taxa sets', 0, int]]
format2 = [['Bytes in next field', 1, int],
['Number of entries', 0, int]]
format3 = [['Bytes in next field', 1, int],
['Code', 0, int],
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Value', 0, float],
['Quality control flag', 1, int],
['Originator flag', 1, int]]
self._interpret_data(fid, format1, taxa)
if 'Number of taxa sets' in taxa:
taxa['sets'] = []
for i in range(taxa['Number of taxa sets']):
taxa['sets'] += [{}]
self._interpret_data(fid, copy.deepcopy(format2), taxa['sets'][i])
taxa['sets'][i]['entries'] = []
for j in range(taxa['sets'][i]['Number of entries']):
taxa['sets'][i]['entries'] += [{}]
self._interpret_data(fid, copy.deepcopy(format3), taxa['sets'][i]['entries'][j])
else:
taxa['Number of taxa sets'] = 0
self.taxa = taxa
return None
def _read_profile_data(self, fid):
dFormat1 = [['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Depth', 0, float]]
dFormat2 = [['Depth error code', 1, int],
['Originator depth error flag', 1, int]]
vFormat1 = [['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Value', 0, float]]
vFormat2 = [['Value quality control flag', 1, int],
['Value originator flag', 1, int]]
# insert additional data from IQuOD format if appropriate:
if self.IQuOD:
dFormat2 += [
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['depth_unc', 0, float]
]
vFormat2 += [
['Significant digits', 1, int],
['Total digits', 1, int],
['Precision', 1, int],
['Value_unc', 0, float]
]
data = []
for i in range(self.primary_header['Number of levels']):
data += [{}]
# extract depth
self._interpret_data(fid, copy.deepcopy(dFormat1), data[i])
# flag missing values
if data[i]['Depth'] is not None:
data[i]['Missing'] = False
else:
data[i]['Missing'] = True
continue
self._interpret_data(fid, copy.deepcopy(dFormat2), data[i])
# flag missing depth error values
if 'depth_unc' in data[i] and data[i]['depth_unc'] is not None:
data[i]['Missing_unc'] = False
else:
data[i]['Missing_unc'] = True
# extract variables
data[i]['variables'] = []
for j in range(self.primary_header['Number of variables']):
data[i]['variables'] += [{}]
self._interpret_data(fid, copy.deepcopy(vFormat1), data[i]['variables'][j])
# flag missing variable values; unpack if not missing
if (data[i]['variables'][j]['Value'] is not None):
data[i]['variables'][j]['Missing'] = False
self._interpret_data(fid, copy.deepcopy(vFormat2), data[i]['variables'][j])
# flag missing error values
if 'Value_unc' in data[i]['variables'][j] and data[i]['variables'][j]['Value_unc'] is not None:
data[i]['variables'][j]['Missing_unc'] = False
else:
data[i]['variables'][j]['Missing_unc'] = True
else:
data[i]['variables'][j]['Missing'] = True
self.profile_data = data
return None
# FILE POSITIONING
def _calculate_next_profile_position(self):
# Returns the file position of the next profile.
nLines = self.primary_header['Bytes in profile'] // 80
if (self.primary_header['Bytes in profile'] % 80) > 0: nLines += 1
if self.cr:
mult = 82
else:
mult = 81
return self.file_position + nLines * mult
def advance_file_position_to_next_profile(self, fid):
""" Advance to the next profile in the current file. """
# Each profile record is made up of 80 data characters
# (including blanks at the end of the profile)
# and return characters (LF+CR).
fid.seek(self._calculate_next_profile_position())
return None
def return_file_position_to_start_of_profile(self, fid):
""" Return the file position to the start of the profile. """
fid.seek(self.file_position, 0)
return None
def is_last_profile_in_file(self, fid):
""" Returns true if this is the last profile in the data file. """
return self._calculate_next_profile_position() == os.fstat(fid.fileno()).st_size
# CONVENIENCE FUNCTIONS FOR RETRIEVAL OF INFORMATION FROM A PROFILE
def primary_header_keys(self):
""" Returns a list of keys in the primary header. """
return [d for d in self.primary_header]
def latitude(self):
""" Returns the latitude of the profile. """
return self.primary_header['Latitude']
def latitude_unc(self):
""" Returns the error on the latitude, if available"""
if 'Latitude_unc' in self.primary_header:
return self.primary_header['Latitude_unc']
else:
return None
def longitude(self):
""" Returns the longitude of the profile. """
return self.primary_header['Longitude']
def longitude_unc(self):
""" Returns the error on the longitude, if available"""
if 'Longitude_unc' in self.primary_header:
return self.primary_header['Longitude_unc']
else:
return None
def uid(self):
""" Returns the unique identifier of the profile. """
return self.primary_header['WOD unique cast number']
def n_levels(self):
""" Returns the number of levels in the profile. """
return self.primary_header['Number of levels']
def year(self):
""" Returns the year. """
return self.primary_header['Year']
def month(self):
""" Returns the month. """
return self.primary_header['Month']
def day(self):
""" Returns the day. """
day = self.primary_header['Day']
if day == 0:
return
else:
return day
def time(self):
""" Returns the time. """
return self.primary_header['Time']
def datetime(self):
""" Returns the date and time as a datetime object. """
time = self.time()
if time is None or time < 0 or time >= 24:
time = 0
try:
d = datetime(self.year(), self.month(), self.day()) + \
timedelta(hours=time)
return d
except:
return
def cruise(self):
""" return the cruise number """
return self.primary_header['Cruise number']
def PIs(self):
""" Return PI object, or None if not avilable"""
# find the PI object, if it exits
PIs = None
if 'entries' in self.character_data_and_principal_investigator:
for obj in self.character_data_and_principal_investigator['entries']:
if 'PIs' in obj:
PIs = obj['PIs']
return PIs
def originator_cruise(self):
""" return the originator cruise ID """
# decide if there is an originator cruise code object by looking for something with data type '1' in the character header
cruise = None
if 'entries' in self.character_data_and_principal_investigator:
for obj in self.character_data_and_principal_investigator['entries']:
if 'Type of data' in obj:
if obj['Type of data'] == 1:
cruise = obj['Character data']
return cruise
def originator_station(self):
""" return the originator station ID """
# decide if there is a station code object by looking for something with data type '2' in the character header
station = None
if 'entries' in self.character_data_and_principal_investigator:
for obj in self.character_data_and_principal_investigator['entries']:
if 'Type of data' in obj:
if obj['Type of data'] == 2:
station = obj['Character data']
return station
def extract_secondary_header(self, index):
""" Returns the contents of secondary header <index> if it exists,
otherwise None. """
header = None
for item in self.secondary_header['entries']:
if item['Code'] == index:
header = item['Value']
return header
def originator_flag_type(self):
""" Returns the contents of secondary header 96 if it exists,
otherwise None. """
return self.extract_secondary_header(96)
def probe_type(self):
""" Returns the contents of secondary header 29 if it exists,
otherwise None. """
return self.extract_secondary_header(29)
def z(self):
""" Returns a numpy masked array of depths. """
data = np.ma.array(np.zeros(self.n_levels()), mask=True)
for i in range(self.n_levels()):
if self.profile_data[i]['Missing']: continue
data[i] = self.profile_data[i]['Depth']
return data
def z_unc(self):
"""Returns a numpy array of depth errors, if available"""
data = np.ma.array(np.zeros(self.n_levels()), mask=True)
for i in range(self.n_levels()):
if self.profile_data[i]['Missing_unc']: continue
data[i] = self.profile_data[i]['depth_unc']
return data
def z_level_qc(self, originator=False):
""" Returns a numpy masked array of depth
quality control flags. Set the originator
option if the originator flags are required. """
data = np.ma.array(np.zeros(self.n_levels()), mask=True, dtype=int)
for i in range(self.n_levels()):
if self.profile_data[i]['Missing']: continue
if originator:
data[i] = self.profile_data[i]['Originator depth error flag']
else:
data[i] = self.profile_data[i]['Depth error code']
return data
def var_index(self, code=1, s=False):
""" Returns the variable index for a variable.
Either the variable code can be specified
or s can be set to True to return the salinity
index. Otherwise temperature index is returned."""
if s:
code = 2
index = None
for i, var in enumerate(self.primary_header['variables']):
if var['Variable code'] == code:
assert index is None, 'Appears to be two sets of same data in profile'
index = i
return index
def var_data(self, index):
""" Returns the data values for a variable given the variable index. """
data = np.ma.array(np.zeros(self.n_levels()), mask=True)
if index is not None:
for i in range(self.n_levels()):
if self.profile_data[i]['variables'][index]['Missing']: continue
data[i] = self.profile_data[i]['variables'][index]['Value']
return data
def var_data_unc(self, index):
""" Returns the errors on data values for a variable given the variable index. """
data = np.ma.array(np.zeros(self.n_levels()), mask=True)
if index is not None:
for i in range(self.n_levels()):
if self.profile_data[i]['variables'][index]['Missing'] or self.profile_data[i]['variables'][index]['Missing_unc']: continue
data[i] = self.profile_data[i]['variables'][index]['Value_unc']
return data
def var_metadata(self, index):
""" Returns a list of dicts of metadata associated with a variable denoted by index """
if index is not None:
metadata = []
for m in self.primary_header['variables'][index]['metadata']:
meta = {
'value': m['Value'] / 10**m['Value precision'],
'code': m['Variable-specific code'],
}
if 'iMeta' in m:
meta['iMeta'] = m['iMeta']
else:
meta['iMeta'] = 0
metadata.append(meta)
return metadata
else:
return None
def var_level_qc(self, index, originator=False):
""" Returns the quality control codes for the levels in the profile. """
data = np.ma.array(np.zeros(self.n_levels()), mask=True, dtype=int)
if index is not None:
for i in range(self.n_levels()):
if self.profile_data[i]['variables'][index]['Missing']: continue
if originator:
data[i] = self.profile_data[i]['variables'][index]['Value originator flag']
else:
data[i] = self.profile_data[i]['variables'][index]['Value quality control flag']
return data
def var_profile_qc(self, index, originator=False):
""" Returns the quality control flag for entire cast. """
if index is None: return None
if originator:
return None # There is no originator flag for the entire profile.
else:
return self.primary_header['variables'][index]['Quality control flag for variable']
def var_qc_mask(self, index):
""" Returns a boolean array showing which levels are rejected
by the quality control (values are True). A true is only
put in the array if there is a rejection (not if there is
a missing value)."""
data = np.ma.array(np.zeros(self.n_levels()), mask=False, dtype=bool)
prof = self.var_profile_qc(index)
if prof is not None and prof > 0:
data[:] = True
else:
zqc = self.z_level_qc()
data[(zqc.mask == False) & (zqc > 0)] = True
lqc = self.var_level_qc(index)
data[(lqc.mask == False) & (lqc > 0)] = True
return data
def t(self):
""" Returns a numpy masked array of temperatures. """
index = self.var_index()
return self.var_data(index)
def t_unc(self):
"""Returns a numpy array of temperature errors, if available"""
index = self.var_index()
return self.var_data_unc(index)
def t_qc_mask(self):
""" Returns a boolean array showing which temperature
levels failed quality control. If the entire cast
was rejected then all levels are set to True."""
index = self.var_index()
return self.var_qc_mask(index)
def t_level_qc(self, originator=False):
""" Returns the quality control flag for each temperature level. """
index = self.var_index()
return self.var_level_qc(index, originator=originator)
def t_profile_qc(self, originator=False):
""" Returns the quality control flag for the temperature profile. """
index = self.var_index()
return self.var_profile_qc(index, originator=originator)
def t_metadata(self):
""" return the temperature metadata, if available """
index = self.var_index()
return self.var_metadata(index)
def s(self):
""" Returns a numpy masked array of salinity. """
index = self.var_index(s=True)
return self.var_data(index)
def s_unc(self):
""" Returns a numpy masked array of salinity errors, if available. """
index = self.var_index(s=True)
return self.var_data_unc(index)
def s_qc_mask(self):
""" Returns a boolean array showing which salinity
levels failed quality control. If the entire cast
was rejected then all levels are set to True."""
index = self.var_index(s=True)
return self.var_qc_mask(index)
def s_level_qc(self, originator=False):
""" Returns the quality control flag for each salinity level. """
index = self.var_index(s=True)
return self.var_level_qc(index, originator=originator)
def s_profile_qc(self, originator=False):
""" Returns the quality control flag for the salinity profile. """
index = self.var_index(s=True)
return self.var_profile_qc(index, originator=originator)
def s_metadata(self):
""" return the salinity metadata, if available """
index = self.var_index(s=True)
return self.var_metadata(index)
def oxygen(self):
""" Returns a numpy masked array of oxygen content (mL / L) """
index = self.var_index(3)
return self.var_data(index)
def phosphate(self):
""" Returns a numpy masked array of phosphate content (uM / L) """
index = self.var_index(4)
return self.var_data(index)
def silicate(self):
""" Returns a numpy masked array of silicate content (uM / L) """
index = self.var_index(6)
return self.var_data(index)
def pH(self):
""" Returns a numpy masked array of pH """
index = self.var_index(9)
return self.var_data(index)
def p(self):
""" Returns a numpy masked array of pressures. """
index = self.var_index(25)
return self.var_data(index)
def df(self):
""" Returns level data as a pandas data frame.
Profile metadata recorded as custom attributes on the dataframe.
"""
# populate dataframe with level data
columns = {
"z": self.z(),
"z_level_qc": self.z_level_qc(),
"z_unc": self.z_unc(),
"t": self.t(),
"t_level_qc": self.t_level_qc(),
"t_unc": self.t_unc(),
"s": self.s(),
"s_level_qc": self.s_level_qc(),
"s_unc": self.s_unc(),
"oxygen": self.oxygen(),
"phosphate": self.phosphate(),
"silicate": self.silicate(),
"pH": self.pH(),
"p": self.p()
}
df = pd.DataFrame(columns)
# record profile data in a metadata object on the dataframe
meta = {}
meta["latitude"] = self.latitude()
meta["latitude_unc"] = self.latitude_unc()
meta["longitude"] = self.longitude()
meta["longitude_unc"] = self.longitude_unc()
meta["uid"] = self.uid()
meta["n_levels"] = self.n_levels()
meta["year"] = self.year()
meta["month"] = self.month()
meta["day"] = self.day()
meta["time"] = self.time()
meta["cruise"] = self.cruise()
meta["probe_type"] = self.probe_type()
meta["originator_flag_type"] = self.originator_flag_type()
meta["PIs"] = self.PIs()
meta["originator_station"] = self.originator_station()
meta["originator_cruise"] = self.originator_cruise()
meta["t_metadata"] = self.t_metadata()
meta["s_metadata"] = self.s_metadata()
df.meta = meta
return df
def npdict(self):
""" Returns a dict with keys == function names and
values == returns of those functions with default parameters.
"""
d = {}
# per profile
d['cruise'] = self.cruise()
d['day'] = self.day()
d['latitude'] = self.latitude()
d['latitude_unc'] = self.latitude_unc()
d['longitude'] = self.longitude()
d['longitude_unc'] = self.longitude_unc()
d['month'] = self.month()
d['n_levels'] = self.n_levels()
d['primary_header_keys'] = self.primary_header_keys()
d['probe_type'] = self.probe_type()
d['time'] = self.time()
d['uid'] = self.uid()
d['year'] = self.year()
d['PIs'] = self.PIs()
d['originator_station'] = self.originator_station()
d['originator_cruise'] = self.originator_cruise()
d['originator_flag_type'] = self.originator_flag_type()
d['t_metadata'] = self.t_metadata()
d['s_metadata'] = self.s_metadata()
# per level
d['s'] = self.s()
d['s_unc'] = self.s_unc()
d['s_level_qc'] = self.s_level_qc()
d['s_profile_qc'] = self.s_profile_qc()
d['s_qc_mask'] = self.s_qc_mask()
d['t'] = self.t()
d['t_unc'] = self.t_unc()
d['t_level_qc'] = self.t_level_qc()
d['t_profile_qc'] = self.t_profile_qc()
d['t_qc_mask'] = self.t_qc_mask()
d['z'] = self.z()
d['z_unc'] = self.z_unc()
d['z_level_qc'] = self.z_level_qc()
d['oxygen'] = self.oxygen()
d['phosphate'] = self.phosphate()
d['silicate'] = self.silicate()
d['pH'] = self.pH()
d['p'] = self.p()
return d
def header(self):
""" Returns a data series containing primary header of the current profile """
data = {}
data['latitude'] = self.latitude()
data['latitude_unc'] = self.latitude_unc()
data['longitude'] = self.longitude()
data['longitude_unc'] = self.longitude_unc()
data['uid'] = self.uid()
data['n_levels'] = self.n_levels()
data['year'] = self.year()
data['month'] = self.month()
data['day'] = self.day()
data['time'] = self.time()
data['cruise'] = self.cruise()
data['probe_type'] = self.probe_type()
header = pd.Series(data)
return header
|
{
"content_hash": "41f6191eba8d50c4945af36070d7aea0",
"timestamp": "",
"source": "github",
"line_count": 877,
"max_line_length": 144,
"avg_line_length": 40.43443557582668,
"alnum_prop": 0.5204026959194609,
"repo_name": "BillMills/wodpy",
"id": "ae15e52c7208da02489823cec5b03f03731d02e4",
"size": "35461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wodpy/wod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78896"
}
],
"symlink_target": ""
}
|
from mindwave.parser import ThinkGearParser, TimeSeriesRecorder
import bluetooth
import time
import sys
import argparse
from mindwave.bluetooth_headset import connect_magic, connect_bluetooth_addr
from mindwave.bluetooth_headset import BluetoothError
from example_startup import mindwave_startup
description = """Simple Neurofeedback console application.
Make sure you paired the Mindwave to your computer. You need to
do that pairing for every operating system/user profile you run
seperately.
If you don't know the address, leave it out, and this program will
figure it out, but you need to put the MindWave Mobile headset into
pairing mode first.
"""
if __name__ == '__main__':
extra_args=[dict(name='measure', type=str, nargs='?',
const="attention", default="attention",
help="""Measure you want feedback on. Either "meditation"
or "attention\"""")]
socket, args = mindwave_startup(description=description,
extra_args=extra_args)
if args.measure not in ["attention", "meditation"]:
print("Unknown measure %s" % repr(args.measure))
sys.exit(-1)
recorder = TimeSeriesRecorder()
parser = ThinkGearParser(recorders=[recorder])
if args.measure== 'attention':
measure_name = 'Attention'
else:
measure_name = 'Meditation'
while 1:
time.sleep(0.25)
data = socket.recv(20000)
parser.feed(data)
v = 0
if args.measure == 'attention':
if len(recorder.attention)>0:
v = recorder.attention[-1]
if args.measure == 'meditation':
if len(recorder.meditation)>0:
v = recorder.meditation[-1]
if v>0:
print("BALABLA:",v)
|
{
"content_hash": "620daa6feece082b4f153f2573fd8324",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.6502546689303905,
"repo_name": "akloster/python-mindwave",
"id": "fcdec91d20444729b4363374d7b54812f7f3e8cf",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/console.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "42998"
}
],
"symlink_target": ""
}
|
"""
Launch a Docker image with Ubuntu and LXDE window manager, and
automatically open up the URL in the default web browser.
It also sets up port forwarding for ssh.
"""
# Author: Xiangmin Jiao <xmjiao@gmail.com>
from __future__ import print_function # Only Python 2.x
import sys
import subprocess
import time
import os
owner = "numgeom"
proj = os.path.basename(sys.argv[0]).split("_")[0]
image = owner + "/desktop"
tag = "dev"
projdir = "project"
workdir = "project"
volume = proj + "_project"
def parse_args(description):
"Parse command-line arguments"
import argparse
# Process command-line arguments
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-i",
"--image",
help="The Docker image to use. " + "The default is " + image + ".",
default=image,
)
parser.add_argument(
"-t",
"--tag",
help="Tag of the image. The default is " + tag + ". " +
"If the image already has a tag, its tag prevails.",
default=tag,
)
parser.add_argument(
"-m",
"--matlab",
nargs="?",
metavar="VERSION",
help="Specify whether to use MATLAB. Use -m alone to use R2020a." + \
"Use `-m R2017a` to use R2017a etc.",
const="R2020a",
default="",
)
parser.add_argument(
"-v",
"--volume",
help='A data volume to be mounted at ~/" + projdir + ". ' +
"The default is " + volume + ".",
default=volume,
)
parser.add_argument(
"-w",
"--workdir",
help="The starting work directory in container. " +
"The default is ~/" + workdir + ".",
default=workdir,
)
parser.add_argument(
"-p",
"--pull",
help="Pull the latest Docker image. " + "The default is not to pull.",
action="store_true",
default=False,
)
parser.add_argument(
"-r",
"--reset",
help="Reset configurations to default.",
action="store_true",
default=False,
)
parser.add_argument(
"-c",
"--clear",
help="Clear the project data volume (please use with caution).",
action="store_true",
default=False,
)
parser.add_argument(
"-d",
"--detach",
help="Run in background and print container id",
action="store_true",
default=False,
)
parser.add_argument(
"-s",
"--size",
help="Size of the screen. The default is to use " +
"the current screen size.",
default="",
)
parser.add_argument(
"-n",
"--no-browser",
help="Do not start web browser",
action="store_true",
default=False,
)
parser.add_argument(
"-N",
"--nvidia",
help="Mount the Nvidia card for GPU computation. " +
"(Linux only, experimental, sudo required).",
action="store_true",
default="",
)
parser.add_argument(
"-V",
"--verbose",
help="Enable verbose mode and print debug info to stderr.",
action="store_true",
default=False,
)
parser.add_argument(
"-q",
"--quiet",
help="Disable screen output (some Docker output cannot be disabled).",
action="store_true",
default=False,
)
parser.add_argument(
"-A",
"--args",
help='Additional arguments for the "docker run" command. ' +
"Useful for specifying additional resources or environment variables.",
default="",
)
args = parser.parse_args()
# Append tag to image if the image has no tag
if args.image.find(":") < 0:
if not args.tag:
pass
else:
args.image += ":" + args.tag
return args
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
import random
for i in range(min(5, n)):
yield port + i
for i in range(n - 5):
yield max(1, port + random.randint(-2 * n, 2 * n))
def id_generator(size=6):
"""Generate a container ID"""
import random
import string
chars = string.ascii_lowercase
return proj + "-" + ("".join(random.choice(chars) for _ in range(size)))
def find_free_port(port, retries):
"Find a free port"
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for prt in random_ports(port, retries + 1):
try:
sock.bind(("127.0.0.1", prt))
sock.close()
return prt
except socket.error:
continue
return ""
def wait_net_service(port, timeout=30):
""" Wait for network service to appear.
"""
import socket
for _ in range(timeout * 10):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", port))
except socket.error:
sock.close()
time.sleep(0.1)
continue
else:
sock.close()
time.sleep(3)
return True
def get_screen_resolution():
"""Obtain the local screen resolution."""
try:
if sys.version_info.major > 2:
import tkinter as tk
else:
import Tkinter as tk
root = tk.Tk()
root.withdraw()
width, height = root.winfo_screenwidth(), root.winfo_screenheight()
return str(width) + "x" + str(height)
except BaseException:
return ""
def handle_interrupt(container):
"""Handle keyboard interrupt"""
try:
print("Press Ctrl-C again to terminate the container: ")
time.sleep(5)
print("Invalid response. Resuming...")
except KeyboardInterrupt:
print("*** Stopping the container " + container)
if platform.system() == "Windows":
subprocess.check_output(["docker", "stop", container])
else:
subprocess.Popen(
["docker", "exec", container, "killall", "my_init"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
sys.exit(0)
if __name__ == "__main__":
import webbrowser
import platform
import glob
args = parse_args(description=__doc__)
config = proj + "_" + args.tag + "_config"
if args.quiet:
def print(*args, **kwargs):
"Do nothing"
pass
def stdout_write(*args, **kwargs):
"Do nothing"
pass
def stderr_write(*args, **kwargs):
"Do nothing"
pass
else:
def stdout_write(*args, **kwargs):
"Call sys.stderr.write"
sys.stdout.write(*args, **kwargs)
def stderr_write(*args, **kwargs):
"Call sys.stderr.write"
sys.stderr.write(*args, **kwargs)
pwd = os.getcwd()
homedir = os.path.expanduser("~")
if platform.system() == "Linux":
if subprocess.check_output(["groups"]).find(b"docker") < 0:
print("You are not a member of the docker group. Please add")
print("yourself to the docker group using the following command:")
print(" sudo addgroup $USER docker")
print("Then, log out and log back in before you can use Docker.")
sys.exit(-1)
uid = str(os.getuid())
if uid == "0":
print("You are running as root. This is not safe. " +
"Please run as a regular user.")
sys.exit(-1)
else:
uid = ""
try:
if args.verbose:
stdout_write("Check whether Docker is up and running.\n")
img = subprocess.check_output(["docker", "images", "-q", args.image])
except BaseException:
stderr_write("Docker failed. Please make sure docker was properly " +
"installed and has been started.\n")
sys.exit(-1)
if args.pull or not img:
try:
if args.verbose:
stdout_write("Pulling latest docker image " + args.image +
".\n")
err = subprocess.call(["docker", "pull", args.image])
except BaseException:
err = -1
if err:
sys.exit(err)
# Delete dangling image
if (img and subprocess.check_output(
["docker", "images", "-f", "dangling=true", "-q"]).find(img) >= 0):
subprocess.Popen(["docker", "rmi", "-f", img.decode("utf-8")[:-1]])
docker_user = "ubuntu"
docker_home = "/home/" + docker_user
if args.reset:
try:
if args.verbose:
stdout_write("Removing old docker volume " + config + ".\n")
output = subprocess.check_output(
["docker", "volume", "rm", "-f", config])
except subprocess.CalledProcessError as e:
stderr_write(e.output.decode("utf-8"))
volumes = [
"-v",
pwd + ":" + docker_home + "/shared",
"-v",
config + ":" + docker_home + "/.config",
]
if os.path.exists(homedir + "/.gnupg"):
volumes += ["-v", homedir + "/.gnupg" + ":" + docker_home + "/.gnupg"]
# Mount .gitconfig to Docker image
if os.path.isfile(homedir + "/.gitconfig"):
volumes += [
"-v",
homedir + "/.gitconfig" + ":" + docker_home + "/.gitconfig_host",
]
if args.matlab:
volumes += ["-v", "matlab_bin:/usr/local/MATLAB/"]
if args.tag != "pyng":
volumes += [
"-v", args.volume + ":" + docker_home + "/" + projdir, "-v",
"numgeom2_src:" + docker_home + "/numgeom2", "-v",
"fastsolve_src:" + docker_home + "/fastsolve"
]
vols = [args.volume, "numgeom2_src", "fastsolve_src"]
else:
volumes += ["-v", args.volume + ":" + docker_home + "/" + projdir]
vols = [args.volume]
if args.volume:
if args.clear:
try:
if args.verbose:
stdout_write("Removing old docker volume " + config +
".\n")
output = subprocess.check_output(
["docker", "volume", "rm", "-f", vols])
except subprocess.CalledProcessError as e:
stderr_write(e.output.decode("utf-8"))
if args.workdir[0] == "/":
volumes += ["-w", args.workdir]
else:
volumes += ["-w", docker_home + "/" + args.workdir]
stderr_write("Starting up docker image...\n")
if subprocess.check_output(["docker", "--version"
]).find(b"Docker version 1.") >= 0:
rmflag = "-t"
else:
rmflag = "--rm"
# Determine size of the desktop
if not args.size:
size = get_screen_resolution()
if not size:
# Set default size and disable webbrowser
size = "1440x900"
args.no_browser = True
else:
size = args.size
# Generate a container ID
container = id_generator()
envs = [
"--hostname",
container,
"--env",
"RESOLUT=" + size,
"--env",
"HOST_UID=" + uid,
]
if args.matlab:
envs += ["--env", "MATLAB_VERSION=" + args.matlab]
else:
envs += ["--env", "MATLAB_VERSION="]
# Find a free port for ssh tunning
port_ssh = str(find_free_port(2222, 50))
if not port_ssh:
stderr_write("Error: Could not find a free port.\n")
sys.exit(-1)
envs += ["-p", port_ssh + ":22"]
# Create directory .ssh if not exist
if not os.path.exists(homedir + "/.ssh"):
os.mkdir(homedir + "/.ssh")
if platform.system() != "Windows":
volumes += ["-v", homedir + "/.ssh" + ":" + docker_home + "/.ssh"]
else:
# On Windows, cannot use ~/.ssh directly. Mount it into ~/.ssh-host.
volumes += ["-v", homedir + "/.ssh" + ":" + docker_home + "/.ssh-host"]
devices = []
if args.nvidia:
for d in glob.glob("/dev/nvidia*"):
devices += ["--device", d + ":" + d]
# Start the docker image in the background and pipe the stderr
port_http = str(find_free_port(6080, 50))
port_vnc = str(find_free_port(5950, 50))
if not port_http or not port_vnc:
stderr_write("Error: Could not find a free port.\n")
sys.exit(-1)
cmd = ([
"docker",
"run",
"-d",
rmflag,
"--name",
container,
"--shm-size",
"2g",
"-p",
port_http + ":6080",
"-p",
port_vnc + ":5900",
] + envs + volumes + devices + args.args.split() + [
"--security-opt",
"seccomp=unconfined",
"--cap-add=SYS_PTRACE",
args.image,
"startvnc.sh >> " + docker_home + "/.log/vnc.log",
])
if args.verbose:
stdout_write(" ".join(cmd[:-1]) + ' "' + cmd[-1] + '"\n')
subprocess.call(cmd)
wait_for_url = True
# Wait for user to press Ctrl-C
while True:
try:
if wait_for_url:
# Wait until the file is not empty
while not subprocess.check_output([
"docker", "exec", container, "cat",
docker_home + "/.log/vnc.log"
]):
time.sleep(1)
p = subprocess.Popen(
[
"docker",
"exec",
container,
"tail",
"-F",
docker_home + "/.log/vnc.log",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
# Monitor the stdout to extract the URL
for stdout_line in iter(p.stdout.readline, ""):
ind = stdout_line.find("http://localhost:")
if ind >= 0:
# Open browser if found URL
url = stdout_line.replace(":6080/",
":" + port_http + "/")
stdout_write(url)
passwd = stdout_line[url.find("password=") + 9:]
stdout_write(
"\nFor a better experience, use VNC Viewer (" +
"http://realvnc.com/download/viewer)\n" +
"to connect to localhost:%s with password %s\n" %
(port_vnc, passwd))
if platform.system() == "Windows":
# Copy ssh config files
subprocess.check_output([
"docker",
"exec",
container,
"rsync",
"-rog",
"--chown=ubuntu:ubuntu",
"--chmod=600",
"/home/ubuntu/.ssh-host/",
"/home/ubuntu/.ssh/",
])
stdout_write(
"You can also log into the container using the command\n ssh -X -p "
+ port_ssh + " " + docker_user +
"@localhost -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\n"
+ "with an authorized key in " + homedir +
"/.ssh/authorized_keys.\n")
if not args.no_browser:
wait_net_service(int(port_http))
webbrowser.open(url[ind:-1])
p.stdout.close()
p.terminate()
wait_for_url = False
break
else:
stdout_write(stdout_line)
if args.detach:
print("Started container " + container + " in background.")
print('To terminate it, use "docker stop ' + container + '".')
sys.exit(0)
print("Press Ctrl-C to terminate the container.")
time.sleep(1)
# Wait until the container exits or Ctlr-C is pressed
subprocess.check_output(
["docker", "exec", container, "tail", "-f", "/dev/null"])
sys.exit(0)
except subprocess.CalledProcessError:
try:
# If Docker process no long exists, exit
if args.verbose:
stdout_write(
"Check whether the docker container is running.\n")
if not subprocess.check_output(
["docker", "ps", "-q", "-f", "name=" + container]):
stdout_write("Docker container " + container +
" is no longer running\n")
sys.exit(-1)
else:
time.sleep(1)
continue
except subprocess.CalledProcessError:
stderr_write("Docker container " + container +
" is no longer running\n")
sys.exit(-1)
except KeyboardInterrupt:
handle_interrupt(container)
continue
except KeyboardInterrupt:
handle_interrupt(container)
except OSError:
sys.exit(-1)
|
{
"content_hash": "884b30e601aad22573a11317f61f9687",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 102,
"avg_line_length": 29.64950166112957,
"alnum_prop": 0.4845089360748501,
"repo_name": "numgeom/docker-desktop",
"id": "5485d100828c55430fe25a97821e023bc2aaf645",
"size": "17872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numgeom_desktop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "991"
},
{
"name": "M",
"bytes": "152"
},
{
"name": "Python",
"bytes": "17810"
},
{
"name": "Shell",
"bytes": "1748"
}
],
"symlink_target": ""
}
|
import re
import os
import sys
import datetime
import json
import traceback
import requests
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except Exception:
pass
from six import iteritems
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from .DDMClientBase import DDMClientBase
from rucio.client import Client as RucioClient
from rucio.common.exception import UnsupportedOperation,DataIdentifierNotFound,DataIdentifierAlreadyExists,\
DuplicateRule,DuplicateContent, InvalidObject
from pandaserver.dataservice import DataServiceUtils
from pandaserver.srvcore import CoreUtils
from pandajedi.jediconfig import jedi_config
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# class to access to ATLAS DDM
class AtlasDDMClient(DDMClientBase):
# constructor
def __init__(self,con):
# initialize base class
DDMClientBase.__init__(self,con)
# the list of fatal error
self.fatalErrors = []
# list of blacklisted endpoints
self.blackListEndPoints = []
# time of last update for blacklist
self.lastUpdateBL = None
# how frequently update DN/token map
self.timeIntervalBL = datetime.timedelta(seconds=60*10)
# dict of endpoints
self.endPointDict = {}
# time of last update for endpoint dict
self.lastUpdateEP = None
# how frequently update endpoint dict
self.timeIntervalEP = datetime.timedelta(seconds=60*10)
# pid
self.pid = os.getpid()
# get files in dataset
def getFilesInDataset(self, datasetName, getNumEvents=False, skipDuplicate=True,
ignoreUnknown=False, longFormat=False, lfn_only=False):
methodName = 'getFilesInDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get Rucio API
client = RucioClient()
# extract scope from dataset
scope, dsn = self.extract_scope(datasetName)
if dsn.endswith('/'):
dsn = dsn[:-1]
# get length
tmpMeta = client.get_metadata(scope, dsn)
# get files
fileMap = {}
baseLFNmap = {}
fileSet = set()
for x in client.list_files(scope, dsn, long=longFormat):
# convert to old dict format
lfn = str(x['name'])
if lfn_only:
fileSet.add(lfn)
continue
attrs = {}
attrs['lfn'] = lfn
attrs['chksum'] = "ad:" + str(x['adler32'])
attrs['md5sum'] = attrs['chksum']
attrs['checksum'] = attrs['chksum']
attrs['fsize'] = x['bytes']
attrs['filesize'] = attrs['fsize']
attrs['scope'] = str(x['scope'])
attrs['events'] = str(x['events'])
if longFormat:
attrs['lumiblocknr'] = str(x['lumiblocknr'])
guid = str('%s-%s-%s-%s-%s' % (x['guid'][0:8], x['guid'][8:12], x['guid'][12:16], x['guid'][16:20], x['guid'][20:32]))
attrs['guid'] = guid
# skip duplicated files
if skipDuplicate:
# extract base LFN and attempt number
baseLFN = re.sub('(\.(\d+))$','',lfn)
attNr = re.sub(baseLFN+'\.*','',lfn)
if attNr == '':
# without attempt number
attNr = -1
else:
attNr = int(attNr)
# compare attempt numbers
addMap = False
if baseLFN in baseLFNmap:
# use larger attempt number
oldMap = baseLFNmap[baseLFN]
if oldMap['attNr'] < attNr:
del fileMap[oldMap['guid']]
addMap = True
else:
addMap = True
# append
if not addMap:
continue
baseLFNmap[baseLFN] = {'guid':guid,
'attNr':attNr}
fileMap[guid] = attrs
if lfn_only:
return_list = fileSet
else:
return_list = fileMap
tmpLog.debug('done len={} meta={}'.format(len(return_list), tmpMeta['length']))
if tmpMeta['length'] and tmpMeta['length'] > len(return_list):
errMsg = "file list length mismatch len={} != meta={}".format(len(return_list),
tmpMeta['length'])
tmpLog.error(errMsg)
return self.SC_FAILED, errMsg
return self.SC_SUCCEEDED, return_list
except DataIdentifierNotFound as e:
if ignoreUnknown:
return self.SC_SUCCEEDED, {}
errType = e
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode,'{0} : {1}'.format(methodName, errMsg)
# list dataset replicas
def listDatasetReplicas(self, datasetName, use_vp=False, detailed=False, skip_incomplete_element=False):
methodName = 'listDatasetReplicas'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
if not datasetName.endswith('/'):
# get file list
tmpRet = self.convertOutListDatasetReplicas(datasetName, use_vp=use_vp)
tmpLog.debug('got new '+str(tmpRet))
if detailed:
return self.SC_SUCCEEDED, tmpRet, {datasetName: tmpRet}
return self.SC_SUCCEEDED, tmpRet
else:
# list of attributes summed up
retMap = {}
detailedRetMap = {}
# get constituent datasets
tmpS,dsList = self.listDatasetsInContainer(datasetName)
totalFiles = 0
grandTotal = 0
for tmpName in dsList:
tmpLog.debug(tmpName)
tmp_status, tmp_output = self.getDatasetMetaData(tmpName)
if tmp_status != self.SC_SUCCEEDED:
raise RuntimeError('failed to get metadata with {0}'.format(tmp_output))
try:
totalFiles = tmp_output['length']
if not totalFiles:
totalFiles = 0
except Exception:
totalFiles = 0
tmpRet = self.convertOutListDatasetReplicas(tmpName, use_vp=use_vp,
skip_incomplete_element=skip_incomplete_element)
detailedRetMap[tmpName] = tmpRet
# loop over all sites
for tmpSite,tmpValMap in iteritems(tmpRet):
# add site
retMap.setdefault(tmpSite, [{'found': 0}])
# sum
try:
retMap[tmpSite][-1]['found'] += int(tmpValMap[-1]['found'])
except Exception:
pass
# total
try:
if totalFiles < int(tmpValMap[-1]['total']):
totalFiles = int(tmpValMap[-1]['total'])
except Exception:
pass
grandTotal += totalFiles
# set total
for tmpSite in retMap.keys():
retMap[tmpSite][-1]['total'] = grandTotal
# return
tmpLog.debug('got '+str(retMap))
if detailed:
return self.SC_SUCCEEDED, retMap, detailedRetMap
return self.SC_SUCCEEDED, retMap
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg + traceback.format_exc())
if detailed:
return errCode, '{0} : {1}'.format(methodName, errMsg), None
return errCode, '{0} : {1}'.format(methodName, errMsg)
# list replicas per dataset
def listReplicasPerDataset(self,datasetName,deepScan=False):
methodName = 'listReplicasPerDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start with deepScan={0}'.format(deepScan))
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
datasets = []
if not datasetName.endswith('/'):
datasets = [dsn]
else:
# get constituent datasets
itr = client.list_content(scope,dsn)
datasets = [i['name'] for i in itr]
retMap = {}
for tmpName in datasets:
retMap[tmpName] = self.convertOutListDatasetReplicas(tmpName,deepScan)
tmpLog.debug('got '+str(retMap))
return self.SC_SUCCEEDED,retMap
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# get site property
def getSiteProperty(self,seName,attribute):
methodName = 'getSiteProperty'
methodName += ' pid={0}'.format(self.pid)
self.updateEndPointDict()
try:
retVal = self.endPointDict[seName][attribute]
return self.SC_SUCCEEDED,retVal
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# get site alternateName
def getSiteAlternateName(self,se_name):
self.updateEndPointDict()
if se_name in self.endPointDict:
return [self.endPointDict[se_name]['site']]
return None
# get associated endpoints
def getAssociatedEndpoints(self,altName):
self.updateEndPointDict()
epList = []
for seName,seVal in iteritems(self.endPointDict):
if seVal['site'] == altName:
epList.append(seName)
return epList
# convert token to endpoint
def convertTokenToEndpoint(self,baseSeName,token):
self.updateEndPointDict()
try:
altName = self.getSiteAlternateName(baseSeName)[0]
if altName is not None:
for seName,seVal in iteritems(self.endPointDict):
if seVal['site'] == altName:
# space token
if seVal['token'] == token:
return seName
# pattern matching
if re.search(token,seName) is not None:
return seName
except Exception:
pass
return None
# get cloud for an endpoint
def getCloudForEndPoint(self,endPoint):
self.updateEndPointDict()
if endPoint in self.endPointDict:
return self.endPointDict[endPoint]['cloud']
return None
# check if endpoint is NG
def checkNGEndPoint(self,endPoint,ngList):
for ngPatt in ngList:
if re.search(ngPatt, endPoint) is not None:
return True
return False
def SiteHasCompleteReplica(self, dataset_replica_map, endpoint, total_files_in_dataset):
"""
Checks the #found files at site == #total files at site == #files in dataset. VP is regarded as complete
:return: True or False
"""
try:
if 'vp' in dataset_replica_map[endpoint][-1]:
if dataset_replica_map[endpoint][-1]['vp']:
return True
found_tmp = dataset_replica_map[endpoint][-1]['found']
total_tmp = dataset_replica_map[endpoint][-1]['total']
if found_tmp is not None and total_tmp == found_tmp and total_tmp >= total_files_in_dataset:
return True
except KeyError:
pass
return False
def getAvailableFiles(self, dataset_spec, site_endpoint_map, site_mapper, check_LFC=False,
check_completeness=True, storage_token=None, complete_only=False,
use_vp=True, file_scan_in_container=True):
"""
:param dataset_spec: dataset spec object
:param site_endpoint_map: panda sites to ddm endpoints map. The list of panda sites includes the ones to scan
:param site_mapper: site mapper object
:param check_LFC: check/ask Tadashi/probably obsolete
:param check_completeness:
:param storage_token:
:param complete_only: check only for complete replicas
:param use_vp: use virtual placement
:param file_scan_in_container: enable file lookup for container
TODO: do we need NG, do we need alternate names
TODO: the storage_token is not used anymore
:return:
"""
# make logger
method_name = 'getAvailableFiles'
method_name += ' pid={0}'.format(self.pid)
method_name += ' < jediTaskID={0} datasetID={1} >'.format(dataset_spec.jediTaskID, dataset_spec.datasetID)
tmp_log = MsgWrapper(logger, method_name)
loopStart = datetime.datetime.utcnow()
try:
tmp_log.debug('start datasetName={} check_completeness={} nFiles={} nSites={} '
'complete_only={}'.format(
dataset_spec.datasetName,
check_completeness,
len(dataset_spec.Files),
len(site_endpoint_map),
complete_only))
# update the definition of all endpoints from AGIS
self.updateEndPointDict()
# get the file map
tmp_status, tmp_output = self.getDatasetMetaData(dataset_spec.datasetName)
if tmp_status != self.SC_SUCCEEDED:
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.error('failed in {} sec to get metadata with {}'.format(regTime.seconds, tmp_output))
return tmp_status, tmp_output
total_files_in_dataset = tmp_output['length']
if total_files_in_dataset is None:
total_files_in_dataset = 0
if tmp_output['did_type'] == 'CONTAINER':
is_container = True
else:
is_container = False
# get the dataset replica map
tmp_status, tmp_output, detailed_replica_map = self.listDatasetReplicas(dataset_spec.datasetName,
use_vp=use_vp,
detailed=True)
if tmp_status != self.SC_SUCCEEDED:
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.error('failed in {} sec to get dataset replicas with {}'.format(regTime.seconds, tmp_output))
return tmp_status, tmp_output
dataset_replica_map = tmp_output
# collect GUIDs and LFNs
file_map = {} # GUID to LFN
lfn_filespec_map = {} # LFN to file spec
scope_map = {} # LFN to scope list
for tmp_file in dataset_spec.Files:
file_map[tmp_file.GUID] = tmp_file.lfn
lfn_filespec_map.setdefault(tmp_file.lfn, [])
lfn_filespec_map[tmp_file.lfn].append(tmp_file)
scope_map[tmp_file.lfn] = tmp_file.scope
complete_replica_map = {}
endpoint_storagetype_map = {}
rse_list = []
# figure out complete replicas and storage types
for site_name, endpoint_list in iteritems(site_endpoint_map):
tmp_site_spec = site_mapper.getSite(site_name)
has_complete = False
tmp_rse_list = []
# loop over all endpoints
for endpoint in endpoint_list:
# storage type
tmp_status, is_tape = self.getSiteProperty(endpoint, 'is_tape')
if is_tape:
storage_type = 'localtape'
else:
storage_type = 'localdisk'
if self.SiteHasCompleteReplica(dataset_replica_map, endpoint, total_files_in_dataset) \
or (endpoint in dataset_replica_map and not check_completeness) \
or DataServiceUtils.isCachedFile(dataset_spec.datasetName, tmp_site_spec):
complete_replica_map[endpoint] = storage_type
has_complete = True
# no scan for many-time datasets or disabled completeness check
if dataset_spec.isManyTime() \
or (not check_completeness and endpoint not in dataset_replica_map) \
or complete_only:
continue
# disable file lookup if unnecessary
if endpoint not in rse_list and (file_scan_in_container or not is_container):
tmp_rse_list.append(endpoint)
endpoint_storagetype_map[endpoint] = storage_type
# add to list to trigger file lookup if complete replica is unavailable
if not has_complete and tmp_rse_list:
rse_list += tmp_rse_list
# get the file locations from Rucio
if len(rse_list) > 0:
tmp_log.debug('lookup file replicas in Rucio for RSEs: {0}'.format(rse_list))
tmp_status, rucio_lfn_to_rse_map = self.jedi_list_replicas(file_map, rse_list, scopes=scope_map)
tmp_log.debug('lookup file replicas return status: {0}'.format(str(tmp_status)))
if tmp_status != self.SC_SUCCEEDED:
raise RuntimeError(rucio_lfn_to_rse_map)
else:
rucio_lfn_to_rse_map = dict()
if not file_scan_in_container and is_container:
# remove incomplete
detailed_comp_replica_map = dict()
for tmp_ds_name, tmp_ds_value in iteritems(detailed_replica_map):
new_map = {}
for tmp_k, tmp_v in iteritems(tmp_ds_value):
if tmp_v[0]['total'] and tmp_v[0]['total'] == tmp_v[0]['found']:
new_map[tmp_k] = tmp_v
if new_map:
detailed_comp_replica_map[tmp_ds_name] = new_map
# make file list from detailed replica map
files_in_container = {}
for tmp_ds_name in detailed_comp_replica_map.keys():
tmp_status, tmp_files = self.getFilesInDataset(tmp_ds_name, ignoreUnknown=True, lfn_only=True)
if tmp_status != self.SC_SUCCEEDED:
raise RuntimeError(tmp_files)
for tmp_lfn in tmp_files:
files_in_container[tmp_lfn] = tmp_ds_name
for tmp_file in dataset_spec.Files:
if tmp_file.lfn in files_in_container and \
files_in_container[tmp_file.lfn] in detailed_comp_replica_map:
rucio_lfn_to_rse_map[tmp_file.lfn] = \
detailed_comp_replica_map[files_in_container[tmp_file.lfn]]
# initialize the return map and add complete/cached replicas
return_map = {}
checked_dst = set()
for site_name, tmp_endpoints in iteritems(site_endpoint_map):
return_map.setdefault(site_name, {'localdisk': [], 'localtape': [], 'cache': [], 'remote': []})
tmp_site_spec = site_mapper.getSite(site_name)
# check if the dataset is cached
if DataServiceUtils.isCachedFile(dataset_spec.datasetName, tmp_site_spec):
# add to cached file list
return_map[site_name]['cache'] += dataset_spec.Files
# complete replicas
if not check_LFC:
for tmp_endpoint in tmp_endpoints:
if tmp_endpoint in complete_replica_map:
storage_type = complete_replica_map[tmp_endpoint]
return_map[site_name][storage_type] += dataset_spec.Files
checked_dst.add(site_name)
# loop over all available LFNs
available_lfns = list(rucio_lfn_to_rse_map.keys())
available_lfns.sort()
for tmp_lfn in available_lfns:
tmp_filespec_list = lfn_filespec_map[tmp_lfn]
tmp_filespec = lfn_filespec_map[tmp_lfn][0]
for site in site_endpoint_map:
for endpoint in site_endpoint_map[site]:
if endpoint in rucio_lfn_to_rse_map[tmp_lfn] and endpoint in endpoint_storagetype_map:
storage_type = endpoint_storagetype_map[endpoint]
if tmp_filespec not in return_map[site][storage_type]:
return_map[site][storage_type] += tmp_filespec_list
checked_dst.add(site)
break
# aggregate all types of storage types into the 'all' key
for site, storage_type_files in iteritems(return_map):
site_all_file_list = set()
for storage_type, file_list in iteritems(storage_type_files):
for tmp_file_spec in file_list:
site_all_file_list.add(tmp_file_spec)
storage_type_files['all'] = site_all_file_list
# dump for logging
logging_str = ''
for site, storage_type_file in iteritems(return_map):
logging_str += '{0}:('.format(site)
for storage_type, file_list in iteritems(storage_type_file):
logging_str += '{0}:{1},'.format(storage_type, len(file_list))
logging_str = logging_str[:-1]
logging_str += ') '
logging_str = logging_str[:-1]
tmp_log.debug(logging_str)
# return
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.debug('done in {} sec'.format(regTime.seconds))
return self.SC_SUCCEEDED, return_map
except Exception as e:
regTime = datetime.datetime.utcnow() - loopStart
error_message = 'failed in {} sec with {} {} '.format(regTime.seconds, str(e), traceback.format_exc())
tmp_log.error(error_message)
return self.SC_FAILED, '{0}.{1} {2}'.format(self.__class__.__name__, method_name, error_message)
def jedi_list_replicas(self, files, storages, scopes={}):
try:
method_name = 'jedi_list_replicas'
method_name += ' pid={0}'.format(self.pid)
tmp_log = MsgWrapper(logger, method_name)
client = RucioClient()
i_guid = 0
max_guid = 1000 # do 1000 guids in each Rucio call
lfn_to_rses_map = {}
dids = []
i_loop = 0
startTime = datetime.datetime.utcnow()
tmp_log.debug('start')
for guid, lfn in iteritems(files):
i_guid += 1
scope = scopes[lfn]
dids.append({'scope': scope, 'name': lfn})
if len(dids) % max_guid == 0 or i_guid == len(files):
i_loop += 1
tmp_log.debug('lookup {} start'.format(i_loop))
loopStart = datetime.datetime.utcnow()
x = client.list_replicas(dids, ['srm', 'gsiftp'], resolve_archives=True)
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.info('rucio.list_replicas took {0} sec for {1} files'.format(regTime.seconds, len(dids)))
loopStart = datetime.datetime.utcnow()
for tmp_dict in x:
try:
tmp_LFN = str(tmp_dict['name'])
lfn_to_rses_map[tmp_LFN] = tmp_dict['rses']
except Exception:
pass
# reset the dids list for the next bulk for Rucio
dids = []
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.debug('lookup {} end in {} sec'.format(i_loop, regTime.seconds))
regTime = datetime.datetime.utcnow() - startTime
tmp_log.debug('end in {} sec'.format(regTime.seconds))
except Exception as e:
regTime = datetime.datetime.utcnow() - startTime
tmp_log.error('failed in {} sec'.format(regTime.seconds))
return self.SC_FAILED, "file lookup failed with {} {}".format(str(e), traceback.format_exc())
return self.SC_SUCCEEDED, lfn_to_rses_map
# list file replicas with dataset name/scope
def jedi_list_replicas_with_dataset(self, datasetName):
try:
scope, dsn = self.extract_scope(datasetName)
client = RucioClient()
lfn_to_rses_map = {}
dids = []
dids = [{'scope': scope, 'name': dsn}]
for tmp_dict in client.list_replicas(dids, ['srm', 'gsiftp'], resolve_archives=True):
try:
tmp_LFN = str(tmp_dict['name'])
except Exception:
continue
lfn_to_rses_map[tmp_LFN] = tmp_dict['rses']
except Exception:
err_type, err_value = sys.exc_info()[:2]
return self.SC_FAILED, "file lookup failed with {0}:{1} {2}".format(err_type, err_value, traceback.format_exc())
return self.SC_SUCCEEDED, lfn_to_rses_map
# get dataset metadata
def getDatasetMetaData(self, datasetName, ignore_missing=False):
# make logger
methodName = 'getDatasetMetaData'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get
if dsn.endswith('/'):
dsn = dsn[:-1]
tmpRet = client.get_metadata(scope,dsn)
# set state
if tmpRet['is_open'] is True and tmpRet['did_type'] != 'CONTAINER':
tmpRet['state'] = 'open'
else:
tmpRet['state'] = 'closed'
tmpLog.debug(str(tmpRet))
return self.SC_SUCCEEDED,tmpRet
except DataIdentifierNotFound as e:
errType = e
errCode, errMsg = self.checkError(errType)
if ignore_missing:
tmpLog.debug(errMsg)
tmpRet = {}
tmpRet['state'] = 'missing'
return self.SC_SUCCEEDED, tmpRet
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# check error
def checkError(self,errType):
errMsg = '{} : {}'.format(str(type(errType)), str(errType))
if type(errType) in self.fatalErrors:
# fatal error
return self.SC_FATAL, errMsg
else:
# temporary error
return self.SC_FAILED, errMsg
# list dataset/container
def listDatasets(self,datasetName,ignorePandaDS=True):
methodName = 'listDatasets'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
filters = {}
if dsn.endswith('/'):
dsn = dsn[:-1]
filters['name'] = dsn
dsList = set()
for name in client.list_dids(scope, filters, 'dataset'):
dsList.add('%s:%s' % (scope, name))
for name in client.list_dids(scope, filters, 'container'):
dsList.add('%s:%s/' % (scope, name))
dsList = list(dsList)
# ignore panda internal datasets
if ignorePandaDS:
tmpDsList = []
for tmpDS in dsList:
if re.search('_dis\d+$',tmpDS) is not None or re.search('_sub\d+$',tmpDS):
continue
tmpDsList.append(tmpDS)
dsList = tmpDsList
tmpLog.debug('got '+str(dsList))
return self.SC_SUCCEEDED,dsList
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# register new dataset/container
def registerNewDataset(self,datasetName,backEnd='rucio',location=None,lifetime=None,metaData=None,resurrect=False):
methodName = 'registerNewDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start location={0} lifetime={1}'.format(location,lifetime))
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# lifetime
if lifetime is not None:
lifetime=lifetime*86400
# register
if not datasetName.endswith('/'):
# register dataset
name = dsn
client.add_dataset(scope,name,meta=metaData,lifetime=lifetime,rse=location)
else:
# register container
name = dsn
client.add_container(scope=scope,name=name)
except DataIdentifierAlreadyExists:
pass
except InvalidObject as e:
errMsg = '{} : {}'.format(InvalidObject, str(e))
tmpLog.error(errMsg)
return self.SC_FATAL, '{0} : {1}'.format(methodName, errMsg)
except Exception as e:
errType = e
resurrected = False
# try to resurrect
if 'DELETED_DIDS_PK violated' in str(errType) and resurrect:
try:
client.resurrect([{'scope': scope, 'name': name}])
resurrected = True
except Exception:
pass
if not resurrected:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode,'{0} : {1}'.format(methodName,errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# wrapper for list_content
def wp_list_content(self,client,scope,dsn):
if dsn.endswith('/'):
dsn = dsn[:-1]
retList = []
# get contents
for data in client.list_content(scope,dsn):
if data['type'] == 'CONTAINER':
retList += self.wp_list_content(client,data['scope'],data['name'])
elif data['type'] == 'DATASET':
retList.append('{0}:{1}'.format(data['scope'],data['name']))
else:
pass
return retList
# list datasets in container
def listDatasetsInContainer(self,containerName):
methodName = 'listDatasetsInContainer'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <containerName={0}>'.format(containerName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(containerName)
# get contents
dsList = self.wp_list_content(client,scope,dsn)
tmpLog.debug('got '+str(dsList))
return self.SC_SUCCEEDED,dsList
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# expand Container
def expandContainer(self,containerName):
methodName = 'expandContainer'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <contName={0}>'.format(containerName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
dsList = []
# get real names
tmpS,tmpRealNameList = self.listDatasets(containerName)
if tmpS != self.SC_SUCCEEDED:
tmpLog.error('failed to get real names')
return tmpS,tmpRealNameList
# loop over all names
for tmpRealName in tmpRealNameList:
# container
if tmpRealName.endswith('/'):
# get contents
tmpS,tmpO = self.listDatasetsInContainer(tmpRealName)
if tmpS != self.SC_SUCCEEDED:
tmpLog.error('failed to get datasets in {0}'.format(tmpRealName))
return tmpS,tmpO
else:
tmpO = [tmpRealName]
# collect dataset names
for tmpStr in tmpO:
if tmpStr not in dsList:
dsList.append(tmpStr)
dsList.sort()
# return
tmpLog.debug('got {0}'.format(str(dsList)))
return self.SC_SUCCEEDED,dsList
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# add dataset to container
def addDatasetsToContainer(self,containerName,datasetNames,backEnd='rucio'):
methodName = 'addDatasetsToContainer'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <contName={0}>'.format(containerName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get Rucio API
client = RucioClient()
c_scope,c_name = self.extract_scope(containerName)
if c_name.endswith('/'):
c_name = c_name[:-1]
dsns = []
for ds in datasetNames:
ds_scope, ds_name = self.extract_scope(ds)
dsn = {'scope': ds_scope, 'name': ds_name}
dsns.append(dsn)
try:
# add datasets
client.add_datasets_to_container(scope=c_scope, name=c_name, dsns=dsns)
except DuplicateContent:
# add datasets one by one
for ds in dsns:
try:
client.add_datasets_to_container(scope=c_scope, name=c_name, dsns=[ds])
except DuplicateContent:
pass
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# get latest DBRelease
def getLatestDBRelease(self):
methodName = 'getLatestDBRelease'
methodName += ' pid={0}'.format(self.pid)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('trying to get the latest version number of DBR')
# get ddo datasets
tmpStat,ddoDatasets = self.listDatasets('ddo.*')
if tmpStat != self.SC_SUCCEEDED or ddoDatasets == {}:
tmpLog.error('failed to get a list of DBRelease datasets from DDM')
return self.SC_FAILED,None
# reverse sort to avoid redundant lookup
ddoDatasets.sort()
ddoDatasets.reverse()
# extract version number
latestVerMajor = 0
latestVerMinor = 0
latestVerBuild = 0
latestVerRev = 0
latestDBR = ''
for tmpName in ddoDatasets:
# ignore CDRelease
if ".CDRelease." in tmpName:
continue
# ignore user
if tmpName.startswith('ddo.user'):
continue
# use Atlas.Ideal
if ".Atlas.Ideal." not in tmpName:
continue
match = re.search('\.v(\d+)(_*[^\.]*)$',tmpName)
if match is None:
tmpLog.warning('cannot extract version number from %s' % tmpName)
continue
# ignore special DBRs
if match.group(2) != '':
continue
# get major,minor,build,revision numbers
tmpVerStr = match.group(1)
tmpVerMajor = 0
tmpVerMinor = 0
tmpVerBuild = 0
tmpVerRev = 0
try:
tmpVerMajor = int(tmpVerStr[0:2])
except Exception:
pass
try:
tmpVerMinor = int(tmpVerStr[2:4])
except Exception:
pass
try:
tmpVerBuild = int(tmpVerStr[4:6])
except Exception:
pass
try:
tmpVerRev = int(tmpVerStr[6:])
# use only three digit DBR
continue
except Exception:
pass
# compare
if latestVerMajor > tmpVerMajor:
continue
elif latestVerMajor == tmpVerMajor:
if latestVerMinor > tmpVerMinor:
continue
elif latestVerMinor == tmpVerMinor:
if latestVerBuild > tmpVerBuild:
continue
elif latestVerBuild == tmpVerBuild:
if latestVerRev > tmpVerRev:
continue
# check if well replicated
tmpStat,ddoReplicas = self.listDatasetReplicas(tmpName)
if ddoReplicas == []:
continue
# higher or equal version
latestVerMajor = tmpVerMajor
latestVerMinor = tmpVerMinor
latestVerBuild = tmpVerBuild
latestVerRev = tmpVerRev
latestDBR = tmpName
# failed
if latestDBR == '':
tmpLog.error('failed to get the latest version of DBRelease dataset from DDM')
return self.SC_FAILED,None
tmpLog.debug('use {0}'.format(latestDBR))
return self.SC_SUCCEEDED,latestDBR
# freeze dataset
def freezeDataset(self,datasetName,ignoreUnknown=False):
methodName = 'freezeDataset'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# check metadata to avoid a bug in rucio
if dsn.endswith('/'):
dsn = dsn[:-1]
tmpRet = client.get_metadata(scope,dsn)
# close
client.set_status(scope,dsn,open=False)
except UnsupportedOperation:
pass
except DataIdentifierNotFound as e:
errType = e
if ignoreUnknown:
pass
else:
isOK = False
except Exception as e:
errType = e
isOK = False
if isOK:
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
else:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# finger
def finger(self, dn):
methodName = 'finger'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} userName={1}'.format(methodName, dn)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
userInfo = None
x509_user_name = CoreUtils.get_bare_dn(dn)
oidc_user_name = CoreUtils.get_id_from_dn(dn)
if oidc_user_name == x509_user_name:
oidc_user_name = None
else:
x509_user_name = None
for accType in ['USER', 'GROUP']:
if x509_user_name is not None:
userName = x509_user_name
for i in client.list_accounts(account_type=accType, identity=userName):
userInfo = {'nickname':i['account'],
'email':i['email']}
break
if userInfo is None:
# remove /CN=\d
userName = CoreUtils.get_bare_dn(dn, keep_digits=False)
for i in client.list_accounts(account_type=accType, identity=userName):
userInfo = {'nickname':i['account'],
'email':i['email']}
break
else:
userName = oidc_user_name
try:
if userInfo is None:
i = client.get_account(userName)
userInfo = {'nickname': i['account'],
'email': i['email']}
except Exception:
pass
if userInfo is not None:
break
if userInfo is None:
tmpLog.error('failed to get account info')
return self.SC_FAILED,None
tmpRet = userInfo
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done with '+str(tmpRet))
return self.SC_SUCCEEDED,tmpRet
# set dataset metadata
def setDatasetMetadata(self,datasetName,metadataName,metadaValue):
methodName = 'setDatasetMetadata'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1} metadataName={2} metadaValue={3}'.format(methodName,datasetName,
metadataName,metadaValue)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# set
client.set_metadata(scope,dsn,metadataName,metadaValue)
except (UnsupportedOperation,DataIdentifierNotFound):
pass
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# register location
def registerDatasetLocation(self,datasetName,location,lifetime=None,owner=None,backEnd='rucio',
activity=None,grouping=None,weight=None,copies=1,
ignore_availability=True):
methodName = 'registerDatasetLocation'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1} location={2}'.format(methodName,datasetName,location)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# lifetime
if lifetime is not None:
lifetime = lifetime * 86400
elif 'SCRATCHDISK' in location:
lifetime = 14 * 86400
# get owner
if owner is not None:
tmpStat,userInfo = self.finger(owner)
if tmpStat != self.SC_SUCCEEDED:
raise RuntimeError('failed to get nickname for {0}'.format(owner))
owner = userInfo['nickname']
else:
owner = client.account
if grouping is None:
grouping = 'DATASET'
# add rule
dids = []
did = {'scope': scope, 'name': dsn}
dids.append(did)
locList = location.split(',')
for tmpLoc in locList:
client.add_replication_rule(dids=dids,copies=copies,rse_expression=tmpLoc,lifetime=lifetime,
grouping=grouping,account=owner,locked=False,notify='N',
ignore_availability=ignore_availability,activity=activity,
weight=weight)
except DuplicateRule:
pass
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done for owner={}'.format(owner))
return self.SC_SUCCEEDED,True
# delete dataset
def deleteDataset(self,datasetName,emptyOnly,ignoreUnknown=False):
methodName = 'deleteDataset'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
retStr = ''
nFiles = -1
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get the number of files
if emptyOnly:
nFiles = 0
for x in client.list_files(scope, dsn):
nFiles += 1
# erase
if not emptyOnly or nFiles == 0:
client.set_metadata(scope=scope, name=dsn, key='lifetime', value=0.0001)
retStr = 'deleted {0}'.format(datasetName)
else:
retStr = 'keep {0} where {1} files are available'.format(datasetName,nFiles)
except DataIdentifierNotFound as e:
errType = e
if ignoreUnknown:
pass
else:
isOK = False
except Exception as e:
isOK = False
errType = e
if isOK:
tmpLog.debug('done')
return self.SC_SUCCEEDED,retStr
else:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode,'{0} : {1}'.format(methodName, errMsg)
# register subscription
def registerDatasetSubscription(self,datasetName,location,activity,lifetime=None,
asynchronous=False):
methodName = 'registerDatasetSubscription'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1} location={2} activity={3} asyn={4}'.format(methodName,datasetName,
location,activity,asynchronous)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
if lifetime is not None:
lifetime = lifetime*24*60*60
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
dids = [{'scope': scope, 'name': dsn}]
# check if a replication rule already exists
for rule in client.list_did_rules(scope=scope, name=dsn):
if (rule['rse_expression'] == location) and (rule['account'] == client.account):
return True
client.add_replication_rule(dids=dids,copies=1,rse_expression=location,weight=None,
lifetime=lifetime, grouping='DATASET', account=client.account,
locked=False, notify='N',ignore_availability=True,
activity=activity,asynchronous=asynchronous)
except DuplicateRule:
pass
except DataIdentifierNotFound:
pass
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# find lost files
def findLostFiles(self, datasetName, fileMap):
methodName = 'findLostFiles'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get replicas
tmpStat,tmpOut = self.listDatasetReplicas(datasetName)
if tmpStat != self.SC_SUCCEEDED:
tmpLog.error('faild to get dataset replicas with {0}'.format(tmpOut))
return tmpStat,tmpOut
# check if complete replica is available
hasCompReplica = False
datasetReplicaMap = tmpOut
for tmpEndPoint in datasetReplicaMap.keys():
if datasetReplicaMap[tmpEndPoint][-1]['found'] is not None and \
datasetReplicaMap[tmpEndPoint][-1]['total'] == datasetReplicaMap[tmpEndPoint][-1]['found']:
hasCompReplica = True
break
# no lost files
if hasCompReplica:
tmpLog.debug('done with no lost files')
return self.SC_SUCCEEDED,{}
# get LFNs and scopes
lfnMap = {}
scopeMap = {}
for tmpGUID in fileMap.keys():
tmpLFN = fileMap[tmpGUID]['lfn']
lfnMap[tmpGUID] = tmpLFN
scopeMap[tmpLFN] = fileMap[tmpGUID]['scope']
# get SURLs
seList = list(datasetReplicaMap.keys())
tmpStat, tmpRetMap = self.jedi_list_replicas_with_dataset(datasetName)
if tmpStat != self.SC_SUCCEEDED:
tmpLog.error('failed to get SURLs with {0}'.format(tmpRetMap))
return tmpStat,tmpRetMap
# look for missing files
lfnMap = {}
for tmpGUID,tmpLFN in iteritems(lfnMap):
if tmpLFN not in tmpRetMap:
lfnMap[tmpGUID] = tmpLFN
tmpLog.debug('done with lost '+','.join(str(tmpLFN) for tmpLFN in lfnMap.values()))
return self.SC_SUCCEEDED,lfnMap
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# convert output of listDatasetReplicas
def convertOutListDatasetReplicas(self, datasetName, usefileLookup=False, use_vp=False,
skip_incomplete_element=False):
retMap = {}
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get replicas
itr = client.list_dataset_replicas(scope,dsn,deep=usefileLookup)
items = []
for item in itr:
if 'vp' not in item:
item['vp'] = False
items.append(item)
# deep lookup if shallow gave nothing
if items == [] and not usefileLookup:
itr = client.list_dataset_replicas(scope,dsn,deep=True)
for item in itr:
if 'vp' not in item:
item['vp'] = False
items.append(item)
# VP
if use_vp:
itr = client.list_dataset_replicas_vp(scope, dsn)
for item in itr:
if item['vp']:
# add dummy
if "length" not in item:
item["length"] = 1
if "available_length" not in item:
item["available_length"] = 1
if "bytes" not in item:
item["bytes"] = 1
if "available_bytes" not in item:
item["available_bytes"] = 1
if "site" in item and "rse" not in item:
item["rse"] = item["site"]
items.append(item)
for item in items:
rse = item["rse"]
if skip_incomplete_element and (not item["available_length"] or item["length"] != item["available_length"]):
continue
retMap[rse] = [{'total':item["length"],
'found':item["available_length"],
'tsize':item["bytes"],
'asize':item["available_bytes"],
'vp': item["vp"],
'immutable':1}]
return retMap
# delete files from dataset
def deleteFilesFromDataset(self,datasetName,filesToDelete):
methodName = 'deleteFilesFromDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# open dataset
try:
client.set_status(scope,dsn,open=True)
except UnsupportedOperation:
pass
# exec
client.detach_dids(scope=scope, name=dsn, dids=filesToDelete)
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# extract scope
def extract_scope(self, dsn):
if dsn.endswith('/'):
dsn = re.sub('/$', '', dsn)
if ':' in dsn:
return dsn.split(':')[:2]
scope = dsn.split('.')[0]
if dsn.startswith('user') or dsn.startswith('group'):
scope = ".".join(dsn.split('.')[0:2])
return scope,dsn
# open dataset
def openDataset(self,datasetName):
methodName = 'openDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# open dataset
try:
client.set_status(scope,dsn,open=True)
except (UnsupportedOperation,DataIdentifierNotFound):
pass
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# update backlist
def updateBlackList(self):
methodName = 'updateBlackList'
methodName += ' pid={0}'.format(self.pid)
tmpLog = MsgWrapper(logger,methodName)
# check freashness
timeNow = datetime.datetime.utcnow()
if self.lastUpdateBL is not None and timeNow-self.lastUpdateBL < self.timeIntervalBL:
return
self.lastUpdateBL = timeNow
# get json
try:
tmpLog.debug('start')
with open('/cvmfs/atlas.cern.ch/repo/sw/local/etc/cric_ddmblacklisting.json') as f:
ddd = json.load(f)
self.blackListEndPoints = \
[k for k in ddd if 'write_wan' in ddd[k] and ddd[k]['write_wan']["status"]["value"] == 'OFF']
tmpLog.debug('{0} endpoints blacklisted'.format(len(self.blackListEndPoints)))
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
return
# check if the endpoint is backlisted
def isBlackListedEP(self,endPoint):
methodName = 'isBlackListedEP'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <endPoint={0}>'.format(endPoint)
tmpLog = MsgWrapper(logger,methodName)
try:
# update BL
self.updateBlackList()
if endPoint in self.blackListEndPoints:
return self.SC_SUCCEEDED,True
except Exception:
pass
return self.SC_SUCCEEDED,False
# get disk usage at RSE
def getRseUsage(self,rse,src='srm'):
methodName = 'getRseUsage'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <rse={0}>'.format(rse)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
retMap = {}
try:
# get rucio API
client = RucioClient()
# get info
itr = client.get_rse_usage(rse)
# look for srm
for item in itr:
if item['source'] == src:
try:
total = item['total']/1024/1024/1024
except Exception:
total = None
try:
used = item['used']/1024/1024/1024
except Exception:
used = None
try:
free = item['free']/1024/1024/1024
except Exception:
free = None
retMap = {'total':total,
'used':used,
'free':free}
break
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done {0}'.format(str(retMap)))
return self.SC_SUCCEEDED,retMap
# update endpoint dict
def updateEndPointDict(self):
methodName = 'updateEndPointDict'
methodName += ' pid={0}'.format(self.pid)
tmpLog = MsgWrapper(logger,methodName)
# check freshness
timeNow = datetime.datetime.utcnow()
if self.lastUpdateEP is not None and timeNow-self.lastUpdateEP < self.timeIntervalEP:
return
self.lastUpdateEP = timeNow
# get json
try:
tmpLog.debug('start')
if hasattr(jedi_config.ddm, 'endpoints_json_path'):
tmp_path = jedi_config.ddm.endpoints_json_path
else:
tmp_path = '/cvmfs/atlas.cern.ch/repo/sw/local/etc/cric_ddmendpoints.json'
if tmp_path.startswith('http'):
ddd = requests.get(tmp_path, verify=False).json()
else:
with open(tmp_path) as f:
ddd = json.load(f)
self.endPointDict = {k: ddd[k] for k in ddd if ddd[k]['state'] == 'ACTIVE'}
tmpLog.debug('got {0} endpoints '.format(len(self.endPointDict)))
except Exception as e:
errStr = 'failed to update EP with {0}'.format(str(e))
tmpLog.error(errStr)
return
# check if the dataset is distributed
def isDistributedDataset(self,datasetName):
methodName = 'isDistributedDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isDDS = None
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get rules
for rule in client.list_did_rules(scope,dsn):
if rule['grouping'] != 'NONE':
isDDS = False
break
elif isDDS is None:
isDDS = True
# use False when there is no rule
if isDDS is None:
isDDS = False
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done with {0}'.format(isDDS))
return self.SC_SUCCEEDED,isDDS
# update replication rules
def updateReplicationRules(self,datasetName,dataMap):
methodName = 'updateReplicationRules'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get rules
for rule in client.list_did_rules(scope=scope, name=dsn):
for dataKey,data in iteritems(dataMap):
if rule['rse_expression'] == dataKey or re.search(dataKey,rule['rse_expression']) is not None:
tmpLog.debug('set data={0} on {1}'.format(str(data),rule['rse_expression']))
client.update_replication_rule(rule['id'],data)
except DataIdentifierNotFound:
pass
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# get active staging rule
def getActiveStagingRule(self, dataset_name):
methodName = 'getActiveStagingRule'
methodName += ' datasetName={0}'.format(dataset_name)
tmpLog = MsgWrapper(logger, methodName)
tmpLog.debug('start')
ruleID = None
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(dataset_name)
# get rules
for rule in client.list_did_rules(scope=scope, name=dsn):
if rule['activity'] == 'Staging':
ruleID = rule['id']
break
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('got ruleID={0}'.format(ruleID))
return self.SC_SUCCEEDED, ruleID
# check quota
def check_quota(self, userName):
methodName = 'check_quota'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} userName={1}'.format(methodName, userName)
tmpLog = MsgWrapper(logger, methodName)
tmpLog.debug('start')
retVal = True, None
try:
# get rucio API
client = RucioClient()
tmpStat, user_info = self.finger(userName)
if tmpStat != self.SC_SUCCEEDED:
retVal = False, "failed to get nickname"
else:
owner = user_info['nickname']
quota_info = client.get_global_account_usage(owner)
for info in quota_info:
if info['bytes'] >= info['bytes_limit']:
retVal = False, 'exceeded quota on {}'.format(info['rse_expression'])
break
except Exception as e:
errMsg = 'failed to get quota info with {}'.format(str(e))
tmpLog.error(errMsg)
retVal = False, errMsg
tmpLog.debug('done {} {}'.format(*retVal))
return self.SC_SUCCEEDED, retVal
|
{
"content_hash": "302a0320e2a13fdb5c8ba4a7de65a6a7",
"timestamp": "",
"source": "github",
"line_count": 1590,
"max_line_length": 134,
"avg_line_length": 41.99245283018868,
"alnum_prop": 0.5238287802540139,
"repo_name": "PanDAWMS/panda-jedi",
"id": "d4427815763a67453b5f50ebdcb1344017595d2e",
"size": "66768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandajedi/jediddm/AtlasDDMClient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2698"
},
{
"name": "Python",
"bytes": "2118485"
},
{
"name": "Shell",
"bytes": "3029"
}
],
"symlink_target": ""
}
|
import requests
from core.cache.tokencache import TokenCache
class WdAPI(self):
baseURL = 'https://api.vdian.com/api'
_token_cache = TokenCache()
def fetch
def getAccessToken(self):
return _token_cache.get_cache(self._token_cache.KEY_WD_ACCESS_TOKEN)
|
{
"content_hash": "d93b7be0320c842723b128781d95f4b9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 23.916666666666668,
"alnum_prop": 0.6829268292682927,
"repo_name": "Maru-zhang/FilmHub-Tornado",
"id": "73e01f6328cd15cccec475afd07886d615a2b35c",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/server/wdapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "48"
},
{
"name": "Python",
"bytes": "28839"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import socket
import random
import capnp
import calculator_capnp
def read_value(value):
'''Helper function to asynchronously call read() on a Calculator::Value and
return a promise for the result. (In the future, the generated code might
include something like this automatically.)'''
return value.read().then(lambda result: result.value)
def evaluate_impl(expression, params=None):
'''Implementation of CalculatorImpl::evaluate(), also shared by
FunctionImpl::call(). In the latter case, `params` are the parameter
values passed to the function; in the former case, `params` is just an
empty list.'''
which = expression.which()
if which == 'literal':
return capnp.Promise(expression.literal)
elif which == 'previousResult':
return read_value(expression.previousResult)
elif which == 'parameter':
assert expression.parameter < len(params)
return capnp.Promise(params[expression.parameter])
elif which == 'call':
call = expression.call
func = call.function
# Evaluate each parameter.
paramPromises = [evaluate_impl(param, params) for param in call.params]
joinedParams = capnp.join_promises(paramPromises)
# When the parameters are complete, call the function.
ret = (joinedParams
.then(lambda vals: func.call(vals))
.then(lambda result: result.value))
return ret
else:
raise ValueError("Unknown expression type: " + which)
class ValueImpl(calculator_capnp.Calculator.Value.Server):
"Simple implementation of the Calculator.Value Cap'n Proto interface."
def __init__(self, value):
self.value = value
def read(self, **kwargs):
return self.value
class FunctionImpl(calculator_capnp.Calculator.Function.Server):
'''Implementation of the Calculator.Function Cap'n Proto interface, where the
function is defined by a Calculator.Expression.'''
def __init__(self, paramCount, body):
self.paramCount = paramCount
self.body = body.as_builder()
def call(self, params, _context, **kwargs):
'''Note that we're returning a Promise object here, and bypassing the
helper functionality that normally sets the results struct from the
returned object. Instead, we set _context.results directly inside of
another promise'''
assert len(params) == self.paramCount
# using setattr because '=' is not allowed inside of lambdas
return evaluate_impl(self.body, params).then(lambda value: setattr(_context.results, 'value', value))
class OperatorImpl(calculator_capnp.Calculator.Function.Server):
'''Implementation of the Calculator.Function Cap'n Proto interface, wrapping
basic binary arithmetic operators.'''
def __init__(self, op):
self.op = op
def call(self, params, **kwargs):
assert len(params) == 2
op = self.op
if op == 'add':
return params[0] + params[1]
elif op == 'subtract':
return params[0] - params[1]
elif op == 'multiply':
return params[0] * params[1]
elif op == 'divide':
return params[0] / params[1]
else:
raise ValueError('Unknown operator')
class CalculatorImpl(calculator_capnp.Calculator.Server):
"Implementation of the Calculator Cap'n Proto interface."
def evaluate(self, expression, _context, **kwargs):
return evaluate_impl(expression).then(lambda value: setattr(_context.results, 'value', ValueImpl(value)))
def defFunction(self, paramCount, body, _context, **kwargs):
return FunctionImpl(paramCount, body)
def getOperator(self, op, **kwargs):
return OperatorImpl(op)
def parse_args():
parser = argparse.ArgumentParser(usage='''Runs the server bound to the\
given address/port ADDRESS may be '*' to bind to all local addresses.\
:PORT may be omitted to choose a port automatically. ''')
parser.add_argument("address", help="ADDRESS[:PORT]")
return parser.parse_args()
def main():
address = parse_args().address
server = capnp.TwoPartyServer(address, bootstrap=CalculatorImpl())
server.run_forever()
if __name__ == '__main__':
main()
|
{
"content_hash": "909104af4c32fb2b7ccece0090cc7353",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 113,
"avg_line_length": 31.5,
"alnum_prop": 0.6648263170002301,
"repo_name": "zielmicha/capnp.nim",
"id": "a1bf926ab1754e08ec7144c111c61cf6d77c1819",
"size": "4405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/calculator_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cap'n Proto",
"bytes": "102664"
},
{
"name": "Nim",
"bytes": "117777"
},
{
"name": "Shell",
"bytes": "3024"
}
],
"symlink_target": ""
}
|
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class ClamAV(Module):
cmd = 'clamav'
description = 'Scan file from local ClamAV daemon'
authors = ['neriberto']
def __init__(self):
super(ClamAV, self).__init__()
self.parser.add_argument('-s', '--socket', help='Specify an unix socket (default: Clamd Unix Socket)')
def run(self):
super(ClamAV, self).run()
if self.args is None:
return
if not HAVE_CLAMD:
self.log('error', "Missing dependency, install requests (`pip install pyclamd`)")
return
if not __sessions__.is_set():
self.log('error', 'No open session')
return
daemon = None
socket = None
socket = self.args.socket
try:
if socket is not None:
daemon = pyclamd.ClamdUnixSocket(socket)
self.log('info', 'Using socket {0} to scan'.format(socket))
else:
daemon = pyclamd.ClamdUnixSocket()
socket = 'Clamav'
except Exception as ex:
msg = 'Daemon connection failure, {0}'.format(ex)
self.log('error,', msg)
return
try:
if daemon.ping():
with open(__sessions__.current.file.path, 'r') as fd:
results = daemon.scan_stream(fd.read())
else:
self.log('error', "Unable to connect to the daemon")
except Exception as ex:
msg = 'Unable to scan with antivirus daemon, {0}'.format(ex)
self.log('error', msg)
return
found = None
name = 'not found'
if results:
for item in results:
found = results[item][0]
name = results[item][1]
if found == 'ERROR':
self.log('error', "Check permissions of the binary folder, {0}".format(name))
else:
self.log('info', "Daemon {0} returns: {1}".format(socket, name))
|
{
"content_hash": "3f63f22b60a104c0b6a92ddca26b94f8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 110,
"avg_line_length": 30.366197183098592,
"alnum_prop": 0.5320037105751392,
"repo_name": "MeteorAdminz/viper",
"id": "0d2859e4a36d3305738516511acbb683745141b1",
"size": "2278",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "viper/modules/clamav.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1306"
},
{
"name": "JavaScript",
"bytes": "9205"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Python",
"bytes": "1577731"
},
{
"name": "Smarty",
"bytes": "28258"
}
],
"symlink_target": ""
}
|
from parser import parse_prefix
class Limit_Plugin:
""" Plugin to control which users can use which commands.
Saves list of commands and their permissions to limit.cfg
in format "command regexp".
"""
def __init__(self, mib, params=None):
self.mib = mib
self.mib.register_privmsg_cmd('allow', self.allow)
self.mib.register_privmsg_cmd('deny', self.deny)
self.load_lists()
def clean(self):
self.save_lists()
def allow(self, msg):
parsed = self.parse(msg)
if parsed:
cmd, mask = parsed
print 'Adding %s for command %s' % (mask, cmd)
self.mib.add_cmd_permission(cmd, mask)
def deny(self, msg):
parsed = self.parse(msg)
if parsed:
cmd, mask = parsed
print 'Removing %s from command %s' % (mask, cmd)
self.mib.rm_cmd_permission(cmd, mask)
def parse(self, msg):
prefix = parse_prefix(msg.prefix)
postfix = msg.postfix.split()
if len(postfix) != 2:
error_msg = 'Usage: mask command'
self.mib.socket.send('PRIVMSG %s :%s' % (prefix.nick, error_msg))
return None
mask = postfix[0]
cmd = postfix[1]
return (cmd, mask)
def load_lists(self):
try:
f = open('limit.cfg')
except IOError:
return
try:
for line in f:
line = line.split()
if len(line) != 2:
continue # config file syntax error
cmd = line[0]
mask = line[1]
self.mib.add_cmd_permission(cmd, mask, regexpify=False)
finally:
f.close()
def save_lists(self):
try:
f = open('limit.cfg', 'w')
except IOError:
return
try:
for cmd in self.mib.command_masks:
for regexp in self.mib.command_masks[cmd]:
line = '%s %s\n' % (cmd, regexp.pattern)
f.write(line)
finally:
f.close()
def init(mib, params=None):
return Limit_Plugin(mib, params)
|
{
"content_hash": "ca96684c0273dc4b8db9fabb313ee360",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 28.75,
"alnum_prop": 0.5125858123569794,
"repo_name": "aalien/mib",
"id": "05f3cf9d6024091f3e9aecb96c2616be2e261a5d",
"size": "2185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/limit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20848"
}
],
"symlink_target": ""
}
|
def dist(a, b, limit=None):
return BerghelRoach(a).getDistance(b, limit or len(a) + len(b))
# This is a modification of the original Berghel-Roach edit
# distance (based on prior work by Ukkonen) described in
# ACM Transactions on Information Systems, Vol. 14, No. 1,
# January 1996, pages 94-106.
#
# I observed that only O(d) prior computations are required
# to compute edit distance. Rather than keeping all prior
# f(k,p) results in a matrix, we keep only the two "outer edges"
# in the triangular computation pattern that will be used in
# subsequent rounds. We cannot reconstruct the edit path,
# but many applications do not require that; for them, this
# modification uses less space (and empirically, slightly
# less time).
#
# First, some history behind the algorithm necessary to understand
# Berghel-Roach and our modification...
#
# The traditional algorithm for edit distance uses dynamic programming,
# building a matrix of distances for substrings:
# D[i,j] holds the distance for string1[0..i]=>string2[0..j].
# The matrix is initially populated with the trivial values
# D[0,j]=j and D[i,0]=i; and then expanded with the rule:
# <pre>
# D[i,j] = min( D[i-1,j]+1, // insertion
# D[i,j-1]+1, // deletion
# (D[i-1,j-1]
# + (string1[i]==string2[j])
# ? 0 // match
# : 1 // substitution ) )
# </pre>
#
# Ukkonen observed that each diagonal of the matrix must increase
# by either 0 or 1 from row to row. If D[i,j] = p, then the
# matching rule requires that D[i+x,j+x] = p for all x
# where string1[i..i+x) matches string2[j..j+j+x). Ukkonen
# defined a function f(k,p) as the highest row number in which p
# appears on the k-th diagonal (those D[i,j] where k=(i-j), noting
# that k may be negative). The final result of the edit
# distance is the D[n,m] cell, on the (n-m) diagonal; it is
# the value of p for which f(n-m, p) = m. The function f can
# also be computed dynamically, according to a simple recursion:
# <pre>
# f(k,p) {
# contains_p = max(f(k-1,p-1), f(k,p-1)+1, f(k+1,p-1)+1)
# while (string1[contains_p] == string2[contains_p + k])
# contains_p++;
# return contains_p;
# }
# </pre>
# The max() expression finds a row where the k-th diagonal must
# contain p by virtue of an edit from the prior, same, or following
# diagonal (corresponding to an insert, substitute, or delete);
# we need not consider more distant diagonals because row-to-row
# and column-to-column changes are at most +/- 1.
#
# The original Ukkonen algorithm computed f(k,p) roughly as
# follows:
# <pre>
# for (p = 0; ; p++) {
# compute f(k,p) for all valid k
# if (f(n-m, p) == m) return p;
# }
# </pre>
#
# Berghel and Roach observed that many values of f(k,p) are
# computed unnecessarily, and reorganized the computation into
# a just-in-time sequence. In each iteration, we are primarily
# interested in the terminating value f(main,p), where main=(n-m)
# is the main diagonal. To compute that we need f(x,p-1) for
# three values of x: main-1, main, and main+1. Those depend on
# values for p-2, and so forth. We will already have computed
# f(main,p-1) in the prior round, and thus f(main-1,p-2) and
# f(main+1,p-2), and so forth. The only new values we need to compute
# are on the edges: f(main-i,p-i) and f(main+i,p-i). Noting that
# f(k,p) is only meaningful when abs(k) is no greater than p,
# one of the Berghel-Roach reviewers noted that we can compute
# the bounds for i:
# <pre>
# (main+i &le p-i) implies (i ≤ (p-main)/2)
# </pre>
# (where main+i is limited on the positive side) and similarly
# <pre>
# (-(main-i) &le p-i) implies (i ≤ (p+main)/2).
# </pre>
# (where main-i is limited on the negative side).
#
# This reduces the computation sequence to
# <pre>
# for (i = (p-main)/2; i > 0; i--) compute f(main+i,p-i);
# for (i = (p+main)/2; i > 0; i--) compute f(main-i,p-i);
# if (f(main, p) == m) return p;
# </pre>
#
# The original Berghel-Roach algorithm recorded prior values
# of f(k,p) in a matrix, using O(distance^2) space, enabling
# reconstruction of the edit path, but if all we want is the
# edit *distance*, we only need to keep O(distance) prior computations.
#
# The requisite prior k-1, k, and k+1 values are conveniently
# computed in the current round and the two preceding it.
# For example, on the higher-diagonal side, we compute:
# <pre>
# current[i] = f(main+i, p-i)
# </pre>
# We keep the two prior rounds of results, where p was one and two
# smaller. So, from the preceidng round
# <pre>
# last[i] = f(main+i, (p-1)-i)
# </pre>
# and from the prior round, but one position back:
# <pre>
# prior[i-1] = f(main+(i-1), (p-2)-(i-1))
# </pre>
# In the current round, one iteration earlier:
# <pre>
# current[i+1] = f(main+(i+1), p-(i+1))
# </pre>
# Note that the distance in all of these evaluates to p-i-1,
# and the diagonals are (main+i) and its neighbors... just
# what we need. The lower-diagonal side behaves similarly.
#
# We need to materialize values that are not computed in prior
# rounds, for either of two reasons: <ul>
# <li> Initially, we have no prior rounds, so we need to fill
# all of the "last" and "prior" values for use in the
# first round. The first round uses only on one side
# of the main diagonal or the other.
# <li> In every other round, we compute one more diagonal than before.
# </ul>
# In all of these cases, the missing f(k,p) values are for abs(k) > p,
# where a real value of f(k,p) is undefined. [The original Berghel-Roach
# algorithm prefills its F matrix with these values, but we fill
# them as we go, as needed.] We define
# <pre>
# f(-p-1,p) = p, so that we start diagonal -p with row p,
# f(p+1,p) = -1, so that we start diagonal p with row 0.
# </pre>
# (We also allow f(p+2,p)=f(-p-2,p)=-1, causing those values to
# have no effect in the starting row computation.]
#
# We only expand the set of diagonals visited every other round,
# when (p-main) or (p+main) is even. We keep track of even/oddness
# to save some arithmetic. The first round is always even, as p=abs(main).
# Note that we rename the "f" function to "computeRow" to be Googley.
class BerghelRoach(object):
def __init__(self, pattern):
# The "pattern" string against which others are compared.
self.pattern = pattern
# The current and two preceding sets of Ukkonen f(k,p) values for diagonals
# around the main, computed by the main loop of {@code getDistance}. These
# arrays are retained between calls to save allocation costs. (They are all
# initialized to a real array so that we can indiscriminately use length
# when ensuring/resizing.)
self.currentLeft = []
self.currentRight = []
self.lastLeft = []
self.lastRight = []
self.priorLeft = []
self.priorRight = []
def getDistance(self, target, limit):
# Compute the main diagonal number.
# The final result lies on this diagonal.
main = len(self.pattern) - len(target)
# Compute our initial distance candidate.
# The result cannot be less than the difference in
# string lengths, so we start there.
distance = abs(main)
if distance > limit:
# More than we wanted. Give up right away
return distance
# In the main loop below, the current{Right,Left} arrays record results
# from the current outer loop pass. The last{Right,Left} and
# prior{Right,Left} arrays hold the results from the preceding two passes.
# At the end of the outer loop, we shift them around (reusing the prior
# array as the current for the next round, to avoid reallocating).
# The Right reflects higher-numbered diagonals, Left lower-numbered.
# Fill in "prior" values for the first two passes through
# the distance loop. Note that we will execute only one side of
# the main diagonal in these passes, so we only need
# initialize one side of prior values.
if main <= 0:
self.ensureCapacityRight(distance, False)
for j in range(distance):
self.lastRight[j] = distance - j - 1 # Make diagonal -k start in row k
self.priorRight[j] = -1
else:
self.ensureCapacityLeft(distance, False)
for j in range(distance):
self.lastLeft[j] = -1 # Make diagonal +k start in row 0
self.priorLeft[j] = -1
# Keep track of even rounds. Only those rounds consider new diagonals,
# and thus only they require artificial "last" values below.
even = True
# MAIN LOOP: try each successive possible distance until one succeeds.
while True:
# Before calling computeRow(main, distance), we need to fill in
# missing cache elements. See the high-level description above.
# Higher-numbered diagonals
offDiagonal = (distance - main) / 2
self.ensureCapacityRight(offDiagonal, True)
if even:
# Higher diagonals start at row 0
self.lastRight[offDiagonal] = -1
immediateRight = -1
while offDiagonal > 0:
immediateRight = computeRow(
(main + offDiagonal),
(distance - offDiagonal),
self.pattern,
target,
self.priorRight[offDiagonal-1],
self.lastRight[offDiagonal],
immediateRight)
self.currentRight[offDiagonal] = immediateRight
offDiagonal -= 1
# Lower-numbered diagonals
offDiagonal = (distance + main) / 2
self.ensureCapacityLeft(offDiagonal, True)
if even:
# Lower diagonals, fictitious values for f(-x-1,x) = x
self.lastLeft[offDiagonal] = (distance-main)/2 - 1
if even:
immediateLeft = -1
else:
immediateLeft = (distance - main) / 2
while offDiagonal > 0:
immediateLeft = computeRow(
(main - offDiagonal),
(distance - offDiagonal),
self.pattern, target,
immediateLeft,
self.lastLeft[offDiagonal],
self.priorLeft[offDiagonal-1])
self.currentLeft[offDiagonal] = immediateLeft
offDiagonal -= 1
# We are done if the main diagonal has distance in the last row.
mainRow = computeRow(main, distance, self.pattern, target,
immediateLeft, self.lastLeft[0], immediateRight)
if mainRow == len(target):
break
distance += 1
if distance > limit or distance < 0:
break
# The [0] element goes to both sides.
self.currentRight[0] = mainRow
self.currentLeft[0] = mainRow
# Rotate rows around for next round: current=>last=>prior (=>current)
tmp = self.priorLeft
self.priorLeft = self.lastLeft
self.lastLeft = self.currentLeft
self.currentLeft = self.priorLeft
tmp = self.priorRight
self.priorRight = self.lastRight
self.lastRight = self.currentRight
self.currentRight = tmp
# Update evenness, too
even = not even
return distance
def ensureCapacityLeft(self, index, cp):
# Ensures that the Left arrays can be indexed through {@code index},
# inclusively, resizing (and copying) as necessary.
if len(self.currentLeft) <= index:
index += 1
self.priorLeft = resize(self.priorLeft, index, cp)
self.lastLeft = resize(self.lastLeft, index, cp)
self.currentLeft = resize(self.currentLeft, index, False)
def ensureCapacityRight(self, index, cp):
# Ensures that the Right arrays can be indexed through {@code index},
# inclusively, resizing (and copying) as necessary.
if len(self.currentRight) <= index:
index += 1
self.priorRight = resize(self.priorRight, index, cp)
self.lastRight = resize(self.lastRight, index, cp)
self.currentRight = resize(self.currentRight, index, False)
# Resize an array, copying old contents if requested
def resize(array, size, cp):
if cp:
return array + [0] * (size - len(array))
else:
return [0] * size
# Computes the highest row in which the distance {@code p} appears
# in diagonal {@code k} of the edit distance computation for
# strings {@code a} and {@code b}. The diagonal number is
# represented by the difference in the indices for the two strings;
# it can range from {@code -b.length()} through {@code a.length()}.
#
# More precisely, this computes the highest value x such that
# <pre>
# p = edit-distance(a[0:(x+k)), b[0:x)).
# </pre>
#
# This is the "f" function described by Ukkonen.
#
# The caller must assure that abs(k) ≤ p, the only values for
# which this is well-defined.
#
# The implementation depends on the cached results of prior
# computeRow calls for diagonals k-1, k, and k+1 for distance p-1.
# These must be supplied in {@code knownLeft}, {@code knownAbove},
# and {@code knownRight}, respectively.
# @param k diagonal number
# @param p edit distance
# @param a one string to be compared
# @param b other string to be compared
# @param knownLeft value of {@code computeRow(k-1, p-1, ...)}
# @param knownAbove value of {@code computeRow(k, p-1, ...)}
# @param knownRight value of {@code computeRow(k+1, p-1, ...)}
def computeRow(k, p, a, b,
knownLeft, knownAbove, knownRight):
assert abs(k) <= p
assert p >= 0
# Compute our starting point using the recurrance.
# That is, find the first row where the desired edit distance
# appears in our diagonal. This is at least one past
# the highest row for
if p == 0:
t = 0
else:
# We look at the adjacent diagonals for the next lower edit distance.
# We can start in the next row after the prior result from
# our own diagonal (the "substitute" case), or the next diagonal
# ("delete"), but only the same row as the prior result from
# the prior diagonal ("insert").
t = max(max(knownAbove, knownRight)+1, knownLeft)
# Look down our diagonal for matches to find the maximum
# row with edit-distance p.
tmax = min(len(b), len(a)-k)
while t < tmax and b[t] == a[t+k]:
t += 1
return t
|
{
"content_hash": "c079d986eacf600dae303708e6626212",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 86,
"avg_line_length": 41.428969359331475,
"alnum_prop": 0.6230081355476367,
"repo_name": "madhusudancs/test-infra",
"id": "37f6b4822129c4c55e3626a56a2fda6f20dc0299",
"size": "16100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "triage/berghelroach.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7520"
},
{
"name": "Go",
"bytes": "1321132"
},
{
"name": "HTML",
"bytes": "55691"
},
{
"name": "JavaScript",
"bytes": "58924"
},
{
"name": "Makefile",
"bytes": "28574"
},
{
"name": "Nginx",
"bytes": "1532"
},
{
"name": "Protocol Buffer",
"bytes": "4002"
},
{
"name": "Python",
"bytes": "634055"
},
{
"name": "Roff",
"bytes": "10676"
},
{
"name": "Shell",
"bytes": "125766"
}
],
"symlink_target": ""
}
|
import functools
import operator
from chainer.functions.normalization import layer_normalization
from chainer import link
from chainer import variable
class LayerNormalization(link.Link):
"""Layer normalization layer on outputs of linear functions.
.. warning::
This feature is experimental. The interface can change in the future.
This link implements a "layer normalization" layer
which normalizes the input units by statistics
that are computed along the second axis,
scales and shifts them.
Parameter initialization will be deferred until
the first forward data pass at which time the size will be determined.
Args:
size (int): Size of input units. If ``None``, parameter initialization
will be deferred until the first forward data pass at which time
the size will be determined.
eps (float): Epsilon value for numerical stability of normalization.
initial_gamma (~chainer.Initializer): Initializer for scaling vector.
If ``None``, then the vector is filled by 1.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
initial_beta (~chainer.Initializer): Initializer for shifting vector.
If ``None``, then the vector is filled by 0.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
Attributes:
gamma (~chainer.Parameter): Scaling parameter.
beta (~chainer.Parameter): Shifting parameter.
~LayerNormalization.eps (float): Epsilon value for numerical stability.
See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_
"""
def __init__(self, size=None, eps=1e-6, initial_gamma=None,
initial_beta=None):
super(LayerNormalization, self).__init__()
if initial_gamma is None:
initial_gamma = 1
if initial_beta is None:
initial_beta = 0
with self.init_scope():
self.gamma = variable.Parameter(initial_gamma)
self.beta = variable.Parameter(initial_beta)
self.eps = eps
if size is not None:
self._initialize_params(size)
def _initialize_params(self, size):
self.gamma.initialize(size)
self.beta.initialize(size)
def __call__(self, x):
"""Apply layer normalization to given input.
Args:
x (~chainer.Variable): Batch vectors.
Shape of this value must be `(batch_size, unit_size)`,
e.g., the output of :func:`~chainer.functions.linear`.
Returns:
~chainer.Variable: Output of the layer normalization.
"""
if self.gamma.data is None:
in_size = functools.reduce(operator.mul, x.shape[1:], 1)
self._initialize_params(in_size)
return layer_normalization.layer_normalization(
x, self.gamma, self.beta, self.eps)
|
{
"content_hash": "ce3a81998801e7c365d2cc59b67f252f",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 35.75,
"alnum_prop": 0.6356976356976357,
"repo_name": "anaruse/chainer",
"id": "b70dde2f9c8b7975030b5e719006feee5a1569d8",
"size": "3003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/links/normalization/layer_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3723858"
}
],
"symlink_target": ""
}
|
def connected_components(self):
"""
Connected components determines groups all the vertices in a particular graph
by whether or not there is path between these vertices. This method returns
a frame with the vertices and their corresponding component
Parameters
----------
:return: (Frame) Frame containing the vertex id's and their components
Examples
--------
>>> vertex_schema = [('id', int)]
>>> edge_schema = [('src', int), ('dst', int)]
>>> vertex_rows = [ [1], [2], [3], [4], [5] ]
>>> edge_rows = [ [1, 2], [1, 3], [2, 3], [4, 5] ]
>>> vertex_frame = tc.frame.create(vertex_rows, vertex_schema)
>>> edge_frame = tc.frame.create(edge_rows, edge_schema)
>>> graph = tc.graph.create(vertex_frame, edge_frame)
>>> result = graph.connected_components()
>>> result.inspect()
[#] id component
==================
[0] 1 1
[1] 2 1
[2] 3 1
[3] 4 4
[4] 5 4
"""
from sparktk.frame.frame import Frame
return Frame(self._tc, self._scala.connectedComponents())
|
{
"content_hash": "e0d49c3d8e1f008945a6720d27257b27",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 81,
"avg_line_length": 31.44736842105263,
"alnum_prop": 0.5188284518828452,
"repo_name": "dmsuehir/spark-tk",
"id": "860dc34e491390258cfa08a8c11e5d45f5a11f10",
"size": "1901",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/sparktk/graph/ops/connected_components.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "11509"
},
{
"name": "Python",
"bytes": "1453671"
},
{
"name": "R",
"bytes": "2242"
},
{
"name": "Scala",
"bytes": "1495386"
},
{
"name": "Shell",
"bytes": "24621"
}
],
"symlink_target": ""
}
|
import pipeCore.common as common
import pipeCore.config as pcConfig
def outputMayaSetVars():
mayaVersion = pcConfig.getProjectMayaVersion(common.CLIENT, common.PROJECT)
mayaExePath = pcConfig.getProjectMayaExePath(common.CLIENT, common.PROJECT)
mayaBatchExePath = pcConfig.getProjectMayaBatchExePath(common.CLIENT, common.PROJECT)
mayaSetEnvPath = pcConfig.getProjectMayaSetEnvPath(common.CLIENT, common.PROJECT)
mayaPrefsDir = pcConfig.getProjectMayaPrefsDir(common.CLIENT, common.PROJECT)
mayaUserPrefsDir = pcConfig.getProjectMayaUserPrefsDir(common.CLIENT, common.PROJECT)
mayaBatchPrefsDir = pcConfig.getProjectMayaBatchPrefsDir(common.CLIENT, common.PROJECT)
result = 'set MAYA_VERSION=%s' % mayaVersion
result += '&&set MAYA_EXE=%s' % mayaExePath
result += '&&set MAYA_BATCH_EXE=%s' % mayaBatchExePath
result += '&&set MAYA_SETENV=%s' % mayaSetEnvPath
result += '&&set MAYA_PREFS=%s' % mayaPrefsDir
result += '&&set MAYA_USER_PREFS=%s' % mayaUserPrefsDir
result += '&&set MAYA_BATCH_PREFS=%s' % mayaBatchPrefsDir
return result
if __name__ == "__main__":
print outputMayaSetVars()
|
{
"content_hash": "816947db20fe06dc7a4cbe71e080c44a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 88,
"avg_line_length": 46.125,
"alnum_prop": 0.7768744354110207,
"repo_name": "wgergely/After-Effects",
"id": "045cead16c9218fc62b1a97be4f7d17042d74f30",
"size": "1107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Maya/PipeUtility/ref/pipeCore/mayaInitMain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "505980"
}
],
"symlink_target": ""
}
|
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Find the loader for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__loader__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable loader with the
value of 'path' given to the finders. None is returned if no loader could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct loader.
"""
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
return _bootstrap._find_module(name, path)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
import basehook
sys.meta_path.append(basehook.BaseHook())
|
{
"content_hash": "a5be49e61cf9f8caea58135366ef52a3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 80,
"avg_line_length": 35.79381443298969,
"alnum_prop": 0.6661866359447005,
"repo_name": "kevinmel2000/brython",
"id": "12c2c8751f6157399fe26d7ccd5dda9a129b9481",
"size": "3472",
"binary": false,
"copies": "610",
"ref": "refs/heads/master",
"path": "www/src/Lib/importlib/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15757"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "4915132"
},
{
"name": "JavaScript",
"bytes": "1273016"
},
{
"name": "Makefile",
"bytes": "61"
},
{
"name": "Python",
"bytes": "14074023"
},
{
"name": "R",
"bytes": "2918"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import numpy as np
import code
from imagernn.utils import merge_init_structs, initw, accumNpDicts
from imagernn.lstm_generator import LSTMGenerator
from imagernn.rnn_generator import RNNGenerator
def decodeGenerator(generator):
if generator == 'lstm':
return LSTMGenerator
if generator == 'rnn':
return RNNGenerator
else:
raise Exception('generator %s is not yet supported' % (base_generator_str,))
class GenericBatchGenerator:
"""
Base batch generator class.
This class is aware of the fact that we are generating
sentences from images.
"""
@staticmethod
def init(params, misc):
# inputs
image_encoding_size = params.get('image_encoding_size', 128)
word_encoding_size = params.get('word_encoding_size', 128)
hidden_size = params.get('hidden_size', 128)
generator = params.get('generator', 'lstm')
vocabulary_size = len(misc['wordtoix'])
output_size = len(misc['ixtoword']) # these should match though
image_size = 4096 # size of CNN vectors hardcoded here
if generator == 'lstm':
assert image_encoding_size == word_encoding_size, 'this implementation does not support different sizes for these parameters'
# initialize the encoder models
model = {}
model['We'] = initw(image_size, image_encoding_size) # image encoder
model['be'] = np.zeros((1,image_encoding_size))
model['Ws'] = initw(vocabulary_size, word_encoding_size) # word encoder
update = ['We', 'be', 'Ws']
regularize = ['We', 'Ws']
init_struct = { 'model' : model, 'update' : update, 'regularize' : regularize}
# descend into the specific Generator and initialize it
Generator = decodeGenerator(generator)
generator_init_struct = Generator.init(word_encoding_size, hidden_size, output_size)
merge_init_structs(init_struct, generator_init_struct)
return init_struct
@staticmethod
def forward(batch, model, params, misc, predict_mode = False):
""" iterates over items in the batch and calls generators on them """
# we do the encoding here across all images/words in batch in single matrix
# multiplies to gain efficiency. The RNNs are then called individually
# in for loop on per-image-sentence pair and all they are concerned about is
# taking single matrix of vectors and doing the forward/backward pass without
# knowing anything about images, sentences or anything of that sort.
# encode all images
# concatenate as rows. If N is number of image-sentence pairs,
# F will be N x image_size
F = np.row_stack(x['image']['feat'] for x in batch)
We = model['We']
be = model['be']
Xe = F.dot(We) + be # Xe becomes N x image_encoding_size
# decode the generator we wish to use
generator_str = params.get('generator', 'lstm')
Generator = decodeGenerator(generator_str)
# encode all words in all sentences (which exist in our vocab)
wordtoix = misc['wordtoix']
Ws = model['Ws']
gen_caches = []
Ys = [] # outputs
for i,x in enumerate(batch):
# take all words in this sentence and pluck out their word vectors
# from Ws. Then arrange them in a single matrix Xs
# Note that we are setting the start token as first vector
# and then all the words afterwards. And start token is the first row of Ws
ix = [0] + [ wordtoix[w] for w in x['sentence']['tokens'] if w in wordtoix ]
Xs = np.row_stack( [Ws[j, :] for j in ix] )
Xi = Xe[i,:]
# forward prop through the RNN
gen_Y, gen_cache = Generator.forward(Xi, Xs, model, params, predict_mode = predict_mode)
gen_caches.append((ix, gen_cache))
Ys.append(gen_Y)
# back up information we need for efficient backprop
cache = {}
if not predict_mode:
# ok we need cache as well because we'll do backward pass
cache['gen_caches'] = gen_caches
cache['Xe'] = Xe
cache['Ws_shape'] = Ws.shape
cache['F'] = F
cache['generator_str'] = generator_str
return Ys, cache
@staticmethod
def backward(dY, cache):
Xe = cache['Xe']
generator_str = cache['generator_str']
dWs = np.zeros(cache['Ws_shape'])
gen_caches = cache['gen_caches']
F = cache['F']
dXe = np.zeros(Xe.shape)
Generator = decodeGenerator(generator_str)
# backprop each item in the batch
grads = {}
for i in xrange(len(gen_caches)):
ix, gen_cache = gen_caches[i] # unpack
local_grads = Generator.backward(dY[i], gen_cache)
dXs = local_grads['dXs'] # intercept the gradients wrt Xi and Xs
del local_grads['dXs']
dXi = local_grads['dXi']
del local_grads['dXi']
accumNpDicts(grads, local_grads) # add up the gradients wrt model parameters
# now backprop from dXs to the image vector and word vectors
dXe[i,:] += dXi # image vector
for n,j in enumerate(ix): # and now all the other words
dWs[j,:] += dXs[n,:]
# finally backprop into the image encoder
dWe = F.transpose().dot(dXe)
dbe = np.sum(dXe, axis=0, keepdims = True)
accumNpDicts(grads, { 'We':dWe, 'be':dbe, 'Ws':dWs })
return grads
@staticmethod
def predict(batch, model, params, **kwparams):
""" some code duplication here with forward pass, but I think we want the freedom in future """
F = np.row_stack(x['image']['feat'] for x in batch)
We = model['We']
be = model['be']
Xe = F.dot(We) + be # Xe becomes N x image_encoding_size
generator_str = params['generator']
Generator = decodeGenerator(generator_str)
Ys = []
for i,x in enumerate(batch):
gen_Y = Generator.predict(Xe[i, :], model, model['Ws'], params, **kwparams)
Ys.append(gen_Y)
return Ys
|
{
"content_hash": "13d5b14ac71d41192d173e3bf6235428",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 131,
"avg_line_length": 37.26143790849673,
"alnum_prop": 0.6576039291352395,
"repo_name": "sethuiyer/mlhub",
"id": "0dc1e6251bff513ce35e6663ab635ebcf0b388c8",
"size": "5701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Neural Image Caption Generator/imagernn/generic_batch_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4070"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "HTML",
"bytes": "4284"
},
{
"name": "JavaScript",
"bytes": "16740"
},
{
"name": "Jupyter Notebook",
"bytes": "866288"
},
{
"name": "Python",
"bytes": "104996"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
"""
Build a half-adder quantum circuit that takes two bits as input,
encodes them into qubits, then runs the half-adder circuit calculating
the sum and carry qubits, observed over 1000 runs of the experiment
.
References:
https://en.wikipedia.org/wiki/Adder_(electronics)
https://qiskit.org/textbook/ch-states/atoms-computation.html#4.2-Remembering-how-to-add-
"""
import qiskit as q
def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts:
"""
>>> half_adder(0, 0)
{'00': 1000}
>>> half_adder(0, 1)
{'01': 1000}
>>> half_adder(1, 0)
{'01': 1000}
>>> half_adder(1, 1)
{'10': 1000}
"""
# Use Aer's qasm_simulator
simulator = q.Aer.get_backend("qasm_simulator")
qc_ha = q.QuantumCircuit(4, 2)
# encode inputs in qubits 0 and 1
if bit0 == 1:
qc_ha.x(0)
if bit1 == 1:
qc_ha.x(1)
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0, 2)
qc_ha.cx(1, 2)
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0, 1, 3)
qc_ha.barrier()
# extract outputs
qc_ha.measure(2, 0) # extract XOR value
qc_ha.measure(3, 1) # extract AND value
# Execute the circuit on the qasm simulator
job = q.execute(qc_ha, simulator, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(qc_ha)
if __name__ == "__main__":
counts = half_adder(1, 1)
print(f"Half Adder Output Qubit Counts: {counts}")
|
{
"content_hash": "3b9dc19b95b1586328b174f422f5d317",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 88,
"avg_line_length": 26.275862068965516,
"alnum_prop": 0.6253280839895013,
"repo_name": "wuweilin/python",
"id": "4af704e640be4ba3693aba377d7db6ac498f1931",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/half_adder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import sys
import socket
import paramiko
import ConfigParser
from time import sleep
#=================================
# Class: PySSH
#=================================
class pySSH(object):
def __init__ (self):
self.ssh = None
self.transport = None
self.ftp = None
def disconnect (self):
if self.transport is not None:
self.transport.close()
if self.ssh is not None:
self.ssh.close()
def connect(self,hostname,username,password,port=22):
self.hostname = hostname
self.username = username
self.password = password
self.ssh = paramiko.SSHClient()
#Don't use host key auto add policy for production servers
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.load_system_host_keys()
try:
self.ssh.connect(hostname,port,username,password)
self.transport=self.ssh.get_transport()
except (socket.error,paramiko.AuthenticationException) as message:
print "ERROR: SSH connection to "+self.hostname+" failed: " +str(message)
sys.exit(1)
return self.transport is not None
def execute(self,cmd,sudoenabled=False):
if sudoenabled:
fullcmd="echo " + self.password + " | sudo -S -p '' " + cmd
else:
fullcmd=cmd
if self.transport is None:
return "ERROR: connection was not established"
session=self.transport.open_session()
session.set_combine_stderr(True)
#print "running command: "+fullcmd
if sudoenabled:
session.get_pty()
session.exec_command(fullcmd)
stdout = session.makefile('rb', -1)
#print stdout.readlines()
#print stdout.read()
output=stdout.read()
#print output+ "Here"*10
session.close()
return output
def _open(self):
"""open a scp channel"""
if self.channel is None:
self.channel = self.transport.open_session()
def scp_put_file(self, local_file, remote_file):
"""
Copies a file from local to remote using FTP
:param remote_file: Path to remote file
:param local_file: Path to local file
:return: Returns boolean to show whether transfer succeeded
"""
if self.ftp is None:
self.ftp = self.ssh.open_sftp()
self.ftp.put(local_file, remote_file)
return True
if __name__ == '__main__':
pass
|
{
"content_hash": "e065581e3cb9678aac54f74fe2b942e3",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 85,
"avg_line_length": 33.21052631578947,
"alnum_prop": 0.5804278922345484,
"repo_name": "satroutr/pod-cloud",
"id": "629cf99b86f68db4dfaaedb96b9aa0cd0b64f3f3",
"size": "2546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySSH.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "49"
},
{
"name": "HTML",
"bytes": "23602"
},
{
"name": "JavaScript",
"bytes": "968"
},
{
"name": "Python",
"bytes": "27492"
}
],
"symlink_target": ""
}
|
from InputManager import *
from CharMaps import STANDARD_ALLOWED_KEYCODES as CASE_INSENSITIVE_KEYCODE_MAP
|
{
"content_hash": "f4c5ea867de11592ca904369b81e9ffd",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 78,
"avg_line_length": 27.5,
"alnum_prop": 0.8181818181818182,
"repo_name": "tectronics/enso",
"id": "fcdf92510862d61c9b55c76a1b2e08b0bd5e9ac7",
"size": "165",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "enso/platform/win32/input/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4635533"
},
{
"name": "C++",
"bytes": "401064"
},
{
"name": "Objective-C",
"bytes": "11847"
},
{
"name": "Python",
"bytes": "593013"
}
],
"symlink_target": ""
}
|
"""Handlers for custom HTML tags."""
__author__ = 'John Orr (jorr@google.com)'
import inspect
import logging
import mimetypes
import os
import pkgutil
from xml.etree import cElementTree
import html5lib
import safe_dom
import webapp2
import appengine_config
from common import schema_fields
from extensions import tags
from models import config
CAN_USE_DYNAMIC_TAGS = config.ConfigProperty(
'gcb_can_use_dynamic_tags', bool, safe_dom.Text(
'Whether lesson content can make use of custom HTML tags such as '
'<gcb-youtube videoid="...">. If this is enabled some legacy content '
'may be rendered differently. '),
default_value=True)
DUPLICATE_INSTANCE_ID_MESSAGE = (
'Error processing custom HTML tag: duplicate tag id')
INVALID_HTML_TAG_MESSAGE = 'Invalid HTML tag'
class BaseTag(object):
"""Base class for the custom HTML tags."""
@classmethod
def name(cls):
return cls.__name__
@classmethod
def vendor(cls):
return cls.__module__
@classmethod
def required_modules(cls):
"""Lists the inputEx modules required by the editor."""
return []
def render(self, node, handler): # pylint: disable=W0613
"""Receive a node and return a node.
Args:
node: cElementTree.Element. The DOM node for the tag which should be
rendered.
handler: controllers.utils.BaseHandler. The server runtime.
Returns:
A cElementTree.Element holding the rendered DOM.
"""
return cElementTree.XML('<div>[Unimplemented custom tag]</div>')
def get_icon_url(self):
"""Return the URL for the icon to be displayed in the rich text editor.
Images should be placed in a folder called 'resources' inside the main
package for the tag definitions.
Returns:
the URL for the icon to be displayed in the editor.
"""
return """
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs
4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90EGgAIFHpT6h
8AAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAC30lEQVRo3u1ZP2sqQRCfVVGUXC
FqoZAmbSBYxFikMojBD2ErkgdC/AxpAn4A2wRMKptgCrWwSApBEG2DCidcI0gIxogXnXnFI5I87y6Jd6
seOHDN7LL7+83u/Nk5hoh/wMTCEJHMTMDGGDMzfrCAyWVL4DdCZLy72YwCxhgDIoKXlxcQRREeHx9BFE
WYTqfg9XohGAxCKBSCnZ0dcDqdhlrFEKlWq8QYIwD49ovFYjQajYiICBF17auLACLSbDaj3d3dObizsz
Nqt9v09PRE8Xhck0gul9NtONADnojI7XbPAXW73YV55XJZk8TFxcX6TuDk5GQORBAE1StxeXmpSaJery
99lWBZ69dqtQUgpVJJcW6/39cksL+/v/oTiEajC0DsdjvNZjPF+Q6HQ5PEsrJ0Huj1egs6WZbh+flZcX
4kEtFcr1KprDaRybKsqL++vlbU+/1+zfVEUVwtAZ/Pp6h/f39X1COi5nqBQGC1iaxUKine5eFwqDg/Fo
tx8QFdYfTm5uYLiPv7e0JExZD4OV/8/+3t7a0vkcmyTJIk0Xg8Vs0Dr6+vmta/vb1dbR74rTw8PKiCPz
09XV8m/qmEQiFF8IeHh7oLOq4EEJGazaam5ddajf5ElKJPNps1BDxXAohIjUbjC3CPx0OTycTQfbiewO
f3QDKZ5LIHVwIf4PP5vGFXZmUErq6uCAAok8lw9TFuBFKp1LxE4GF53eX0d10KSZLg+Pj4X/+SY/ePCw
HGGIzHYzg6OuLfG+W18MHBAYTDYf7daeLRLtv2RrcE9DdvC4UC5PN5mE6n3DvGhtU+RETn5+cLxVsikT
BHIru7u1N9uKTTaS4EDItCiAhWq1V13OVywWg02lwfGA6HmuNvb2+b7cQWi8XcUUgQBPB6varjWmMbE0
Y7nY5q4VYsFs0RRvv9PgmCMI8+VquVWq0WtzBqaC308bMPAGAwGAAiqvZQt8XcthbaELGZ/AbBX0kdVa
SPB+uxAAAAAElFTkSuQmCC
"""
def get_schema(self, unused_handler):
"""Return the list of fields which will be displayed in the editor.
This method assembles the list of fields which will be displayed in
the rich text editor when a user double-clicks on the icon for the tag.
The fields are a list of SchemaField objects in a FieldRegistry
container. Each SchemaField has the actual attribute name as used in
the tag, the display name for the form, and the type (usually
string).
Returns:
the list of fields to be displayed in the editor.
"""
reg = schema_fields.FieldRegistry('Unimplemented Custom Tag')
return reg
def unavailable_schema(self, message):
"""Utility to generate a schema for a "not available" message."""
reg = schema_fields.FieldRegistry(self.name())
reg.add_property(
schema_fields.SchemaField(
'unused_id', '', 'string', optional=True,
editable=False, extra_schema_dict_values={
'value': message,
'visu': {
'visuType': 'funcName',
'funcName': 'disableSave'}}))
return reg
class ContextAwareTag(BaseTag):
"""A tag which shares a context with other tags of the same type."""
class Context(object):
"""Carries the environment and other data used by the tag."""
def __init__(self, handler, env):
"""Initialize the context.
Args:
handler: controllers.utils.BaseHandler. The server runtime.
env: dict. A dict of values shared shared between instances of
the tag on the same page. Values stored in this dict will be
available to subsequent calls to render() on the same page,
and to the call to rollup_header_footer() made at the end of
the page. Use this to store things like JS library refs
which can be de-dup'd and put in the header or footer.
"""
self.handler = handler
self.env = env
def render(self, node, context): # pylint: disable=W0613
"""Receive a node and return a node.
Args:
node: cElementTree.Element. The DOM node for the tag which should be
rendered.
context: Context. The context shared between instances of the tag.
Returns:
A cElementTree.Element holding the rendered DOM.
"""
return super(ContextAwareTag, self).render(node, context.handler)
def rollup_header_footer(self, context):
"""Roll up header and footer from data stored in the tag environment.
This method is called once at the end of page processing. It receives
the context object, which has been passed to all rendering methods for
this tag on the page, and which accumulates data stored by the
renderers.
Args:
context: Context. Holds data set in an environment dict by previous
calls to render, containing, e.g., URLs of CSS or JS resources.
Returns:
A pair of cElementTree.Element's (header, footer).
"""
pass
class ResourcesHandler(webapp2.RequestHandler):
"""Content handler for resources associated with custom tags."""
def rebase_path(self, path):
"""Override this method to rebase the path to a different root."""
return path
def transform_resource(self, resource_str):
"""Override this method to apply a transforation to the resource."""
return resource_str
def get(self):
"""Respond to HTTP GET methods."""
path = self.rebase_path(self.request.path)
if path.startswith('/'):
path = path[1:]
path = os.path.normpath(path)
resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
mimetype = mimetypes.guess_type(resource_file)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
try:
self.response.status = 200
self.response.headers['Content-Type'] = mimetype
self.response.cache_control.no_cache = None
self.response.cache_control.public = 'public'
self.response.cache_control.max_age = 600
stream = open(resource_file)
self.response.write(self.transform_resource(stream.read()))
except IOError:
self.error(404)
class JQueryHandler(ResourcesHandler):
"""A content handler which serves jQuery scripts wrapped in $.ready()."""
def transform_resource(self, resource_str):
return '$(function() {%s});' % resource_str
class EditorBlacklists(object):
"""Lists tags which should not be supported by various editors."""
COURSE_SCOPE = set()
ASSESSMENT_SCOPE = set()
DESCRIPTIVE_SCOPE = set()
@classmethod
def register(cls, tag_name, editor_set):
editor_set.add(tag_name)
@classmethod
def unregister(cls, tag_name, editor_set):
if tag_name in editor_set:
editor_set.remove(tag_name)
class Registry(object):
"""A class that holds all dynamically registered tags."""
_bindings = {}
@classmethod
def add_tag_binding(cls, tag_name, clazz):
"""Registers a tag name to class binding."""
cls._bindings[tag_name] = clazz
@classmethod
def remove_tag_binding(cls, tag_name):
"""Unregisters a tag binding."""
if tag_name in cls._bindings:
del cls._bindings[tag_name]
@classmethod
def get_all_tags(cls):
return dict(cls._bindings.items())
def get_tag_bindings():
"""Return the bindings of tag names to implementing classes.
Tag bindings work by looking for classes which extend BaseTag and which
belong to packages inside extensions/tags. The tag name is then composed
from the package name and the class name, after lower-casing and separated
with a dash. E.g., the class
extensions.tags.gcb.YouTube
is bound to the tag name gcb-youtube.
Returns:
the bindings of tag names to implementing classes.
"""
bindings = {}
for loader, name, ispkg in pkgutil.walk_packages(tags.__path__):
if ispkg:
mod = loader.find_module(name).load_module(name)
for name, clazz in inspect.getmembers(mod, inspect.isclass):
if issubclass(clazz, BaseTag):
tag_name = ('%s-%s' % (mod.__name__, name)).lower()
bindings[tag_name] = clazz
return dict(bindings.items() + Registry.get_all_tags().items())
def html_string_to_element_tree(html_string):
parser = html5lib.HTMLParser(
tree=html5lib.treebuilders.getTreeBuilder('etree', cElementTree),
namespaceHTMLElements=False)
return parser.parseFragment('<div>%s</div>' % html_string)[0]
def html_to_safe_dom(html_string, handler):
"""Render HTML text as a tree of safe_dom elements."""
tag_bindings = get_tag_bindings()
node_list = safe_dom.NodeList()
if not html_string:
return node_list
# Set of all instance id's used in this dom tree, used to detect duplication
used_instance_ids = set([])
# A dictionary of environments, one for each tag type which appears in the
# page
tag_contexts = {}
def _generate_error_message_node_list(elt, error_message):
"""Generates a node_list representing an error message."""
logging.error(
'[%s, %s]: %s.', elt.tag, dict(**elt.attrib), error_message)
node_list = safe_dom.NodeList()
node_list.append(safe_dom.Element(
'span', className='gcb-error-tag'
).add_text(error_message))
if elt.tail:
node_list.append(safe_dom.Text(elt.tail))
return node_list
def _process_html_tree(elt):
"""Recursively parses an HTML tree into a safe_dom.NodeList()."""
# Return immediately with an error message if a duplicate instanceid is
# detected.
if 'instanceid' in elt.attrib:
if elt.attrib['instanceid'] in used_instance_ids:
return _generate_error_message_node_list(
elt, DUPLICATE_INSTANCE_ID_MESSAGE)
used_instance_ids.add(elt.attrib['instanceid'])
# Otherwise, attempt to parse this tag and all its child tags.
original_elt = elt
try:
if elt.tag in tag_bindings:
tag = tag_bindings[elt.tag]()
if isinstance(tag, ContextAwareTag):
# Get or initialize a environment dict for this type of tag.
# Each tag type gets a separate environment shared by all
# instances of that tag.
context = tag_contexts.get(elt.tag)
if context is None:
context = ContextAwareTag.Context(handler, {})
tag_contexts[elt.tag] = context
# Render the tag
elt = tag.render(elt, context)
else:
# Render the tag
elt = tag.render(elt, handler)
if elt.tag == cElementTree.Comment:
out_elt = safe_dom.Comment()
elif elt.tag.lower() == 'script':
out_elt = safe_dom.ScriptElement()
else:
out_elt = safe_dom.Element(elt.tag)
out_elt.add_attribute(**elt.attrib)
if elt.text:
out_elt.add_text(elt.text)
for child in elt:
out_elt.add_children(
_process_html_tree(child))
node_list = safe_dom.NodeList()
node_list.append(out_elt)
if original_elt.tail:
node_list.append(safe_dom.Text(original_elt.tail))
return node_list
except Exception as e: # pylint: disable-msg=broad-except
logging.exception('Error handling tag: %s', elt.tag)
return _generate_error_message_node_list(
original_elt, '%s: %s' % (INVALID_HTML_TAG_MESSAGE, e))
root = html_string_to_element_tree(html_string)
if root.text:
node_list.append(safe_dom.Text(root.text))
for child_elt in root:
node_list.append(_process_html_tree(child_elt))
# After the page is processed, rollup any global header/footer data which
# the environment-aware tags have accumulated in their env's
for tag_name, context in tag_contexts.items():
header, footer = tag_bindings[tag_name]().rollup_header_footer(context)
node_list.insert(0, _process_html_tree(header))
node_list.append(_process_html_tree(footer))
return node_list
def get_components_from_html(html):
"""Returns a list of dicts representing the components in a lesson.
Args:
html: a block of html that may contain some HTML tags representing
custom components.
Returns:
A list of dicts. Each dict represents one component and has two
keys:
- instanceid: the instance id of the component
- cpt_name: the name of the component tag (e.g. gcb-googlegroup)
"""
parser = html5lib.HTMLParser(
tree=html5lib.treebuilders.getTreeBuilder('etree', cElementTree),
namespaceHTMLElements=False)
content = parser.parseFragment('<div>%s</div>' % html)[0]
components = []
for component in content.findall('.//*[@instanceid]'):
component_dict = {'cpt_name': component.tag}
component_dict.update(component.attrib)
components.append(component_dict)
return components
|
{
"content_hash": "6fee366a7d8b08af1b630a06395f5717",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 80,
"avg_line_length": 36.21739130434783,
"alnum_prop": 0.6402561024409764,
"repo_name": "wavemind/mlgcb",
"id": "02ca8a1b31826a4b8339c29c62689b7947b014ab",
"size": "15592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31927"
},
{
"name": "JavaScript",
"bytes": "329372"
},
{
"name": "Python",
"bytes": "2274187"
},
{
"name": "Shell",
"bytes": "15633"
}
],
"symlink_target": ""
}
|
"""
Very simple module to solve linear equation systems in finite fields.
Author: Ivo Kubjas
BSD 2-Clause License
"""
import numpy as np
from gmpy import mpz, invert
import pdb # XXX: Delete when done
from math import floor
def solve_bottom(mat, base):
localmat = mat.copy()
n, m = mat.shape
for i in range(n-1):
localmat[i] = inverse_array(localmat[i], base, i)
for j in range(i+1,n):
localmat[j] += -localmat[j][i] * localmat[i]
return mod_matrix(localmat, base)
def solve_top(mat, base):
localmat = mat.copy()
n, m = mat.shape
for i in range(1,n)[::-1]:
localmat[i] = inverse_array(localmat[i], base, i)
for j in range(i)[::-1]:
localmat[j] += -localmat[j][i] * localmat[i]
return mod_matrix(localmat, base)
def solve(mat, base):
localmat = mat.copy()
localmat = solve_bottom(localmat, base)
localmat = solve_top(localmat, base)
return localmat
def mod_array(arr, base):
return arr % base
def mod_matrix(mat, base):
return mat % base
def inverse_array(arr, base, pos = 0):
inverse = invert(arr[pos], base)
return mod_array(arr * inverse, base)
def create_array(arr):
mpzarr = [mpz(i) for i in arr]
a = np.array(mpzarr)
return a
def create_mat(mat):
npmat = np.array([create_array(vec) for vec in mat])
return npmat
def create_equations(evpoints, values, d1, d2, base):
mat = []
for (ev, val) in zip(evpoints, values):
vec = []
for i in range(d1 - 1, -1, -1):
vec.append(pow(ev, i, base))
for i in range(d2 - 1, -1, -1):
vec.append(-val * pow(ev, i, base))
vec.append(val * pow(ev, d2, base) - pow(ev,d1, base))
mat.append(vec)
return mod_matrix(create_mat(mat), base)
def indep_solutions(solved, base):
fil = lambda x: x != 0
## TODO: Consider if solution is not sigular and how to compute all
## solutions
independent = np.array([0] * solved.shape[1])
dependent = np.array([0] * solved.shape[1])
for row in solved:
dependent |= map(fil, row) & independent
independent |= map(fil, row)
dependent[-1] = 0 # answer may be dependent, we dont care
return dependent, independent
def rat_poly_sing(mat, indep, d1, d2, base, coeff=False):
## XXX: Only for singular solutions for now
coefficients = [0] * (d1 + d2)
try:
coefficients[list(indep).index(1)] = mpz(1)
except ValueError:
pass
for (i, v) in enumerate(indep[:-1]):
if v == 0:
coefficients[i] = (mat[i][-1] - mat[i][list(indep).index(1)]) % base
coeff_n = list(enumerate(coefficients[d1-1::-1]))
coeff_d = list(enumerate(coefficients[:d1-1:-1]))
if coeff:
return coeff_n, coeff_d
f = lambda x: (pow(x, d1, base) + sum([pow(x, a, base) * b for (a,b) in coeff_n])) % base
g = lambda x: (pow(x, d2, base) + sum([pow(x, a, base) * b for (a,b) in coeff_d])) % base
return f,g
def evaluate(hostset, points, base):
values = []
char_coef = np.poly(hostset)
for point in points:
evaluated = np.polyval(char_coef, point) % base
values.append(mpz(int(evaluated)))
return values
def divide(set1, set2, base):
values = []
for (v1, v2) in zip(set1, set2):
values.append(v1 * v2.invert(base) % base)
return values
def poly_bounds(set1, set2, m):
delta = len(set1) - len(set2)
d1 = int(floor((m+delta)/2.0))
d2 = int(floor((m-delta)/2.0))
return d1,d2
def test(fn1, fn2, base):
## _very_ naive. still working on root finding
sol1 = []
sol2 = []
for i in range(base):
if fn1(i) == 0:
sol1.append(i)
for i in range(base):
if fn2(i) == 0:
if i in sol1:
sol1.remove(i)
else:
sol2.append(i)
return sol1, sol2
def test_data():
evpoints = [-1, -2, -3, -4, -5]
values = [75, 74, 17, 1, 35]
d1 = 2
d2 = 3
base = 97
return evpoints, values, d1, d2, base
|
{
"content_hash": "e35c2bd933ce052ca1beb5483047a400",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 93,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.5761670761670762,
"repo_name": "ivokub/polyrecon",
"id": "baa3efb87beb29054467496fcc2be3330d41eff5",
"size": "4070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solve_recon.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4070"
}
],
"symlink_target": ""
}
|
from bingads.v13.bulk.entities.audiences.bulk_ad_group_audience_association import BulkAdGroupAudienceAssociation
class BulkAdGroupInMarketAudienceAssociation(BulkAdGroupAudienceAssociation):
""" Represents an Ad Group In Market Audience Association that can be read or written in a bulk file.
For more information, see Ad Group In Market Audience Association at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
|
{
"content_hash": "4b62c818a6503072d68c592884d1c874",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 120,
"avg_line_length": 41,
"alnum_prop": 0.7543554006968641,
"repo_name": "bing-ads-sdk/BingAds-Python-SDK",
"id": "bee26cf6c665c697eb50bc08736ce8a83e6215ab",
"size": "574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bingads/v13/bulk/entities/audiences/bulk_ad_group_in_market_audience_association.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "947470"
}
],
"symlink_target": ""
}
|
"""
get_terminal_size() -- return width and height of terminal as a tuple
code from:
http://stackoverflow.com/questions/566746/how-to-get-console- window-width-in-
python
written by
Harco Kuppens (http://stackoverflow.com/users/825214/harco-kuppens)
It is mentioned in the stackoverflow response that this code works
on linux, os x, windows and cygwin (windows).
"""
from __future__ import print_function
import os
import shutil
from pandas.compat import PY3
__all__ = ['get_terminal_size', 'is_terminal']
def get_terminal_size():
"""
Detect terminal size and return tuple = (width, height).
Only to be used when running in a terminal. Note that the IPython notebook,
IPython zmq frontends, or IDLE do not run in a terminal,
"""
import platform
if PY3:
return shutil.get_terminal_size()
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os == 'Linux' or \
current_os == 'Darwin' or \
current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy
def is_terminal():
"""
Detect if Python is running in a terminal.
Returns True if Python is running in a terminal or False if not.
"""
try:
ip = get_ipython()
except NameError: # assume standard Python interpreter in a terminal
return True
else:
if hasattr(ip, 'kernel'): # IPython as a Jupyter kernel
return False
else: # IPython in a terminal
return True
def _get_terminal_size_windows():
res = None
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
except:
return None
if res:
import struct
(bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx,
maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return None
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width
# -height-of-a-terminal-window
try:
import subprocess
proc = subprocess.Popen(["tput", "cols"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output = proc.communicate(input=None)
cols = int(output[0])
proc = subprocess.Popen(["tput", "lines"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output = proc.communicate(input=None)
rows = int(output[0])
return (cols, rows)
except:
return None
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr or cr == (0, 0):
try:
from os import environ as env
cr = (env['LINES'], env['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
print('width = {w} height = {h}'.format(w=sizex, h=sizey))
|
{
"content_hash": "e581ae8cd40a37ee9fba5df99491b824",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 28.351724137931033,
"alnum_prop": 0.5757723181707614,
"repo_name": "ryfeus/lambda-packs",
"id": "52262ea05bf96336ce9e5577992821374c7b7548",
"size": "4111",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Tensorflow_Pandas_Numpy/source3.6/pandas/io/formats/terminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
from pandas import DataFrame, SparseDataFrame, SparseSeries
from pandas.util import testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
def test_quantile():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = 0.1
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
def test_quantile_multi():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = [0.1, 0.5]
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseDataFrame(dense_expected)
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
|
{
"content_hash": "39d86a6edf95504a3c438b14b18d7f8b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 30.146341463414632,
"alnum_prop": 0.6974110032362459,
"repo_name": "toobaz/pandas",
"id": "fae879b3d33b598ea57c9c18f9d9b7d4fb0d12f0",
"size": "1236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/sparse/frame/test_analytics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""Support for Homekit Alarm Control Panel."""
from __future__ import annotations
from typing import Any
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import Service, ServicesTypes
from homeassistant.components.alarm_control_panel import (
AlarmControlPanelEntity,
AlarmControlPanelEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import KNOWN_DEVICES, HomeKitEntity
ICON = "mdi:security"
CURRENT_STATE_MAP = {
0: STATE_ALARM_ARMED_HOME,
1: STATE_ALARM_ARMED_AWAY,
2: STATE_ALARM_ARMED_NIGHT,
3: STATE_ALARM_DISARMED,
4: STATE_ALARM_TRIGGERED,
}
TARGET_STATE_MAP = {
STATE_ALARM_ARMED_HOME: 0,
STATE_ALARM_ARMED_AWAY: 1,
STATE_ALARM_ARMED_NIGHT: 2,
STATE_ALARM_DISARMED: 3,
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Homekit alarm control panel."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(service: Service) -> bool:
if service.type != ServicesTypes.SECURITY_SYSTEM:
return False
info = {"aid": service.accessory.aid, "iid": service.iid}
async_add_entities([HomeKitAlarmControlPanelEntity(conn, info)], True)
return True
conn.add_listener(async_add_service)
class HomeKitAlarmControlPanelEntity(HomeKitEntity, AlarmControlPanelEntity):
"""Representation of a Homekit Alarm Control Panel."""
_attr_supported_features = (
AlarmControlPanelEntityFeature.ARM_HOME
| AlarmControlPanelEntityFeature.ARM_AWAY
| AlarmControlPanelEntityFeature.ARM_NIGHT
)
def get_characteristic_types(self) -> list[str]:
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT,
CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET,
CharacteristicsTypes.BATTERY_LEVEL,
]
@property
def icon(self) -> str:
"""Return icon."""
return ICON
@property
def state(self) -> str:
"""Return the state of the device."""
return CURRENT_STATE_MAP[
self.service.value(CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT)
]
async def async_alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
await self.set_alarm_state(STATE_ALARM_DISARMED, code)
async def async_alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm command."""
await self.set_alarm_state(STATE_ALARM_ARMED_AWAY, code)
async def async_alarm_arm_home(self, code: str | None = None) -> None:
"""Send stay command."""
await self.set_alarm_state(STATE_ALARM_ARMED_HOME, code)
async def async_alarm_arm_night(self, code: str | None = None) -> None:
"""Send night command."""
await self.set_alarm_state(STATE_ALARM_ARMED_NIGHT, code)
async def set_alarm_state(self, state: str, code: str | None = None) -> None:
"""Send state command."""
await self.async_put_characteristics(
{CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: TARGET_STATE_MAP[state]}
)
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return the optional state attributes."""
battery_level = self.service.value(CharacteristicsTypes.BATTERY_LEVEL)
if not battery_level:
return {}
return {ATTR_BATTERY_LEVEL: battery_level}
|
{
"content_hash": "2f62335e31b4d6509ec1c696a091f9f5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 88,
"avg_line_length": 32.53658536585366,
"alnum_prop": 0.6766616691654173,
"repo_name": "toddeye/home-assistant",
"id": "c7e499b6e89e6ba7a6d15dc67844b24ddc173171",
"size": "4002",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/alarm_control_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import pickle
from athletelist import AthleteList
def get_coath_data(fileName):
try:
with open(fileName) as myfile:
data = myfile.readline()
templ = (data.strip().split(','))
return (AthleteList(templ.pop(0), templ.pop(0),templ))
except IOError as ioerr:
print ('IOError:' + str(ioerr))
return (None)
def put_to_store(files_list):
all_athletes = {}
for each_file in files_list:
athlete = get_coath_data(each_file)
# 每个选手名字作为键,值是AthleteList实例
all_athletes[athlete.name] = athlete
try:
with open('athletes.pickle', 'wb') as athf:
# 将所有实例对象字典保存到pickle中
pickle.dump(all_athletes, athf)
except IOError as ioerr:
print ('File error (put to store)' + str(ioerr))
return (all_athletes)
def get_from_store():
all_athletes = {}
try:
with open('athletes.pickle', 'rb') as athf:
# 从pickle中,读入字典
all_athletes = pickle.load(athf)
except IOError as ioerr:
print ('File error (get from store)' + str(ioerr))
return (all_athletes)
|
{
"content_hash": "d24b1383f2e9e0cfbfcca4b6b2859ddb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 62,
"avg_line_length": 26.761904761904763,
"alnum_prop": 0.5951957295373665,
"repo_name": "ColorPenBoy/PythonClass",
"id": "e946c63e0df0157957b3444644dc3fdf9d77faf2",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HeadFirstPython/7_chapter2/athletemodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2176"
},
{
"name": "HTML",
"bytes": "1532"
},
{
"name": "Python",
"bytes": "32896"
}
],
"symlink_target": ""
}
|
import ctypes
import time
from .tmex import *
from .system import ISPYTHON3, iteritems
from datetime import datetime
if not ISPYTHON3:
import binascii
DEVICEINFO = {
0x01: ("DS1990A", "Serial Number iButton"),
0x10: ("DS18S2", "High-Precision 1-Wire Digital Thermometer"),
0x26: ("DS2438", "Smart Battery Monitor"),
0x28: ("DS18B2", "Programmable Resolution 1-Wire Digital Thermometer"),
0x81: ("DS1420", "Serial ID Button")
}
class Session(object):
"""
Session is a class that encapsulates a 1-Wire session.
"""
def __init__(self, port=0):
"""
Initializes a 1-Wire session.
port:
A optional port number that will be used to start the session.
"""
self._handle = 0
if ISPYTHON3:
self._context = ctypes.create_string_buffer(b'\000' * 15360)
else:
self._context = ctypes.create_string_buffer('\000' * 15360)
self._devices = {}
self.initialize(port)
def __del__(self):
if self._handle != 0:
TMEndSession(self._handle)
def initialize(self, port=0):
"""
Initializes a 1-Wire session.
port:
A optional port number that will be used to start the session.
"""
portNumber = ctypes.c_short(port)
portType = ctypes.c_short(0)
TMReadDefaultPort(portNumber, portType)
self._handle = TMExtendedStartSession(portNumber, portType, None)
result = TMSetup(self._handle)
if (result != 1):
TMEndSession(self._handle)
if result in TMSetupMessages:
raise TMEXException(TMSetupMessages[result])
else:
raise TMEXException('Unknown setup error, %d' % (result))
def valid(self):
"""
Check if the 1-Wire session is valid.
"""
if self._handle == 0:
return False
return TMValidSession(self._handle) == 1
def _deviceFilter(self, devices, familyFilter):
"""
Filter devices by family.
devices:
A list of device identifiers.
familyFilter:
A list of integers or strings where integers are the device family code and strings is the device family
name.
returns a list of device identifiers.
"""
result = []
filterNumbers = []
for family in familyFilter:
if isinstance(family, (int)):
filterNumbers.append(family)
elif isinstance(family, (str)):
found = False
for num, info in iteritems(DEVICEINFO):
if family == info[0]:
filterNumbers.append(num)
found = True
break
if not found:
raise TMEXException('Unknown device {}'.format(family))
if not filterNumbers:
return devices
for deviceId in devices:
code = int(deviceId[:2], 16)
if code in filterNumbers:
result.append(deviceId)
return result
def enumrate(self, familyFilter=None):
"""
Enumerates the devices on the 1-Wire bus.
familyFilter:
A optional list of family codes for devices to enumerate, omitting any device of a family not found in the
list.
"""
if self._handle == 0:
raise TMEXException('Bus not initialized')
if not self.valid():
raise TMEXException('Bus not valid')
result = TMFirst(self._handle, self._context)
devices = {}
while result != 0:
rom = (ctypes.c_short * 8)()
result = TMRom(self._handle, self._context, rom)
if result == 1:
deviceId = ''.join(['%02X' % x for x in rom])
romBytes = [x for x in rom]
rb = (ctypes.c_ubyte * 8)(*romBytes)
result = TMCRC(8, rb, 0, 0)
if result == 0:
kind = int(rom[0])
info = None
if kind in DEVICEINFO:
info = DEVICEINFO[kind]
devices[deviceId] = {'kind': kind, 'name': info[0], 'description': info[1]}
else:
devices[deviceId] = {'kind': kind}
result = TMNext(self._handle, self._context)
self._devices = devices
filteredDevices = {}
if familyFilter:
ids = self._deviceFilter(devices.keys(), familyFilter)
for deviceId in ids:
filteredDevices[deviceId] = devices[deviceId]
else:
filteredDevices = devices
return filteredDevices
def readDevice(self, deviceId, enableWireLeveling=False):
"""
Reads the value from a device on the 1-Wire bus.
deviceId:
The device id of the device to read. Must have been enumerated.
enableWireLeveling:
Enables the reader to use wire leveling to read from certain devices.
"""
if deviceId not in self._devices:
raise ValueError()
deviceName = self._devices[deviceId]['name']
func = None
try:
func = getattr(self, '_read_' + deviceName)
except AttributeError:
func = None
if func:
return func(deviceId, enableWireLeveling)
else:
return {}
def readDevices(self, devices, familyFilter=None, timeStamp=True):
"""
Reads the value from a list of devices from the 1-Wire bus.
devices:
A list of device id of the devices to read. Must have been enumerated.
familyFilter:
A optional list of integers or strings where integers are the device family code and strings is the device
family name.
timeStamp:
A optional boolean to enable value timestamps and request timing.
"""
if familyFilter:
devices = self._deviceFilter(devices, familyFilter)
if len(devices) == 0:
return {}
if timeStamp:
startTime = datetime.now()
TMTouchReset(self._handle)
TMOneWireLevel(self._handle, 0, 1, 2)
TMTouchByte(self._handle, 0xCC)
TMTouchByte(self._handle, 0x44)
time.sleep(.750)
data = TMTouchByte(self._handle, 0xFF)
while data == 0:
data = TMTouchByte(self._handle, 0xFF)
TMOneWireLevel(self._handle, 0, 0, 0)
result = {}
for deviceId in devices:
t = self.readDevice(deviceId, enableWireLeveling=False)
result[deviceId] = {}
for key, value in iteritems(t):
result[deviceId][key] = value
if timeStamp:
result[deviceId]['timestamp'] = datetime.now()
if timeStamp:
stopTime = datetime.now()
result['time'] = (startTime, stopTime)
result['delta'] = stopTime - startTime
return result
def _addressDevice(self, deviceId):
"""
Addresses a device on the 1-Wire bus directly.
deviceId:
The device id of the device to read. Must have been enumerated.
"""
if deviceId not in self._devices:
raise ValueError()
TMTouchReset(self._handle)
TMTouchByte(self._handle, 0x55) # MATCH ROM
if ISPYTHON3:
# FIXME: Bad code
address = [ord(chr(x)) for x in bytes.fromhex(deviceId) ]
else:
address = [ord(x) for x in binascii.unhexlify(deviceId)]
for i in range(8):
TMTouchByte(self._handle, address[i]) # Send Id
return 1
def _read_DS18B2(self, deviceId, enableWireLeveling=False):
"""
Reads the value from a DS18B2 device on the 1-Wire bus.
deviceId:
The device id of the device to read. Must have been enumerated.
enableWireLeveling:
Enables the reader to use wire leveling to read from certain devices.
"""
result = self._addressDevice(deviceId)
temp = None
if result == 1:
if enableWireLeveling:
TMOneWireLevel(self._handle, 0, 1, 2)
data = TMTouchByte(self._handle, 0x44)
time.sleep(0.6)
data = TMTouchByte(self._handle, 0xFF)
while data == 0:
data = TMTouchByte(self._handle, 0xFF)
TMOneWireLevel(self._handle, 0, 0, 0)
result = self._addressDevice(deviceId)
data = TMTouchByte(self._handle, 0xBE)
data = [TMTouchByte(self._handle, 0xFF) for i in range(9)]
temp = ((0x07 & data[1]) << 4) + ((0xF0 & data[0]) >> 4) + (((0x08 & data[0]) >> 3) * 0.5) + (((0x04 & data[0]) >> 2) * 0.25) + (((0x02 & data[0]) >> 1) * 0.125) + (((0x01 & data[0])) * 0.0625)
if (0x08 & data[1]) == 0x08:
temp = -temp
return {'temperature': temp}
def _read_DS2438(self, deviceId, enableWireLeveling=False):
"""
Reads the value from a DS2438 device connected to a HIH-4021 family device on the 1-Wire bus.
deviceId:
The device id of the device to read. Must have been enumerated.
enableWireLeveling:
Enables the reader to use wire leveling to read from certain devices. Not used for DS2438.
"""
result = self._addressDevice(deviceId)
temp = None
if result == 1:
data = TMTouchByte(self._handle, 0x44)
data = TMTouchByte(self._handle, 0xFF)
while data == 0:
data = TMTouchByte(self._handle, 0xFF)
result = self._addressDevice(deviceId)
data = TMTouchByte(self._handle, 0xB4)
data = TMTouchByte(self._handle, 0xFF)
while data == 0:
data = TMTouchByte(self._handle, 0xFF)
result = self._addressDevice(deviceId)
data = TMTouchByte(self._handle, 0xB8)
data = TMTouchByte(self._handle, 0x00)
result = self._addressDevice(deviceId)
data = TMTouchByte(self._handle, 0xBE)
data = TMTouchByte(self._handle, 0x00)
data = [TMTouchByte(self._handle, 0xFF) for i in range(9)]
temp = (0x7F & data[2])
temp += ((0x80 & data[1]) >> 7) * 0.5
temp += ((0x40 & data[1]) >> 6) * 0.25
temp += ((0x20 & data[1]) >> 5) * 0.125
temp += ((0x10 & data[1]) >> 4) * 0.0625
temp += ((0x08 & data[1]) >> 3) * 0.03125
if (0x80 & data[2]) == 0x80:
temp = -temp
voltage = (((data[4] & 0x03) << 8) + (data[3])) * 0.01
# conversion from voltage to humidity, see HIH-4021 datasheet
humidity = ((voltage / 5.0) - 0.16) / 0.0062
humidity = humidity / (1.0546 - (0.00216 * temp))
return {'temperature': temp, 'humidity': humidity}
|
{
"content_hash": "180ce63fc1b0f13efb707cc9fd2f6d95",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 205,
"avg_line_length": 37.080128205128204,
"alnum_prop": 0.5201832483360705,
"repo_name": "valnoverm/pytmex-git",
"id": "59c6de2d21cce3a153dce74e6a1359d617983f38",
"size": "11689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmex/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29728"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
centers = np.load("model.npy")
data_dir = "/home/bsprague/Projects/CS589/MovieLens/Data/"
Items = np.load(data_dir + "items.npy")
Genres = np.load(data_dir + "genres.npy")
genresSub = [np.zeros(19),np.zeros(19),np.zeros(19)]
genresTotal = [np.zeros(19),np.zeros(19),np.zeros(19)]
for i, center in enumerate(centers):
for movie_id, rating in enumerate(center):
if rating >= 4:
genresSub[i] = np.add(genresSub[i], Items[movie_id][2])
genresTotal[i] = np.add(genresTotal[i], Items[movie_id][2])
plt.tick_params(axis='both', which='major', labelsize=10)
for i in range(3):
plt.figure(i+1)
plt.xticks(np.arange(19), Genres)
plt.xlabel("Genre")
plt.ylabel("Portion of Titles")
plt.title("Portion of 4+ star rated titles by genre in Cluster " + str(i))
plt.bar(np.arange(19), np.divide(genresSub[i], genresTotal[i]), align='center')
plt.show()
|
{
"content_hash": "ceb2b4b05e5b2fb1969dbeb6fb3630c0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 32.93103448275862,
"alnum_prop": 0.6607329842931937,
"repo_name": "bcspragu/Machine-Learning-Projects",
"id": "f574c903faf9ead7cdbe49c0ec4643fa795804c5",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MovieLens/Code/data8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86950"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from unittest.mock import patch
from easy2fa import cli
class TestCheckInput(TestCase):
@patch('builtins.input')
def test_default(self, mock_input):
mock_input.return_value = ''
self.assertEquals(cli.check_input('prompt', default='one'), 'one')
mock_input.return_value = 'two'
self.assertEquals(cli.check_input('prompt', default='one'), 'two')
@patch('builtins.input')
@patch('builtins.print')
def test_assertions(self, mock_print, mock_input):
def assertion(value):
if value not in ['yes', 'no']:
return 'use yes or no'
mock_input.side_effect = ['input', '', 'no']
self.assertEquals(cli.check_input('prompt', assertion=assertion),
'no')
mock_print.assert_called_with('\tInvalid input: use yes or no')
|
{
"content_hash": "448458ff564c3b9a4407c62a322e258b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 35,
"alnum_prop": 0.6148571428571429,
"repo_name": "lutostag/easy2fa",
"id": "86acd82b514b30458fa54cefc7db6d72f32e8646",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy2fa/tests/test_checkinput.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17401"
}
],
"symlink_target": ""
}
|
import pytest
from dcicutils.qa_utils import notice_pytest_fixtures
from ..util import delay_rerun
from .workbook_fixtures import app_settings, app, workbook
# NOTE WELL: Even though app_settings and app are not autouse fixtures, they must be imported.
# Removing these will not cause fixtures by those names not to be found, but my guess is that
# it will find different versions of those fixtures, which is what will cause the tests to fail
# with: 404 "The resource could not be found."
# -kmp 28-Jun-2020
notice_pytest_fixtures(app_settings, app, workbook)
pytestmark = [pytest.mark.working,
# pytest.mark.indexing,
pytest.mark.workbook,
pytest.mark.flaky(rerun_filter=delay_rerun)]
def test_aggregation_facet(workbook, testapp):
notice_pytest_fixtures(workbook, testapp)
res = testapp.get('/search/?type=ExperimentSetReplicate').json
badge_facets = [facet for facet in res['facets'] if facet['title'] in
['Commendations', 'Warnings']]
assert badge_facets
titles = [facet['title'] for facet in badge_facets]
assert 'Commendations' in titles and 'Warnings' in titles
terms = [term['key'] for i in range(2) for term in badge_facets[i]['terms']]
assert len([t for t in terms if t != 'No value']) == 3
def test_aggregation_itemview(workbook, testapp):
notice_pytest_fixtures(workbook, testapp)
res = testapp.get('/experiment-set-replicates/4DNESAAAAAA1/').json
assert 'aggregated-items' in res.keys()
parents = ''.join([badge['parent'] for badge in res['aggregated-items']['badges']])
assert 'biosample' in parents and 'experiment-set-replicate' in parents
items = [badge['parent'] + ', ' + badge['item']['badge']['uuid'] for
badge in res['aggregated-items']['badges']]
assert len(items) == len(list(set(items)))
def test_aggregation_view(workbook, testapp):
notice_pytest_fixtures(workbook, testapp)
res = testapp.get('/experiment-set-replicates/4DNESAAAAAA1/@@aggregated-items').json
agg = res['aggregated_items']
assert 'badges' in agg.keys()
assert len(agg['badges']) >= 3
for agg_badge in agg['badges']:
assert 'parent' in agg_badge
assert 'embedded_path' in agg_badge
# check the embedded_path based off of parent
expected_path = None
if '/experiment-set' in agg_badge['parent']:
expected_path = 'badges'
elif '/experiments' in agg_badge['parent']:
expected_path = 'experiments_in_set.badges'
elif '/biosample' in agg_badge['parent']:
expected_path = 'experiments_in_set.biosample.badges'
elif '/files' in agg_badge['parent']:
expected_path = 'experiments_in_set.files.badges'
# let's not make this test too brittle; handle unexpected test data
if expected_path:
assert agg_badge['embedded_path'] == expected_path
# check fields on badge. hardcoded fields may need to be updated
assert 'messages' in agg_badge['item']
assert 'badge' in agg_badge['item']
assert 'commendation' in agg_badge['item']['badge']
assert 'warning' in agg_badge['item']['badge']
|
{
"content_hash": "72b8ec56be4ad29500edfb0f4796b682",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 96,
"avg_line_length": 43.45945945945946,
"alnum_prop": 0.6635572139303483,
"repo_name": "hms-dbmi/fourfront",
"id": "c59ab7cc069fa50789ad009a716b6c7356b77161",
"size": "3216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/encoded/tests/test_aggregation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "198339"
},
{
"name": "Cucumber",
"bytes": "16918"
},
{
"name": "HTML",
"bytes": "371973"
},
{
"name": "JavaScript",
"bytes": "1403972"
},
{
"name": "Makefile",
"bytes": "110"
},
{
"name": "PLpgSQL",
"bytes": "12067"
},
{
"name": "Python",
"bytes": "751772"
},
{
"name": "Ruby",
"bytes": "1066"
},
{
"name": "Shell",
"bytes": "2248"
}
],
"symlink_target": ""
}
|
import redis
from django.conf import settings
from django_haikus.models import BaseHaikuText
class BigramHistogram:
def __init__(self, *args, **kwargs):
self.model = BaseHaikuText.get_concrete_child()
redis_conf = getattr(settings, 'REDIS', False)
if redis_conf:
client = redis.Redis(**redis_conf)
else:
client = redis.Redis()
self.redis = client
self.key = BaseHaikuText.get_concrete_child().__name__.lower()
def load(self, instances=None):
if instances is None:
instances = self.model.objects.all()
for instance in instances:
t = instance.filtered_text()
words = t.split()
for i in range(0, len(words)-1):
w1, w2 = words[i:i+2]
self.incr(w1, w2)
def incr(self, w1, w2):
count = self.redis.hincrby(self.key, self._build_field(w1, w2), 1)
max = self.redis.hget(self.key, '__max')
if max is None or int(count) > int(max):
self.redis.hset(self.key, '__max_bigram', self._build_field(w1, w2))
self.redis.hset(self.key, '__max', count)
def _build_field(self, w1, w2):
return "%s,%s" % (w1.lower(), w2.lower())
def count(self):
return self.redis.hlen(self.key)
def max(self):
try:
return float(self.redis.hget(self.key, '__max'))
except TypeError:
return 0
def get(self, key, normalize=True):
score = self.redis.hget(self.key, key)
if score is None:
return False
if normalize:
scale = self.max() / 100.0
return float(score) / scale
else:
return score
def max_bigram(self):
return self.get('__max_bigram', normalize=False)
def lookup(self, bigram):
return self.normalize(self.get(bigram))
def flush(self):
self.redis.delete(self.key)
|
{
"content_hash": "3d2c90db3613bd2c774cea8d021558a5",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 30.276923076923076,
"alnum_prop": 0.5564024390243902,
"repo_name": "wieden-kennedy/django-haikus",
"id": "b859291ac8671607f85fe2cce9395013d2fd596e",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_haikus/bigrams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "54532"
},
{
"name": "Python",
"bytes": "78808"
}
],
"symlink_target": ""
}
|
import pkg_resources
try:
pkg_resources.get_distribution('numpy')
except pkg_resources.DistributionNotFound:
numpyPresent = False
print("Error: Numpy package not available.")
else:
numpyPresent = True
import numpy as np
try:
pkg_resources.get_distribution('pandas')
except pkg_resources.DistributionNotFound:
pandasPresent = False
print("Error: Pandas package not available.")
else:
pandasPresent = True
import pandas as pd
import re
import collections
import inspect
from .phjTestFunctionParameters import phjAssert
def phjGetStrFromArgOrFile(phjStr = None,
phjPathAndFileName = None,
phjAllowedAttempts = 3,
phjPrintResults = False):
# This function retrieves a string either from a string contained in a text file
# given by a path and file name, or from a string entered as an argument for this
# function. A string saved in a file is given preference (i.e. this is checked first).
# Initially set phjTempStr to be None
phjTempStr = None
# If one or other of the string input options is not None then get query string
if (phjStr is not None) or (phjPathAndFileName is not None):
# File name and path given preference (i.e. check this first)
if phjPathAndFileName is not None:
# Load SQL query from text file
phjTempStr = phjReadTextFromFile(phjPathAndFileName = phjPathAndFileName,
phjMaxAttempts = phjAllowedAttempts,
phjPrintResults = False)
if phjPrintResults == True:
print("\nString retrieved from file ('{0}'):".format(phjPathAndFileName))
print(phjTempStr)
# If the text file did not yield a string, move on to the query string.
if (phjTempStr is None) and (phjStr is not None):
phjTempStr = phjStr
if phjPrintResults == True:
print("\nString retrieved from passed argument (phjStr):")
print(phjTempStr)
else:
phjTempStr = None
return phjTempStr
def phjReadTextFromFile(phjPathAndFileName = None,
phjMaxAttempts = 3,
phjPrintResults = False):
for i in range(phjMaxAttempts):
if (phjPathAndFileName is None) or (i > 0):
phjPathAndFileName = input('Enter path and filename for file containing text (e.g. query or regex): ')
try:
# The following original code ran the risk of not closing the file if a
# problem occurred.
# phjTempFileObject = open(phjFilePathAndName)
# phjTempText = phjTempFileObject.read()
# phjTempFileObject.close()
with open(phjPathAndFileName,'r') as phjTempFileObject:
phjTempText = phjTempFileObject.read()
if phjPrintResults:
print('Text read from file:')
print(phjTempText)
print('\n')
break
except FileNotFoundError as e:
print("\nA FileNotFoundError occurred.\nError number {0}: {1}. File named \'{2}\' does not exist at that location.".format(e.args[0],e.args[1],phjPathAndFileName))
if i < (phjMaxAttempts-1):
print('\nPlease re-enter path and filename details.\n') # Only say 'Please try again' if not last attempt.
else:
# If file can't be found then set phjTempText to None
print('\nFailed to find file containing text after {0} attempts.\n'.format(i+1))
phjTempText = None
return phjTempText
def phjCreateNamedGroupRegex(phjDF,
phjGroupVarName,
phjRegexVarName,
phjIDVarName = None,
phjRegexPreCompile = False,
phjPrintResults = False):
# Check function parameters
# =========================
try:
# Check whether required parameters have been set to correct type
assert isinstance(phjDF,pd.DataFrame), "Parameter, 'phjDF' needs to be a Pandas dataframe."
assert isinstance(phjGroupVarName,str), "Parameter 'phjGroupVarName' needs to be a string."
assert isinstance(phjRegexVarName,str), "Parameter 'phjRegexVarName' needs to be a string."
if phjIDVarName is not None:
assert isinstance(phjIDVarName,str), "Parameter 'phjIDVarName' (if set) needs to be a string."
# Check whether required columns exist
assert phjGroupVarName in phjDF.columns.values, "Column '{0}' is not in dataframe.".format(phjGroupVarName)
assert phjRegexVarName in phjDF.columns.values, "Column '{0}' is not in dataframe.".format(phjRegexVarName)
if phjIDVarName is not None:
assert phjIDVarName in phjDF.columns.values, "Column '{0}' is not in dataframe.".format(phjIDVarName)
# Check whether arguments are set to allowable values
assert phjRegexPreCompile in [True, False], "Parameter 'phjRegexPreCompile' can only be True or False; it is incorrectly set."
assert phjPrintResults in [True, False], "Parameter 'phjPrintResults' can only be True or False; it is incorrectly set."
# Check whether there are named groups contained in the regexes; the named group
# format is created based on the phjGroupVarName variable and, therefore, the
# individual regex components should not contain a named group. The regex looks
# for the pattern (?P<...>
assert phjDF[phjRegexVarName].str.match('\(\?P<\w+>').sum() == 0, "Regexes should not contain a named group."
# Check whether there are capture groups contained in the regexes. The regexes
# are not intended to capture text but rather to determine whether regex matches
# are present. This regex checks for a round bracket that is NOT followed by a
# question mark plus some other characters followed by a closing round bracket.
# The regex looks for set of round brackets where the opening brackets is not
# followed by question mark.
assert phjDF[phjRegexVarName].str.match('\((?!\?).+?\)').sum() == 0, "Regexes should not contain capture groups."
except AssertionError as e:
# Set return value to none
phjCategoryGroupRegex = None
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
# Complete function if parameter checks are OK and AssertionError not raised
# ==========================================================================
else:
# Remove any rows where group name is empty (and reset the index)
phjEmptyCellMask = ~(phjDF[phjGroupVarName] == "")
phjDF = phjDF[phjEmptyCellMask].reset_index(drop = True)
# Group dataframe based on phjGroupVarName and concatenate strings in phjRegexVarName, separated
# by OR and carriage return (i.e. '|\n').
# And sort by ID number if appropriate.
try:
if phjIDVarName is None:
phjCategoryGroupRegexDF = phjDF.groupby(phjGroupVarName).agg({phjRegexVarName:'|'.join}).reset_index()
else:
phjCategoryGroupRegexDF = phjDF.groupby(phjGroupVarName).agg({phjRegexVarName:'|'.join,
phjIDVarName: 'mean'}).sort_values(phjIDVarName,
axis = 0,
ascending = True).reset_index()
# Check that the values in the phjIDVarName variable (after the 'mean' operation) are all integers.
# If not, itq may indicate an error in the original labelling of category groups.
phjIntegerCheckMask = phjCategoryGroupRegexDF[phjIDVarName]%1 == 0
if sum(~phjIntegerCheckMask) > 0:
if sum(~phjIntegerCheckMask) == 1:
print("\nWarning: there may be an error in the category ID values since one of the mean group values is not an integer.")
else:
print("\nWarning: there may be an error in the category ID values since {0} mean group values are not integers.".format(sum(~phjIntegerCheckMask)))
except pd.core.groupby.DataError as e:
if inspect.stack()[1][3] == '<module>':
print("\nA DataError has occurred in {fname}() function. This may occur if, for example, the '{var}' variable contains missing values. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
var = phjIDVarName))
else:
# If function has been called by another function then modify message and re-raise exception
print("\nA DataError has occurred in {fname}() function when called by {callfname}() function. This may occur if, for example, the '{var}' variable contains missing values. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
var = phjIDVarName,
callfname = inspect.stack()[1][3]))
raise
phjCategoryGroupRegex = None
# Complete function if no exception raised
else:
# Create a column containing a concatenated and combined named group regex for each group
phjCategoryGroupRegexDF['NamedGroupRegex'] = '(?P<' + phjCategoryGroupRegexDF[phjGroupVarName].map(str).str.strip().str.replace(' ','_').str.replace('[^\w\s]','') + '>' + phjCategoryGroupRegexDF[phjRegexVarName].map(str) + ')'
# Create a string representing entire regex
# Various string patterns are included to ensure the final string is easier to read:
# (?P<group1>
# (?:abc)|
# (?:cde))
# (?P<group2>
# (?:fgh)|
# (?:ijk))
phjCategoryGroupRegexStr = phjCategoryGroupRegexDF['NamedGroupRegex'].map(str).str.replace('>\(\?',
'>\n (?').str.replace('\)\|\(',
')|\n (').str.cat(others = None, sep = '|\n')
if phjPrintResults == True:
print("\nFull Regex string")
print(phjCategoryGroupRegexStr)
if phjRegexPreCompile == True:
try:
phjCategoryGroupRegex = re.compile(phjCategoryGroupRegexStr,flags = re.X|re.I)
except re.error as e:
if inspect.stack()[1][3] == '<module>':
print("Regex failed to compile in {fname}() function: {msg}.".format(msg = e,
fname = inspect.stack()[0][3]))
else:
# If function has been called by another function then modify message and re-raise exception
print("Regex failed to compile in {fname}() function when called by {callfname}() function: {msg}.".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
phjCategoryGroupRegex = None
else:
# In the regex has not been compiled then return the string
return phjCategoryGroupRegexStr
return phjCategoryGroupRegex
# This function takes a column of text and uses a regex with named groups to
# determine the group to which the text best fits.
def phjFindRegexNamedGroups(phjDF,
phjDescriptorVarName,
phjNamedGroupRegexStr,
phjSeparateRegexGroups = False,
phjNumberMatchesVarName = 'numberMatches',
phjMatchedGroupVarName = 'matchedgroup',
phjUnclassifiedStr = 'unclassified',
phjMultipleMatchStr = 'multiple',
phjCleanup = False,
phjPrintResults = False):
# Check whether required parameters have been set to correct type and are
# set to allowable values
try:
phjAssert('phjDF',phjDF,pd.DataFrame)
phjAssert('phjDescriptorVarName',phjDescriptorVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
phjAssert('phjNamedGroupRegexStr',phjNamedGroupRegexStr,str)
phjAssert('phjSeparateRegexGroups',phjSeparateRegexGroups,bool)
phjAssert('phjNumberMatchesVarName',phjNumberMatchesVarName,str,phjMustBeAbsentColumnList = list(phjDF.columns))
phjAssert('phjMatchedGroupVarName',phjMatchedGroupVarName,str,phjMustBeAbsentColumnList = list(phjDF.columns))
phjAssert('phjUnclassifiedStr',phjUnclassifiedStr,str)
phjAssert('phjMultipleMatchStr',phjMultipleMatchStr,str)
phjAssert('phjCleanup',phjCleanup,bool)
phjAssert('phjPrintResults',phjPrintResults,bool)
# Could also check that each named group does not already exist as a column name.
# Actually this is done when the phjScratchDF is joined back to the original
# dataframe (see below).
except AssertionError as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
try:
# Try to compile the full and complete named-group regex containing
# multiple named groups.
phjNamedGroupRegex = re.compile(phjNamedGroupRegexStr,flags = re.I|re.X)
except re.error as e:
if inspect.stack()[1][3] == '<module>':
print("Regex failed to compile in {fname}() function: {msg}\n.".format(msg = e,
fname = inspect.stack()[0][3]))
else:
# If function has been called by another function then modify message and re-raise exception
print("Regex failed to compile in {fname}() function when called by {callfname}() function: {msg}\n.".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
# Continue with function if regex compiles
else:
# Run the named-group regex as a single regex. (This may result in some matches being missed
# if preceding mathces are found because grouped regexes cannot overlap.)
if phjSeparateRegexGroups == False:
# Create a dataframe with a column for each named group in the regex
phjScratchDF = phjDF[phjDescriptorVarName].str.extract(phjNamedGroupRegex, expand = True)
# Add the descriptor column only from the original dataframe
phjScratchDF = phjScratchDF.join(phjDF[phjDescriptorVarName])
# Move descriptor column to the front
cols = phjScratchDF.columns.tolist()
cols = cols[-1:] + cols[:-1]
phjScratchDF = phjScratchDF[cols]
# Run each named group regex separately to ensure all group matches are identified.
# It is more likely that multiple group matches may be identified.
else:
# Create dataframe containing matched species.
# This routine is based on a function supplied by Nathan Vērzemnieks on 19 Feb 2018
# on StackOverflow in response to a question.
# https://stackoverflow.com/questions/48858357/extract-named-group-regex-pattern-from-a-compiled-regex-in-python
# Nathan Vērzemnieks said:
# The argument to re.split looks for a literal pipe [and white space] followed by [a non-capturing look-ahead for]
# the (?=< , the beginning of a named group. It compiles each subpattern and uses the groupindex attribute to
# extract the name.
phjScratchDF = phjDF.loc[:,[phjDescriptorVarName]]
for subpattern in re.split('\|\s*(?=\(\?P<)', phjNamedGroupRegexStr):
phjGroupRegex = re.compile(subpattern,flags = re.I|re.X)
phjGroupName = list(phjGroupRegex.groupindex)[0]
phjScratchDF[phjGroupName] = phjScratchDF[phjDescriptorVarName].str.extract(phjGroupRegex,
expand = False)
if phjPrintResults == True:
print(phjGroupName + ' ... done')
else:
print('.',end = '')
print('\n')
# Create a new column that contains a count of the number of matches.
# Obviously, it should only be 1 but it is possible that a small number of cases
# may have more than 1 match.
# Some cases may not be classified.
phjGroupNamesList = list(phjNamedGroupRegex.groupindex)
phjScratchDF[phjNumberMatchesVarName] = phjScratchDF[phjGroupNamesList].count(axis = 1)
if phjPrintResults == True:
# Frequency table showing number of group matches
print('\nTable of number of group matches identified per description term\n')
print(pd.DataFrame(phjScratchDF[phjNumberMatchesVarName].value_counts(sort = False)).rename_axis('Number of matches').rename(columns = {phjNumberMatchesVarName:'Frequency'}))
print('\n')
# Create a new column that contains the name of the matched group for rows where a regex match was identified
phjScratchDF[phjMatchedGroupVarName] = np.nan
for grp in phjGroupNamesList:
phjScratchDF.loc[(phjScratchDF[grp].notnull()) & (phjScratchDF[phjNumberMatchesVarName] == 1),phjMatchedGroupVarName] = grp
# Replace rows with no match to unclassified
phjScratchDF.loc[phjScratchDF[phjNumberMatchesVarName] == 0,phjMatchedGroupVarName] = phjUnclassifiedStr
# Replace rows with multiple matches to multiple match string
phjScratchDF.loc[phjScratchDF[phjNumberMatchesVarName] > 1,phjMatchedGroupVarName] = phjMultipleMatchStr
if phjCleanup == False:
# Check that none of the regex group names already exist as column headings in
# the original dataframe
for grp in phjGroupNamesList:
if grp in phjDF.columns.values:
print('One or more group names in the regular expression clash with column headings in the dataframe. In order to avoid confusion, the phjCleanup variable has been set to True.')
phjCleanup = True
# Remove the temporary columns before joining with original dataframe.
if phjCleanup == True:
phjScratchDF = phjScratchDF.drop(phjGroupNamesList,
axis = 1)
# Join phjScratchDF to original database
phjDF = phjDF.join(phjScratchDF.loc[:,phjScratchDF.columns != phjDescriptorVarName],
how = 'left')
if phjPrintResults == True:
# Print samples of rows of dataframe
with pd.option_context('display.max_rows', 20, 'display.max_columns', 20):
print(phjDF)
print('\n')
return phjDF
def phjMaxLevelOfTaxonomicDetail(phjDF,
phjFirstCol,
phjLastCol,
phjNewColName = 'newColumn',
phjDropPreExisting = False,
phjCleanup = False,
phjPrintResults = False):
# Check function parameters have been set to reasonable values
try:
# Check whether required parameters have been set to correct type
assert isinstance(phjDF,pd.DataFrame), "Parameter 'phjDF' needs to be a Pandas dataframe."
assert isinstance(phjFirstCol,str), "Argument 'phjFirstCol' needs to be a string." # In Python 2, use isinstance(s,basestring)
assert isinstance(phjLastCol,str), "Argument 'phjLastCol' needs to be a string."
assert isinstance(phjNewColName,str), "Argument 'phjNewColName' needs to be a string."
# Check whether required columns exist
assert phjFirstCol in phjDF.columns.values, "Column '{0}' is not in dataframe.".format(phjFirstCol)
assert phjLastCol in phjDF.columns.values, "Column '{0}' is not in dataframe.".format(phjLastCol)
assert phjDF.columns.get_loc(phjLastCol) - phjDF.columns.get_loc(phjFirstCol) > 0, "Columns are given in the wrong order; '{0}' (phjFirstCol) needs to occur BEFORE '{1}' (phjLastCol) in the dataframe.".format(phjFirstCol,phjLastCol)
# Check whether arguments are set to allowable values
assert phjDropPreExisting in [True,False], "Argument 'phjDropPreExisting' can only be True or False; it is incorrectly set."
assert phjCleanup in [True,False], "Argument 'phjCleanup' can only be True or False; it is incorrectly set."
assert phjPrintResults in [True,False], "Argument 'phjPrintResults' can only be True or False; it is incorrectly set."
# Check whether columns that will be created already exist
if phjDropPreExisting == False:
assert 'bin' not in phjDF.columns.values, "A column named 'bin' will be temporarily created but already exists; please rename."
assert 'posFromR' not in phjDF.columns.values, "A column named 'posFromR' will be temporarily created but already exists; please rename."
assert phjNewColName not in phjDF.columns.values, "A column named '{0}' (phjNewColName) will be permanently created but already exists; please choose a different name.".format(phjNewColName)
except AssertionError as e:
print("An AssertionError occurred. ({0})".format(e))
else:
# If columns already exist, drop from dataframe
if phjDropPreExisting == True:
if 'bin' in phjDF.columns.values:
phjDF = phjDF.drop('bin',axis = 1)
if 'posFromR' in phjDF.columns.values:
phjDF = phjDF.drop('posFromR',axis = 1)
if phjNewColName in phjDF.columns.values:
phjDF = phjDF.drop(phjNewColName,axis = 1)
# A discussion of solutions to convert a dataframe with strings and empty cells to binary is given at:
# https://stackoverflow.com/questions/49003150/creating-a-binary-representation-of-whether-a-string-is-present-or-not-in-a-pand
# Several options given.
# Decided to use a Pandas-based option suggested by jezrael (although Numpy options may be quicker)
phjRangeOfCols = list(range(phjDF.columns.get_loc(phjFirstCol),
phjDF.columns.get_loc(phjLastCol) + 1))
phjDF['bin'] = (phjDF.iloc[:,phjRangeOfCols] != '').astype(int).astype(str).values.sum(axis=1)
# Number of digits from right
# ---------------------------
# This method makes use of the idea of two's-complement
#(see https://en.wikipedia.org/wiki/Two%27s_complement#From_the_ones'_complement).
# The algorithm to find the position of the rightmost set bit (i.e. the position
# on the right that is set to '1') is given at:
# https://www.geeksforgeeks.org/position-of-rightmost-set-bit/
# The algorithm is described as:
# Algorithm: (Example 18(010010))
# Let I/P be 12 (1100)
#
# 1. Take two's complement of the given no as all bits are reverted
# except the first '1' from right to left (10111)
#
# 2 Do an bit-wise & with original no, this will return no with the
# required one only (00010)
#
# 3 Take the log2 of the no, you will get position -1 (1)
#
# 4 Add 1 (2)
# The site also gave the following Python code:
# Python Code for Position
# of rightmost set bit
#
# import math
#
# def getFirstSetBitPos(n):
#
# return math.log2(n&-n)+1
#
# # driver code
#
# n = 12
# print(int(getFirstSetBitPos(n)))
#
# This code is contributed
# by Anant Agarwal.
# This was adapted to use array arithmatic in a Pandas dataframe:
# df['pos'] = (np.log2(df['bin']&-df['bin'])+1).astype(int)
# Position of rightmost set bit
#phjDF['posFromR'] = (np.log2(phjDF['bin'].astype(int) & -phjDF['bin'].astype(int)) + 1).astype(int)
# If all cells in a row were empty, the binary representation would be 000...000. This causes
# big problems when trying to calculate the two's complement because log2(0) is infinity.
# To overcome this problem, add a '1' to start of each string; this won't affect the calculation
# of the rightmost set bit except in cases where all cells are empty, in which case the rightmost
# set bit will lie outside the number of columns being considered.
phjDF['posFromR'] = (np.log2( ('1' + phjDF['bin']).astype(int) & -('1' + phjDF['bin']).astype(int)) + 1).astype(int)
# Count back from the last column to find the column containing the last string entry
phjDF[phjNewColName] = phjDF.columns[phjDF.columns.get_loc(phjLastCol) - phjDF['posFromR'] + 1]
# If the posFromR value is greater than the number of columns listed then the row of cells
# must have consisted of all zeroes. Therefore, replace all such occurrences with some
# indicator e.g. 'unclassified'.
nCols = phjDF.columns.get_loc(phjLastCol) - phjDF.columns.get_loc(phjFirstCol) + 1
phjMask = phjDF['posFromR'].astype(int) > nCols
phjDF.loc[phjMask,phjNewColName] = 'unclassified'
# Remove temporary columns
if phjCleanup == True:
phjDF = phjDF.drop(['bin','posFromR'],
axis = 1)
# Print dataframe
if phjPrintResults == True:
print(phjDF)
print('\n')
return phjDF
def phjReverseMap(phjDF,
phjMappingDict,
phjCategoryVarName,
phjMappedVarName = 'mapped_cat',
phjUnmapped = np.nan,
phjTreatAsRegex = False,
phjDropPreExisting = False,
phjPrintResults = False):
# Check whether required parameters have been set to correct type
try:
phjAssert('phjDF',phjDF,pd.DataFrame)
phjAssert('phjMappingDict',phjMappingDict,collections.Mapping)
phjAssert('phjCategoryVarName',phjCategoryVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
#phjAssert('phjMappedVarName',phjMappedVarName,str)
phjAssert('phjUnmapped',phjUnmapped,(str,int,float))
phjAssert('phjTreatAsRegex',phjTreatAsRegex,bool)
phjAssert('phjDropPreExisting',phjDropPreExisting,bool)
# Check whether columns that will be created already exist
if phjDropPreExisting == False:
phjAssert('phjMappedVarName',phjMappedVarName,str,phjMustBeAbsentColumnList = list(phjDF.columns))
if phjTreatAsRegex == True:
# If regexes will be used then need to check that numerous other columns
# are not already in use
# The following asserts used the phjAssert() function to ensure consistent
# error messages would be produced even though some functionality was
# redundant (e.g. testing whether 'numberMatches' is a string is not
# strictly necessary).
for col in list(phjMappingDict.keys()):
phjAssert('Dictionary key name',col,str,phjMustBeAbsentColumnList = list(phjDF.columns))
phjAssert('Number of matches','numberMatches',str,phjMustBeAbsentColumnList = list(phjDF.columns))
elif phjDropPreExisting == True:
# If phjDropPreExisting is True then only need to check that name of the
# variable is a string value.
phjAssert('phjMappedVarName',phjMappedVarName,str)
# And almost finally...
phjAssert('phjPrintResults',phjPrintResults,bool)
# Bespoke asserts
# ---------------
# Check that all the items in the dictionary values are uniquely represented, otherwise
# the dictionary entry will only reflect the last occurrence.
# Make a flat list of items in the dict values using list comprehension and check there
# are no duplicates. List comprehension taken from:
# https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
phjItems = [item for sublist in list(phjMappingDict.values()) for item in sublist]
assert len(set(phjItems)) == len(phjItems), 'Items in dictionary values are not unique.'
except AssertionError as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
if phjTreatAsRegex == True:
if phjDropPreExisting == True:
# Drop pre-existing columns.
# New columns that will be created include:
# i. Names of dictionary keys
# ii. Column named numberMatches (which is created during function)
# iii. Column passed as phjMappedVarName
# Only retain those columns that are not included in the above list of
# columns that will be created during the function.
phjOrigCols = list(phjDF.columns)
phjDF = phjDF[[c for c in phjOrigCols if c not in list(phjMappingDict.keys()) + ['numberMatches',phjMappedVarName]]].copy()
# Print a list of columns that existed in the original dataframe but are not
# found in dataframe.
# The following construction prints the list as a series of comma-separated
# items with the final 2 items separated by 'and'. Hint for achieving this
# found at: https://stackoverflow.com/questions/2556108/rreplace-how-to-replace-the-last-occurrence-of-an-expression-in-a-string
if phjPrintResults == True:
if set(phjOrigCols) != set(list(phjDF.columns)):
print("The following columns already existed in the dataframe and have been dropped: '{}'.".format(' and '.join("', '".join([c for c in phjOrigCols if c not in list(phjDF.columns)]).rsplit(', ',1))))
print('\n')
# The dict as entered is converted to a 'long' format with one regex
# per row. A named group regex is then created using phjCreateNamedGroupRegex()
# and matches identified using phjFindRegexNamedGroups().
# Converting the dictionary to a long dataframe is based on an answer
# by aws_apprentice at https://stackoverflow.com/questions/54368318/reshaping-a-python-dict-to-a-pandas-dataframe.
phjRegexDF = pd.DataFrame.from_dict(phjMappingDict, orient='index').stack().reset_index().drop('level_1', axis=1).rename(columns={'level_0': 'key', 0: 'value'})
try:
phjRegexStr = phjCreateNamedGroupRegex(phjDF = phjRegexDF,
phjGroupVarName = 'key',
phjRegexVarName = 'value',
phjIDVarName = None,
phjRegexPreCompile = False,
phjPrintResults = phjPrintResults)
# Catch exception passed from phjCreateNamedGroupRegex() function
except AssertionError as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
try:
phjDF = phjFindRegexNamedGroups(phjDF = phjDF,
phjDescriptorVarName = phjCategoryVarName,
phjNamedGroupRegexStr = phjRegexStr,
phjSeparateRegexGroups = True,
phjNumberMatchesVarName = 'numberMatches',
phjMatchedGroupVarName = phjMappedVarName,
phjUnclassifiedStr = phjUnmapped,
phjMultipleMatchStr = 'multiple',
phjCleanup = False,
phjPrintResults = phjPrintResults)
# Re-catch exceptions passed by phjFindRegexNamedGroups() function
except AssertionError as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
except re.error as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("Regex failed to compile in {fname}() function: {msg}\n.".format(msg = e,
fname = inspect.stack()[0][3]))
else:
# If function has been called by another function then modify message and re-raise exception
print("Regex failed to compile in {fname}() function when called by {callfname}() function: {msg}\n.".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
elif phjTreatAsRegex == False:
if phjDropPreExisting == True:
# Drop pre-existing column. In this scenario, only need to remove the
# column that has the same name as the phjMappedVarName variable
phjDF = phjDF[[c for c in list(phjDF.columns) if c not in [phjMappedVarName]]].copy()
# Not treating search strings as regexes.
# A function to reverse a dict was given in an answer by MSeifert at:
# https://stackoverflow.com/questions/35491223/inverting-a-dictionary-with-list-values
# Alternatively, use a dictionary comprehension but must be careful that no duplicates
# are found in the items in the dictionary values.
# Taken from: https://stackoverflow.com/questions/37082877/map-pandas-dataframe-columns-to-dictionary-values
phjRevDict = {v: k for k in phjMappingDict for v in phjMappingDict[k]}
if phjPrintResults == True:
print("Reversed dictionary\n")
print(phjRevDict)
print('\n')
phjDF[phjMappedVarName] = phjDF[phjCategoryVarName].map(phjRevDict).fillna(phjUnmapped)
if phjPrintResults == True:
print(phjDF)
print('\n')
finally:
return phjDF
# This function retrieves unique values of a variable from multiple dataframes and
# concatenates into a single dataframe
def phjRetrieveUniqueFromMultiDataFrames(phjDFList,
phjVarNameList,
phjSort = True,
phjPrintResults = False):
##########
# Need to check that phjDFList and phjVarNameList are lists; if strings, need to convert to lists.
# Check that columns exist.
##########
# If phjDFList entered as a dataframe then convert to a list of dataframes (with 1 item)
if isinstance(phjDFList,pd.DataFrame):
phjDFList = [phjDFList]
# If phjVarNameList is a string then convert to a list (with 1 item)
if isinstance(phjVarNameList,str):
phjVarNameList = [phjVarNameList]
try:
# phjDFList needs to be a list of dataframes. If it is confirmed to be a list
# then check that each item is a dataframe
phjAssert('phjDFList',phjDFList,list)
for df in phjDFList:
phjAssert('Element in phjDFList',df,pd.DataFrame)
# phjVarNameList needs to be a list of strings. If it is confirmed to be a list
# then check that all the listed varnames are present in each dataframe in phjDFList
phjAssert('phjVarNameList',phjVarNameList,list)
for df in phjDFList:
phjAssert('phjVarNameList',phjVarNameList,list,phjBespokeMessage = 'The variable names are not present in all dataframes.',phjMustBePresentColumnList = list(df.columns))
# Other asserts
phjAssert('phjSort',phjSort,bool)
phjAssert('phjPrintResults',phjPrintResults,bool)
except AssertionError as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
phjCombDF = pd.DataFrame()
for phjTempDF in phjDFList:
phjCombDF = pd.concat([phjCombDF,phjTempDF[phjVarNameList].drop_duplicates(keep = 'first')])
phjCombDF = phjCombDF[phjVarNameList].drop_duplicates(keep = 'first').reset_index(drop = True)
if phjSort == True:
phjCombDF = phjCombDF.sort_values(by = phjVarNameList).reset_index(drop = True)
if phjPrintResults == True:
for index,df in enumerate(phjDFList):
print('Unique values in dataframe at position {}'.format(index))
print(df[phjVarNameList].drop_duplicates())
print('\n')
print('Dataframe of unique values from all dataframes')
print(phjCombDF)
if phjCombDF.empty:
return None
else:
return phjCombDF
def phjCreateRowOfValuesForDtypes(phjDF,
phjDtypeDict,
phjScalarBool = True,
phjPrintResults = False):
# This function creates a dict containing column names as keys and appropriate strings
# or values for the dictionary value.
#
# The phjDtypeDict defines what values should be added to columns of specific dtypes
# such as:
# dict = {'object':'missing',
# 'int64':999}
#
# Step through each key in phjDtypeDict and add the associated value to all columns of
# that dtype.
# For the following dataframe (with 4 columns of dtypes object, int64, object, int64):
# a b c d
# 0 1 abc 101 jkl
# 1 2 def 102 mno
# 2 3 ghi 103 pqr
#
# The following dict will be produced:
# {'a':999,'b':'missing','c':999,'d':'missing'}
#
# (N.B. The same single missing value code is added to all int64 columns. Future update
# may allow a dictionary of missing value codes to be entered.)
phjOutDict = {}
for key,value in phjDtypeDict.items():
# Create a dict of column names for each dtype
phjTempDict = {col:value for col in phjDF if phjDF[col].dtype == key}
# Add phjDict1 to existing phjDict0. Code to add dictionaries taken from:
# https://stackoverflow.com/questions/6005066/adding-dictionaries-together-python
phjOutDict = {k: v for d in (phjOutDict, phjTempDict) for k, v in d.items()}
# If scalar values are not required then convert values to lists
if phjScalarBool == False:
phjOutDict = {k:[v] for k,v in phjOutDict.items()}
return phjOutDict
def phjDataFrameLowerCase(phjDF,
phjVarNameList,
phjPrintResults = False):
try:
phjAssert('phjDF',phjDF,pd.DataFrame)
phjAssert('phjVarNameList',phjVarNameList,list,phjMustBePresentColumnList = list(phjDF.columns))
phjAssert('phjPrintResults',phjPrintResults,bool)
except AssertionError as e:
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
# Create a copy of the dataframe so the original is not edited
phjDF = phjDF.copy()
# Step through each column of dataframe; if dtype is an object, convert to lowercase
for phjCol in phjVarNameList:
if phjDF[phjCol].dtype == 'object':
phjDF[phjCol] = phjDF[phjCol].str.lower()
return phjDF
# This function updates the lookup tables in the database by appending any new values
# that are included in the data files
def phjUpdateLUT(phjExistDF,
phjNewDF,
phjIDName,
phjVarNameList,
phjMissStr,
phjMissCode,
phjIgnoreCase = True,
phjPrintResults = False):
##########
# Need to check that both DFs have the same structure
# Need to check that if phjVarNameList is a list of length 1 then item needs to be a string
# Need to check that missing value code doesn't clash with pre-existing id value for something that isn't 'missing'.
##########
if phjPrintResults == True:
print('Existing dataframe')
print('------------------')
print(phjExistDF)
print('\n')
print('New dataframe')
print('-------------')
print(phjNewDF)
print('\n')
# Retain only those entries in phjNewDF that don't already exist in phjExistDF
# and are not the missing value string (the missing value string and code will
# be added later if necessary).
#
# If the phjVarName parameter is a string (or a list with a single value) then consider
# the single column as a Series
if isinstance(phjVarNameList,str) | (isinstance(phjVarNameList,list) & (len(phjVarNameList) == 1)):
if isinstance(phjVarNameList,list):
phjVarNameList = phjVarNameList[0]
# This part of code creates a mask to indicate which values in new series already exist
# in old series (with missing string added)
if phjIgnoreCase == False:
phjMask = pd.Series(phjNewDF[phjVarNameList]).isin(phjExistDF[phjVarNameList].append(pd.Series(phjMissStr)))
else:
phjMask = pd.Series(phjNewDF[phjVarNameList].str.lower()).isin(phjExistDF[phjVarNameList].append(pd.Series(phjMissStr)).str.lower())
phjNewDF = phjNewDF[~phjMask].copy()
# If the phjVarName parameter is a list with greater than one value then consider as a dataframe.
elif (isinstance(phjVarNameList,list) & (len(phjVarNameList) > 1)):
# This part of the code creates a mask to determine which values in new dataframe already
# exist in old dataframe (with missing value and missing string added).
# Firstly, missing value is added to columns of type int64 and missing string is added to
# columns of type 'object'
# Define missing values to add to columns of specific dtypes
phjDtypeDict = {'object':phjMissStr,
'int64':phjMissCode}
# Create a row of data (as a dict) containing column names as keys and appropriate
# missing string or value (depending on dtype of column).
phjDict0 = phjCreateRowOfValuesForDtypes(phjDF = phjExistDF,
phjDtypeDict = phjDtypeDict,
phjScalarBool = True,
phjPrintResults = phjPrintResults)
# Create mask to determine which rows already exist. Hints to code this taken from:
# https://stackoverflow.com/questions/60836441/identify-rows-in-pandas-dataframe-that-already-exist
if phjIgnoreCase == False:
phjMask = phjNewDF.merge(phjExistDF.append(phjDict0,ignore_index = True),on=phjVarNameList,how='left',indicator=True)['_merge'].eq('both')
else:
phjMask = phjDataFrameLowerCase(phjDF = phjNewDF,
phjVarNameList = phjVarNameList).merge(phjDataFrameLowerCase(phjDF = phjExistDF.append(phjDict0,ignore_index = True),
phjVarNameList = phjVarNameList),
on= phjVarNameList,
how='left',
indicator=True)['_merge'].eq('both')
phjNewDF = phjNewDF[~phjMask].copy()
if phjPrintResults == True:
print('Dataframe of new values')
print('-----------------------')
print(phjNewDF)
print('\n')
# Create new values for 'id' column that continues on from pre-existing values but does not duplicate
# missing value code (whatever that might be).
# Create a list of existing ID values, not including missing value code, and identify the maximum value
phjIDList = [i for i in phjExistDF[phjIDName] if i not in [phjMissCode]]
# If the list is empty then max value is zero; otherwise the max value is the value returned by max()
if not phjIDList:
phjMax = 0
else:
phjMax = max(phjIDList)
# Produce list of new ID values (with a couple spare to allow missing value code to be removed
# if it occurs somewhere in the list)
phjNewIDList = [n for n in range(phjMax + 1,
phjMax + len(phjNewDF.index) + 2) if n not in [phjMissCode]]
# Number of new items to add
phjNewIDList = phjNewIDList[:len(phjNewDF.index)]
if phjPrintResults == True:
print('List of new ID values')
print('---------------------')
print(phjNewIDList)
print('\n')
# Add new ID values as new column to phjNewDF dataframe
phjNewDF[phjIDName] = phjNewIDList
if phjPrintResults == True:
print('New dataframe with new ID')
print('-------------------------')
print(phjNewDF)
print('\n')
# If missing value code and string are not present in database then add as a row in
# phjNewDF dataframe so it can be added to the database along with new items
if isinstance(phjVarNameList,str) | (isinstance(phjVarNameList,list) & (len(phjVarNameList) == 1)):
if phjExistDF.loc[(phjExistDF[phjIDName] == phjMissCode) &
(phjExistDF[phjVarNameList] == phjMissStr),[phjIDName,phjVarNameList]].empty:
phjNewDF = phjNewDF.append({phjIDName:phjMissCode,
phjVarNameList:phjMissStr},ignore_index = True)
elif (isinstance(phjVarNameList,list) & (len(phjVarNameList) > 1)):
# For the moment, just check whether the missing value code is found in the ID column
if phjExistDF.loc[phjExistDF[phjIDName] == phjMissCode,:].empty:
# Add the phjDict0 created previously as the new row
phjNewDF = phjNewDF.append(phjDict0,ignore_index = True)
phjNewDF = phjNewDF.sort_values(by = [phjIDName]).reset_index(drop = True)
if phjPrintResults == True:
print('Returned dataframe')
print('------------------')
print(phjNewDF)
print('\n')
return phjNewDF
# This function is designed to update LUT tables based on new values
# but maintaining existing id numbers
def phjUpdateLUTToLatestValues(phjDF,
phjIDVarName,
phjGroupbyVarName,
phjAddCountCol = True,
phjPrintResults = False):
######
# Check values are correctly entered
# Check 'n' column does not already exist
######
#####
# Could add different column on which to sort (e.g. date)
#####
# Add count column to indicate how many rows in each groupby group
if phjAddCountCol == True:
phjDF['n'] = phjDF.groupby(phjGroupbyVarName)[phjGroupbyVarName].transform('count')
# Keep record of column order
phjColOrder = [c for c in phjDF]
# Make sure dataframe is ordered based on grouping variable and id variable
phjDF = phjDF.sort_values(by = [phjGroupbyVarName,phjIDVarName]).copy()
if phjPrintResults == True:
if 'n' in phjDF:
print('Original sorted dataframe with count variable')
print('---------------------------------------------')
else:
print('Original sorted dataframe')
print('-------------------------')
print(phjDF)
print('\n')
# The aim of this function is to retain the first row of data (i.e. pre-existing) for the id (and name) column and
# the final row of data for everything else. This could be done using agg() function and pass a dictionary to indicate
# which colunms to extract first rows and which columns to extract the final rows, for example:
# df.groupby('name').agg({'id':'first','name':'last'})
# However, the 'first' and 'last' methods don't handle NaN values and will ignore them. Instead, use different methods,
# namely nth(0) and nth(-1). Not clear how to implement nth() methods because defining a dictionary did not work.
# Use work-around, namely divide dataframe into two sections (by column), select relevant information and rejoin.
# For phjIDVarName and phjGroupbyVarName columns, select the first rows (i.e. the rows that already exist in the dataframe)
phjOutDF_pti = phjDF[[c for c in phjDF if c in [phjIDVarName,phjGroupbyVarName]]].groupby(phjGroupbyVarName).nth(0).reset_index(drop = False)
# For all other columns (including the grouping variable), select the final row (i.e. the most recent data to be added)
phjOutDF_ptii = phjDF[[c for c in phjDF if c not in [phjIDVarName]]].groupby(phjGroupbyVarName).nth(-1).reset_index(drop = False)
# Rejoin dataframes based on grouping variable
phjOutDF = phjOutDF_pti.merge(phjOutDF_ptii, on = phjGroupbyVarName)
# Rearrange order of columns to match original
phjOutDF = phjOutDF[[c for c in phjColOrder if c in phjOutDF]]
if phjPrintResults == True:
print('Updated dataframe')
print('-----------------')
print(phjOutDF)
print('\n')
return phjOutDF
if __name__ == '__main__':
main()
|
{
"content_hash": "06e08e95242c8b8fdcae0bf150388e59",
"timestamp": "",
"source": "github",
"line_count": 1124,
"max_line_length": 242,
"avg_line_length": 53.77669039145908,
"alnum_prop": 0.5408884109521053,
"repo_name": "lvphj/epydemiology",
"id": "1e3d39b0d572d4118ca625dcd56563065341d2a6",
"size": "60447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epydemiology/phjMiscFuncs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "57778"
},
{
"name": "Python",
"bytes": "480010"
}
],
"symlink_target": ""
}
|
from primes import sieve
primes = sieve()
result = 0
for prime in primes:
if prime > 2000000:
break
result += prime
print result, prime
|
{
"content_hash": "2712f8db9d2a25e920e97221802d54ab",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 24,
"avg_line_length": 13,
"alnum_prop": 0.6474358974358975,
"repo_name": "jreese/euler",
"id": "dfb0fdad466f86ef89a27e1ee22478ae7c8b2c1d",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problem10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "287"
},
{
"name": "C++",
"bytes": "32273"
},
{
"name": "Python",
"bytes": "36518"
},
{
"name": "Swift",
"bytes": "1320"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.cloud.vision_v1p3beta1.proto import geometry_pb2
from google.cloud.vision_v1p3beta1.proto import image_annotator_pb2
from google.cloud.vision_v1p3beta1.proto import product_search_pb2
from google.cloud.vision_v1p3beta1.proto import product_search_service_pb2
from google.cloud.vision_v1p3beta1.proto import text_annotation_pb2
from google.cloud.vision_v1p3beta1.proto import web_detection_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.rpc import status_pb2
from google.type import color_pb2
from google.type import latlng_pb2
_shared_modules = [
operations_pb2,
any_pb2,
empty_pb2,
field_mask_pb2,
timestamp_pb2,
wrappers_pb2,
status_pb2,
color_pb2,
latlng_pb2,
]
_local_modules = [
geometry_pb2,
image_annotator_pb2,
product_search_pb2,
product_search_service_pb2,
text_annotation_pb2,
web_detection_pb2,
]
names = []
for module in _shared_modules: # pragma: NO COVER
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.vision_v1p3beta1.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
|
{
"content_hash": "b0698552fe673c90490db16244ab4363",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 29.385964912280702,
"alnum_prop": 0.7385074626865672,
"repo_name": "tseaver/google-cloud-python",
"id": "c075adb06aa4739006fdb2692cc2ef4102f270f0",
"size": "2278",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vision/google/cloud/vision_v1p3beta1/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
from app import create_app, db
from app.models import Article , Category, User
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app('default')
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
{
"content_hash": "0727d2a7605434be63bc1dc574d53411",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 53,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.7283236994219653,
"repo_name": "Millyn/Flask_blog",
"id": "c4ac1d7768346d087d08190c46bb13ed830eb589",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18467"
},
{
"name": "JavaScript",
"bytes": "67209"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "22670"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import tensorboard
import torch
from monai.visualize import make_animated_gif_summary
class TestImg2Tensorboard(unittest.TestCase):
def test_write_gray(self):
nparr = np.ones(shape=(1, 32, 32, 32), dtype=np.float32)
summary_object_np = make_animated_gif_summary(
tag="test_summary_nparr.png", image=nparr, max_out=1, scale_factor=253.0
)
for s in summary_object_np:
assert isinstance(
s, tensorboard.compat.proto.summary_pb2.Summary
), "make_animated_gif_summary must return a tensorboard.summary object from numpy array"
tensorarr = torch.tensor(nparr)
summary_object_tensor = make_animated_gif_summary(
tag="test_summary_tensorarr.png", image=tensorarr, max_out=1, frame_dim=-1, scale_factor=253.0
)
for s in summary_object_tensor:
assert isinstance(
s, tensorboard.compat.proto.summary_pb2.Summary
), "make_animated_gif_summary must return a tensorboard.summary object from tensor input"
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "0e4d1485cb14ef33282643dd1e2b9fdf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 106,
"avg_line_length": 36,
"alnum_prop": 0.6519097222222222,
"repo_name": "Project-MONAI/MONAI",
"id": "58c4d3cfabb532933b8d04403e4e8baebfa372b2",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_img2tensorboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15956"
},
{
"name": "C++",
"bytes": "189648"
},
{
"name": "Cuda",
"bytes": "154905"
},
{
"name": "Dockerfile",
"bytes": "2454"
},
{
"name": "Python",
"bytes": "7209898"
},
{
"name": "Shell",
"bytes": "20587"
}
],
"symlink_target": ""
}
|
"""Tests for the Google Assistant integration."""
DEMO_DEVICES = [{
'id':
'light.kitchen_lights',
'name': {
'name': 'Kitchen Lights'
},
'traits': [
'action.devices.traits.OnOff', 'action.devices.traits.Brightness',
'action.devices.traits.ColorSpectrum',
'action.devices.traits.ColorTemperature'
],
'type':
'action.devices.types.LIGHT',
'willReportState':
False
}, {
'id':
'switch.ac',
'name': {
'name': 'AC'
},
'traits': [
'action.devices.traits.OnOff'
],
'type': 'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id':
'switch.decorative_lights',
'name': {
'name': 'Decorative Lights'
},
'traits': [
'action.devices.traits.OnOff'
],
'type': 'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id':
'light.ceiling_lights',
'name': {
'name': 'Roof Lights',
'nicknames': ['top lights', 'ceiling lights']
},
'traits': [
'action.devices.traits.OnOff', 'action.devices.traits.Brightness',
'action.devices.traits.ColorSpectrum',
'action.devices.traits.ColorTemperature'
],
'type':
'action.devices.types.LIGHT',
'willReportState':
False
}, {
'id':
'light.bed_light',
'name': {
'name': 'Bed Light'
},
'traits': [
'action.devices.traits.OnOff', 'action.devices.traits.Brightness',
'action.devices.traits.ColorSpectrum',
'action.devices.traits.ColorTemperature'
],
'type':
'action.devices.types.LIGHT',
'willReportState':
False
}, {
'id': 'group.all_lights',
'name': {
'name': 'all lights'
},
'traits': ['action.devices.traits.OnOff'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id': 'group.all_switches',
'name': {
'name': 'all switches'
},
'traits': ['action.devices.traits.OnOff'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id':
'cover.living_room_window',
'name': {
'name': 'Living Room Window'
},
'traits':
['action.devices.traits.OnOff', 'action.devices.traits.Brightness'],
'type':
'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id':
'cover.hall_window',
'name': {
'name': 'Hall Window'
},
'traits':
['action.devices.traits.OnOff', 'action.devices.traits.Brightness'],
'type':
'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id': 'cover.garage_door',
'name': {
'name': 'Garage Door'
},
'traits': ['action.devices.traits.OnOff'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id': 'cover.kitchen_window',
'name': {
'name': 'Kitchen Window'
},
'traits': ['action.devices.traits.OnOff'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id': 'group.all_covers',
'name': {
'name': 'all covers'
},
'traits': ['action.devices.traits.OnOff'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id':
'media_player.bedroom',
'name': {
'name': 'Bedroom'
},
'traits':
[
'action.devices.traits.OnOff', 'action.devices.traits.Brightness',
'action.devices.traits.Modes'
],
'type':
'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id':
'media_player.living_room',
'name': {
'name': 'Living Room'
},
'traits':
[
'action.devices.traits.OnOff', 'action.devices.traits.Brightness',
'action.devices.traits.Modes'
],
'type':
'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id': 'media_player.lounge_room',
'name': {
'name': 'Lounge room'
},
'traits': ['action.devices.traits.OnOff', 'action.devices.traits.Modes'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id':
'media_player.walkman',
'name': {
'name': 'Walkman'
},
'traits':
['action.devices.traits.OnOff', 'action.devices.traits.Brightness'],
'type':
'action.devices.types.SWITCH',
'willReportState':
False
}, {
'id': 'fan.living_room_fan',
'name': {
'name': 'Living Room Fan'
},
'traits': [
'action.devices.traits.FanSpeed',
'action.devices.traits.OnOff'
],
'type': 'action.devices.types.FAN',
'willReportState': False
}, {
'id': 'fan.ceiling_fan',
'name': {
'name': 'Ceiling Fan'
},
'traits': [
'action.devices.traits.FanSpeed',
'action.devices.traits.OnOff'
],
'type': 'action.devices.types.FAN',
'willReportState': False
}, {
'id': 'group.all_fans',
'name': {
'name': 'all fans'
},
'traits': ['action.devices.traits.OnOff'],
'type': 'action.devices.types.SWITCH',
'willReportState': False
}, {
'id': 'climate.hvac',
'name': {
'name': 'Hvac'
},
'traits': ['action.devices.traits.TemperatureSetting'],
'type': 'action.devices.types.THERMOSTAT',
'willReportState': False,
'attributes': {
'availableThermostatModes': 'heat,cool,heatcool,off',
'thermostatTemperatureUnit': 'C',
},
}, {
'id': 'climate.heatpump',
'name': {
'name': 'HeatPump'
},
'traits': [
'action.devices.traits.OnOff',
'action.devices.traits.TemperatureSetting'
],
'type': 'action.devices.types.THERMOSTAT',
'willReportState': False
}, {
'id': 'climate.ecobee',
'name': {
'name': 'Ecobee'
},
'traits': ['action.devices.traits.TemperatureSetting'],
'type': 'action.devices.types.THERMOSTAT',
'willReportState': False
}, {
'id': 'lock.front_door',
'name': {
'name': 'Front Door'
},
'traits': ['action.devices.traits.LockUnlock'],
'type': 'action.devices.types.LOCK',
'willReportState': False
}, {
'id': 'lock.kitchen_door',
'name': {
'name': 'Kitchen Door'
},
'traits': ['action.devices.traits.LockUnlock'],
'type': 'action.devices.types.LOCK',
'willReportState': False
}, {
'id': 'lock.openable_lock',
'name': {
'name': 'Openable Lock'
},
'traits': ['action.devices.traits.LockUnlock'],
'type': 'action.devices.types.LOCK',
'willReportState': False
}]
|
{
"content_hash": "725784d526c0ecf8974f0435f677811e",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 78,
"avg_line_length": 24.205882352941178,
"alnum_prop": 0.5508809234507898,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "03cc327a5c51f775c3e305b5501074f0908f2bf7",
"size": "6584",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/google_assistant/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
"""
robo.tests.test_misawa_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for robo.handlers.misawa.
:copyright: (c) 2016 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import requests
from mock import patch
from unittest import TestCase
from robo.robot import Robot
from robo.handlers.misawa import Client, Misawa
def dummy_response(m, filename=None):
response = requests.Response()
response.status_code = 200
if filename is None:
response._content = ''
else:
root_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(root_path, filename)
with open(file_path, 'r') as f:
data = f.read()
response._content = data
m.return_value = response
class NullAdapter(object):
def __init__(self, signal):
self.signal = signal
self.responses = []
def say(self, message, **kwargs):
self.responses.append(message)
return message
class TestClient(TestCase):
@classmethod
def setUpClass(cls):
cls.client = Client()
@patch('robo.handlers.misawa.requests.get')
def test_generate_url(self, m):
""" Client().generate('query') should generate search by keyword. """
dummy_response(m, './fixture.json')
ret = self.client.generate(u'\u30C9\u30E9\u30E0')
self.assertTrue(ret.startswith('http://'))
@patch('robo.handlers.misawa.requests.get')
def test_generate_url_query_is_none(self, m):
""" Client().generate(None) should generate random image. """
dummy_response(m, './fixture.json')
ret = self.client.generate()
self.assertTrue(ret.startswith('http://'))
class TestMisawaHandler(TestCase):
@classmethod
def setUpClass(cls):
logger = logging.getLogger('robo')
logger.level = logging.ERROR
cls.robot = Robot('test', logger)
misawa = Misawa()
misawa.signal = cls.robot.handler_signal
method = cls.robot.parse_handler_methods(misawa)
cls.robot.handlers.extend(method)
adapter = NullAdapter(cls.robot.handler_signal)
cls.robot.adapters['null'] = adapter
@patch('robo.handlers.misawa.requests.get')
def test_should_misawa(self, m):
""" Misawa().get() should search misawa url. """
dummy_response(m, 'fixture.json')
self.robot.handler_signal.send('test misawa')
response = self.robot.adapters['null'].responses[0]
self.assertTrue(response.startswith('http://'))
self.robot.adapters['null'].responses = []
|
{
"content_hash": "ebb11ee5428fa92fc2ab5acfe532da57",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 29.719101123595507,
"alnum_prop": 0.629867674858223,
"repo_name": "heavenshell/py-robo-misawa",
"id": "7878889163f8e14df9aab62a0f8f99696ce465e0",
"size": "2669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_misawa_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5896"
}
],
"symlink_target": ""
}
|
from unittest import TestCase, main
from datetime import datetime
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import qiita_test_checker
import qiita_db as qdb
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestStudyPerson(TestCase):
def setUp(self):
self.studyperson = qdb.study.StudyPerson(1)
def test_create_studyperson(self):
new = qdb.study.StudyPerson.create(
'SomeDude', 'somedude@foo.bar', 'affil', '111 fake street',
'111-121-1313')
nid = new.id
self.assertEqual(nid, 4)
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.study_person "
"WHERE study_person_id = %d" % nid)
obs = qdb.sql_connection.TRN.execute_fetchindex()
self.assertEqual(obs, [[nid, 'SomeDude', 'somedude@foo.bar', 'affil',
'111 fake street', '111-121-1313']])
qdb.study.StudyPerson.delete(nid)
def test_delete(self):
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.study.StudyPerson.delete(1)
obs = qdb.study.StudyPerson.create(
'SomeDude', 'somedude@foo.bar', 'affil', '111 fake street',
'111-121-1313')
self.assertTrue(
qdb.study.StudyPerson.exists('SomeDude', 'affil'))
qdb.study.StudyPerson.delete(obs.id)
self.assertFalse(
qdb.study.StudyPerson.exists('SomeDude', 'affil'))
def test_retrieve_non_existant_people(self):
with self.assertRaises(qdb.exceptions.QiitaDBLookupError):
qdb.study.StudyPerson.from_name_and_affiliation('Boaty McBoatFace',
'UCSD')
p = qdb.study.StudyPerson.from_name_and_affiliation('LabDude',
'knight lab')
self.assertEqual(p.name, 'LabDude')
self.assertEqual(p.affiliation, 'knight lab')
self.assertEqual(p.address, '123 lab street')
self.assertEqual(p.phone, '121-222-3333')
self.assertEqual(p.email, 'lab_dude@foo.bar')
def test_iter(self):
"""Make sure that each and every StudyPerson is retrieved"""
expected = [
('LabDude', 'lab_dude@foo.bar', 'knight lab', '123 lab street',
'121-222-3333'),
('empDude', 'emp_dude@foo.bar', 'broad', None, '444-222-3333'),
('PIDude', 'PI_dude@foo.bar', 'Wash U', '123 PI street', None)]
for i, person in enumerate(qdb.study.StudyPerson.iter()):
self.assertEqual(person.id, i+1)
self.assertEqual(person.name, expected[i][0])
self.assertEqual(person.email, expected[i][1])
self.assertEqual(person.affiliation, expected[i][2])
self.assertEqual(person.address, expected[i][3])
self.assertEqual(person.phone, expected[i][4])
def test_exists(self):
self.assertTrue(qdb.study.StudyPerson.exists('LabDude', 'knight lab'))
self.assertFalse(qdb.study.StudyPerson.exists(
'AnotherDude', 'knight lab'))
self.assertFalse(qdb.study.StudyPerson.exists(
'LabDude', 'Another lab'))
def test_create_studyperson_already_exists(self):
obs = qdb.study.StudyPerson.create(
'LabDude', 'lab_dude@foo.bar', 'knight lab')
self.assertEqual(obs.name, 'LabDude')
self.assertEqual(obs.email, 'lab_dude@foo.bar')
def test_retrieve_name(self):
self.assertEqual(self.studyperson.name, 'LabDude')
def test_set_name_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.name = 'Fail Dude'
def test_retrieve_email(self):
self.assertEqual(self.studyperson.email, 'lab_dude@foo.bar')
def test_retrieve_affiliation(self):
self.assertEqual(self.studyperson.affiliation, 'knight lab')
def test_set_email_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.email = 'faildude@foo.bar'
def test_set_affiliation_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.affiliation = 'squire lab'
def test_retrieve_address(self):
self.assertEqual(self.studyperson.address, '123 lab street')
def test_retrieve_address_null(self):
person = qdb.study.StudyPerson(2)
self.assertEqual(person.address, None)
def test_set_address(self):
self.studyperson.address = '123 nonsense road'
self.assertEqual(self.studyperson.address, '123 nonsense road')
def test_retrieve_phone(self):
self.assertEqual(self.studyperson.phone, '121-222-3333')
def test_retrieve_phone_null(self):
person = qdb.study.StudyPerson(3)
self.assertEqual(person.phone, None)
def test_set_phone(self):
self.studyperson.phone = '111111111111111111121'
self.assertEqual(self.studyperson.phone, '111111111111111111121')
@qiita_test_checker()
class TestStudy(TestCase):
def setUp(self):
self.study = qdb.study.Study(1)
self.portal = qiita_config.portal
self.info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"principal_investigator_id": qdb.study.StudyPerson(3),
"lab_person_id": qdb.study.StudyPerson(1)
}
self.infoexp = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"principal_investigator": qdb.study.StudyPerson(3),
"lab_person": qdb.study.StudyPerson(1),
'public_raw_download': False
}
self.existingexp = {
'mixs_compliant': True,
'metadata_complete': True,
'reprocess': False,
'funding': None,
'vamps_id': None,
'first_contact': datetime(2014, 5, 19, 16, 10),
'principal_investigator': qdb.study.StudyPerson(3),
'timeseries_type_id': 1,
'study_abstract':
"This is a preliminary study to examine the "
"microbiota associated with the Cannabis plant. Soils samples "
"from the bulk soil, soil associated with the roots, and the "
"rhizosphere were extracted and the DNA sequenced. Roots "
"from three independent plants of different strains were "
"examined. These roots were obtained November 11, 2011 from "
"plants that had been harvested in the summer. Future "
"studies will attempt to analyze the soils and rhizospheres "
"from the same location at different time points in the plant "
"lifecycle.",
'spatial_series': False,
'study_description': 'Analysis of the Cannabis Plant Microbiome',
'study_alias': 'Cannabis Soils',
'most_recent_contact': datetime(2014, 5, 19, 16, 11),
'lab_person': qdb.study.StudyPerson(1)}
def tearDown(self):
qiita_config.portal = self.portal
self._change_processed_data_status('private')
def _change_processed_data_status(self, new_status):
# Change the status of the studies by changing the status of their
# artifacts
id_status = qdb.util.convert_to_id(new_status, 'visibility')
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.artifact SET visibility_id = %s", (id_status,))
def test_from_title(self):
study = qdb.study.Study.from_title(
'Identification of the Microbiomes for Cannabis Soils')
self.assertEqual(study, qdb.study.Study(1))
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.study.Study.from_title('Study title')
def test_get_info(self):
# Test get all info for single study
qiita_config.portal = 'QIITA'
obs = qdb.study.Study.get_info([1])
self.assertEqual(len(obs), 1)
obs = dict(obs[0])
exp = {
'mixs_compliant': True, 'metadata_complete': True,
'reprocess': False, 'timeseries_type': 'None',
'funding': None, 'vamps_id': None, 'public_raw_download': False,
'first_contact': datetime(2014, 5, 19, 16, 10),
'principal_investigator_id': 3, 'timeseries_type_id': 1,
'publications': [{'f1': '10.100/123456', 'f2': True},
{'f1': '123456', 'f2': False},
{'f1': '10.100/7891011', 'f2': True},
{'f1': '7891011', 'f2': False}],
'study_alias': 'Cannabis Soils',
'spatial_series': False, 'notes': '',
'study_abstract': 'This is a preliminary study to examine the '
'microbiota associated with the Cannabis plant. Soils samples from'
' the bulk soil, soil associated with the roots, and the '
'rhizosphere were extracted and the DNA sequenced. Roots from '
'three independent plants of different strains were examined. '
'These roots were obtained November 11, 2011 from plants that had '
'been harvested in the summer. Future studies will attempt to '
'analyze the soils and rhizospheres from the same location at '
'different time points in the plant lifecycle.',
'study_description': 'Analysis of the Cannabis Plant Microbiome',
'intervention_type': 'None', 'email': 'test@foo.bar',
'study_id': 1,
'most_recent_contact': datetime(2014, 5, 19, 16, 11),
'lab_person_id': 1,
'study_title': 'Identification of the Microbiomes for Cannabis '
'Soils',
'ebi_submission_status': 'submitted',
'ebi_study_accession': 'EBI123456-BB',
'autoloaded': False}
self.assertDictEqual(obs, exp)
# Test get specific keys for single study
exp_keys = ['metadata_complete', 'reprocess', 'timeseries_type',
'publications', 'study_title']
obs = qdb.study.Study.get_info([1], exp_keys)
self.assertEqual(len(obs), 1)
exp = [{
'metadata_complete': True, 'reprocess': False,
'timeseries_type': 'None',
'publications': [{'f1': '10.100/123456', 'f2': True},
{'f1': '123456', 'f2': False},
{'f1': '10.100/7891011', 'f2': True},
{'f1': '7891011', 'f2': False}],
'study_title': 'Identification of the Microbiomes for Cannabis '
'Soils'}]
self.assertEqual(obs, exp)
# Test get specific keys for all studies
info = {
'timeseries_type_id': 1,
'lab_person_id': None,
'principal_investigator_id': 3,
'metadata_complete': False,
'mixs_compliant': True,
'study_description': 'desc',
'study_alias': 'alias',
'study_abstract': 'abstract'}
user = qdb.user.User('test@foo.bar')
s = qdb.study.Study.create(user, 'test_study_1', info=info)
obs = qdb.study.Study.get_info(info_cols=exp_keys)
exp = [
{'metadata_complete': True, 'reprocess': False,
'timeseries_type': 'None', 'publications': [
{'f1': '10.100/123456', 'f2': True},
{'f1': '123456', 'f2': False},
{'f1': '10.100/7891011', 'f2': True},
{'f1': '7891011', 'f2': False}],
'study_title': ('Identification of the Microbiomes for '
'Cannabis Soils')},
{'metadata_complete': False, 'reprocess': False,
'timeseries_type': 'None', 'publications': None,
'study_title': 'test_study_1'}]
self.assertEqual(obs, exp)
qdb.study.Study.delete(s.id)
# test portal restriction working
qiita_config.portal = 'EMP'
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.study.Study.get_info([1])
def test_has_access_public(self):
self._change_processed_data_status('public')
qiita_config.portal = 'QIITA'
self.assertTrue(
self.study.has_access(qdb.user.User("demo@microbio.me")))
qiita_config.portal = 'EMP'
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.study.Study(1).has_access(qdb.user.User("demo@microbio.me"))
def test_has_access_no_public(self):
self._change_processed_data_status('public')
self.assertFalse(
self.study.has_access(qdb.user.User("demo@microbio.me"), True))
def test_can_edit(self):
self.assertTrue(self.study.can_edit(qdb.user.User('test@foo.bar')))
self.assertTrue(self.study.can_edit(qdb.user.User('shared@foo.bar')))
self.assertTrue(self.study.can_edit(qdb.user.User('admin@foo.bar')))
self.assertFalse(
self.study.can_edit(qdb.user.User('demo@microbio.me')))
def test_owner(self):
self.assertEqual(self.study.owner, qdb.user.User("test@foo.bar"))
def test_autoloaded(self):
self.assertFalse(self.study.autoloaded)
self.study.autoloaded = True
self.assertTrue(self.study.autoloaded)
self.study.autoloaded = False
self.assertFalse(self.study.autoloaded)
def test_public_raw_download(self):
self.assertFalse(self.study.public_raw_download)
self.study.public_raw_download = True
self.assertTrue(self.study.public_raw_download)
self.study.public_raw_download = False
self.assertFalse(self.study.public_raw_download)
def test_share(self):
# Clear all sharing associations
self._change_processed_data_status('sandbox')
qdb.sql_connection.perform_as_transaction(
"delete from qiita.study_users")
self.assertEqual(self.study.shared_with, [])
# Try to share with the owner, which should not work
self.study.share(qdb.user.User("test@foo.bar"))
self.assertEqual(self.study.shared_with, [])
# Then share the study with shared@foo.bar
self.study.share(qdb.user.User("shared@foo.bar"))
self.assertEqual(self.study.shared_with,
[qdb.user.User("shared@foo.bar")])
def test_unshare(self):
self._change_processed_data_status('sandbox')
self.study.unshare(qdb.user.User("shared@foo.bar"))
self.assertEqual(self.study.shared_with, [])
def test_has_access_shared(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(qdb.user.User("shared@foo.bar")))
def test_has_access_private(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(qdb.user.User("test@foo.bar")))
def test_has_access_admin(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(qdb.user.User("admin@foo.bar")))
def test_has_access_no_access(self):
self._change_processed_data_status('sandbox')
self.assertFalse(
self.study.has_access(qdb.user.User("demo@microbio.me")))
def test_get_by_status(self):
obs = qdb.study.Study.get_by_status('sandbox')
self.assertEqual(obs, set())
s = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils',
self.info)
obs = qdb.study.Study.get_by_status('private')
self.assertEqual(obs, {qdb.study.Study(1)})
obs = qdb.study.Study.get_by_status('sandbox')
self.assertEqual(obs, {s})
obs = qdb.study.Study.get_by_status('public')
self.assertEqual(obs, set())
obs = qdb.study.Study.get_by_status('awaiting_approval')
self.assertEqual(obs, set())
qdb.study.Study.delete(s.id)
def test_exists(self):
self.assertTrue(qdb.study.Study.exists(
'Identification of the Microbiomes for Cannabis Soils'))
self.assertFalse(qdb.study.Study.exists('Not Cannabis Soils'))
def test_create_duplicate(self):
to_test = [
'Identification of the Microbiomes for Cannabis Soils',
'Identification of the Microbiomes for Cannabis Soils',
' Identification of the Microbiomes for Cannabis Soils',
'Identification of the Microbiomes for Cannabis Soils ',
' Identification of the Microbiomes for Cannabis Soils '
]
for tt in to_test:
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateError):
qdb.study.Study.create(
qdb.user.User('test@foo.bar'), tt, self.info)
def test_create_study_min_data(self):
"""Insert a study into the database"""
before = datetime.now()
obs = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "Fried chicken microbiome 1",
self.info)
after = datetime.now()
self.assertEqual(obs.status, 'sandbox')
self.assertEqual(obs.title, "Fried chicken microbiome 1")
obs_info = obs.info
insertion_timestamp = obs_info.pop('first_contact')
exp = {'mixs_compliant': True, 'metadata_complete': True,
'reprocess': False, 'public_raw_download': False,
'funding': None, 'vamps_id': None,
'principal_investigator': qdb.study.StudyPerson(3),
'timeseries_type_id': 1,
'study_abstract': 'Exploring how a high fat diet changes the '
'gut microbiome',
'spatial_series': None,
'study_description': 'Microbiome of people who eat nothing but'
' fried chicken',
'study_alias': 'FCM',
'most_recent_contact': None,
'lab_person': qdb.study.StudyPerson(1),
'notes': ''}
self.assertEqual(obs_info, exp)
# Check the timestamp separately, since it is set by the database
# to the microsecond, and we can't predict it a priori
self.assertTrue(before < insertion_timestamp < after)
self.assertEqual(obs.shared_with, [])
self.assertEqual(obs.publications, [])
self.assertEqual(obs.investigation, None)
self.assertEqual(obs.sample_template, None)
self.assertEqual(obs.data_types, [])
self.assertEqual(obs.owner, qdb.user.User('test@foo.bar'))
self.assertEqual(obs.environmental_packages, [])
self.assertEqual(obs._portals, ['QIITA'])
self.assertEqual(obs.ebi_study_accession, None)
self.assertEqual(obs.ebi_submission_status, "not submitted")
qdb.study.Study.delete(obs.id)
def test_create_nonqiita_portal(self):
qiita_config.portal = "EMP"
s = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "NEW!", self.info,
qdb.investigation.Investigation(1))
# make sure portal is associated
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT * from qiita.study_portal WHERE study_id = %s", [s.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
self.assertEqual(obs, [[s.id, 2], [s.id, 1]])
qdb.study.Study.delete(s.id)
def test_create_study_with_investigation(self):
"""Insert a study into the database with an investigation"""
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "Fried chicken microbiome 2",
self.info, qdb.investigation.Investigation(1))
# check the investigation was assigned
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT * from qiita.investigation_study WHERE study_id = %s",
[new.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
self.assertEqual(obs, [[1, new.id]])
# testing Study.iter()
self.assertCountEqual(list(qdb.study.Study.iter()),
[qdb.study.Study(1), new])
qdb.study.Study.delete(new.id)
def test_create_study_all_data(self):
"""Insert a study into the database with every info field"""
self.info.update({
'vamps_id': 'MBE_1111111',
'funding': 'FundAgency',
'spatial_series': True,
'metadata_complete': False,
'reprocess': True,
'first_contact': "10/24/2014 12:47PM",
'study_id': 3827,
'notes': 'an analysis was performed \n here and \n here'
})
obs = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "Fried chicken microbiome 3",
self.info)
self.assertEqual(obs.id, 3827)
self.assertEqual(obs.status, 'sandbox')
self.assertEqual(obs.title, "Fried chicken microbiome 3")
exp = {'mixs_compliant': True, 'metadata_complete': False,
'reprocess': True, 'public_raw_download': False,
'funding': 'FundAgency', 'vamps_id': 'MBE_1111111',
'first_contact': datetime(2014, 10, 24, 12, 47),
'principal_investigator': qdb.study.StudyPerson(3),
'timeseries_type_id': 1,
'study_abstract': 'Exploring how a high fat diet changes the '
'gut microbiome',
'spatial_series': True,
'study_description': 'Microbiome of people who eat nothing '
'but fried chicken',
'study_alias': 'FCM',
'most_recent_contact': None,
'lab_person': qdb.study.StudyPerson(1),
'notes': 'an analysis was performed \n here and \n here'}
self.assertEqual(obs.info, exp)
self.assertEqual(obs.shared_with, [])
self.assertEqual(obs.publications, [])
self.assertEqual(obs.investigation, None)
self.assertEqual(obs.sample_template, None)
self.assertEqual(obs.data_types, [])
self.assertEqual(obs.owner, qdb.user.User('test@foo.bar'))
self.assertEqual(obs.environmental_packages, [])
self.assertEqual(obs._portals, ['QIITA'])
self.assertEqual(obs.ebi_study_accession, None)
self.assertEqual(obs.ebi_submission_status, "not submitted")
# testing Study.iter()
self.assertCountEqual(list(qdb.study.Study.iter()),
[qdb.study.Study(1), obs])
qdb.study.Study.delete(obs.id)
def test_create_missing_required(self):
""" Insert a study that is missing a required info key"""
self.info.pop("study_alias")
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "Fried Chicken Microbiome 4",
self.info)
def test_create_study_with_not_allowed_key(self):
"""Insert a study with key from _non_info present"""
self.info.update({"email": "wooo@sup.net"})
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "Fried Chicken Microbiome 6",
self.info)
def test_create_unknown_db_col(self):
""" Insert a study with an info key not in the database"""
self.info["SHOULDNOTBEHERE"] = "BWAHAHAHAHAHA"
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.study.Study.create(
qdb.user.User('test@foo.bar'), "Fried Chicken Microbiome 7",
self.info)
def test_delete(self):
title = "Fried chicken microbiome 8"
# the study is assigned to investigation 1
study = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), title, self.info,
qdb.investigation.Investigation(1))
# sharing with other user
study.share(qdb.user.User("shared@foo.bar"))
study.delete(study.id)
self.assertFalse(study.exists(title))
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.study.Study.delete(1)
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.study.Study.delete(41)
def test_retrieve_title(self):
self.assertEqual(self.study.title, 'Identification of the Microbiomes'
' for Cannabis Soils')
def test_set_title(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 1',
self.info)
new.title = "Cannabis soils"
self.assertEqual(new.title, "Cannabis soils")
qdb.study.Study.delete(new.id)
def test_portals(self):
self.assertEqual(self.study._portals, ['QIITA'])
def test_ebi_study_accession(self):
self.assertEqual(self.study.ebi_study_accession, 'EBI123456-BB')
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 4',
self.info)
self.assertEqual(new.ebi_study_accession, None)
qdb.study.Study.delete(new.id)
def test_ebi_study_accession_setter(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), 'Test', self.info)
self.assertEqual(new.ebi_study_accession, None)
new.ebi_study_accession = 'EBI654321-BB'
self.assertEqual(new.ebi_study_accession, 'EBI654321-BB')
# Raises an error if the study already has an EBI study accession
with self.assertRaises(qdb.exceptions.QiitaDBError):
self.study.ebi_study_accession = 'EBI654321-BB'
qdb.study.Study.delete(new.id)
def test_ebi_submission_status(self):
self.assertEqual(self.study.ebi_submission_status, 'submitted')
# let's test that even with a failed job nothing changes
# add a failed job for an artifact (2) that can be submitted
user = qdb.user.User('test@foo.bar')
qp = qdb.software.Software.from_name_and_version('Qiita', 'alpha')
cmd = qp.get_command('submit_to_EBI')
params = qdb.software.Parameters.load(cmd, values_dict={
'artifact': 2, 'submission_type': 'ADD'})
job = qdb.processing_job.ProcessingJob.create(user, params, True)
job._set_error('Killed by Admin')
# and just to be careful add a failed job for an artifact (1) that
# cannot be submitted
qp = qdb.software.Software.from_name_and_version('Qiita', 'alpha')
cmd = qp.get_command('submit_to_EBI')
params = qdb.software.Parameters.load(cmd, values_dict={
'artifact': 1, 'submission_type': 'ADD'})
job = qdb.processing_job.ProcessingJob.create(user, params, True)
job._set_error('Killed by Admin')
# should still return submited
self.assertEqual(self.study.ebi_submission_status, 'submitted')
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 5',
self.info)
self.assertEqual(new.ebi_submission_status, 'not submitted')
qdb.study.Study.delete(new.id)
def test_set_info(self):
"""Set info in a study"""
newinfo = {
"timeseries_type_id": 2,
"metadata_complete": False,
"lab_person_id": qdb.study.StudyPerson(2),
"vamps_id": 'MBE_111222',
'notes': 'These are my notes!!! \n ... and more notes ...'
}
self.info['first_contact'] = "6/11/2014"
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 6',
self.info)
self.infoexp.update(newinfo)
new.info = newinfo
# add missing table cols
self.infoexp["funding"] = None
self.infoexp["spatial_series"] = None
self.infoexp["most_recent_contact"] = None
self.infoexp["reprocess"] = False
self.infoexp["first_contact"] = datetime(2014, 6, 11)
self.infoexp["lab_person"] = qdb.study.StudyPerson(2)
del self.infoexp["lab_person_id"]
self.assertEqual(new.info, self.infoexp)
qdb.study.Study.delete(new.id)
def test_set_info_public(self):
"""Tests for fail if editing info of a public study"""
self.study.info = {"vamps_id": "12321312"}
def test_set_info_public_error(self):
"""Tests for fail if trying to modify timeseries of a public study"""
with self.assertRaises(qdb.exceptions.QiitaDBStatusError):
self.study.info = {"timeseries_type_id": 2}
def test_set_info_disallowed_keys(self):
"""Tests for fail if sending non-info keys in info dict"""
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 7',
self.info)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
new.info = {"email": "fail@fail.com"}
qdb.study.Study.delete(new.id)
def test_info_empty(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 8',
self.info)
with self.assertRaises(IncompetentQiitaDeveloperError):
new.info = {}
qdb.study.Study.delete(new.id)
def test_retrieve_status(self):
self.assertEqual(self.study.status, "private")
def test_retrieve_shared_with(self):
self.assertEqual(self.study.shared_with,
[qdb.user.User('shared@foo.bar')])
def test_retrieve_publications_empty(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 9',
self.info)
self.assertEqual(new.publications, [])
def test_publication_setter(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'), 'New study', self.info)
self.assertEqual(new.publications, [])
new_values = [['10.100/654321', True],
['10.100/1101987', True],
['1101987', False]]
new.publications = new_values
self.assertEqual(new.publications, new_values)
qdb.study.Study.delete(new.id)
def test_publications_setter_typeerror(self):
with self.assertRaises(TypeError):
self.study.publications = '123456'
def test_retrieve_investigation(self):
self.assertEqual(self.study.investigation,
qdb.investigation.Investigation(1))
def test_retrieve_investigation_empty(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 10',
self.info)
self.assertEqual(new.investigation, None)
qdb.study.Study.delete(new.id)
def test_retrieve_sample_template(self):
self.assertEqual(
self.study.sample_template,
qdb.metadata_template.sample_template.SampleTemplate(1))
def test_retrieve_data_types(self):
self.assertEqual(self.study.data_types, ['18S'])
def test_retrieve_data_types_none(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 11',
self.info)
self.assertEqual(new.data_types, [])
qdb.study.Study.delete(new.id)
def test_retrieve_artifacts(self):
exp = [qdb.artifact.Artifact(1),
qdb.artifact.Artifact(2),
qdb.artifact.Artifact(3),
qdb.artifact.Artifact(4),
qdb.artifact.Artifact(5),
qdb.artifact.Artifact(6),
qdb.artifact.Artifact(7)]
self.assertEqual(self.study.artifacts(), exp)
self.assertEqual(self.study.artifacts(dtype="16S"), exp[-2:])
self.assertEqual(self.study.artifacts(dtype="18S"), exp[:-2])
self.assertEqual(self.study.artifacts(artifact_type="BIOM"),
[qdb.artifact.Artifact(4),
qdb.artifact.Artifact(5),
qdb.artifact.Artifact(6),
qdb.artifact.Artifact(7)])
self.assertEqual(self.study.artifacts(dtype="18S",
artifact_type="BIOM"),
[qdb.artifact.Artifact(4),
qdb.artifact.Artifact(5)])
def test_retrieve_artifacts_none(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 12',
self.info)
self.assertEqual(new.artifacts(), [])
qdb.study.Study.delete(new.id)
def test_retrieve_prep_templates(self):
self.assertCountEqual(
self.study.prep_templates(),
[qdb.metadata_template.prep_template.PrepTemplate(1),
qdb.metadata_template.prep_template.PrepTemplate(2)])
def test_retrieve_prep_templates_none(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 13',
self.info)
self.assertEqual(new.prep_templates(), [])
qdb.study.Study.delete(new.id)
def test_analyses(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 13',
self.info)
self.assertEqual(qdb.study.Study(1).analyses(), [
qdb.analysis.Analysis(1), qdb.analysis.Analysis(2),
qdb.analysis.Analysis(3)])
self.assertEqual(qdb.study.Study(2).analyses(), [])
qdb.study.Study.delete(new.id)
def test_environmental_packages(self):
obs = self.study.environmental_packages
exp = ['soil', 'plant-associated']
self.assertEqual(sorted(obs), sorted(exp))
def test_environmental_packages_setter(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 14',
self.info)
obs = new.environmental_packages
exp = []
self.assertEqual(obs, exp)
new_values = ['air', 'human-oral']
new.environmental_packages = new_values
obs = new.environmental_packages
self.assertEqual(sorted(obs), sorted(new_values))
qdb.study.Study.delete(new.id)
def test_environmental_packages_setter_typeerror(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 15',
self.info)
with self.assertRaises(TypeError):
new.environmental_packages = 'air'
qdb.study.Study.delete(new.id)
def test_environmental_packages_setter_valueerror(self):
new = qdb.study.Study.create(
qdb.user.User('test@foo.bar'),
'NOT Identification of the Microbiomes for Cannabis Soils 16',
self.info)
with self.assertRaises(ValueError):
new.environmental_packages = ['air', 'not a package']
qdb.study.Study.delete(new.id)
def test_environmental_packages_sandboxed(self):
with self.assertRaises(qdb.exceptions.QiitaDBStatusError):
self.study.environmental_packages = ['air']
def test_study_tags(self):
# testing empty tags
obs = qdb.study.Study.get_tags()
self.assertEqual(obs, {'admin': [], 'user': []})
# inserting new tags
user = qdb.user.User('test@foo.bar')
tags = ['this is my tag', 'I want GOLD!!', 'this is my tag']
qdb.study.Study.insert_tags(user, tags)
# now as admin
admin = qdb.user.User('admin@foo.bar')
admin_tags = ['actual GOLD!', 'this is my tag']
qdb.study.Study.insert_tags(admin, admin_tags)
# testing that insertion went fine
obs = qdb.study.Study.get_tags()
exp = {'user': ['I want GOLD!!', 'this is my tag'],
'admin': ['actual GOLD!']}
self.assertEqual(obs, exp)
# assigning the tags to study as user
study = qdb.study.Study(1)
tags = ['this is my tag', 'actual GOLD!']
message = study.update_tags(user, tags)
self.assertCountEqual(study.tags, tags[:1])
self.assertEqual(message, 'Only admins can assign: actual GOLD!')
# now like admin
message = study.update_tags(admin, tags)
self.assertCountEqual(study.tags, tags)
self.assertEqual(message, '')
# cleaning tags
message = study.update_tags(user, [])
self.assertEqual(study.tags, ['actual GOLD!'])
self.assertEqual(message, 'You cannot remove: actual GOLD!')
message = study.update_tags(admin, [])
self.assertEqual(study.tags, [])
self.assertEqual(message, '')
if __name__ == "__main__":
main()
|
{
"content_hash": "e262f1a78be2b64292f8f94055be08b7",
"timestamp": "",
"source": "github",
"line_count": 911,
"max_line_length": 79,
"avg_line_length": 42.50054884742042,
"alnum_prop": 0.5911204091120409,
"repo_name": "antgonza/qiita",
"id": "23663046d21b3eaca58039e45ae0475d41c57aae",
"size": "38718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiita_db/test/test_study.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2334"
},
{
"name": "HTML",
"bytes": "983129"
},
{
"name": "JavaScript",
"bytes": "95318"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "87575"
},
{
"name": "Python",
"bytes": "2615829"
},
{
"name": "Shell",
"bytes": "3016"
}
],
"symlink_target": ""
}
|
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner('.molecule/ansible_inventory').get_hosts('all')
def test_zabbixagent_running_and_enabled(Service, SystemInfo):
zabbixagent = Service("zabbix-agent")
assert zabbixagent.is_running
# Find out why it fails on debian
if SystemInfo.distribution != 'debian':
assert zabbixagent.is_enabled
def test_zabbix_agent_dot_conf(File):
passwd = File("/etc/zabbix/zabbix_agentd.conf")
assert passwd.user == "root"
assert passwd.group == "root"
assert passwd.mode == 0o644
assert passwd.contains("Server=192.168.3.33")
assert passwd.contains("ServerActive=192.168.3.33")
assert passwd.contains("ListenIP=0.0.0.0")
assert passwd.contains("DebugLevel=3")
def test_zabbix_include_dir(File):
zabbixagent = File("/etc/zabbix/zabbix_agentd.d")
assert zabbixagent.is_directory
assert zabbixagent.user == "root"
assert zabbixagent.group == "root"
def test_socker(Socket):
assert Socket("tcp://0.0.0.0:10050").is_listening
def test_zabbix_package(Package, SystemInfo):
zabbixagent = Package('zabbix-agent')
assert zabbixagent.is_installed
if SystemInfo.distribution == 'debian':
assert zabbixagent.version.startswith("1:3.2")
if SystemInfo.distribution == 'centos':
assert zabbixagent.version.startswith("3.2")
|
{
"content_hash": "2f15fbd3a098d318a723dfe2b15e9c56",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 31.704545454545453,
"alnum_prop": 0.7089605734767025,
"repo_name": "bititanb/ansible-taskmngr",
"id": "874c51ca274778ebae37063734f9b4358079b72c",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/dj-wasabi.zabbix-agent/tests/test_docker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "973"
},
{
"name": "Python",
"bytes": "3807"
},
{
"name": "Ruby",
"bytes": "1970"
},
{
"name": "Shell",
"bytes": "3301"
}
],
"symlink_target": ""
}
|
import json
from webob import Request
class PrintJsonMiddleware(object):
""" JSON prettyprinter WSGI middleware.
If a special query parameter (by default "pj", for "Print JSON")
is found in the query string, assume the response body contains JSON data
which should be "printed" to the response as text/plain.
If the parameter has no value, the body is not touched (only the
Content-Type header changes).
If the parameter has a positive integer value, prettyprint the JSON
(sort the keys and treat the parm value as an indent value).
Note that the parameter is removed from the request query string
before the middleware invokes the wrapped application.
"""
def __init__(self, app, parm='pj'):
self.app = app
self.parm = parm
def __call__(self, environ, start_response):
pretty = False
indent = 0
req = Request(environ)
if self.parm in req.GET:
pretty = True
try:
indent = int(req.GET.get(self.parm, 0))
except:
pass
# Rewrite the query_string removing parm.
parms = []
for parm in req.query_string.split('&'):
name = parm.split('=')[0]
if name != self.parm:
parms.append(parm)
req.query_string = '&'.join(parms)
resp = req.get_response(self.app)
if pretty:
resp.content_type = 'text/plain'
if indent > 0:
resp.body = json.dumps(json.loads(resp.body),
indent=indent,
sort_keys=True)
return resp(environ, start_response)
def filter_factory(conf, **kwargs):
"""
Factory for creating :mod:`paste` filters. Full documentation can be found
in `the paste docs <http://pythonpaste.org/deploy/#paste-filter-factory>`_.
"""
def filter(app):
return PrintJsonMiddleware(app, **kwargs)
return filter
def filter_app_factory(app, conf, **kwargs):
"""
Creates a single :mod:`paste` filter. Full documentation can be found in
`the paste docs <http://pythonpaste.org/deploy/#paste-filter-factory>`_.
"""
return PrintJsonMiddleware(app, **kwargs)
|
{
"content_hash": "07e40b7ece40943dcaf62259df1cdc17",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 37.064516129032256,
"alnum_prop": 0.5918189730200174,
"repo_name": "sbrauer/print_json_middleware",
"id": "5a755b46deec2acf1cb32448b23c9d82e92a14c3",
"size": "2298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "print_json_middleware/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5328"
}
],
"symlink_target": ""
}
|
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32gui_struct
import win32api
import win32con, winerror
import struct, array
import commctrl
import Queue
import os
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default, mask in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.iteritems():
if name not in self.__dict__:
raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and attr not in self.__dict__:
raise AttributeError(attr)
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
mask = 0
# calc the mask
for name, fmt, default, this_mask in self._struct_items_:
if this_mask is not None and self.__dict__.get(name) is not None:
mask |= this_mask
self.mask = mask
for name, fmt, default, this_mask in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
# Note this demo still works with byte strings. An
# alternate strategy would be to use unicode natively
# and use the 'W' version of the messages - eg,
# LVM_SETITEMW etc.
val = val + "\0"
if isinstance(val, unicode):
val = val.encode("mbcs")
str_buf = array.array("b", val)
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("iItem", "i", 0, None),
("iSubItem", "i", 0, None),
("state", "I", 0, commctrl.LVIF_STATE),
("stateMask", "I", 0, None),
("text", "z", None, commctrl.LVIF_TEXT),
("iImage", "i", 0, commctrl.LVIF_IMAGE),
("lParam", "i", 0, commctrl.LVIF_PARAM),
("iIdent", "i", 0, None),
]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("fmt", "i", 0, commctrl.LVCF_FMT),
("cx", "i", 0, commctrl.LVCF_WIDTH),
("text", "z", None, commctrl.LVCF_TEXT),
("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
("iImage", "i", 0, commctrl.LVCF_IMAGE),
("iOrder", "i", 0, commctrl.LVCF_ORDER),
]
class DemoWindowBase:
def __init__(self):
win32gui.InitCommonControls()
self.hinst = win32gui.dllhandle
self.list_data = {}
def _RegisterWndClass(self):
className = "PythonDocSearch"
message_map = {}
wc = win32gui.WNDCLASS()
wc.SetDialogProc() # Make it a dialog class.
wc.hInstance = self.hinst
wc.lpszClassName = className
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = message_map # could also specify a wndproc.
# C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
## py.ico went away in python 2.5, load from executable instead
this_app=win32api.GetModuleHandle(None)
try:
wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
except win32gui.error:
wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
try:
classAtom = win32gui.RegisterClass(wc)
except win32gui.error, err_info:
if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
raise
return className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
title = "Dynamic Dialog Demo"
# Window frame and title
dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
# List control.
# Can't make this work :(
## s = cs | win32con.WS_TABSTOP
## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
WM_SEARCH_RESULT: self.OnSearchResult,
WM_SEARCH_FINISHED: self.OnSearchFinished,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def _SetupList(self):
child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
# Add an image list - use the builtin shell folder icon - this
# demonstrates the problem with alpha-blending of icons on XP if
# winxpgui is not used in place of win32gui.
il = win32gui.ImageList_Create(
win32api.GetSystemMetrics(win32con.SM_CXSMICON),
win32api.GetSystemMetrics(win32con.SM_CYSMICON),
commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
1, # initial size
0) # cGrow
shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
win32gui.ImageList_ReplaceIcon(il, -1, small[0])
win32gui.DestroyIcon(small[0])
win32gui.DestroyIcon(large[0])
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
commctrl.LVSIL_SMALL, il)
# Setup the list control columns.
lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
lvc.fmt = commctrl.LVCFMT_LEFT
lvc.iSubItem = 1
lvc.text = "Title"
lvc.cx = 200
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
lvc.iSubItem = 0
lvc.text = "Order"
lvc.cx = 50
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
win32gui.UpdateWindow(self.hwnd)
def ClearListItems(self):
win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
self.list_data = {}
def AddListItem(self, data, *columns):
num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
item = LVITEM(text=columns[0], iItem = num_items)
new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
col_no = 1
for col in columns[1:]:
item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
col_no += 1
self.list_data[new_index] = data
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l,b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
# The list control
win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
# The last column of the list control.
new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnSearchResult(self, hwnd, msg, wparam, lparam):
try:
while 1:
params = self.result_queue.get(0)
self.AddListItem(*params)
except Queue.Empty:
pass
def OnSearchFinished(self, hwnd, msg, wparam, lparam):
print "OnSearchFinished"
def OnNotify(self, hwnd, msg, wparam, lparam):
info = win32gui_struct.UnpackNMITEMACTIVATE(lparam)
if info.code == commctrl.NM_DBLCLK:
print "Double click on item", info.iItem+1
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_SEARCH:
self.ClearListItems()
def fill_slowly(q, hwnd):
import time
for i in range(20):
q.put(("whatever", str(i+1), "Search result " + str(i) ))
win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
time.sleep(.25)
win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
import threading
self.result_queue = Queue.Queue()
thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
thread.start()
elif id == IDC_BUTTON_DISPLAY:
print "Display button selected"
sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
print "The selected item is", sel+1
# These function differ based on how the window is used, so may be overridden
def OnClose(self, hwnd, msg, wparam, lparam):
raise NotImplementedError
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
# An implementation suitable for use with the Win32 Window functions (ie, not
# a true dialog)
class DemoWindow(DemoWindowBase):
def CreateWindow(self):
# Create the window via CreateDialogBoxIndirect - it can then
# work as a "normal" window, once a message loop is established.
self._DoCreate(win32gui.CreateDialogIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.DestroyWindow(hwnd)
# We need to arrange to a WM_QUIT message to be sent to our
# PumpMessages() loop.
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
def DoModal(self):
return self._DoCreate(win32gui.DialogBoxIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.EndDialog(hwnd, 0)
def DemoModal():
w=DemoDialog()
w.DoModal()
def DemoCreateWindow():
w=DemoWindow()
w.CreateWindow()
# PumpMessages runs until PostQuitMessage() is called by someone.
win32gui.PumpMessages()
if __name__=='__main__':
DemoModal()
DemoCreateWindow()
|
{
"content_hash": "acb9ccad942f0b6f424e4588fd79a746",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 172,
"avg_line_length": 40.00840336134454,
"alnum_prop": 0.5939228453406147,
"repo_name": "PopCap/GameIdea",
"id": "4cf1fb13bb7b2249d5d107fb7ecd6197c03ba269",
"size": "14824",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/win32/Demos/win32gui_dialog.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "238055"
},
{
"name": "Assembly",
"bytes": "184134"
},
{
"name": "Batchfile",
"bytes": "116983"
},
{
"name": "C",
"bytes": "84264210"
},
{
"name": "C#",
"bytes": "9612596"
},
{
"name": "C++",
"bytes": "242290999"
},
{
"name": "CMake",
"bytes": "548754"
},
{
"name": "CSS",
"bytes": "134910"
},
{
"name": "GLSL",
"bytes": "96780"
},
{
"name": "HLSL",
"bytes": "124014"
},
{
"name": "HTML",
"bytes": "4097051"
},
{
"name": "Java",
"bytes": "757767"
},
{
"name": "JavaScript",
"bytes": "2742822"
},
{
"name": "Makefile",
"bytes": "1976144"
},
{
"name": "Objective-C",
"bytes": "75778979"
},
{
"name": "Objective-C++",
"bytes": "312592"
},
{
"name": "PAWN",
"bytes": "2029"
},
{
"name": "PHP",
"bytes": "10309"
},
{
"name": "PLSQL",
"bytes": "130426"
},
{
"name": "Pascal",
"bytes": "23662"
},
{
"name": "Perl",
"bytes": "218656"
},
{
"name": "Python",
"bytes": "21593012"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "2889614"
},
{
"name": "Tcl",
"bytes": "1452"
}
],
"symlink_target": ""
}
|
from corehq.motech.repeaters.utils import RepeaterMigrationHelper
from corehq.motech.dhis2.repeaters import SQLDhis2EntityRepeater
class Command(RepeaterMigrationHelper):
@classmethod
def couch_doc_type(cls):
return 'Dhis2EntityRepeater'
@classmethod
def sql_class(cls):
return SQLDhis2EntityRepeater
@classmethod
def _get_string_props(cls):
return ["version", "include_app_id_param"]
@classmethod
def _get_list_props(cls):
return ['white_listed_case_types', 'black_listed_users']
@classmethod
def _get_schema_props(cls):
return ['dhis2_entity_config']
def get_sql_options_obj(self, doc):
return {
"options": {
"include_app_id_param": doc.get("include_app_id_param"),
"dhis2_entity_config": doc.get("dhis2_entity_config"),
"version": doc.get("version"),
"white_listed_case_types": doc.get("white_listed_case_types"),
"black_listed_users": doc.get("black_listed_users")
}
}
|
{
"content_hash": "4f7271f0f185f6c28ca131c0b7b61860",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 30.13888888888889,
"alnum_prop": 0.6156682027649769,
"repo_name": "dimagi/commcare-hq",
"id": "35d8ef95ca15ace26821b21cab37b69c01fbdb37",
"size": "1085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/motech/repeaters/management/commands/migrate_dhis2entityrepeater.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import getpass
import mycuinfo
# fix for python 3.x
try:
raw_input = input
except NameError:
pass
# define the username & password for the cuSession
user0 = raw_input("username: ")
pass0 = getpass.getpass("password: ")
# create the cuLog Session
cu_student = mycuinfo.CUSession(user0, pass0)
# if the loggin session is a valid one
if cu_student.valid:
# example of how to get the books from CSCI 2700, Section 010 (Fall 2014)
print(cu_student.books("CSCI", "2270", "010", term=2167))
# example of how to get the info of the user
print(cu_student.info())
# example of how to get the classes of the user (Fall 2014)
print(cu_student.classes())
# example of how to get the GPA of the user
print(cu_student.GPA())
else:
print("Bad user. Check the username/password")
|
{
"content_hash": "83513dcc15ba5fc77a658e2da5819951",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 24.727272727272727,
"alnum_prop": 0.6911764705882353,
"repo_name": "719Ben/myCUinfo-API",
"id": "1712ba97b14b096c6c2db4bf808cfcafdc4f023d",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13120"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import os
import pytest
from IPython.display import display
from psd_tools.psd.descriptor import (
TYPES, Descriptor, Reference, Double, String, Bool, LargeInteger, Integer,
UnitFloat
)
from psd_tools.terminology import Unit
from ..utils import check_write_read, check_read_write, TEST_ROOT
DESCRIPTOR_DATA = ['0.dat', '1.dat']
@pytest.mark.parametrize('cls', [TYPES[key] for key in TYPES])
def test_empty_wr(cls):
check_write_read(cls())
@pytest.mark.parametrize('filename', DESCRIPTOR_DATA)
def test_descriptor_rw(filename):
filepath = os.path.join(TEST_ROOT, 'descriptors', filename)
with open(filepath, 'rb') as f:
check_read_write(Descriptor, f.read())
@pytest.mark.parametrize('filename', DESCRIPTOR_DATA)
def test_descriptor_display(filename):
filepath = os.path.join(TEST_ROOT, 'descriptors', filename)
with open(filepath, 'rb') as f:
value = Descriptor.frombytes(f.read())
display(value)
@pytest.mark.parametrize(
'fixture', [(
b'\x00\x00\x00\x01name\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00name\x00'
b'\x00\x00\x030j0W\x00\x00'
)]
)
def test_reference_rw(fixture):
check_read_write(Reference, fixture)
@pytest.mark.parametrize(
'kls, value', [
(Double, 1.),
(String, ''),
(Bool, True),
(LargeInteger, 1),
(Integer, 1),
]
)
def test_value_elements(kls, value):
fixture = kls(value)
assert fixture == value
@pytest.mark.parametrize(
'unit, value', [
(Unit.Pixels, 100.0),
(Unit.Points, 0.0),
]
)
def test_unit_float(unit, value):
fixture = UnitFloat(unit=unit, value=value)
assert fixture == value
assert fixture + 1.0
assert isinstance(float(fixture), float)
@pytest.mark.parametrize('fixture', [
b'RrCm\x00\x00\x00\x00\x00\x00\x00\x00',
b'#Pxl\x00\x00\x00\x00\x00\x00\x00\x00',
])
def test_unit_float_enum(fixture):
unitfloat = UnitFloat.frombytes(fixture)
|
{
"content_hash": "d1ac8a48e4a94550eed7e360e345ac70",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 25.620253164556964,
"alnum_prop": 0.6635375494071146,
"repo_name": "psd-tools/psd-tools",
"id": "c8c1a8968766902dce585d6d2d777309f2421335",
"size": "2024",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/psd_tools/psd/test_descriptor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "3590"
},
{
"name": "Python",
"bytes": "572455"
}
],
"symlink_target": ""
}
|
import acq4.pyqtgraph as pg
import numpy as np
import acq4.pyqtgraph.opengl as gl
import acq4.Manager
import acq4.analysis.atlas.CochlearNucleus as cn
man = acq4.Manager.getManager()
initialized = False
def __reload__(old): ## re-use existing objects if module is reloaded
global initialized, w, v, atlas
initialized = True
w = old['w']
v = old['v']
atlas = old['atlas']
if not initialized:
atlas = cn.CochlearNucleus()
w = pg.GraphicsWindow()
w.setRenderHints(pg.QtGui.QPainter.Antialiasing | pg.QtGui.QPainter.TextAntialiasing | pg.QtGui.QPainter.SmoothPixmapTransform)
w.setBackground(pg.mkColor('w'))
v = w.addViewBox()
v.setAspectLocked()
v.invertY()
w.show()
initialized = True
def show(dh=None):
"""
Display a graphic of the currently selected slice / cell
"""
global v, g, atlas
if dh is None:
dh = man.currentFile
v.clear()
if 'cell' in dh.shortName().lower():
cd = dh
sd = cd.parent()
else:
sd = dh
cd = None
atlas.loadState(sd)
g = atlas.schematicGraphicsItems()
v.addItem(g)
if cd is not None:
## small image to go over slice schematic
imgf = cd['morphology.png']
imgd = pg.colorToAlpha(imgf.read(), np.array([255,255,255]))
mimg1 = pg.ImageItem(imgd)
tr = pg.SRTTransform(imgf.info()['userTransform'])
mimg1.setTransform(tr)
mimg1.setParentItem(g.sliceGroup)
g.cellImg1 = mimg1
## larger image to be displayed above
cellGroup = pg.ItemGroup()
g.cellGroup = cellGroup
mimg2 = pg.ImageItem(imgd)
mimg2.setParentItem(cellGroup)
mimg2.setTransform(tr * g.sliceGroup.transform())
mimg2.scale(1.0 / g.sliceScaleFactor, 1.0 / g.sliceScaleFactor)
#angle = pg.SRTTransform(g.sliceGroup.transform()).getRotation()
#mimg2.rotate(angle)
cellScale = 50.
cellGroup.scale(cellScale, cellScale)
g.cellImg2 = mimg2
## reposition image above slice schematic
b1 = g.atlasGroup.mapRectToParent(g.atlasGroup.childrenBoundingRect())
b2 = g.sliceClip.mapRectToParent(g.sliceClip.boundingRect())
bounds = b1 | b2
cellGroup.setParentItem(g)
imgBounds = g.mapRectFromItem(mimg2, mimg2.boundingRect())
pos = pg.Point(bounds.center().x() - imgBounds.center().x(), bounds.top()-imgBounds.bottom())
cellGroup.setPos(pos)
## add scale bar
sbLength = 25e-6
g.cellScale = pg.QtGui.QGraphicsLineItem(0.0, 0.0, sbLength, 0.0)
g.cellScale.setPen(pg.mkPen(color=0.0, width=100e-6/cellScale, cosmetic=False))
g.cellScale.setParentItem(cellGroup)
g.cellScale.setZValue(10)
g.cellScale.text = pg.TextItem(u"25 µm", anchor=(0.5, 1), color=(0,0,0))
g.cellScale.text.setParentItem(g.cellScale)
g.cellScale.text.setPos(sbLength*0.5, -50e-6/cellScale)
corner = mimg2.mapToParent(mimg2.boundingRect()).boundingRect().bottomRight()
g.cellScale.setPos(corner + pg.Point(-sbLength/2., -sbLength/3.))
cell = dh
sl = cell.parent()
day = sl.parent()
name = day.shortName() + "_" + sl.shortName() + "_" + cell.shortName()
g.cellName = pg.TextItem(name, color=(0,0,0))
g.cellName.setParentItem(cellGroup)
g.cellName.setPos(corner + pg.Point(-sbLength*4,-sbLength/4.))
## auto-range the view
#bounds = bounds | g.mapFromItem(mimg2, mimg2.boundingRect()).boundingRect()
#v.setRange(bounds)
def showMap(dh=None):
"""
Display a graphic of an input map for the currently selected cell
"""
global v, g, atlas
if dh is None:
dh = man.currentFile
db = man.getModule('Data Manager').currentDatabase()
v.clear()
cd = dh
sd = cd.parent()
atlas.loadState(sd)
g = atlas.schematicGraphicsItems(contours=False, sliceScale=10, cellDir=cd)
v.addItem(g)
cellGroup = pg.ItemGroup()
g.cellGroup = cellGroup
cellScale = 10.
cellGroup.scale(cellScale, cellScale)
cellGroup.setParentItem(g)
g.atlasScale.hide()
g.arrowGroup.hide()
## reposition/rescale atlas group
b1 = g.atlasGroup.mapRectToParent(g.atlasGroup.childrenBoundingRect())
b2 = g.sliceClip.mapRectToParent(g.sliceClip.boundingRect())
g.atlasGroup.setPos(b2.right()-b1.left()+0.001, b2.top()-b1.top())
b1 = g.atlasGroup.mapRectToParent(g.atlasGroup.childrenBoundingRect())
bounds = b1 | b2
if cd.exists('morphology.png'):
## small image to go over slice schematic
imgf = cd['morphology.png']
imgd = pg.colorToAlpha(imgf.read(), np.array([255,255,255]))
mimg1 = pg.ImageItem(imgd)
tr = pg.SRTTransform(imgf.info()['userTransform'])
mimg1.setTransform(tr)
mimg1.setParentItem(g.sliceGroup)
mimg1.setZValue(100)
g.cellImg1 = mimg1
## larger image to be displayed above
mimg2 = pg.ImageItem(imgd)
mimg2.setParentItem(cellGroup)
mimg2.setTransform(tr * g.sliceGroup.transform())
mimg2.scale(1.0 / g.sliceScaleFactor, 1.0 / g.sliceScaleFactor)
#angle = pg.SRTTransform(g.sliceGroup.transform()).getRotation()
#mimg2.rotate(angle)
g.cellImg2 = mimg2
cellGroup.scale(5,5)
## reposition next to slice schematic
imgBounds = g.mapRectFromItem(mimg2, mimg2.boundingRect())
pos = pg.Point(bounds.right()-imgBounds.left(), bounds.bottom()-imgBounds.bottom())
cellGroup.setPos(pos)
## add scale bar
sbLength = 50e-6
g.cellScale = pg.QtGui.QGraphicsLineItem(0.0, 0.0, sbLength, 0.0)
g.cellScale.setPen(pg.mkPen(color=0.0, width=5))
g.cellScale.setZValue(10)
g.cellScale.text = pg.TextItem(u"%d µm" % int(sbLength*1e6), anchor=(0.5, 1), color=(0,0,0))
g.cellScale.text.setParentItem(g.cellScale)
g.cellScale.text.setPos(sbLength*0.5, -50e-6/cellScale)
#g.cellScale = pg.ScaleBar(sbLength)
g.cellScale.setParentItem(cellGroup)
corner = mimg2.mapToParent(mimg2.boundingRect()).boundingRect().bottomRight()
g.cellScale.setPos(corner + pg.Point(-sbLength/2., -sbLength/3.))
pos = pg.SRTTransform(cd.info()['userTransform']).map(pg.Point(0,0))
size = pg.Point(30e-6, 30e-6)
g.cellMarker = pg.QtGui.QGraphicsEllipseItem(pg.QtCore.QRectF(pos-size, pos+size))
g.cellMarker.setBrush(pg.mkBrush(100,100,255,150))
g.cellMarker.setPen(pg.mkPen('k', width=0.5))
g.cellMarker.setParentItem(g.sliceGroup)
g.cellMarker.setZValue(90)
sites = db.select('map_site_view', ['ProtocolDir', 'HasInput'], where={'CellDir': cd})
if len(sites) > 0:
tr = sites[0]['ProtocolDir'].parent().info().get('userTransform', None)
if tr is None:
tr = pg.SRTTransform()
else:
tr = pg.SRTTransform(tr)
pos = []
size = sites[0]['ProtocolDir'].info()['Scanner']['spotSize']
brushes = []
for site in sites:
pd = site['ProtocolDir']
x,y = pd.info()['Scanner']['position']
p2 = tr.map(pg.Point(x,y))
pos.append((p2.x(), p2.y()))
if site['HasInput']:
brushes.append(pg.mkBrush('w'))
else:
brushes.append(pg.mkBrush(None))
inputMap = pg.ScatterPlotItem(pos=np.array(pos), size=size, brush=brushes, pen=(0,0,0,50), pxMode=False, antialias=True)
g.sliceGroup.addItem(inputMap)
g.inputMap = inputMap
inputMap.setZValue(50)
cell = dh
sl = cell.parent()
day = sl.parent()
name = day.shortName() + "_" + sl.shortName() + "_" + cell.shortName()
rec = db.select('DirTable_Cell', '*', where={'Dir': cd})[0]
name += "\nType: " + str(rec['CellType']) + " Temp: " + str(rec['Temperature']) + " Internal: " + str(rec['Internal']) + " Age:" + str(rec['Age']) + " Raccess: " + str(rec['AccessResistance'])
name += "\nDirect Area: %s>0pA %s>20pA %s>100pA" % (str(rec['DirectAreaGt0']), str(rec['DirectAreaGt20']), str(rec['DirectAreaGt100']))
name += " Direct n spikes: " + str(rec['DirectNSpikes'])
name += "\nSpont Ex Decay: %s Spont In Decay: %s" % (str(rec['SpontExDecay1']), str(rec['SpontInDecay']))
name += "\nExcitatory input" if (rec['EvokedExDecay'] is not None or rec['EvokedExAmp'] is not None) else ""
print rec
#name += '\nDirect Slow Decay: %s %s' % (str(rec['DirectAreaGT0']), str(rec['DirectAreaGT0']))
g.cellName = pg.TextItem(name, color=(0,0,0))
g.cellName.setParentItem(g)
g.cellName.setPos(0, bounds.bottom())
## auto-range the view
#bounds = bounds | g.mapFromItem(mimg2, mimg2.boundingRect()).boundingRect()
#v.setRange(bounds)
def exportAll():
global v
with pg.ProgressDialog("exporting all..", 0, 1000) as dlg:
for day in man.baseDir.ls():
day = man.baseDir[day]
for sl in day.ls():
if 'slice' not in sl:
continue
sl = day[sl]
for cell in sl.ls():
if 'cell' not in cell:
continue
cell = sl[cell]
try:
m = cell['morphology.png']
except:
continue
show(cell)
pg.QtGui.QApplication.processEvents()
pg.QtGui.QApplication.processEvents()
name = day.shortName() + "_" + sl.shortName() + "_" + cell.shortName() + ".svg"
ex = pg.exporters.SVGExporter.SVGExporter(v.scene())
ex.export(name)
print name
if dlg.wasCanceled():
raise Exception("export cancelled")
def exportAllMaps():
global v
db = man.getModule('Data Manager').currentDatabase()
cells = db.select('DirTable_Cell', ['Dir'], where={'MapOK': 1})
cells.sort(key=lambda c: c['Dir'].name())
with pg.ProgressDialog("exporting all..", 0, 1000) as dlg:
for rec in cells:
cell = rec['Dir']
sl = cell.parent()
day = sl.parent()
showMap(cell)
pg.QtGui.QApplication.processEvents()
pg.QtGui.QApplication.processEvents()
name = 'map_' + day.shortName() + "_" + sl.shortName() + "_" + cell.shortName() + ".svg"
ex = pg.exporters.SVGExporter.SVGExporter(v.scene())
ex.export(name)
print name
if dlg.wasCanceled():
raise Exception("export cancelled")
|
{
"content_hash": "60cee48d81e516e740b68ae2f92d1daf",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 204,
"avg_line_length": 38.63636363636363,
"alnum_prop": 0.5790950226244344,
"repo_name": "hiuwo/acq4",
"id": "8c38ed9f60881d1bf6b8c2c56014647389ee1ada",
"size": "11076",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "acq4/analysis/scripts/sliceView.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "18652"
},
{
"name": "C",
"bytes": "1051646"
},
{
"name": "C++",
"bytes": "636100"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "4925976"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
}
|
import os
import sys
import yaml
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
from trello import TrelloClient, Unauthorized
class TrelloConnector(object):
"""
Retrieves trello credentials if they exist.
TODO: creates trello credentials if they don't exist.
Tests and returns a connection to Trello.
"""
def __init__(self):
self._api_key = None
self._token = None
def _set_creds(self):
config_file = os.path.join(os.path.expanduser('~'), '.tddc_config.yml')
if not os.path.isfile(config_file):
print('Trello configuration file not found at ' + config_file)
self._write_config()
while self._api_key is None or self._token is None:
try:
with open(config_file, 'rt') as ymlfile:
config = yaml.load(ymlfile)
self._extract_config(config)
except:
print('Invalid yaml file.')
self._write_config()
def _extract_config(self, config):
try:
self._api_key = config['trello']['api_key']
self._token = config['trello']['token']
except KeyError:
print('Invalid trello configuration file.')
self._write_config()
@lru_cache()
def get_client(self):
client = None
while client is None:
self._set_creds()
client = TrelloClient(api_key=self._api_key, token=self._token)
try:
client.list_hooks(self._token)
except Unauthorized:
print('Trello client is not authorized.')
client = None
self._write_config()
print('Trello client successfully authorized.')
return client
@staticmethod
def _write_config():
print('TrelloCredentials._write_configuration not yet implemented. For now, you can create a file '
'.tddc_config.yml in the user root directory with the format:')
print(' ')
print('trello:')
print(' api_key: <TRELL_API_KEY>')
print(' token: <TRELLO_TOKEN>')
print(' ')
print('You can get your Trello API key here: https://trello.com/app-key')
print('and your Trello token here: https://trello.com/1/authorize?expiration=1day&'
'scope=read,write,account&response_type=token&name=Server%20Token&key=<TRELLO_API_KEY>')
sys.exit(1)
|
{
"content_hash": "baabc09eb5f8ab307e86e28d47e8a44d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 107,
"avg_line_length": 34.56164383561644,
"alnum_prop": 0.5830360681728102,
"repo_name": "DataKind-SG/test-driven-data-cleaning",
"id": "3d4127326785a8b2e5b8d4a4f660b396f020bfbe",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tddc/credentials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34022"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
setup(name='simplewheel',
version='1.0',
packages=find_packages()
)
|
{
"content_hash": "152051316933b59f9fde21a7cabf0584",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 43,
"avg_line_length": 21.833333333333332,
"alnum_prop": 0.6564885496183206,
"repo_name": "zvezdan/pip",
"id": "f54a4502c5d8dc6bb66046830598bd5f842555b8",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/data/src/simplewheel-1.0/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "3965"
},
{
"name": "HTML",
"bytes": "2342"
},
{
"name": "Python",
"bytes": "1073344"
},
{
"name": "Shell",
"bytes": "382"
}
],
"symlink_target": ""
}
|
class SomeClass(object):
def __init__(self):
pass
def some_method(self):
pass
class NewClass(object):
def new_method(self):
pass
|
{
"content_hash": "e6eb58a851cb8b3aaad68205b4422ea7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 26,
"avg_line_length": 16.7,
"alnum_prop": 0.5688622754491018,
"repo_name": "mkwiatkowski/pythoscope",
"id": "e328c2408d1efb4e8962c764f1ba877b1178b3a5",
"size": "167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/data/appending_test_cases_module_modified.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1207"
},
{
"name": "Emacs Lisp",
"bytes": "3416"
},
{
"name": "Python",
"bytes": "596544"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect, Http404, HttpResponseBadRequest, HttpResponse
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.core.serializers.json import DjangoJSONEncoder
from django.views.generic import (
FormView, CreateView, UpdateView, DetailView, TemplateView, ListView,
RedirectView
)
from django.db.models import Min, Q, Count, F, Case, When, BooleanField
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import AccessMixin
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from calendar import month_name
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from itertools import chain
from urllib.parse import unquote_plus, unquote
from braces.views import UserFormKwargsMixin, PermissionRequiredMixin, LoginRequiredMixin, StaffuserRequiredMixin
from cms.constants import RIGHT
from cms.models import Page
import re
import logging
import json
from .models import (
ClassDescription, Event, Series, PublicEvent, EventOccurrence, EventRole, EventRegistration,
StaffMember, Instructor, Invoice, Customer, EventCheckIn
)
from .forms import (
SubstituteReportingForm, StaffMemberBioChangeForm, RefundForm, EmailContactForm,
RepeatEventForm, InvoiceNotificationForm, EventAutocompleteForm
)
from .constants import getConstant, EMAIL_VALIDATION_STR, REFUND_VALIDATION_STR
from .mixins import (
EmailRecipientMixin, StaffMemberObjectMixin, FinancialContextMixin,
AdminSuccessURLMixin, EventOrderMixin, SiteHistoryMixin,
ReferralInfoMixin
)
from .signals import get_customer_data, get_eventregistration_data
from .utils.requests import getIntFromGet
from .utils.timezone import ensure_timezone, ensure_localtime
# Define logger for this file
logger = logging.getLogger(__name__)
class EventRegistrationSelectView(PermissionRequiredMixin, EventOrderMixin, FormView):
'''
This view is used to select an event for viewing registration data in
the EventRegistrationSummaryView
'''
template_name = 'core/events_viewregistration_list.html'
permission_required = 'core.view_registration_summary'
reverse_time_ordering = True
form_class = EventAutocompleteForm
def get_queryset(self):
''' Recent events are listed in link form. '''
return Event.objects.filter(
Q(startTime__gte=timezone.now() - timedelta(days=90)) & (
Q(series__isnull=False) | Q(publicevent__isnull=False)
)
).annotate(count=Count('eventregistration')).annotate(**self.get_annotations()).exclude(
Q(count=0) & Q(status__in=[
Event.RegStatus.hidden, Event.RegStatus.regHidden, Event.RegStatus.disabled
])
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
queryset = self.get_queryset()
context.update({'queryset': queryset, 'object_list': queryset, 'event_list': queryset})
return context
def form_valid(self, form):
return HttpResponseRedirect(reverse(
'viewregistrations',
args=(form.cleaned_data.get('event').id,)
))
class EventRegistrationSummaryView(PermissionRequiredMixin, SiteHistoryMixin, DetailView):
'''
This view is used to access the set of registrations for a given series or event
'''
template_name = 'core/view_eventregistrations.html'
permission_required = 'core.view_registration_summary'
def get_object(self, queryset=None):
return get_object_or_404(
Event.objects.filter(id=self.kwargs.get('event_id')))
def get_context_data(self, **kwargs):
''' Add the list of registrations for the given series '''
# Update the site session data so that registration processes know to send return links to
# the view class registrations page. set_return_page() is in SiteHistoryMixin.
self.set_return_page('viewregistrations', _('View Registrations'), event_id=self.object.id)
registrations = EventRegistration.objects.filter(
event=self.object, cancelled=False,
registration__final=True,
).select_related(
'registration', 'event', 'customer',
'invoiceItem', 'role', 'registration__invoice',
).order_by(
F('customer__last_name').asc(nulls_last=True),
F('customer__first_name').asc(nulls_last=True),
)
extras_dict = {x: [] for x in registrations.values_list('id', flat=True)}
if registrations:
extras = get_eventregistration_data.send(
sender=EventRegistrationSummaryView, eventregistrations=registrations
)
for k, v in chain.from_iterable([x.items() for x in [y[1] for y in extras if y[1]]]):
extras_dict[k].extend(v)
context = {
'event': self.object,
'registrations': registrations,
'extras': extras_dict,
}
context.update(kwargs)
return super().get_context_data(**context)
class EventRegistrationJsonView(PermissionRequiredMixin, ListView):
'''
This view is used to access a list of event registrations for a particular date.
'''
permission_required = 'core.view_registration_summary'
def post(self, request, *args, **kwargs):
''' Parse the date and customer information that is passed. '''
def recurse_listing(listing, obj, extras=None, startTime=None, checkInType='O'):
'''
Recursively go through a list of model attributes, including attributes that
are of linked models.
'''
this_dict = {}
if not isinstance(listing, list):
raise ValueError('Invalid listing for recursion.')
for item in listing:
if isinstance(item, str):
# Handle a couple of special cases
if item == 'checkedIn':
kwargs = {'checkInType': checkInType}
if isinstance(startTime, datetime):
kwargs['date'] = startTime.date()
this_dict[item] = getattr(obj, item, None)(**kwargs)
elif item == 'getNextOccurrenceForDate':
# This view always uses the beginning of the current day
# when searching for the next EventOccurrence, to avoid
# unexpected behavior when using it for at-the-door
# registration.
this_dict[item] = getattr(
getattr(obj, item, None)(startTime),
'id', None
)
else:
this_dict[item] = getattr(obj, item, None)
elif isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], str):
this_item = getattr(obj, item[0], None)
# Added because of issues with polymorphic queries; we need
# to ensure we have the child model.
if item[0] == 'event':
this_item = getattr(
this_item, this_item.polymorphic_ctype.model, None
)
this_dict[item[0]] = recurse_listing(
item[1], this_item, startTime=startTime,
checkInType=checkInType
)
if (
isinstance(obj, EventRegistration) and
extras_dict is not None and
extras_dict.get(obj.id, None)
):
this_dict['extras'] = extras_dict[obj.id]
return this_dict
try:
post_data = json.loads(self.request.body)
except json.decoder.JSONDecodeError:
data = json.dumps(
{'code': 'invalid_json', 'message': _('Invalid JSON.')},
cls=DjangoJSONEncoder
)
return HttpResponse(data, content_type='application/json')
if post_data.get('date', None):
try:
self.startTime = ensure_localtime(datetime.strptime(post_data.get('date', ''), '%Y-%m-%d'))
self.endTime = self.startTime + timedelta(days=1)
except ValueError:
logger.warning('Invalid date passed to EventRegistrationJsonView.')
if post_data.get('id', None):
try:
self.customer = Customer.objects.get(id=post_data.get('id'))
except ObjectDoesNotExist:
logger.warning('Invalid customer passed to EventRegistrationJsonView.')
# Only set the attribute if passed, but the downstream uses of this
# attribute default to occurrence-based check-in unless otherwise
# specified. Ignore invalid choices.
if (
post_data.get('checkInType', None) in
[x[0] for x in EventCheckIn.CHECKIN_TYPE_CHOICES]
):
self.checkInType = post_data.get('checkInType')
queryset = self.get_queryset()
if post_data.get('eventList'):
queryset = queryset.filter(event__id__in=post_data.get('eventList'))
# These are all the various attributes that we want to be populated in the response JSON
attributeList = [
'id', 'dropIn', 'refundFlag', 'warningFlag',
'checkedIn', 'occurrenceId', 'occurrenceStartTime', 'student',
('customer', ['id', 'fullName', 'email', 'numClassSeries']),
('event', ['id', 'name', 'url',]),
('registration', [
'id', 'refundFlag', 'grossTotal', 'total', 'discounted', 'url',
('invoice', [
'id', 'grossTotal', 'total', 'adjustments', 'taxes', 'fees',
'outstandingBalance', 'statusLabel', 'url'
]),
]),
('invoiceItem', [
'id', 'grossTotal', 'total', 'adjustments', 'taxes', 'fees',
'revenueMismatch', 'revenueNotYetReceived', 'revenueReceived',
'revenueReported'
]),
('role', ['id', 'name']),
]
extras_dict = {}
if queryset:
extras = get_eventregistration_data.send(sender=EventRegistrationJsonView, eventregistrations=queryset)
extras_dict = {x: [] for x in queryset.values_list('id', flat=True)}
for k, v in chain.from_iterable([x.items() for x in [y[1] for y in extras if isinstance(y[1], dict)]]):
extras_dict[k].extend(v)
this_listing = [
recurse_listing(
attributeList, q, extras=extras_dict,
startTime=getattr(self, 'startTime', None),
checkInType=getattr(self, 'checkInType', 'O')
)
for q in queryset
]
data = json.dumps(this_listing, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
def get_queryset(self):
filters = {'cancelled': False}
if getattr(self, 'startTime', None):
filters['event__eventoccurrence__endTime__gte'] = self.startTime
if getattr(self, 'endTime', None):
filters['event__eventoccurrence__startTime__lte'] = self.endTime
if getattr(self, 'customer', None):
filters['customer'] = self.customer
dropInFilters = Q(dropIn=False) | (Q(dropIn=True) & Q(occurrences__id=F('occurrenceId')))
registrations = EventRegistration.objects.filter(
**filters
).annotate(
occurrenceId=F('event__eventoccurrence__id'),
occurrenceStartTime=F('event__eventoccurrence__startTime'),
).filter(dropInFilters).select_related(
'registration', 'event', 'customer',
'invoiceItem', 'role', 'registration__invoice',
).order_by('registration__firstName', 'registration__lastName')
return registrations
#################################
# Used for various form submission redirects (called by the AdminSuccessURLMixin)
class SubmissionRedirectView(SiteHistoryMixin, TemplateView):
template_name = 'cms/forms/submission_redirect.html'
def get_context_data(self, **kwargs):
'''
The URL to redirect to can be explicitly specified, or it can come
from the site session history, or it can be the default admin success page
as specified in the site settings.
'''
context = super().get_context_data(**kwargs)
redirect_url = unquote(self.request.GET.get('redirect_url', ''))
if not redirect_url:
redirect_url = self.get_return_page().get('url', '')
if not redirect_url:
try:
redirect_url = Page.objects.get(
pk=getConstant('general__defaultAdminSuccessPage')
).get_absolute_url(settings.LANGUAGE_CODE)
except ObjectDoesNotExist:
redirect_url = '/'
context.update({
'redirect_url': redirect_url,
'seconds': self.request.GET.get('seconds', 5),
})
return context
################################################
# For Viewing Invoices and sending notifications
class ViewInvoiceView(AccessMixin, FinancialContextMixin, SiteHistoryMixin, DetailView):
template_name = 'core/invoice.html'
model = Invoice
def get(self, request, *args, **kwargs):
'''
Invoices can be viewed only if the validation string is provided, unless
the user is logged in and has view_all_invoice permissions
'''
self.object = self.get_object()
user_has_permissions = request.user.has_perm('core.view_all_invoices')
user_has_validation_string = (
request.GET.get('v', None) == self.object.validationString
)
if user_has_validation_string or user_has_permissions:
context = self.get_context_data(
object=self.object,
user_has_permissions=user_has_permissions,
user_has_validation_string=user_has_validation_string
)
return self.render_to_response(context)
return self.handle_no_permission()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'invoice': self.object,
'payments': self.get_payments(),
})
# Update the session data so that subsequent views return to this page.
self.set_return_page('viewInvoice', _('Invoice'), pk=str(self.object.pk))
return context
def get_payments(self):
if not getattr(self, 'payments', None):
self.payments = self.object.get_payments()
return self.payments
class InvoiceNotificationView(FinancialContextMixin, AdminSuccessURLMixin,
PermissionRequiredMixin, StaffuserRequiredMixin, FormView):
success_message = _('Invoice notifications successfully sent.')
template_name = 'core/invoice_notification.html'
permission_required = 'core.send_invoices'
form_class = InvoiceNotificationForm
def form_valid(self, form):
invoice_ids = [
k.replace('invoice_', '') for k, v in form.cleaned_data.items() if 'invoice_' in k and v is True
]
invoices = [x for x in self.toNotify if str(x.id) in invoice_ids]
for invoice in invoices:
if invoice.get_default_recipients():
invoice.sendNotification()
messages.success(self.request, self.success_message)
return HttpResponseRedirect(self.get_success_url())
def dispatch(self, request, *args, **kwargs):
''' Get the set of invoices for which to permit notifications '''
if 'pk' in self.kwargs:
try:
self.invoices = Invoice.objects.filter(pk=self.kwargs.get('pk'))[:]
except ValueError:
raise Http404()
if not self.invoices:
raise Http404()
else:
ids = request.GET.get('invoices', '')
try:
self.invoices = Invoice.objects.filter(id__in=[x for x in ids.split(', ')])[:]
except ValueError:
return HttpResponseBadRequest(_('Invalid invoice identifiers specified.'))
if not self.invoices:
return HttpResponseBadRequest(_('No invoice identifiers specified.'))
toNotify = []
cannotNotify = []
for invoice in self.invoices:
if invoice.get_default_recipients():
toNotify.append(invoice)
else:
cannotNotify.append(invoice)
self.toNotify = toNotify
self.cannotNotify = cannotNotify
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
''' Pass the set of invoices to the form for creation '''
kwargs = super().get_form_kwargs()
kwargs['invoices'] = self.toNotify
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'toNotify': self.toNotify,
'cannotNotify': self.cannotNotify,
})
return context
#################################
# Refund processing and confirmation step views
class RefundConfirmationView(FinancialContextMixin, AdminSuccessURLMixin, PermissionRequiredMixin,
StaffuserRequiredMixin, SuccessMessageMixin, TemplateView):
success_message = _('Refund successfully processed.')
template_name = 'core/refund_confirmation.html'
permission_required = 'core.process_refunds'
def get(self, request, *args, **kwargs):
self.form_data = request.session.get(REFUND_VALIDATION_STR, {}).get('form_data', {})
if not self.form_data:
return HttpResponseRedirect(reverse('refundProcessing', args=(self.form_data.get('id'),)))
try:
self.invoice = Invoice.objects.get(id=self.form_data.get('id'))
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse('refundProcessing', args=(self.form_data.get('id'),)))
self.payments = self.invoice.get_payments()
if request.GET.get('confirmed', '').lower() == 'true' and self.payments:
return self.process_refund()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
total_refund_amount = self.form_data['total_refund_amount']
initial_refund_amount = self.form_data['initial_refund_amount']
amount_to_refund = max(total_refund_amount - initial_refund_amount, 0)
context.update({
'form_data': self.form_data,
'payments': self.payments,
'total_refund_amount': total_refund_amount,
'initial_refund_amount': initial_refund_amount,
'amount_to_refund': amount_to_refund,
})
return context
def process_refund(self):
refund_data = self.invoice.data.get('refunds', [])
total_refund_amount = self.form_data['total_refund_amount']
initial_refund_amount = self.form_data['initial_refund_amount']
amount_to_refund = max(total_refund_amount - initial_refund_amount, 0)
# Identify the items to which refunds should be allocated and update the adjustments line
# for those items. Fees are also allocated across the items for which the refund was requested.
refund_items = self.form_data.items()
item_refund_data = [(k.split('_')[2], v) for k, v in refund_items if k.startswith('item_refundamount_')]
adjustment_amounts = {x[0]: float(x[1]) for x in item_refund_data}
# Keep track of total refund fees as well as how much reamins to be
# refunded as we iterate through payments to refund them.
remains_to_refund = amount_to_refund
for this_payment in self.payments:
if remains_to_refund <= 0:
break
if not this_payment.refundable:
continue
this_payment_amount = this_payment.netAmountPaid or 0
this_refund_amount = min(this_payment_amount, remains_to_refund)
# This dictionary will be updated and then added to refund_data for
# this invoice whether the refund is successful or not
this_refund_response_data = {
'datetime': str(ensure_localtime(timezone.now())),
'id': this_payment.recordId,
'methodName': this_payment.methodName,
'amount': this_refund_amount,
}
this_refund_response = this_payment.refund(this_refund_amount)
if not this_refund_response:
# If no response is received, then we must stop because we cannot be sure that a refund has
# not already been processed.
this_refund_response_data.update({
'status': 'error',
'errorMessage': str(_('Error: No response from payment processing app. '
'Check payment processor records for refund status.')),
})
elif this_refund_response[0].get('status').lower() == 'success':
# A successful refund returns {'status': 'success'}
amount_refunded = this_refund_response[0].get('refundAmount', 0)
fees = this_refund_response[0].get('fees', 0)
this_refund_response_data.update({
'status': 'success',
'refundAmount': amount_refunded,
'fees': fees,
'response': [dict(this_refund_response[0]), ],
})
remains_to_refund -= amount_refunded
else:
this_refund_response_data.update({
'status': 'error',
'errorMessage': _('An unkown error has occurred. '
'Check payment processor records for refund status.'),
'response': [dict(this_refund_response[0]), ],
'id': this_payment.recordId,
'methodName': this_payment.methodName,
'invoice': self.invoice.id,
'refundAmount': this_refund_amount,
})
refund_data.append(this_refund_response_data)
if this_refund_response_data.get('status') == 'error':
logger.error(this_refund_response_data.get('errorMessage'))
logger.error(this_refund_response_data)
messages.error(self.request, this_refund_response_data.get('errorMessage'))
self.invoice.data['refunds'] = refund_data
total_applied = sum([x.get('refundAmount', 0) for x in refund_data if x.get('status') == 'success'])
total_fees = sum([x.get('fees', 0) for x in refund_data if x.get('status') == 'success'])
# Allocate whatever amount was previously successful across the
# items for which the refund was requested.
self.invoice.amountPaid -= total_applied
self.invoice.updateTotals(
save=True,
allocateAmounts={
'adjustments': -1*total_applied,
'fees': total_fees,
},
allocateWeights=adjustment_amounts
)
self.request.session.pop(REFUND_VALIDATION_STR, None)
return HttpResponseRedirect(self.get_success_url())
# If there were no errors, then check to ensure that the entire request refund was refunded.
# If so, then return success, otherwise return indicating that the refund was not completely
# applied.
if abs(remains_to_refund) <= 0.01:
messages.success(self.request, self.success_message)
else:
messages.error(
self.request,
_('Error, not all of the requested refund was applied. '
'Check invoice and payment processor records for details.')
)
self.invoice.data['refunds'] = refund_data
total_applied = sum([x.get('refundAmount', 0) for x in refund_data if x.get('status') == 'success'])
total_fees = sum([x.get('fees', 0) for x in refund_data if x.get('status') == 'success'])
self.invoice.amountPaid -= total_applied
if abs(
self.invoice.total + (self.invoice.taxes * self.invoice.buyerPaysSalesTax) +
self.invoice.adjustments - total_applied
) < 0.01:
self.invoice.status = Invoice.PaymentStatus.fullRefund
# Allocate whatever amount was previously successful across the
# items for which the refund was requested.
items = self.invoice.updateTotals(
save=True,
allocateAmounts={
'adjustments': -1*total_applied,
'fees': total_fees,
},
allocateWeights=adjustment_amounts
)
# If the refund is a complete refund and is associated with a registration,
# then cancel the EventRegistration entirely.
eventregs = EventRegistration.objects.filter(invoiceItem__in=items)
for this_item in items:
this_eventreg = eventregs.filter(invoiceItem=this_item).first()
if (
abs(
this_item.total + this_item.adjustments +
(this_item.taxes * self.invoice.buyerPaysSalesTax)
) < 0.01 and this_eventreg
):
this_eventreg.cancelled = True
this_eventreg.save()
self.request.session.pop(REFUND_VALIDATION_STR, None)
return HttpResponseRedirect(self.get_success_url())
class RefundProcessingView(FinancialContextMixin, PermissionRequiredMixin, StaffuserRequiredMixin, UpdateView):
template_name = 'core/process_refund.html'
form_class = RefundForm
permission_required = 'core.process_refunds'
model = Invoice
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'invoice': self.object,
'payments': self.get_payments(),
})
if getattr(self.object, 'registration', None):
context['registration'] = self.object.registration
return context
def form_valid(self, form):
# Avoid JSON serialization issues by passing the Invoice ID, not the object itself
clean_data = form.cleaned_data
clean_data['id'] = str(clean_data.get('id').id)
self.request.session[REFUND_VALIDATION_STR] = {
'form_data': clean_data,
'invoice': str(self.object.id),
}
return HttpResponseRedirect(reverse('refundConfirmation'))
def get_payments(self):
if not getattr(self, 'payments', None):
self.payments = self.object.get_payments()
return self.payments
#################################
# Email view function and form
class EmailConfirmationView(AdminSuccessURLMixin, PermissionRequiredMixin, TemplateView):
permission_required = 'core.send_email'
template_name = 'core/email_confirmation_page.html'
success_message = _('Email sent successfully.')
def get(self, request, *args, **kwargs):
self.form_data = request.session.get(EMAIL_VALIDATION_STR, {}).get('form_data', {})
if not self.form_data:
return HttpResponseRedirect(reverse('emailStudents'))
if request.GET.get('confirmed', '').lower() == 'true':
return self.send_email()
return super().get(request, *args, **kwargs)
def send_email(self):
subject = self.form_data.pop('subject')
message = self.form_data.pop('message')
html_message = self.form_data.pop('html_message', None)
richTextChoice = self.form_data.pop('richTextChoice')
cc_myself = self.form_data.pop('cc_myself')
testemail = self.form_data.pop('testemail')
month = self.form_data.pop('month')
series = self.form_data.pop('series')
customers = self.form_data.pop('customers', [])
email_kwargs = {
'from_name': self.form_data['from_name'],
'from_address': self.form_data['from_address'],
}
if richTextChoice == 'HTML':
email_kwargs.update({
'send_html': True,
'html_message': html_message,
})
items_to_send = []
if month is not None and month != '':
get_month = datetime.strptime(month, '%m-%Y').month
get_year = datetime.strptime(month, '%m-%Y').year
items_to_send += list(Series.objects.filter(month=get_month, year=get_year))
if series not in [None, '', [], ['']]:
items_to_send += list(Event.objects.filter(id__in=series))
if customers:
items_to_send.append(list(Customer.objects.filter(id__in=customers)))
# We always call one email per series so that the series-level tags
# can be passed. The entire list of customers is also a single item
# in the items_to_send list, because they can be processed all at once.
for s in items_to_send:
if isinstance(s, Event):
regs = EventRegistration.objects.filter(event=s, cancelled=False)
emails = []
for x in regs:
emails += x.get_default_recipients() or []
else:
# Customers are themselves the list.
regs = s
emails = [x.email for x in s]
email_kwargs['cc'] = []
if cc_myself:
email_kwargs['cc'].append(email_kwargs['from_address'])
email_kwargs['bcc'] = [email_kwargs['from_address'] or getConstant('email__defaultEmailFrom'), ]
if testemail:
message = str(_('Test email from %s to be sent to: ' % email_kwargs['from_address'])) + '\n\n'
message += ', '.join(email_kwargs['bcc']) + ', '.join(emails) + '\n\n'
message += str(_('Email body:')) + '\n\n' + message
email_kwargs['bcc'] = []
# If there are no context tags, then this can be sent as a single bulk email.
# Otherwise, send a separate email for each event registration
has_tags = re.search(r'\{\{.+\}\}', message)
if not has_tags:
email_kwargs['bcc'] += emails
# Avoid duplicate emails
email_kwargs['bcc'] = list(set(email_kwargs['bcc']))
# instantiate the recipient mixin directly
email_class = EmailRecipientMixin()
email_class.email_recipient(subject, message, **email_kwargs)
else:
for r in regs:
r.email_recipient(subject, message, **email_kwargs)
self.request.session.pop(EMAIL_VALIDATION_STR, None)
messages.success(self.request, self.success_message)
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(self.form_data)
month = self.form_data['month']
series = self.form_data['series']
customers = self.form_data.get('customers')
from_address = self.form_data['from_address']
cc_myself = self.form_data['cc_myself']
events_to_send = []
if month is not None and month != '':
get_month = datetime.strptime(month, '%m-%Y').month
get_year = datetime.strptime(month, '%m-%Y').year
events_to_send += Series.objects.filter(month=get_month, year=get_year)
if series not in [None, '', [], ['']]:
events_to_send += [Event.objects.get(id=x) for x in series]
# We always call one email per series so that the series-level tags
# can be passed.
regs = EventRegistration.objects.filter(event__in=events_to_send)
customerSet = Customer.objects.filter(id__in=customers) if customers else []
emails = [r.customer.email for r in regs] + [r.email for r in customerSet]
cc = []
if cc_myself:
cc.append(from_address)
bcc = [getConstant('email__defaultEmailFrom')]
context.update({
'events_to_send': events_to_send,
'customers_to_send': customerSet,
'emails': emails,
'cc': cc,
'bcc': bcc,
})
return context
class SendEmailView(PermissionRequiredMixin, UserFormKwargsMixin, FormView):
form_class = EmailContactForm
permission_required = 'core.send_email'
template_name = 'cms/forms/display_form_classbased_admin.html'
def dispatch(self, request, *args, **kwargs):
''' If a list of customers or groups was passed, then parse it '''
ids = request.GET.get('customers')
groups = request.GET.get('customergroup')
self.customers = None
if ids or groups:
# Initial filter applies to no one but allows appending by logical or
filters = Q(id__isnull=True)
if ids:
filters = filters | Q(id__in=[int(x) for x in ids.split(', ')])
if groups:
filters = filters | Q(groups__id__in=[int(x) for x in groups.split(', ')])
try:
self.customers = Customer.objects.filter(filters)
except ValueError:
return HttpResponseBadRequest(_('Invalid customer ids passed'))
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self, **kwargs):
'''
Get the list of recent months and recent series to pass to the form
'''
numMonths = 12
lastStart = (
Event.objects.annotate(Min('eventoccurrence__startTime'))
.order_by('-eventoccurrence__startTime__min')
.values_list('eventoccurrence__startTime__min', flat=True)
.first()
)
if lastStart:
month = lastStart.month
year = lastStart.year
else:
month = timezone.now().month
year = timezone.now().year
months = [('', _('None'))]
for i in range(0, numMonths):
newmonth = (month - i - 1) % 12 + 1
newyear = year
if month - i - 1 < 0:
newyear = year - 1
newdate = datetime(year=newyear, month=newmonth, day=1)
newdateStr = newdate.strftime("%m-%Y")
monthStr = newdate.strftime("%B, %Y")
months.append((newdateStr, monthStr))
cutoff = timezone.now() - timedelta(days=120)
allEvents = Event.objects.filter(startTime__gte=cutoff).order_by('-startTime')
recentSeries = [('', 'None')] + [(x.id, '%s %s: %s' % (month_name[x.month], x.year, x.name)) for x in allEvents]
kwargs = super().get_form_kwargs(**kwargs)
kwargs.update({
"months": months,
"recentseries": recentSeries,
"customers": self.customers,
})
return kwargs
def get_initial(self):
'''
If the user already submitted the form and decided to return from the
confirmation page, then re-populate the form
'''
initial = super().get_initial()
form_data = self.request.session.get(EMAIL_VALIDATION_STR, {}).get('form_data', {})
if form_data:
initial.update(form_data)
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_title': _('Email Students'),
'form_description': _('Use this form to contact current or recent students.'),
})
return context
def form_valid(self, form):
''' Pass form data to the confirmation view '''
form.cleaned_data.pop('template', None)
self.request.session[EMAIL_VALIDATION_STR] = {'form_data': form.cleaned_data}
return HttpResponseRedirect(reverse('emailConfirmation'))
############################################
# Customer and Instructor Stats Views
class AccountProfileView(LoginRequiredMixin, DetailView):
model = User
template_name = 'core/account_profile.html'
def get_object(self, queryset=None):
return self.request.user
def get_context_data(self, **kwargs):
context = {}
user = self.get_object()
context.update({
'primary_email': user.emailaddress_set.filter(primary=True).first(),
'verified_emails': user.emailaddress_set.filter(verified=True),
'unverified_emails': user.emailaddress_set.filter(verified=False),
})
if hasattr(user, 'customer'):
context.update({
'customer': user.customer,
'customer_verified': user.emailaddress_set.filter(email=user.customer.email, verified=True).exists(),
})
context['customer_eventregs'] = EventRegistration.objects.filter(customer=user.customer)
context['verified_eventregs'] = EventRegistration.objects.filter(
customer__email__in=[x.email for x in context['verified_emails']]
).exclude(
id__in=[x.id for x in context.get('customer_eventregs', [])]
)
context['submitted_eventregs'] = EventRegistration.objects.filter(
registration__invoice__submissionUser=self.request.user, registration__payAtDoor=False
).exclude(
id__in=[x.id for x in context.get('customer_eventregs', [])]
).exclude(
id__in=[x.id for x in context.get('verified_eventregs', [])]
)
if hasattr(user, 'staffmember'):
upcoming_events = Event.objects.filter(
endTime__gt=timezone.now(),
eventstaffmember__staffMember=user.staffmember).distinct().order_by('-startTime')
context.update({
'staffmember': user.staffmember,
'upcoming_events': upcoming_events,
})
# Get any extra context data passed by other apps. These data require unique keys, so when writing
# a handler for this signal, be sure to provide unique context keys.
if hasattr(user, 'customer'):
extra_customer_data = get_customer_data.send(
sender=AccountProfileView,
customer=user.customer,
)
for item in extra_customer_data:
if len(item) > 1 and isinstance(item[1], dict):
# Ensure that 'customer' is not overwritten and add everything else
item[1].pop('customer', None)
context.update(item[1])
return super().get_context_data(**context)
class OtherAccountProfileView(PermissionRequiredMixin, AccountProfileView):
permission_required = 'core.view_other_user_profiles'
def get_object(self, queryset=None):
if 'user_id' in self.kwargs:
return get_object_or_404(User.objects.filter(id=self.kwargs.get('user_id')))
else:
return self.request.user
class InstructorStatsView(StaffMemberObjectMixin, PermissionRequiredMixin, DetailView):
model = StaffMember
template_name = 'core/instructor_stats.html'
permission_required = 'core.view_own_instructor_stats'
def get_context_data(self, **kwargs):
instructor = self.object
context = {}
context.update({
'instructor': instructor,
'prior_series': Event.objects.filter(
startTime__lte=timezone.now(),
eventstaffmember__staffMember=instructor).order_by('-startTime'),
'upcoming_series': Event.objects.filter(
startTime__gt=timezone.now(),
eventstaffmember__staffMember=instructor).order_by('-startTime'),
})
if context['prior_series']:
context.update({'first_series': context['prior_series'].last(), })
context.update({
'teaching_since': month_name[context['first_series'].month] + ' ' + str(context['first_series'].year),
'student_count': sum([x.numRegistered for x in context['prior_series']]),
})
context.update({'series_count': len(context['prior_series']) + len(context['upcoming_series'])})
# Note: This get the detailview's context, not all the mixins. Supering itself led to an infinite loop.
return super(DetailView, self).get_context_data(**context)
class OtherInstructorStatsView(InstructorStatsView):
permission_required = 'core.view_other_instructor_stats'
def get_object(self, queryset=None):
if 'first_name' in self.kwargs and 'last_name' in self.kwargs:
first_name = re.sub('^_$','', self.kwargs['first_name'])
last_name = re.sub('^_$','', self.kwargs['last_name'])
return get_object_or_404(
StaffMember.objects.filter(
**{'firstName': unquote_plus(first_name).replace('_', ' '),
'lastName': unquote_plus(last_name).replace('_', ' ')})
)
else:
return None
#####################################
# Individual Class Series/Event Views
class IndividualClassReferralView(ReferralInfoMixin, RedirectView):
def get_redirect_url(self, *args, **kwargs):
if (
self.kwargs.get('session_slug', None) and
self.kwargs.get('year', None) and
self.kwargs.get('month', None)
):
return reverse('classViewSessionMonth', kwargs=kwargs)
elif (
self.kwargs.get('session_slug', None)
):
return reverse('classViewSession', kwargs=kwargs)
else:
return reverse('classView', kwargs=kwargs)
class IndividualPublicEventReferralView(ReferralInfoMixin, RedirectView):
def get_redirect_url(self, *args, **kwargs):
if (
self.kwargs.get('session_slug', None) and
self.kwargs.get('year', None) and
self.kwargs.get('month', None)
):
return reverse('eventViewSessionMonth', kwargs=kwargs)
elif (
self.kwargs.get('session_slug', None)
):
return reverse('eventViewSession', kwargs=kwargs)
else:
return reverse('eventView', kwargs=kwargs)
class IndividualEventView(ReferralInfoMixin, FinancialContextMixin, TemplateView):
model_class = Event
template_name = 'core/event_pages/individual_event.html'
def dispatch(self, request, *args, **kwargs):
# These are passed via the URL
year = self.kwargs.get('year')
month = self.kwargs.get('month')
session_slug = self.kwargs.get('session_slug')
slug = self.kwargs.get('slug', '')
if month:
try:
month_number = list(month_name).index(month or 0)
except ValueError:
raise Http404(_('Invalid month.'))
model_class = getattr(self, 'model_class', Event)
filters = ~Q(status=Event.RegStatus.hidden) \
& ~Q(status=Event.RegStatus.linkOnly)
if model_class == Series:
filters = filters & Q(classDescription__slug=slug)
elif model_class == PublicEvent:
filters = filters & Q(slug=slug)
if year and month:
filters = filters & Q(year=year or None) & Q(month=month_number or None)
if session_slug:
filters = filters & Q(session__slug=session_slug)
passedCase = Q(endTime__lt=timezone.now())
if getConstant('registration__displayLimitDays') or 0 > 0:
passedCase = passedCase | Q(
startTime__gte=timezone.now() + timedelta(
days=getConstant('registration__displayLimitDays')
)
)
self.event_set = model_class.objects.filter(
filters
).annotate(
registrationPassed=Case(
When(passedCase, then=True), default=False,
output_field=BooleanField()
)
)
if not self.event_set:
raise Http404(_('No events found.'))
return super().dispatch(request, *args, **kwargs)
def get_template_names(self):
templates = [x.template for x in self.event_set if getattr(x, 'template', None)]
if templates:
return [templates[0],]
else:
return super().get_template_names()
def get(self, request, *args, **kwargs):
# This will pass through to the context data by default
kwargs.update({'event_set': self.event_set})
model_lower = getattr(self, 'model_class', Event).__name__.lower()
app_name = getattr(self, 'app_name', 'core')
# For each Event in the set, add a button to the toolbar to edit the Event details
if (
hasattr(request, 'user') and
request.user.has_perm('%s.change_%s' % (app_name, model_lower))
):
for this_event in self.event_set:
this_title = _('Edit Event Details')
if len(self.event_set) > 1:
this_title += ' (#%s)' % this_event.id
change_link = reverse(
'admin:%s_%s_change' % (app_name, model_lower),
args=([this_event.id, ])
)
request.toolbar.add_button(this_title, change_link, side=RIGHT)
return super().get(request, *args, **kwargs)
class IndividualClassView(IndividualEventView):
model_class = Series
template_name = 'core/event_pages/individual_class.html'
class IndividualPublicEventView(IndividualEventView):
model_class = PublicEvent
template_name = 'core/event_pages/individual_event.html'
def get(self, request, *args, **kwargs):
# If an alternative link is given by one or more of these events, then redirect to that.
overrideLinks = [x.link for x in self.event_set if getattr(x, 'link', None)]
if overrideLinks:
return HttpResponseRedirect(overrideLinks[0])
return super().get(request, *args, **kwargs)
#####################################
# View for Repeating Events from admin
class RepeatEventsView(SuccessMessageMixin, AdminSuccessURLMixin, PermissionRequiredMixin, FormView):
'''
This view is for an admin action to repeat events.
'''
template_name = 'core/repeat_events.html'
form_class = RepeatEventForm
permission_required = 'core.add_event'
success_message = _('Repeated events created successfully.')
def dispatch(self, request, *args, **kwargs):
ids = request.GET.get('ids')
ct = getIntFromGet(request, 'ct')
try:
contentType = ContentType.objects.get(id=ct)
self.objectClass = contentType.model_class()
except (ValueError, ObjectDoesNotExist):
return HttpResponseBadRequest(_('Invalid content type passed.'))
# This view only deals with subclasses of Events (Public Events, Series, etc.)
if not isinstance(self.objectClass(), Event):
return HttpResponseBadRequest(_('Invalid content type passed.'))
try:
self.queryset = self.objectClass.objects.filter(id__in=[int(x) for x in ids.split(', ')])
except ValueError:
return HttpResponseBadRequest(_('Invalid ids passed'))
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'events': self.queryset,
})
return context
def form_valid(self, form):
''' For each object in the queryset, create the duplicated objects '''
startDate = form.cleaned_data.get('startDate')
repeatEvery = form.cleaned_data.get('repeatEvery')
periodicity = form.cleaned_data.get('periodicity')
quantity = form.cleaned_data.get('quantity')
endDate = form.cleaned_data.get('endDate')
# Create a list of start dates, based on the passed values of repeatEvery,
# periodicity, quantity and endDate. This list will be iterated through to
# create the new instances for each event.
if periodicity == 'D':
delta = {'days': repeatEvery}
elif periodicity == 'W':
delta = {'weeks': repeatEvery}
elif periodicity == 'M':
delta = {'months': repeatEvery}
repeat_list = []
this_date = startDate
if quantity:
for k in range(0, quantity):
repeat_list.append(this_date)
this_date = this_date + relativedelta(**delta)
elif endDate:
while (this_date <= endDate):
repeat_list.append(this_date)
this_date = this_date + relativedelta(**delta)
# Now, loop through the events in the queryset to create duplicates of them
for event in self.queryset:
# For each new occurrence, we determine the new startime by the distance from
# midnight of the first occurrence date, where the first occurrence date is
# replaced by the date given in repeat list
old_min_time = event.localStartTime.replace(hour=0, minute=0, second=0, microsecond=0)
old_occurrence_data = [
(x.startTime - old_min_time, x.endTime - old_min_time, x.cancelled)
for x in event.eventoccurrence_set.all()
]
old_role_data = [(x.role, x.capacity) for x in event.eventrole_set.all()]
for instance_date in repeat_list:
# Ensure that time zones are treated properly
combined_datetime = datetime.combine(instance_date, datetime.min.time())
new_datetime = ensure_timezone(combined_datetime, old_min_time.tzinfo)
# Removing the pk and ID allow new instances of the event to
# be created upon saving with automatically generated ids.
event.id = None
event.pk = None
event.save()
# Create new occurrences
for occurrence in old_occurrence_data:
EventOccurrence.objects.create(
event=event,
startTime=new_datetime + occurrence[0],
endTime=new_datetime + occurrence[1],
cancelled=occurrence[2],
)
# Create new event-specific role data
for role in old_role_data:
EventRole.objects.create(
event=event,
role=role[0],
capacity=role[1],
)
# Need to save twice to ensure that startTime etc. get
# updated properly.
event.save()
return super().form_valid(form)
############################################################
# View for instructors to report that they substitute taught
#
class SubstituteReportingView(AdminSuccessURLMixin, PermissionRequiredMixin, UserFormKwargsMixin,
SuccessMessageMixin, CreateView):
'''
This view is used to report substitute teaching.
'''
template_name = 'cms/forms/display_form_classbased_admin.html'
form_class = SubstituteReportingForm
permission_required = 'core.report_substitute_teaching'
success_message = _('Substitute teaching reported successfully.')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_title': _('Report Substitute Teaching'),
'form_description': _('Use this form to report substitute teaching.'),
})
return context
############################################################
# View for instructors to change their bio information
#
class StaffMemberBioChangeView(AdminSuccessURLMixin, StaffMemberObjectMixin, PermissionRequiredMixin,
SuccessMessageMixin, UpdateView):
'''
This view now permits changing the instructor's bio information.
'''
model = StaffMember
template_name = 'cms/forms/display_form_classbased_admin.html'
form_class = StaffMemberBioChangeForm
permission_required = 'core.update_instructor_bio'
success_message = _('Staff member information updated successfully.')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_title': _('Update Contact Information'),
'form_description': _('Use this form to update your contact information.'),
})
return context
############################################################
# View for Instructor/Staff directory
#
class StaffDirectoryView(PermissionRequiredMixin, ListView):
'''
This view shows a directory of instructors/staff
'''
template_name = 'core/staff_directory.html'
permission_required = 'core.view_staff_directory'
queryset = StaffMember.objects.exclude(instructor__status__in=[
Instructor.InstructorStatus.retired,
Instructor.InstructorStatus.retiredGuest,
Instructor.InstructorStatus.hidden,
])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
staff = context.get('staffmember_list', StaffMember.objects.none())
context.update({
'active_instructors_list': staff.filter(instructor__status='R'),
'assistant_instructors_list': staff.filter(instructor__status__in=['T', 'A']),
'guest_instructors_list': staff.filter(instructor__status='G'),
'other_staff_list': staff.filter(instructor__isnull=True),
})
return context
|
{
"content_hash": "6bddd906e11bb369a630e1fbf065f8fc",
"timestamp": "",
"source": "github",
"line_count": 1380,
"max_line_length": 120,
"avg_line_length": 39.82028985507247,
"alnum_prop": 0.5896600669675353,
"repo_name": "django-danceschool/django-danceschool",
"id": "7f14f2644d33185a673f38ebd9d6466f4d281368",
"size": "54952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "danceschool/core/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55309"
},
{
"name": "HTML",
"bytes": "334988"
},
{
"name": "JavaScript",
"bytes": "2008559"
},
{
"name": "Less",
"bytes": "21246"
},
{
"name": "Python",
"bytes": "1856445"
},
{
"name": "SCSS",
"bytes": "9564"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import post
admin.site.register(post)
|
{
"content_hash": "3a2620d005966c829bc2ad43d66132b1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 17.2,
"alnum_prop": 0.8023255813953488,
"repo_name": "Udayraj123/dashboard_IITG",
"id": "254909a9fd474e98688c31136dd8c0ea86be0e25",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Binder/discussions/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "356"
},
{
"name": "CSS",
"bytes": "2484053"
},
{
"name": "HTML",
"bytes": "5650437"
},
{
"name": "JavaScript",
"bytes": "8566070"
},
{
"name": "PHP",
"bytes": "535968"
},
{
"name": "Python",
"bytes": "205490"
}
],
"symlink_target": ""
}
|
"""
Contains the logic behind creating and configuring new plots
from a set of user-supplied arguments.
"""
# Standard library imports
import re
# Major library imports
from numpy import all, array, arange, asarray, reshape, shape, transpose
# Chaco imports
from chaco.api import (create_line_plot, create_scatter_plot,
ArrayDataSource, ImageData)
from chaco.tools.api import HighlightTool
# Local relative imports
from chaco_shell_error import ChacoShellError
# Normally I don't define an __all__, but this lets us distinguish
# the top level plot-producing functions from the various helper
# functions.
__all__ = ["do_plot", "do_imshow", "do_pcolor", "do_contour", "do_plotv",
"SizeMismatch", ]
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class SizeMismatch(ChacoShellError):
pass
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def is1D (a):
s = shape(a)
return ((len(s) == 1) or (s[0] == 1) or (s[1] == 1))
def is2D (a):
return (len(shape(a)) == 2)
def row ( a ):
return reshape( asarray( a ), [1,-1] )
def col ( a ):
return reshape( asarray( a ), [-1,1] )
#-----------------------------------------------------------------------------
# Plot commands for chaco-style plotv()
#-----------------------------------------------------------------------------
def do_plotv(session, *args, **kw):
""" Creates a list of plots from the data in ``*args`` and options in
``**kw``, according to the docstring on commands.plot().
"""
sort = kw.get("sort", "none")
sources_list = make_data_sources(session, sort, *args)
plot_type = kw.get("type", "line")
if plot_type == "scatter":
plots = [create_scatter_plot(sources) for sources in sources_list]
elif plot_type == "line":
plots = [create_line_plot(sources) for sources in sources_list]
else:
raise ChacoShellError, "Unknown plot type '%s'." % plot_type
for plot in plots:
plot.orientation = kw.get("orientation", "h")
return plots
def make_data_sources(session, index_sort="none", *args):
""" Given a list of arguments, returns a list of (index, value) datasources
to create plots from.
"""
# Make sure everything is a numpy array
data = []
for arg in args:
if isinstance(arg, list) or isinstance(arg, tuple):
data.append(array(arg))
else:
data.append(arg)
if len(data) == 0:
raise ChacoShellError, "Insufficient data for plot."
# 1D array(s)
if len(data[0].shape) == 1:
if len(data) == 1:
# Only a single array was provided
index_ds = ArrayDataSource(arange(len(data[0])), sort_order="ascending")
value_ds = ArrayDataSource(data[0], sort_order="none")
return [(index_ds, value_ds)]
else:
# multiple arrays were provided
index_ds = ArrayDataSource(data[0], sort_order=index_sort)
return [(index_ds, ArrayDataSource(v, sort_order="none")) for v in data[1:]]
# 2D arrays
elif len(data[0].shape) == 2:
sources = []
# Loop over all the 2D arrays
for ary in data:
if ary.shape[0] > ary.shape[1]:
index_ary = ary[:, 0]
value_arrays = ary[:, 1:]
else:
index_ary = ary[0]
value_arrays = transpose(ary[1:])
index_ds = ArrayDataSource(index_ary, sort_order=index_sort)
sources.extend([(index_ds, ArrayDataSource(v, sort_order="none")) for v in value_arrays])
return sources
# Not a two-dimensional array, error.
else:
raise ChacoShellError, "Unable to create plot data sources from array of" \
"shape " + str(data[1].shape) + "."
#-----------------------------------------------------------------------------
# Plot commands for matlab-compatible plot() function
#-----------------------------------------------------------------------------
# Regular expressions for parsing the format string
color_re = re.compile('[ymcrgbwk]')
color_trans = {
'y': 'yellow',
'm': 'magenta',
'c': 'cyan',
'r': 'red',
'g': 'green',
'b': 'blue',
'w': 'white',
'k': 'black'
}
# This one isn't quite right:
marker_re = re.compile('[ox+s^v]|(?:[^-])[.]')
marker_trans = {
'.': 'dot',
'o': 'circle',
'x': 'cross',
'+': 'plus',
's': 'square',
'^': 'triangle',
'v': 'down triangle'
}
line_re = re.compile('--|-\.|[-:]')
line_trans = {
'-': 'solid',
':': 'dot',
'-.': 'dot dash',
'--': 'dash'
}
def _process_format(format):
"""
Converts a format string into a (color, line, marker, marker_color) tuple.
"""
if format == '':
return ('black', 'solid', None, None)
color, line, marker, marker_color = 'black', None, None, None
m = color_re.findall(format)
if len(m) > 0:
color = marker_color = color_trans[m[0]]
if len(m) > 1:
marker_color = color_trans[m[1]]
m = marker_re.findall(format)
# The -1 takes care of 'r.', etc:
if len(m) > 0:
marker = marker_trans[m[0][-1]]
m = line_re.findall(format)
if len(m):
line = line_trans[m[0]]
return (color, line, marker, marker_color)
def _process_group(group, plot_data=None):
""" Returns a (x_1D, y_1D, format_str) tuple from an input tuple
of 1 to 3 elements: (x,y,format_str).
A PlotData object can be optionally provided to disambiguate the cases
when exactly two strings are passed in. The two strings could be the
names of the x and y datasources, or they could be the name of the y
datasource and a format string. By checking the second string against
the plot_data's list of datasources, the method can determine what it is meant
to be.
"""
# Interpret and split the 'group' tuple into x, y, and plotinfo
plotinfo = ''
if len(group) == 1:
y = group[0]
y_data = plot_data.get_data(y)
x = plot_data.set_data("", arange(len(y_data)), generate_name=True)
elif len(group) == 2:
# There are two possibilities here; a single y was provided along
# with a format string, or an x and y were provided. If PlotData
# was provided, use that to disambiguate; otherwise, assume that the
# second string is a format string.
if isinstance(group[1], basestring):
if plot_data and group[1] in plot_data.list_data():
x = group[0]
y = group[1]
else:
plotinfo = group[1]
y = group[0]
y_data = plot_data.get_data(y)
x = plot_data.set_data("", arange(len(y_data)), generate_name=True)
else:
x, y = group
elif len(group) == 3:
x, y, plotinfo = group
else:
raise ChacoShellError("Found too many elements in group while" \
" constructing plot.")
return x, y, plotinfo
def _check_sort_order(data):
diffs = data[1:] - data[:-1]
if all(diffs >= 0):
return "ascending"
elif all(diffs <= 0):
return "descending"
else:
return "none"
def do_plot(plotdata, active_plot, *data_and_formats, **kwtraits):
""" Takes a list of data (arrays or names) and format string arguments
and creates new plots on the active_plot. Returns a list of plot names
on the active plot.
"""
# The list of data and formats is broken up by format strings,
# so we break it up by arguments that are strings.
cur_group = []
groups = []
valid_names = plotdata.list_data()
for arg in data_and_formats:
if not isinstance(arg, basestring):
# an array was passed in
cur_group.append(plotdata.set_data("", arg, generate_name=True))
elif arg in valid_names:
# the name of an existing plotdata item was passed in
cur_group.append(arg)
else:
# String that is not in plotdata is interpreted as a format
# string, thereby terminating this group
cur_group.append(arg)
groups.append(cur_group)
cur_group = []
if len(cur_group) > 0:
groups.append(cur_group)
# Process the list of groups and create a list of plots;
# broadcast the keyword traits to all of them.
plots = []
for group in groups:
x, y, format_str = _process_group(group, plot_data = plotdata)
linecolor, line, marker, markercolor = _process_format(format_str)
plot_type = []
format = kwtraits.copy()
if line is not None:
plot_type.append("line")
format["line_style"] = line
format["color"] = linecolor
if marker is not None:
plot_type.append("scatter")
format["marker"] = marker
format["color"] = markercolor
# Check the data sort order, but only if it will create a new datasource
if x not in active_plot.datasources:
x_sort_order = _check_sort_order(plotdata.get_data(x))
plots.extend(active_plot.plot((x,y), type=",".join(plot_type), **format))
# Set the sort order
x_ds = active_plot.datasources[x]
if isinstance(x_ds, ArrayDataSource):
x_ds.sort_order = x_sort_order
# Check to see if the active_plot has a highlighter tool already; if not,
# then add it.
for tool in active_plot.tools:
if isinstance(tool, HighlightTool):
break
else:
active_plot.tools.append(HighlightTool(active_plot))
return plots
def do_imread(*data, **kwargs):
""" Returns image file as array. """
# Check to see if the data given is either a file path or a file object
if isinstance(data[0], basestring) or isinstance(data[0], file):
return ImageData.fromfile(data[0])
else:
raise ValueError("do_imread takes a string filename")
def do_imshow(plotdata, active_plot, *data, **kwargs):
""" Creates an image plot on the active plot, given either
a filename or data.
"""
if len(data) != 1:
raise ValueError("do_imshow takes one data source")
x = None
y = None
try:
z = _get_or_create_plot_data(data[0], plotdata)
except ValueError:
# z is the name of the file
# create plot data
image = do_imread(data[0], *data, **kwargs)
z = plotdata.set_data("", image, generate_name=True)
plot_list = [active_plot.img_plot(z, xbounds=x, ybounds=y, **kwargs)]
return plot_list
def do_pcolor(plotdata, colormap, active_plot, *data, **kwargs ):
""" Creates a pseudocolor image plot on the active plot, given a 2-D
scalar data and a colormap.
"""
# if we get just one data source, it is assumed to be the scalar field
if len(data) == 1:
x = None
y = None
z = _get_or_create_plot_data(data[0], plotdata)
# three data sources means we got x-y grid data of some sort, too
elif len(data) == 3:
x = _get_or_create_plot_data(data[0], plotdata)
y = _get_or_create_plot_data(data[1], plotdata)
z = _get_or_create_plot_data(data[2], plotdata)
else:
raise ValueError("do_pcolor takes one or three data sources")
plot_list = [active_plot.img_plot(z, xbounds=x, ybounds=y,
colormap=colormap, **kwargs)]
return plot_list
def do_contour(plotdata, colormap, active_plot, type, *data, **kwargs ):
""" Creates a contour plot on the active plot, given a 2-D
scalar data and a colormap.
"""
# if we get just one data source, it is assumed to be the scalar field
if len(data) == 1:
x = None
y = None
z = _get_or_create_plot_data(data[0], plotdata)
# three data sources means we got x-y grid data of some sort, too
elif len(data) == 3:
x = _get_or_create_plot_data(data[0], plotdata)
y = _get_or_create_plot_data(data[1], plotdata)
z = _get_or_create_plot_data(data[2], plotdata)
else:
raise ValueError("do_contour takes one or three data sources")
# we have to do slightly different calls here because of the different
# handling of colormaps
if type is 'poly':
plot_list = [active_plot.contour_plot(z, type, xbounds=x, ybounds=y,
poly_cmap=colormap,
**kwargs)]
else:
plot_list = [active_plot.contour_plot(z, type, xbounds=x, ybounds=y,
colors=colormap,
**kwargs)]
return plot_list
def _get_or_create_plot_data(data, plotdata):
"""Create a new name for `data` if necessary, or check it is a valid name.
"""
valid_names = plotdata.list_data()
if not isinstance(data, basestring):
name = plotdata.set_data("", data, generate_name=True)
else:
if data not in valid_names:
msg = '{} is not an existing name for plot data'
raise ValueError(msg.format(data))
name = data
return name
# EOF
|
{
"content_hash": "9a4f759079ba54a72076815934486260",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 101,
"avg_line_length": 32.39088729016787,
"alnum_prop": 0.5578588879840083,
"repo_name": "ContinuumIO/chaco",
"id": "64fd3f21ba491f50ee436032c8fa986015c89381",
"size": "13507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chaco/shell/plot_maker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57003"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "449791"
},
{
"name": "Python",
"bytes": "2301293"
}
],
"symlink_target": ""
}
|
import urllib2
import urllib
import json
######
#
# cd ~/sc/showcase-workshop-apis/rest-api/simple-example/
# ./rest-api-sample.py
#
#####
### CONFIGURE HERE
DEV_KEY = 'XXX'
### END CONFIGURATION
dst_url = 'https://app.showcaseworkshop.com/api/v1/data'
access_token_part = '?access_token=' + DEV_KEY
#
# Add some data
#
if 1 == 1: # change to 1 == 2 to disable
print
data = {
"data_name": "Form1",
"showcase_id": "22",
"user_email": "paul@showcaseworkshop.com",
"content": json.dumps({
"form field one": "This is a field value",
"multi line form field": "line 1\n line 2"
}),
"date_entered": "2013-01-28T13:01:01Z"
}
encoded_data = urllib.urlencode(data)
req = urllib2.Request(dst_url + access_token_part, encoded_data)
print '%s %s' % (req.get_method(), req.get_full_url())
response = urllib2.urlopen(req)
result = response.read()
if response.getcode() == 200:
#SUCCESS
obj_return = json.loads(result)
else:
#ERROR
raise Exception('Error while inserting data')
guid = ''
#
# List data
#
if 1 == 1: # change to 1 == 2 to disable
print
req = urllib2.Request(dst_url + access_token_part)
print '%s %s' % (req.get_method(), req.get_full_url())
response = urllib2.urlopen(req)
result = response.read()
if response.getcode() == 200:
#SUCCESS
obj_results = json.loads(result)
print obj_results
for obj in obj_results:
guid = obj['guid']
else:
#ERROR
raise Exception('Error while listing data')
#
# Get one item of data
#
if 1 == 1: # change to 1 == 2 to disable
print
req = urllib2.Request(dst_url + '/' + guid + access_token_part)
print '%s %s' % (req.get_method(), req.get_full_url())
response = urllib2.urlopen(req)
result = response.read()
guid = ''
if response.getcode() == 200:
#SUCCESS
obj_result = json.loads(result)
print obj_result
guid = obj_result['guid']
else:
#ERROR
raise Exception('Error while getting data')
#
# Deleting one item of data
#
if 1 == 1: # change to 1 == 2 to disable
print
opener = urllib2.build_opener(urllib2.HTTPHandler)
req = urllib2.Request(dst_url+'/'+guid+access_token_part)
req.get_method = lambda: 'DELETE'
print '%s %s' % (req.get_method(), req.get_full_url())
url = opener.open(req)
|
{
"content_hash": "b9d7895d19a0b36aaf8b2a958e9a2e16",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 68,
"avg_line_length": 24.02912621359223,
"alnum_prop": 0.5846464646464646,
"repo_name": "ShowcaseSoftwareLtd/showcase-workshop-apis",
"id": "9052c88ebf9b077b603fbadc87702e9bcf154ee1",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rest-api/simple-example/rest-api-sample.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1304"
},
{
"name": "HTML",
"bytes": "14795"
},
{
"name": "JavaScript",
"bytes": "140596"
},
{
"name": "Python",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
"""`main` is the top level module for your Flask application."""
__author__ = 'rohitsm'
__page__ = 'https://github.com/rohitsm/spsoldboys'
# Python
import urllib2
import json
import sys
import cgi
# Flask
from flask import Flask
from flask import request, redirect, url_for
from flask import render_template
# App Engine
from google.appengine.ext import ndb
import logging
# Application related files
import config
from db import Oldboy
app = Flask(__name__, static_url_path='/static')
# URL format: recaptcha_url? + secret=your_secret & response=response_string&remoteip=user_ip_address'
recaptcha_url = 'https://www.google.com/recaptcha/api/siteverify'
# ReCAPTCHA secret key
recaptcha_secret = config.conf['SHARED_KEY']
def verify_captcha(recaptcha_response):
res = recaptcha_url + \
"?secret=" + recaptcha_secret + \
"&response=" + recaptcha_response
# resp = True|False Type=bool
resp = json.load(urllib2.urlopen(res))["success"]
# print "resp[success] = %r" %resp
return resp
@app.route('/')
def index():
"""Return a friendly HTTP greeting."""
# To add entry to DB, uncomment below line. set_record() reads from csv input.
# num_of_records = Oldboy.set_record()
# print "No of records written = " + str(num_of_records)
# return "helloWorld!"
return render_template('index.html')
@app.route('/search', methods=['GET', 'POST'])
def authentication():
# Verify reCaptcha input and render page correctly if captcha verified
if request.method == 'POST':
if(verify_captcha(request.form['g-recaptcha-response'])):
return render_template('search.html')
return render_template('search.html') #Delete this line and uncomment 2 above
# For GET requests
return redirect(url_for('index'))
# Send data from DB to 'results' page
@app.route('/results', methods=['GET', 'POST'])
def search_request():
# Get search terms
record = []
# For table headers of HTML tables
headers = {}
if request.method == 'POST':
try:
firstName = cgi.escape(request.form['firstname'], True).lower().replace(' ', '')
lastName = cgi.escape(request.form['lastname'], True).lower().replace(' ', '')
year = cgi.escape(request.form['year'], True)
# print 'firstname = %s \nlastName = %s, \nyear =%s ' %(firstName, lastName, year)
if(not year):
year = None
if( (not firstName) or (firstName.isspace()) ):
firstName = None
if( (not lastName) or (lastName.isspace()) ):
lastName = None
# Retrieve query from the datastore.
# record = DB query results
# header = for rendering table headers
record = Oldboy.get_record(firstName, lastName, year)
# print "record = %s" %(record)
if (record is not None):
count = len(record)
headers = record[0]
# Records sorted by Last names
sorted_records = sorted(record, key=lambda k: k['Last Name'])
# print "Dict = ", sorted_records
return render_template('results.html', records = sorted_records, \
headers = headers, \
count = count)
return render_template('notfound.html')
except Exception as e:
print "Woah horsey! This shouldn't be happening!"
logging.error(sys.exc_info())
print e
# Redirect to "not_found" page
return render_template('notfound.html')
# For GET requests
return redirect(url_for('index'))
@app.route('/addrecord')
def addrecord():
""" Page contains Google form embedded for entering new record."""
return render_template('addrecord.html')
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def page_not_found(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
|
{
"content_hash": "a257d38d094d6e393ccc37a710c8db76",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 102,
"avg_line_length": 30.611510791366907,
"alnum_prop": 0.5945945945945946,
"repo_name": "rohitsm/spsoldboys",
"id": "59d576430d651f0f7b93ba1ca0a67df228911f26",
"size": "4255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4904"
},
{
"name": "HTML",
"bytes": "12147"
},
{
"name": "JavaScript",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "15099"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._disks_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_grant_access_request,
build_list_by_resource_group_request,
build_list_request,
build_revoke_access_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2018_06_01.aio.ComputeManagementClient`'s
:attr:`disks` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
async def _create_or_update_initial(
self, resource_group_name: str, disk_name: str, disk: Union[_models.Disk, IO], **kwargs: Any
) -> _models.Disk:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Disk]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(disk, (IO, bytes)):
_content = disk
else:
_json = self._serialize.body(disk, "Disk")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
disk_name=disk_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Disk", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("Disk", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
disk_name: str,
disk: _models.Disk,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Disk]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation. Required.
:type disk: ~azure.mgmt.compute.v2018_06_01.models.Disk
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
disk_name: str,
disk: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Disk]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation. Required.
:type disk: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self, resource_group_name: str, disk_name: str, disk: Union[_models.Disk, IO], **kwargs: Any
) -> AsyncLROPoller[_models.Disk]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation. Is either a model type
or a IO type. Required.
:type disk: ~azure.mgmt.compute.v2018_06_01.models.Disk or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Disk]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Disk", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
async def _update_initial(
self, resource_group_name: str, disk_name: str, disk: Union[_models.DiskUpdate, IO], **kwargs: Any
) -> _models.Disk:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Disk]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(disk, (IO, bytes)):
_content = disk
else:
_json = self._serialize.body(disk, "DiskUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
disk_name=disk_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Disk", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("Disk", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@overload
async def begin_update(
self,
resource_group_name: str,
disk_name: str,
disk: _models.DiskUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Disk]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation. Required.
:type disk: ~azure.mgmt.compute.v2018_06_01.models.DiskUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
disk_name: str,
disk: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Disk]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation. Required.
:type disk: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self, resource_group_name: str, disk_name: str, disk: Union[_models.DiskUpdate, IO], **kwargs: Any
) -> AsyncLROPoller[_models.Disk]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation. Is either a model
type or a IO type. Required.
:type disk: ~azure.mgmt.compute.v2018_06_01.models.DiskUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Disk]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial( # type: ignore
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Disk", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, disk_name: str, **kwargs: Any) -> _models.Disk:
"""Gets information about a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Disk or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_06_01.models.Disk
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.Disk]
request = build_get_request(
resource_group_name=resource_group_name,
disk_name=disk_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Disk", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, disk_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
disk_name=disk_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace_async
async def begin_delete(self, resource_group_name: str, disk_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
"""Deletes a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Disk"]:
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Disk or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DiskList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks"} # type: ignore
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Disk"]:
"""Lists all the disks under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Disk or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DiskList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks"} # type: ignore
async def _grant_access_initial(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: Union[_models.GrantAccessData, IO],
**kwargs: Any
) -> Optional[_models.AccessUri]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.AccessUri]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(grant_access_data, (IO, bytes)):
_content = grant_access_data
else:
_json = self._serialize.body(grant_access_data, "GrantAccessData")
request = build_grant_access_request(
resource_group_name=resource_group_name,
disk_name=disk_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._grant_access_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("AccessUri", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess"} # type: ignore
@overload
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: _models.GrantAccessData,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.AccessUri]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation. Required.
:type grant_access_data: ~azure.mgmt.compute.v2018_06_01.models.GrantAccessData
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.AccessUri]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.AccessUri]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation. Required.
:type grant_access_data: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.AccessUri]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: Union[_models.GrantAccessData, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.AccessUri]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation. Is either a model type or a IO type. Required.
:type grant_access_data: ~azure.mgmt.compute.v2018_06_01.models.GrantAccessData or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.AccessUri]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessUri]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._grant_access_initial( # type: ignore
resource_group_name=resource_group_name,
disk_name=disk_name,
grant_access_data=grant_access_data,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("AccessUri", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess"} # type: ignore
async def _revoke_access_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, disk_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_revoke_access_request(
resource_group_name=resource_group_name,
disk_name=disk_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._revoke_access_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess"} # type: ignore
@distributed_trace_async
async def begin_revoke_access(
self, resource_group_name: str, disk_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters. Required.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._revoke_access_initial( # type: ignore
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess"} # type: ignore
|
{
"content_hash": "15e1b66f4b8e3308b9e716d1a1f0e98e",
"timestamp": "",
"source": "github",
"line_count": 1145,
"max_line_length": 190,
"avg_line_length": 48.73187772925764,
"alnum_prop": 0.6367790960249471,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b7ab2e3e848b14d24c3f515bc41bf90da08dc05f",
"size": "56298",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_disks_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from twisted.internet import reactor
from autobahn.wamp1 import protocol as wamp
from autobahn.twisted.websocket import listenWS
from pubsubclub import (
ConsumerMixin, ProducerMixin, ConsumerServer, ProducerClient, consul,
)
class WampServerProtocol(wamp.WampServerProtocol):
def onSessionOpen(self):
print("Whoa")
self.registerForRpc(self, "http://example.com/pubsub#")
self.registerForPubSub('http://example.com/mytopic')
self.registerForPubSub('http://example.com/mytopic2')
self.registerForPubSub('http://example.com/mytopic3')
@wamp.exportRpc('publish')
def _publish(self, data):
try:
self.dispatch(
data['channel'],
data['content'],
exclude=[self],
)
except:
import traceback
traceback.print_exc()
return {}
class WampServerFactory(ConsumerMixin, ProducerMixin, wamp.WampServerFactory):
protocol = WampServerProtocol
if __name__ == '__main__':
# import logging
# logging.basicConfig(level=logging.INFO)
from twisted.python import log
import sys
log.startLogging(sys.stderr)
consumer = ConsumerServer('0.0.0.0', 19000)
WampServerFactory.consumer = consumer
producer = ProducerClient([])
WampServerFactory.producer = producer
server = WampServerFactory('ws://localhost:9900')
listenWS(server)
consumer.processor = server
discovery = consul.ConsulDiscovery(
'http://localhost:8500/', 'pubsubclub', producer,
)
discovery.start()
print('Starting...')
reactor.run()
|
{
"content_hash": "ae548a245ec8d9092e4885e6c81b0eb9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 27.728813559322035,
"alnum_prop": 0.6552567237163814,
"repo_name": "luhn/pubsubclub",
"id": "97355aa1367153d891cf233b418caa2d0403b0f4",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1425"
},
{
"name": "JavaScript",
"bytes": "111239"
},
{
"name": "Python",
"bytes": "40787"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015 Christian Uhsat <christian@uhsat.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import random
import string
import sys
import time
from tc import TinyCrypt
try:
import pytest
except ImportError:
sys.exit("Requires py.test (https://pytest.org)")
def setup_module(module):
"""
Sets unit test specific salt to avoid conflicts.
"""
TinyCrypt.SALT = b"TinyCrypt Unit Tests"
class TestTinyCrypt:
"""
TinyCrypt unit tests.
"""
def test_push_pull(self):
"""
Simple fuzzy push/pull test.
"""
key = "".join([random.choice(string.printable) for c in range(32)])
msg = "".join([random.choice(string.printable) for c in range(64)])
tinycrypt = TinyCrypt()
tinycrypt.push(key, msg)
time.sleep(10)
assert tinycrypt.pull(key) == msg
def main(*args):
"""
Starts unit testing and passes all command line arguments to py.test.
"""
return pytest.main(list(args))
if __name__ == "__main__":
sys.exit(main(*sys.argv))
|
{
"content_hash": "f954e50fe154a4c3b68e10241d038719",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 28.16216216216216,
"alnum_prop": 0.7111324376199616,
"repo_name": "cuhsat/tinycrypt",
"id": "a19c0ffa3d44fa52d3602caf9b73b99b7a35d6a8",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tc_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7678"
}
],
"symlink_target": ""
}
|
import time
script = u'''
// Time stamp: %s
// (This ensures the source text is *not* a byte-for-byte match with any
// previously-fetched version of this script.)
// This no-op fetch handler is necessary to bypass explicitly the no fetch
// handler optimization by which this service worker script can be skipped.
addEventListener('fetch', event => {
return;
});
addEventListener('install', event => {
event.waitUntil(self.skipWaiting());
});
addEventListener('activate', event => {
event.waitUntil(self.clients.claim());
});'''
def main(request, response):
return [(b'Content-Type', b'application/javascript')], script % time.time()
|
{
"content_hash": "63fb300b0845aa1bf7dbbc25a7cd46e6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 27.541666666666668,
"alnum_prop": 0.6959152798789713,
"repo_name": "chromium/chromium",
"id": "64914a9dfe5f13d7ae0f6b97ce8d18a5b46c30b4",
"size": "661",
"binary": false,
"copies": "23",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/service-workers/service-worker/resources/update-claim-worker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import division
if __name__ == '__main__':
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
import json
import os
import platform
import string
import subprocess
import sys
import threading
from PyQt4 import QtCore, QtGui
encode_list = [['\xe2\x80\x90', '-'],
['\xe2\x80\x9d', '"'],
['\xe2\x80\x9c', '"']]
SUFFIX = '.clt'
def default_dir():
systemType = platform.system()
if systemType in ['Windows', 'Microsoft']:
if len(os.environ['HOMEPATH']) == 0:
root = '\\'
else:
root = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
else:
root = os.getenv('HOME')
default_dir = os.path.join(root, '.vistrails', 'CLTools')
if not os.path.exists(default_dir):
os.mkdir(default_dir)
return default_dir
def quote_arg(arg):
arg = arg.replace('\\', '\\\\')
if '"' in arg or any(c in arg for c in string.whitespace):
return '"%s"' % arg.replace('"', '\\"')
else:
return arg
# From: https://gist.github.com/kirpit/1306188
class Command(object):
"""
Enables to run subprocess commands in a different thread with TIMEOUT option.
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
self.command = command
def run(self, timeout=5, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except Exception:
import traceback
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
print "calling with kwargs", target, kwargs
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
class QCLToolsWizard(QtGui.QWidget):
def __init__(self, parent, reload_scripts=None):
QtGui.QWidget.__init__(self, parent)
self.vbox = QtGui.QVBoxLayout()
self.vbox.setContentsMargins(5,5,5,5)
self.setLayout(self.vbox)
self.setTitle()
self.file = None
self.conf = None
self.reload_scripts = reload_scripts
self.toolBar = QtGui.QToolBar()
self.layout().addWidget(self.toolBar)
self.newFileAction = QtGui.QAction(
self.get_icon('document-new'), 'New', self)
self.newFileAction.setToolTip('Start on a new Wrapper')
self.connect(self.newFileAction, QtCore.SIGNAL('triggered()'),
self.newFile)
self.toolBar.addAction(self.newFileAction)
self.openFileAction = QtGui.QAction(
self.get_icon('document-open'), 'Open', self)
self.openFileAction.setToolTip('Open an existing wrapper')
self.connect(self.openFileAction, QtCore.SIGNAL('triggered()'),
self.openFile)
self.toolBar.addAction(self.openFileAction)
self.saveFileAction = QtGui.QAction(
self.get_icon('document-save'), 'Save', self)
self.saveFileAction.setToolTip('Save wrapper')
self.connect(self.saveFileAction, QtCore.SIGNAL('triggered()'),
self.save)
self.toolBar.addAction(self.saveFileAction)
self.saveFileAsAction = QtGui.QAction(
self.get_icon('document-save-as'), 'Save As', self)
self.saveFileAsAction.setToolTip('Save wrapper as a new file')
self.connect(self.saveFileAsAction, QtCore.SIGNAL('triggered()'),
self.saveAs)
self.toolBar.addAction(self.saveFileAsAction)
if self.reload_scripts:
self.reloadAction = QtGui.QAction(
self.get_icon('view-refresh'), 'Refresh', self)
self.reloadAction.setToolTip('Save and Reload CLTools Modules in VisTrails')
self.connect(self.reloadAction, QtCore.SIGNAL('triggered()'),
self.refresh)
self.toolBar.addAction(self.reloadAction)
self.toolBar.addSeparator()
self.addAction = QtGui.QAction(
self.get_icon('list-add'), 'Add', self)
self.addAction.setToolTip('Add a new argument')
self.connect(self.addAction, QtCore.SIGNAL('triggered()'),
self.addArgument)
self.toolBar.addAction(self.addAction)
self.removeAction = QtGui.QAction(
self.get_icon('list-remove'), 'Remove', self)
self.removeAction.setToolTip('Remove the selected argument')
self.connect(self.removeAction, QtCore.SIGNAL('triggered()'),
self.removeArgument)
self.toolBar.addAction(self.removeAction)
self.upAction = QtGui.QAction(
self.get_icon('go-up'), 'Move up', self)
self.upAction.setToolTip('Move argument up one position')
self.connect(self.upAction, QtCore.SIGNAL('triggered()'),
self.moveUp)
self.toolBar.addAction(self.upAction)
self.downAction = QtGui.QAction(
self.get_icon('go-down'), 'Move down', self)
self.downAction.setToolTip('Move argument down one position')
self.connect(self.downAction, QtCore.SIGNAL('triggered()'),
self.moveDown)
self.toolBar.addAction(self.downAction)
self.toolBar.addSeparator()
self.showStdin = QtGui.QAction('stdin', self)
self.showStdin.setToolTip('Check to use standard input as an input port')
self.showStdin.setCheckable(True)
self.toolBar.addAction(self.showStdin)
self.showStdout = QtGui.QAction("stdout", self)
self.showStdout.setToolTip('Check to use standard output as an output port')
self.showStdout.setCheckable(True)
self.toolBar.addAction(self.showStdout)
self.showStderr = QtGui.QAction("stderr", self)
self.showStderr.setToolTip('Check to use standard error as an output port')
self.showStderr.setCheckable(True)
self.toolBar.addAction(self.showStderr)
self.envPort = QtGui.QAction("env", self)
self.envPort.setToolTip('Check to add the "env" input port for specifying environment variables')
self.envPort.setCheckable(True)
self.toolBar.addAction(self.envPort)
self.toolBar.addSeparator()
self.stdAsFiles = QtGui.QAction('std file processing', self)
self.stdAsFiles.setToolTip('Check to make pipes communicate using files instead of strings\nOnly useful when processing large files')
self.stdAsFiles.setCheckable(True)
self.toolBar.addAction(self.stdAsFiles)
self.failWithCmd = QtGui.QAction('fail execution if return != 0', self)
self.failWithCmd.setToolTip('If selected, VisTrails will check the exitcode, and abort the execution if not 0')
self.failWithCmd.setCheckable(True)
self.failWithCmd.setChecked(True)
self.toolBar.addAction(self.failWithCmd)
self.toolBar.addSeparator()
self.previewPorts = QtGui.QAction('preview', self)
self.previewPorts.setToolTip('Check which ports will be available for this module')
self.connect(self.previewPorts, QtCore.SIGNAL('triggered()'),
self.preview_ports)
self.toolBar.addAction(self.previewPorts)
self.envOption = None
self.commandLayout = QtGui.QHBoxLayout()
self.commandLayout.setContentsMargins(5,5,5,5)
tooltip = 'The command to execute'
label = QtGui.QLabel("Command:")
label.setFixedWidth(80)
label.setToolTip(tooltip)
self.commandLayout.addWidget(label)
self.command = QtGui.QLineEdit()
self.command.setToolTip(tooltip)
self.commandLayout.addWidget(self.command)
tooltip = 'Sets directory to execute from. Leave blank to ignore.'
label = QtGui.QLabel("Directory:")
label.setToolTip(tooltip)
self.commandLayout.addWidget(label)
self.dir = QtGui.QLineEdit()
self.dir.setFixedWidth(140)
self.dir.setToolTip(tooltip)
self.commandLayout.addWidget(self.dir)
self.vbox.addLayout(self.commandLayout)
self.previewLayout = QtGui.QHBoxLayout()
self.previewLayout.setContentsMargins(5,5,5,5)
self.previewLayout.setAlignment(QtCore.Qt.AlignLeft)
tooltip = 'Shows what the command will look like when executed in the command line'
label = QtGui.QLabel("Preview:")
label.setToolTip(tooltip)
label.setFixedWidth(80)
self.previewLayout.addWidget(label)
self.preview = QtGui.QLabel()
self.preview.setToolTip(tooltip)
self.preview.setMaximumWidth(600)
self.previewLayout.addWidget(self.preview)
self.vbox.addLayout(self.previewLayout)
self.importLayout = QtGui.QHBoxLayout()
self.importLayout.setContentsMargins(5,5,5,5)
self.importLayout.setAlignment(QtCore.Qt.AlignLeft)
self.importLayout.addWidget(QtGui.QLabel("Man page:"))
self.viewManButton = QtGui.QPushButton("view")
self.viewManButton.setToolTip('View the man page for the current command')
self.connect(self.viewManButton, QtCore.SIGNAL('clicked()'),
self.viewManPage)
self.importLayout.addWidget(self.viewManButton)
self.importManButton = QtGui.QPushButton("import")
self.importManButton.setToolTip('Import arguments from the man page for the current command')
self.connect(self.importManButton, QtCore.SIGNAL('clicked()'),
self.generateFromManPage)
self.importLayout.addWidget(self.importManButton)
self.importLayout.addWidget(QtGui.QLabel("help page (-h):"))
self.viewHelpButton = QtGui.QPushButton("view")
self.viewHelpButton.setToolTip('View the help (-h) page for the current command')
self.connect(self.viewHelpButton, QtCore.SIGNAL('clicked()'),
self.viewHelpPage)
self.importLayout.addWidget(self.viewHelpButton)
self.importHelpButton = QtGui.QPushButton("import")
self.importHelpButton.setToolTip('Import arguments from the help (-h) page for the current command')
self.connect(self.importHelpButton, QtCore.SIGNAL('clicked()'),
self.generateFromHelpPage)
self.importLayout.addWidget(self.importHelpButton)
self.importLayout.addWidget(QtGui.QLabel("help page (--help):"))
self.viewHelpButton2 = QtGui.QPushButton("view")
self.viewHelpButton2.setToolTip('View the help (--help) page for the current command')
self.connect(self.viewHelpButton2, QtCore.SIGNAL('clicked()'),
self.viewHelpPage2)
self.importLayout.addWidget(self.viewHelpButton2)
self.importHelpButton2 = QtGui.QPushButton("import")
self.importHelpButton2.setToolTip('Import arguments from the help (--help) page for the current command')
self.connect(self.importHelpButton2, QtCore.SIGNAL('clicked()'),
self.generateFromHelpPage2)
self.importLayout.addWidget(self.importHelpButton2)
self.vbox.addLayout(self.importLayout)
self.stdinWidget = QArgWidget('stdin')
self.stdinGroup = QtGui.QGroupBox('Standard input')
self.stdinGroup.setLayout(QtGui.QHBoxLayout())
self.stdinGroup.layout().addWidget(self.stdinWidget)
self.layout().addWidget(self.stdinGroup)
self.stdinGroup.setVisible(False)
self.stdoutWidget = QArgWidget('stdout')
self.stdoutGroup = QtGui.QGroupBox('Standard output')
self.stdoutGroup.setLayout(QtGui.QHBoxLayout())
self.stdoutGroup.layout().addWidget(self.stdoutWidget)
self.layout().addWidget(self.stdoutGroup)
self.stdoutGroup.setVisible(False)
self.stderrWidget = QArgWidget('stderr')
self.stderrGroup = QtGui.QGroupBox('Standard error')
self.stderrGroup.setLayout(QtGui.QHBoxLayout())
self.stderrGroup.layout().addWidget(self.stderrWidget)
self.layout().addWidget(self.stderrGroup)
self.stderrGroup.setVisible(False)
self.connect(self.showStdin, QtCore.SIGNAL('toggled(bool)'),
self.stdinGroup.setVisible)
self.connect(self.showStdout, QtCore.SIGNAL('toggled(bool)'),
self.stdoutGroup.setVisible)
self.connect(self.showStderr, QtCore.SIGNAL('toggled(bool)'),
self.stderrGroup.setVisible)
self.argList = QtGui.QListWidget()
self.layout().addWidget(self.argList)
def get_icon(self, name):
return QtGui.QIcon(os.path.join(os.path.dirname(__file__),
"icons",
"%s.png" % name))
def setTitle(self, file=None):
self.parent().setWindowTitle("CLTools Wizard - " + (file if file else "untitled"))
def newFile(self):
self.conf = None
self.file = None
self.command.clear()
self.dir.clear()
self.showStdin.setChecked(False)
self.showStdout.setChecked(False)
self.showStderr.setChecked(False)
self.envPort.setChecked(False)
self.envOption = None
while self.argList.count():
item = self.argList.item(0)
itemWidget = self.argList.itemWidget(item)
itemWidget.setVisible(False)
itemWidget.setParent(None)
self.argList.removeItemWidget(item)
item.setHidden(True)
self.argList.takeItem(0)
self.argList.hide()
self.layout().takeAt(self.layout().indexOf(self.argList))
self.argList = QtGui.QListWidget()
self.layout().addWidget(self.argList)
self.stdAsFiles.setChecked(False)
self.failWithCmd.setChecked(True)
self.setTitle()
self.generate_preview()
def openFile(self):
fileName = QtGui.QFileDialog.getOpenFileName(self,
"Open Wrapper",
self.file if self.file else default_dir(),
"Wrappers (*%s)" % SUFFIX)
if not fileName:
return
try:
conf = json.load(open(fileName))
except ValueError as exc:
print "Error opening Wrapper '%s': %s" % (fileName, exc)
return
self.newFile()
self.file = fileName
self.setTitle(self.file)
self.command.setText(conf.get('command', ''))
self.dir.setText(conf.get('dir', ''))
if 'stdin' in conf:
self.stdinWidget.fromList(conf['stdin'])
self.stdinGroup.setVisible('stdin' in conf)
self.showStdin.setChecked('stdin' in conf)
if 'stdout' in conf:
self.stdoutWidget.fromList(conf['stdout'])
self.stdoutGroup.setVisible('stdout' in conf)
self.showStdout.setChecked('stdout' in conf)
if 'stderr' in conf:
self.stderrWidget.fromList(conf['stderr'])
self.stderrGroup.setVisible('stderr' in conf)
self.showStderr.setChecked('stderr' in conf)
for argConf in conf.get('args', []):
arg = QArgWidget()
arg.fromList(argConf)
item = QtGui.QListWidgetItem()
item.setSizeHint(arg.sizeHint())
self.argList.addItem(item)
self.argList.setItemWidget(item, arg)
self.envPort.setChecked('options' in conf and
'env_port' in conf['options'])
self.stdAsFiles.setChecked('options' in conf and
'std_using_files' in conf['options'])
self.failWithCmd.setChecked('options' in conf and
'fail_with_cmd' in conf['options'])
self.envOption = conf['options']['env'] \
if ('options' in conf and 'env' in conf['options']) else None
self.conf = conf
self.generate_preview()
def get_current_conf(self):
conf = {}
conf['command'] = self.command.text()
dir = self.dir.text()
if dir:
conf['dir'] = dir
if self.stdinGroup.isVisible():
conf['stdin'] = self.stdinWidget.toList()
if self.stdoutGroup.isVisible():
conf['stdout'] = self.stdoutWidget.toList()
if self.stderrGroup.isVisible():
conf['stderr'] = self.stderrWidget.toList()
args = []
for row in xrange(self.argList.count()):
arg = self.argList.itemWidget(self.argList.item(row))
args.append(arg.toList())
conf['args'] = args
options = {}
if self.stdAsFiles.isChecked():
options['std_using_files'] = ''
if self.failWithCmd.isChecked():
options['fail_with_cmd'] = ''
if self.envPort.isChecked():
options['env_port'] = ''
if self.envOption:
options['env'] = self.envOption
if options:
conf['options'] = options
return conf
def save(self):
if not self.file:
self.saveAs()
if not self.file:
return
self.conf = self.get_current_conf()
f = open(self.file, "w")
json.dump(self.conf, f, sort_keys=True, indent=4)
f.close()
self.generate_preview()
def saveAs(self):
fileName = QtGui.QFileDialog.getSaveFileName(self,
"Save Wrapper as",
self.file if self.file else default_dir(),
"Wrappers (*%s)" % SUFFIX)
if fileName:
self.file = fileName
if not self.file.endswith(SUFFIX):
self.file += SUFFIX
self.save()
self.setTitle(self.file)
def refresh(self):
self.save()
self.reload_scripts()
def generate_preview(self):
# generate preview from self.conf
c = self.get_current_conf()
if not c:
self.preview.setText('')
self.preview.setToolTip('')
return
text = ''
# show env as bash-style prefix
if 'options' in c:
o = c['options']
# env_port
if 'env_port' in o:
text += '<env_port>'
# env
if 'env' in o:
if text:
text += ' '
text += c['options']['env']
if text:
text += ' '
# command
text += c['command']
# args
if 'args' in c:
for type, name, klass, opts in c['args']:
type = type.lower()
klass = klass.lower()
text += ' '
if type == 'constant':
if 'flag' in opts:
text += opts['flag'] + ' '
text += quote_arg(name)
continue
if 'required' not in opts:
text += '['
if klass in 'list':
text += '{'
if 'flag' in opts:
text += opts['flag']
if klass != 'flag':
text += ' '
if 'prefix' in opts:
text += opts['prefix']
if type!='input' or klass != 'flag':
text += '<'
if klass == 'list':
text += opts['type'].lower() \
if ('type' in opts and opts['type']) else 'string'
elif type=='input' and klass == 'flag':
if 'flag' not in opts:
text += quote_arg(name)
elif type in ['output', 'inputoutput']:
text += 'file'
else:
text += klass
if type!='input' or klass != 'flag':
text += '>'
if klass == 'list':
text += '}'
if 'required' not in opts:
text += ']'
self.preview.setText(text)
self.preview.setToolTip(text)
def preview_ports(self):
# show dialog with the ports that this module will have
self.generate_preview()
conf = self.get_current_conf()
if not conf:
return
intext = []
outtext = []
if 'stdin' in conf:
name, type, options = conf['stdin']
optional = " (visible)" if "required" in options else ""
intext.append("%s: %s%s" % (name, type, optional))
if 'stdout' in conf:
name, type, options = conf['stdout']
optional = " (visible)" if "required" in options else ""
outtext.append("%s: %s%s" % (name, type, optional))
if 'stderr' in conf:
name, type, options = conf['stderr']
optional = " (visible)" if "required" in options else ""
outtext.append("%s: %s%s" % (name, type, optional))
if 'options' in conf and 'env_port' in conf['options']:
intext.append('env: String')
for type, name, klass, options in conf['args']:
optional = " (visible)" if "required" in options else ""
if 'input' == type.lower():
intext.append("%s: %s%s" % (name, klass, optional))
elif 'output' == type.lower():
outtext.append("%s: %s%s" % (name, klass, optional))
elif 'inputoutput' == type.lower():
intext.append("%s: %s%s" % (name, 'File', optional))
outtext.append("%s: %s%s" % (name, 'File', optional))
intext = ''.join(['Input %s. %s\n' % (i+1, t)
for i, t in zip(xrange(len(intext)), intext)])
outtext = ''.join(['Output %s. %s\n' % (i+1, t)
for i, t in zip(xrange(len(outtext)), outtext)])
self.helppageView = QManpageDialog("Module Ports for this Wrapper",
intext + "\n" + outtext, self)
self.helppageView.show()
def loadFromCommand(self, argv):
self.command.setText(argv[0])
pos = 0
for argName in argv[1:]:
arg = QArgWidget()
arg.guess(argName, pos)
pos += 1
item = QtGui.QListWidgetItem()
item.setSizeHint(arg.sizeHint())
self.argList.addItem(item)
self.argList.setItemWidget(item, arg)
def addArgument(self):
arg = QArgWidget()
item = QtGui.QListWidgetItem()
item.setSizeHint(arg.sizeHint())
self.argList.addItem(item)
self.argList.setItemWidget(item, arg)
def moveUp(self):
currentRow = self.argList.currentRow()
if currentRow < 1:
return
# moving widgets does not work so we just switch contents
w0 = self.argList.itemWidget(self.argList.item(currentRow-1))
w1 = self.argList.itemWidget(self.argList.item(currentRow))
arg0 = w0.toList()
arg1 = w1.toList()
w0.fromList(arg1)
w1.fromList(arg0)
self.argList.setCurrentRow(currentRow - 1)
def moveDown(self):
currentRow = self.argList.currentRow()
if currentRow<0 or currentRow>self.argList.count()-2:
return
# moving widgets does not work so we just switch contents
w1 = self.argList.itemWidget(self.argList.item(currentRow))
w2 = self.argList.itemWidget(self.argList.item(currentRow+1))
arg1 = w1.toList()
arg2 = w2.toList()
w1.fromList(arg2)
w2.fromList(arg1)
self.argList.setCurrentRow(currentRow + 1)
def removeArgument(self):
currentRow = self.argList.currentRow()
if currentRow<0:
return
self.argList.takeItem(currentRow)
def keyPressEvent(self, event):
if event.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
self.removeArgument()
else:
QtGui.QWidget.keyPressEvent(self, event)
def parse(self, text):
""" parse(self, text)
parses the description of a command and extracts possible arguments
works on both man pages and help (-h) pages
"""
lines = text.split('\n')
args = []
arg = None
for line in lines:
line = line.strip()
if len(line)>1 and line[0] == '-':
# make sure description is removed
flag = line.split(' ')[0].strip()
# use the last in the list of equal flags
#it is assumed to be most descriptive
flag = flag.split(',')[-1].strip()
# remove any special characters after flag
for i in ['=', '[', ' ']:
flag = flag.split(i)[0].strip()
# new arg (type, flag, description)
arg = ['Flag', flag, line]
if '=' in line: # assume string attribute with prefix
arg[0] = 'String'
args.append(arg)
elif arg:
if not line:
arg = None
else:
arg[2] += ' %s' % line.strip()
#print "args:"
#for arg in args:
# print arg
return args
def runProcess(self, args):
try:
command = Command(args)
status, text, stderr = command.run(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
if not (text and len(text)):
text = stderr
if not (text and len(text)) or (text and text.startswith('No ')):
return None
# fix weird formatting
for a, b in encode_list:
text = text.replace(a, b)
return text
except Exception:
return None
def generateFromManPage(self):
command = self.command.text()
if command == '':
return
text = self.runProcess(['-c', 'man %s | col -b' % command])
if not text:
QtGui.QMessageBox.warning(self, "Man page not found",
"For command '%s'" % command)
return
args = self.parse(text)
title = "Import arguments from man page for '%s'" % command
self.manpageImport = QManpageImport(title, args, self)
self.connect(self.manpageImport,
QtCore.SIGNAL("importArgs(PyQt_PyObject)"),
self.importArgs)
self.manpageImport.show()
def generateFromHelpPage(self):
command = self.command.text()
if command == '':
return
text = self.runProcess(['-c', command + ' -h'])
if not text:
QtGui.QMessageBox.warning(self, "Help page (-h) not found",
"For command '%s'" % command)
return
args = self.parse(text)
title = "Import arguments from help page (-h) for '%s'" % command
self.helppageImport = QManpageImport(title, args, self)
self.connect(self.helppageImport,
QtCore.SIGNAL("importArgs(PyQt_PyObject)"),
self.importArgs)
self.helppageImport.show()
def generateFromHelpPage2(self):
command = self.command.text()
if command == '':
return
text = self.runProcess(['-c', command + ' --help'])
if not text:
QtGui.QMessageBox.warning(self, "Help page (--help) not found",
"For command '%s'" % command)
return
args = self.parse(text)
title = "Import arguments from help page (--help) for '%s'" % command
self.helppageImport = QManpageImport(title, args, self)
self.connect(self.helppageImport,
QtCore.SIGNAL("importArgs(PyQt_PyObject)"),
self.importArgs)
self.helppageImport.show()
def viewManPage(self):
command = self.command.text()
if command == '':
return
text = self.runProcess(['-c', 'man %s | col -b' % command])
if not text:
QtGui.QMessageBox.warning(self, "Man page not found",
"For command '%s'" % command)
return
title = "man page for '%s'" % command
self.manpageView = QManpageDialog(title, text, self)
self.manpageView.show()
def viewHelpPage(self):
command = self.command.text()
if command == '':
return
text = self.runProcess(['-c', command + ' -h'])
if not text:
QtGui.QMessageBox.warning(self, "Help page (-h) not found",
"For command '%s'" % command)
return
title = "Help page for '%s'" % command
self.helppageView = QManpageDialog(title, text, self)
self.helppageView.show()
def viewHelpPage2(self):
command = self.command.text()
if command == '':
return
text = self.runProcess(['-c', command + ' --help'])
if not text:
QtGui.QMessageBox.warning(self, "Help page (--help) not found",
"For command '%s'" % command)
return
title = "Help page for '%s'" % command
self.helppageView = QManpageDialog(title, text, self)
self.helppageView.show()
def importArgs(self, args):
for arg in args:
item = QtGui.QListWidgetItem()
item.setSizeHint(arg.sizeHint())
self.argList.insertItem(0, item)
self.argList.setItemWidget(item, arg)
class QArgWidget(QtGui.QWidget):
""" Widget for configuring an argument """
KLASSES = {
'input': ['flag', 'file', 'path', 'directory',
'string', 'integer', 'float', 'list'],
'output': ['file', 'string'],
'inputoutput': ['file'],
'stdin': ['file', 'string'],
'stdout': ['file', 'string'],
'stderr': ['file', 'string'],
}
KLASSNAMES = {
'flag': 'Boolean flag',
'string': 'String',
'integer': 'Integer',
'float': 'Float',
'file': 'File',
'path': 'Path',
'directory': 'Directory',
'list': 'List',
}
TYPES = ['input', 'output', 'inputoutput', 'constant']
TYPENAMES = {
'input': 'Input Port',
'output': 'Output Port',
'inputoutput': 'InputOutput Port',
'constant': 'Constant',
}
def __init__(self, argtype='Input', name='untitled', klass='Flag', options={}, parent=None):
QtGui.QWidget.__init__(self, parent)
self.stdTypes = ['stdin', 'stdout', 'stderr']
self.stdLabels = ['Standard input', 'Standard output', 'Standard error']
self.stdDict = dict(zip(self.stdTypes, self.stdLabels))
self.argtype = argtype.lower()
self.name = name
self.klass = klass.lower()
self.options = options
layout = QtGui.QVBoxLayout()
self.setLayout(layout)
self.buildWidget()
def buildWidget(self):
layout = self.layout()
layout.setContentsMargins(2,2,2,2)
# remove any previous layout
layout1 = QtGui.QHBoxLayout()
layout1.setContentsMargins(2,2,2,2)
layout.addLayout(layout1)
if self.argtype not in self.stdTypes:
layout2 = QtGui.QHBoxLayout()
layout2.setContentsMargins(2,2,2,2)
layout.addLayout(layout2)
else:
layout2 = layout1
# type of argument
if self.argtype not in self.stdTypes:
self.typeList = QtGui.QComboBox()
self.typeDict = {}
for i, n in enumerate(self.TYPES):
self.typeList.addItem(self.TYPENAMES[n], n)
self.typeDict[n] = i
self.typeList.setCurrentIndex(self.typeDict.get(self.argtype, 0))
#label = QtGui.QLabel('Type:')
tt = "Select if argument will be an input port, output port, both, or a hidden constant. InputOutput's are always files."
#label.setToolTip(tt)
self.typeList.setToolTip(tt)
#layout1.addWidget(label)
layout1.addWidget(self.typeList)
else:
self.typeList = None
# type of port
self.klassList = QtGui.QComboBox()
klasses = self.KLASSES[self.argtype]
self.klassDict = {}
for i, n in enumerate(klasses):
self.klassList.addItem(self.KLASSNAMES[n], n)
self.klassDict[n] = i
self.klassList.setCurrentIndex(self.klassDict.get(self.klass, 0))
#label = QtGui.QLabel('Class:')
tt = 'Port Type. Can be String, Integer, Float, File/Directory/Path or Boolean Flag. List means an input list of one of the other types. Only File and String should be used for output ports.'
self.klassList.setToolTip(tt)
#label.setToolTip(tt)
#layout1.addWidget(label)
layout1.addWidget(self.klassList)
# name of port
self.nameLine = QtGui.QLineEdit(self.name)
label = QtGui.QLabel('Name:')
tt = 'Name of the port, or the value for constants'
label.setToolTip(tt)
self.nameLine.setToolTip(tt)
layout1.addWidget(label)
layout1.addWidget(self.nameLine)
# options are different for each widget
if self.argtype not in self.stdTypes:
# all args can have flag
self.flag = QtGui.QLineEdit(self.options.get('flag', ''))
label = QtGui.QLabel('Flag:')
tt = 'a short-style flag before your input. Example: "-f" -> "-f yourinput"'
label.setToolTip(tt)
self.flag.setToolTip(tt)
self.flag.setFixedWidth(100)
layout1.addWidget(label)
layout1.addWidget(self.flag)
# all args can have prefix
self.prefix = QtGui.QLineEdit(self.options.get('prefix', ''))
label = QtGui.QLabel('Prefix:')
tt = 'a long-style prefix to your input. Example: "--X=" -> "--X=yourinput"'
label.setToolTip(tt)
self.prefix.setToolTip(tt)
layout1.addWidget(label)
layout1.addWidget(self.prefix)
# all can be required
self.required = QtGui.QCheckBox()
self.required.setChecked('required' in self.options)
label = QtGui.QLabel('Visible:')
tt = 'Check to make port always visible in VisTrails'
label.setToolTip(tt)
self.required.setToolTip(tt)
layout2.addWidget(label)
layout2.addWidget(self.required)
# subtype
self.subList = ['String', 'Integer', 'Float', 'File', 'Directory', 'Path']
self.subDict = dict(zip(self.subList, xrange(len(self.subList))))
self.subDict.update(dict(zip([s.lower() for s in self.subList], xrange(len(self.subList)))))
self.subtype = QtGui.QComboBox()
self.subtype.addItems(self.subList)
self.subtype.setCurrentIndex(self.subDict.get(self.options.get('type', 'String'), 0))
self.listLabel = QtGui.QLabel('List type:')
self.subtype.setVisible(False)
tt = 'Choose type of values in List'
self.subtype.setToolTip(tt)
self.listLabel.setToolTip(tt)
layout2.addWidget(self.listLabel)
layout2.addWidget(self.subtype)
self.listLabel.setVisible(False)
self.subtype.setVisible(False)
# input files and inputoutput's can set file suffix
self.suffix = QtGui.QLineEdit(self.options.get('suffix', ''))
self.suffixLabel = QtGui.QLabel('File suffix:')
tt = 'Sets the specified file ending on the created file, like for example: ".txt"'
self.suffixLabel.setToolTip(tt)
self.suffix.setToolTip(tt)
self.suffix.setFixedWidth(50)
layout2.addWidget(self.suffixLabel)
layout2.addWidget(self.suffix)
self.typeChanged()
self.klassChanged()
# description
self.desc = QtGui.QLineEdit(self.options.get('desc', ''))
label = QtGui.QLabel('Description:')
tt = 'Add a helpful description of the port'
label.setToolTip(tt)
self.desc.setToolTip(tt)
layout2.addWidget(label)
layout2.addWidget(self.desc)
if self.argtype not in self.stdTypes:
self.connect(self.klassList, QtCore.SIGNAL('currentIndexChanged(int)'),
self.klassChanged)
self.connect(self.typeList, QtCore.SIGNAL('currentIndexChanged(int)'),
self.typeChanged)
def getValues(self):
""" get the values from the widgets and store them """
self.klass = self.klassList.itemData(self.klassList.currentIndex())
self.name = self.nameLine.text()
self.options = {}
if self.argtype not in self.stdTypes:
flag = self.flag.text()
if flag:
self.options['flag'] = flag
prefix = self.prefix.text()
if prefix:
self.options['prefix'] = prefix
desc = self.desc.text()
if desc:
self.options['desc'] = desc
if self.required.isChecked():
self.options['required'] = ''
if self.klass == 'list':
subtype = self.subtype.currentText()
if subtype:
self.options['type'] = subtype
type = self.argtype.lower()
suffix = self.suffix.text()
if (type == 'output' or type == 'inputoutput') and suffix:
self.options['suffix'] = suffix
def setValues(self):
if self.argtype not in self.stdTypes:
self.typeList.setCurrentIndex(self.typeDict.get(self.argtype, 0))
self.nameLine.setText(self.name)
self.klassList.setCurrentIndex(self.klassDict[self.klass])
if self.argtype not in self.stdTypes:
self.flag.setText(self.options.get('flag', ''))
self.prefix.setText(self.options.get('prefix', ''))
self.subtype.setCurrentIndex(self.subDict.get(self.options.get('type', 'String')))
self.subtype.setCurrentIndex(self.subDict.get(self.options.get('type', 'String'), 0))
self.required.setChecked('required' in self.options)
self.desc.setText(self.options.get('desc', ''))
type = self.argtype.lower()
if type == 'output' or type == 'inputoutput':
self.suffix.setText(self.options.get('suffix', ''))
self.typeChanged()
self.klassChanged()
def toList(self):
self.getValues()
if self.argtype not in self.stdTypes:
return [self.argtype, self.name, self.klass, dict(self.options)]
else:
return [self.name, self.klass, dict(self.options)]
def fromList(self, arg):
if self.argtype not in self.stdTypes:
self.argtype, self.name, klass, self.options = arg
else:
self.name, klass, self.options = arg
self.klass = klass.lower()
self.setValues()
def klassChanged(self, index=None):
if self.argtype in self.stdTypes:
return
klass = self.klassList.itemData(self.klassList.currentIndex())
type = self.typeList.itemData(self.typeList.currentIndex())
self.listLabel.setVisible(klass == "list" and type == 'input')
self.subtype.setVisible(klass == "list" and type == 'input')
def typeChanged(self, index=None):
if self.argtype in self.stdTypes:
return
type = self.typeList.itemData(self.typeList.currentIndex())
if index is not None and type == self.argtype:
return
self.argtype = type
if type in ('constant', 'inputoutput'):
self.klassList.hide()
else:
self.klassList.show()
self.klassList.clear()
klasses = self.KLASSES[self.argtype]
self.klassDict = {}
for i, n in enumerate(klasses):
self.klassList.addItem(self.KLASSNAMES[n], n)
self.klassDict[n] = i
self.klassList.setCurrentIndex(self.klassDict.get(self.klass, 0))
self.suffixLabel.setVisible(type == 'output' or type == 'inputoutput')
self.suffix.setVisible(type == 'output' or type == 'inputoutput')
def guess(self, name, count=0):
""" add argument by guessing what the arg might be """
if '.' in name or '/' in name or '\\' in name: # guess path
if os.path.isfile(name):
guessed, type_ = 'file', 'File'
elif os.path.isdir(name):
guessed, type_ = 'directory', 'Directory'
else:
guessed, type_ = 'path', 'Path'
self.fromList(['Input', '%s%d' % (guessed, count), type_,
{'desc':'"%s" guessed to be an Input %s' % (name, guessed)}])
elif name.startswith('-'): # guess flag
self.fromList(['Input', 'flag%s' % name, 'Flag',
{'desc':'"%s" guessed to be a flag' % name,
'flag':name}])
else: # guess string
self.fromList(['Input', 'input%s' % count, 'String',
{'desc':'"%s" guessed to be an input string' % name}])
class QManpageDialog(QtGui.QDialog):
def __init__(self, title, text, parent=None):
QtGui.QDialog.__init__(self, parent)
self.setWindowTitle(title)
text = "<pre>%s</pre>" % text
self.textEdit = QtGui.QTextEdit(text)
self.textEdit.setReadOnly(True)
self.setLayout(QtGui.QHBoxLayout())
self.layout().addWidget(self.textEdit)
self.resize(800,600)
class QManpageImport(QtGui.QDialog):
def __init__(self, title, args, parent=None):
QtGui.QDialog.__init__(self, parent)
self.setWindowTitle(title)
self.argLayout = QtGui.QVBoxLayout()
for arg in args:
if arg[0] == 'Flag':
w = QArgWidget(name=arg[1], options={'flag':arg[1],
'desc':arg[2]})
else:
#if arg[0] == 'String':
w = QArgWidget(klass='String', name=arg[1],
options={'prefix':arg[1] + '=',
'desc':arg[2]})
widgetLayout = QtGui.QHBoxLayout()
widgetLayout.addWidget(QtGui.QCheckBox())
widgetLayout.addWidget(w)
self.argLayout.addLayout(widgetLayout)
scroll = QtGui.QScrollArea()
w = QtGui.QWidget()
w.setLayout(self.argLayout)
scroll.setWidget(w)
scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
scroll.setWidgetResizable(True)
self.setLayout(QtGui.QVBoxLayout())
self.layout().addWidget(scroll)
layout2 = QtGui.QHBoxLayout()
self.layout().addLayout(layout2)
self.closeButton = QtGui.QPushButton('Close')
self.closeButton.setToolTip('Close this window')
self.connect(self.closeButton, QtCore.SIGNAL('clicked()'),
self.close)
layout2.addWidget(self.closeButton)
self.selectAllButton = QtGui.QPushButton('Select All')
self.selectAllButton.setToolTip('Select All arguments')
self.connect(self.selectAllButton, QtCore.SIGNAL('clicked()'),
self.selectAll)
layout2.addWidget(self.selectAllButton)
self.selectNoneButton = QtGui.QPushButton('Select None')
self.selectNoneButton.setToolTip('Unselect All arguments')
self.connect(self.selectNoneButton, QtCore.SIGNAL('clicked()'),
self.selectNone)
layout2.addWidget(self.selectNoneButton)
self.addSelectedButton = QtGui.QPushButton('Import Selected')
self.addSelectedButton.setToolTip('Import all selected arguments')
self.connect(self.addSelectedButton, QtCore.SIGNAL('clicked()'),
self.addSelected)
layout2.addWidget(self.addSelectedButton)
self.resize(800,600)
def selectAll(self):
for i in xrange(self.argLayout.count()):
w = self.argLayout.layout().itemAt(i)
w.layout().itemAt(0).widget().setChecked(True)
def selectNone(self):
for i in xrange(self.argLayout.count()):
w = self.argLayout.layout().itemAt(i)
w.layout().itemAt(0).widget().setChecked(False)
def addSelected(self):
# collect selected arguments and send through signal
args = []
remove_list = []
for i in xrange(self.argLayout.count()):
w = self.argLayout.layout().itemAt(i)
if w.layout().itemAt(0).widget().isChecked():
remove_list.append(w)
args.append(w.layout().itemAt(1).widget())
for w in remove_list:
w.layout().itemAt(0).widget().setChecked(False)
w.layout().itemAt(0).widget().hide()
w.layout().itemAt(1).widget().hide()
self.argLayout.removeItem(w)
self.emit(QtCore.SIGNAL('importArgs(PyQt_PyObject)'), args)
class QCLToolsWizardWindow(QtGui.QMainWindow):
def __init__(self, parent=None, reload_scripts=None):
QtGui.QMainWindow.__init__(self, parent)
self.wizard = QCLToolsWizard(self, reload_scripts)
self.setCentralWidget(self.wizard)
self.setWindowTitle("CLTools Wizard")
self.resize(1000,600)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = QCLToolsWizardWindow()
if len(sys.argv)>2 and sys.argv[1] == '-c':
# read command from command line
window.wizard.loadFromCommand(sys.argv[2:])
window.show()
app.exec_()
|
{
"content_hash": "fef3897792bbf052aa469c542e3f9b16",
"timestamp": "",
"source": "github",
"line_count": 1152,
"max_line_length": 199,
"avg_line_length": 40.85503472222222,
"alnum_prop": 0.5674280250717093,
"repo_name": "hjanime/VisTrails",
"id": "ad17e14b19552878b30b72f3be42c050faadd650",
"size": "48979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/packages/CLTools/wizard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
"""Provides the Session class and related utilities."""
import weakref
from itertools import chain
from sqlalchemy import util, sql, engine, log, exc as sa_exc
from sqlalchemy.sql import util as sql_util, expression
from sqlalchemy.orm import (
SessionExtension, attributes, exc, query, unitofwork, util as mapperutil, state
)
from sqlalchemy.orm.util import object_mapper as _object_mapper
from sqlalchemy.orm.util import class_mapper as _class_mapper
from sqlalchemy.orm.util import (
_class_to_mapper, _state_mapper,
)
from sqlalchemy.orm.mapper import Mapper, _none_set
from sqlalchemy.orm.unitofwork import UOWTransaction
from sqlalchemy.orm import identity
from sqlalchemy import event
from sqlalchemy.orm.events import SessionEvents
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension']
def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
expire_on_commit=True, **kwargs):
"""Generate a custom-configured :class:`~sqlalchemy.orm.session.Session` class.
The returned object is a subclass of ``Session``, which, when instantiated
with no arguments, uses the keyword arguments configured here as its
constructor arguments.
It is intended that the `sessionmaker()` function be called within the
global scope of an application, and the returned class be made available
to the rest of the application as the single class used to instantiate
sessions.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a special classmethod ``configure()``, which
allows additional configurational options to take place after the custom
``Session`` class has been generated. This is useful particularly for
defining the specific ``Engine`` (or engines) to which new instances of
``Session`` should be bound::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite:///foo.db'))
sess = Session()
For options, see the constructor options for :class:`.Session`.
"""
kwargs['bind'] = bind
kwargs['autoflush'] = autoflush
kwargs['autocommit'] = autocommit
kwargs['expire_on_commit'] = expire_on_commit
if class_ is None:
class_ = Session
class Sess(object):
def __init__(self, **local_kwargs):
for k in kwargs:
local_kwargs.setdefault(k, kwargs[k])
super(Sess, self).__init__(**local_kwargs)
@classmethod
def configure(self, **new_kwargs):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
kwargs.update(new_kwargs)
return type("Session", (Sess, class_), {})
class SessionTransaction(object):
"""A Session-level transaction.
This corresponds to one or more :class:`~sqlalchemy.engine.Transaction`
instances behind the scenes, with one ``Transaction`` per ``Engine`` in
use.
Direct usage of ``SessionTransaction`` is not necessary as of SQLAlchemy
0.4; use the ``begin()`` and ``commit()`` methods on ``Session`` itself.
The ``SessionTransaction`` object is **not** thread-safe.
.. index::
single: thread safety; SessionTransaction
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._active = True
self._prepared = False
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress")
if self.session._enable_transaction_accounting:
self._take_snapshot()
@property
def is_active(self):
return self.session is not None and self._active
def _assert_is_active(self):
self._assert_is_open()
if not self._active:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception
)
else:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"by a nested rollback() call. To begin a new "
"transaction, issue Session.rollback() first."
)
def _assert_is_open(self, error_msg="The transaction is closed"):
if self.session is None:
raise sa_exc.ResourceClosedError(error_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, **kwargs):
self._assert_is_active()
engine = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(engine)
def _begin(self, nested=False):
self._assert_is_active()
return SessionTransaction(
self.session, self, nested=nested)
def _iterate_parents(self, upto=None):
if self._parent is upto:
return (self,)
else:
if self._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list" % (
upto))
return (self,) + self._parent._iterate_parents(upto)
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
def _restore_snapshot(self):
assert self._is_transaction_boundary
for s in set(self._new).union(self.session._new):
self.session._expunge_state(s)
if s.key:
del s.key
for s in set(self._deleted).union(self.session._deleted):
if s.deleted:
#assert s in self._deleted
del s.deleted
self.session._update_impl(s)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
s.expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s.expire(s.dict, self.session.identity_map._modified)
def _connection_for_bind(self, bind):
self._assert_is_active()
if bind in self._connections:
return self._connections[bind][0]
if self._parent:
conn = self._parent._connection_for_bind(bind)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine")
else:
conn = bind.contextual_connect()
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
self._connections[conn] = self._connections[conn.engine] = \
(conn, transaction, conn is not bind)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"Only root two phase transactions of can be prepared")
self._prepare_impl()
def _prepare_impl(self):
self._assert_is_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
self.session.flush()
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
self.rollback()
raise
self._deactivate()
self._prepared = True
def commit(self):
self._assert_is_open()
if not self._prepared:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_is_open()
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.close()
if self.is_active or self._prepared:
for transaction in self._iterate_parents():
if transaction._parent is None or transaction.nested:
transaction._rollback_impl()
transaction._deactivate()
break
else:
transaction._deactivate()
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
return self._parent
def _rollback_impl(self):
for t in set(self._connections.values()):
t[1].rollback()
if self.session._enable_transaction_accounting:
self._restore_snapshot()
self.session.dispatch.after_rollback(self.session)
def _deactivate(self):
self._active = False
def close(self):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in \
set(self._connections.values()):
if autoclose:
connection.close()
else:
transaction.close()
if not self.session.autocommit:
self.session.begin()
self._deactivate()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._assert_is_open("Cannot end transaction context. The transaction "
"was closed from within the context")
if self.session.transaction is None:
return
if type is None:
try:
self.commit()
except:
self.rollback()
raise
else:
self.rollback()
class Session(object):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :ref:`session_toplevel`.
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False, twophase=False,
weak_identity_map=True, binds=None, extension=None,
query_cls=query.Query):
"""Construct a new Session.
See also the :func:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit: Defaults to ``False``. When ``True``, the ``Session``
does not keep a persistent transaction running, and will acquire
connections from the engine on an as-needed basis, returning them
immediately after their use. Flushes will begin and commit (or possibly
rollback) their own transaction if no transaction is present. When using
this mode, the `session.begin()` method may be used to begin a
transaction explicitly.
Leaving it on its default value of ``False`` means that the ``Session``
will acquire a connection and begin a transaction the first time it is
used, which it will maintain persistently until ``rollback()``,
``commit()``, or ``close()`` is called. When the transaction is released
by any of these methods, the ``Session`` is ready for the next usage,
which will again acquire and maintain a new connection/transaction.
:param autoflush: When ``True``, all query operations will issue a
``flush()`` call to this ``Session`` before proceeding. This is a
convenience feature so that ``flush()`` need not be called repeatedly
in order for database queries to retrieve results. It's typical that
``autoflush`` is used in conjunction with ``autocommit=False``. In this
scenario, explicit calls to ``flush()`` are rarely needed; you usually
only need to call ``commit()`` (which flushes) to finalize changes.
:param bind: An optional ``Engine`` or ``Connection`` to which this
``Session`` should be bound. When specified, all SQL operations
performed by this session will execute via this connectable.
:param binds: An optional dictionary which contains more granular "bind"
information than the ``bind`` parameter provides. This dictionary can
map individual ``Table`` instances as well as ``Mapper`` instances to
individual ``Engine`` or ``Connection`` objects. Operations which
proceed relative to a particular ``Mapper`` will consult this
dictionary for the direct ``Mapper`` instance as well as the mapper's
``mapped_table`` attribute in order to locate an connectable to use.
The full resolution is described in the ``get_bind()`` method of
``Session``. Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
somemapper: create_engine('postgresql://engine2'),
some_table: create_engine('postgresql://engine3'),
})
Also see the :meth:`.Session.bind_mapper` and :meth:`.Session.bind_table` methods.
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the returned
class. This is the only argument that is local to the
``sessionmaker()`` function, and is not sent directly to the
constructor for ``Session``.
:param _enable_transaction_accounting: Defaults to ``True``. A
legacy-only flag which when ``False`` disables *all* 0.5-style object
accounting on transaction boundaries, including auto-expiry of
instances on rollback and commit, maintenance of the "new" and
"deleted" lists upon rollback, and autoflush of pending changes upon
begin(), all of which are interdependent.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each ``commit()``, so that all
attribute/object access subsequent to a completed transaction will load
from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and flush
events, as well as a post-rollback event. **Deprecated.**
Please see :class:`.SessionEvents`.
:param query_cls: Class which should be used to create new Query objects,
as returned by the ``query()`` method. Defaults to
:class:`~sqlalchemy.orm.query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a ``commit()``,
after ``flush()`` has been issued for all attached databases, the
``prepare()`` method on each database's ``TwoPhaseTransaction`` will
be called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed. **Deprecated** - this option
is obsolete.
"""
if weak_identity_map:
self._identity_cls = identity.WeakInstanceDict
else:
util.warn_deprecated("weak_identity_map=False is deprecated. "
"This feature is not needed.")
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self.transaction = None
self.hash_key = id(self)
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for mapperortable, bind in binds.iteritems():
if isinstance(mapperortable, (type, Mapper)):
self.bind_mapper(mapperortable, bind)
else:
self.bind_table(mapperortable, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
dispatch = event.dispatcher(SessionEvents)
connection_callable = None
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this Session.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin`
can create a subtransaction if a transaction is already in progress.
For documentation on subtransactions, please see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use subtransactions=True "
"to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
begin() is called multiple times.
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
InvalidRequestError.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :func:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
For a session configured with autocommit=False, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
InvalidRequestError.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an InvalidRequestError is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current transaction
is returned, or if no transaction is in progress, a new one is begun
and the :class:`.Connection` returned.
Alternatively, if this :class:`.Session` is configured with ``autocommit=True``,
an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect`
on the underlying :class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be resolved through
any of the optional keyword arguments. This ultimately makes usage of the
:meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`Engine.connect`, indicating
the :class:`.Connection` should be considered "single use", automatically
closing when the first result set is closed. This flag only has
an effect if this :class:`.Session` is configured with ``autocommit=True``
and does not already have a transaction in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
"""Execute a clause within the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
:meth:`~.Session.execute` accepts any executable clause construct, such
as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`, and additionally accepts
plain strings that represent SQL statements. If a plain string is
passed, it is first converted to a
:func:`~.sql.expression.text` construct, which here means
that bind parameters should be specified using the format ``:param``.
The statement is executed within the current transactional context of
this :class:`.Session`, using the same behavior as that of
the :meth:`.Session.connection` method to determine the active
:class:`.Connection`. The ``close_with_result`` flag is
set to ``True`` so that an ``autocommit=True`` :class:`.Session`
with no active transaction will produce a result that auto-closes
the underlying :class:`.Connection`.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`, etc.) or string SQL statement to be executed. The clause
will also be used to locate a bind, if this :class:`.Session`
is not bound to a single engine already, and the ``mapper``
and ``bind`` arguments are not passed.
:param params:
Optional dictionary of bind names mapped to values.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
clause = expression._literal_as_text(clause)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
@classmethod
def close_all(cls):
"""Close *all* sessions in memory."""
for sess in _sessions.values():
sess.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state.detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
# TODO: need much more test coverage for bind_mapper() and similar !
# TODO: + crystalize + document resolution order vis. bind_mapper/bind_table
def bind_mapper(self, mapper, bind):
"""Bind operations for a mapper to a Connectable.
mapper
A mapper instance or mapped class
bind
Any Connectable: a ``Engine`` or ``Connection``.
All subsequent operations involving this mapper will use the given
`bind`.
"""
if isinstance(mapper, type):
mapper = _class_mapper(mapper)
self.__binds[mapper.base_mapper] = bind
for t in mapper._all_tables:
self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A ``Table`` instance
bind
Any Connectable: a ``Engine`` or ``Connection``.
All subsequent operations involving this ``Table`` will use the
given `bind`.
"""
self.__binds[table] = bind
def get_bind(self, mapper, clause=None):
"""Return an engine corresponding to the given arguments.
All arguments are optional.
mapper
Optional, a ``Mapper`` or mapped class
clause
Optional, A ClauseElement (i.e. select(), text(), etc.)
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding.")
c_mapper = mapper is not None and _class_to_mapper(mapper) or None
# manually bound?
if self.__binds:
if c_mapper:
if c_mapper.base_mapper in self.__binds:
return self.__binds[c_mapper.base_mapper]
elif c_mapper.mapped_table in self.__binds:
return self.__binds[c_mapper.mapped_table]
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if c_mapper and c_mapper.mapped_table.bind:
return c_mapper.mapped_table.bind
context = []
if mapper is not None:
context.append('mapper %s' % c_mapper)
if clause is not None:
context.append('SQL expression')
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session" % (
', '.join(context)))
def query(self, *entities, **kwargs):
"""Return a new ``Query`` object corresponding to this ``Session``."""
return self._query_cls(entities, self, **kwargs)
def _autoflush(self):
if self.autoflush and not self._flushing:
self.flush()
def _finalize_loaded(self, states):
for state, dict_ in states.items():
state.commit_all(dict_, self.identity_map)
def refresh(self, instance, attribute_names=None, lockmode=None):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statement were emitted in the ongoing transaction, or if autocommit
mode is turned on.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
if self.query(_object_mapper(instance))._load_on_ident(
state.key, refresh_state=state,
lockmode=lockmode,
only_load_props=attribute_names) is None:
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" %
mapperutil.instance_str(instance))
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attributes on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
"""
for state in self.identity_map.all_states():
state.expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state.expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(state.manager.mapper.cascade_iterator(
'refresh-expire', state))
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state.expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state.detach()
@util.deprecated("0.7", "The non-weak-referencing identity map "
"feature is no longer needed.")
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" %
mapperutil.state_str(state))
cascaded = list(state.manager.mapper.cascade_iterator(
'expunge', state))
self._expunge_state(state)
for o, m, st_, dct_ in cascaded:
self._expunge_state(st_)
def _expunge_state(self, state):
if state in self._new:
self._new.pop(state)
state.detach()
elif self.identity_map.contains_state(state):
self.identity_map.discard(state)
self._deleted.pop(state, None)
state.detach()
elif self.transaction:
self.transaction._deleted.pop(state, None)
def _register_newly_persistent(self, state):
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if _none_set.issubset(instance_key[1]) and \
not mapper.allow_partial_pks or \
_none_set.issuperset(instance_key[1]):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and that "
"the mapped Column object is configured to expect these "
"generated values. Ensure also that this flush() is "
"not occurring at an inappropriate time, such as within "
"a load() event." % mapperutil.state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.discard(state)
state.key = instance_key
self.identity_map.replace(state)
state.commit_all(state.dict, self.identity_map)
# remove from new last, might be the last strong ref
if state in self._new:
if self._enable_transaction_accounting and self.transaction:
self.transaction._new[state] = True
self._new.pop(state)
def _remove_newly_deleted(self, state):
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
self.identity_map.discard(state)
self._deleted.pop(state, None)
state.deleted = True
def add(self, instance):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
for instance in instances:
self.add(instance)
def _save_or_update_state(self, state):
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
'save-update',
state,
halt_on=self._contains_state):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
mapperutil.state_str(state))
if state in self._deleted:
return
# ensure object is attached to allow the
# cascade operation to load deferred attributes
# and collections
self._attach(state)
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(state.manager.mapper.cascade_iterator(
'delete', state))
self._deleted[state] = state.obj()
self.identity_map.add(state)
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_)
def merge(self, instance, load=True, **kw):
"""Copy the state an instance onto the persistent instance with the
same identifier.
If there is no persistent instance currently associated with the
session, it will be loaded. Return the persistent instance. If the
given instance is unsaved, save a copy of and return it as a newly
persistent instance. The given instance does not become associated
with the session.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
"""
if 'dont_load' in kw:
load = not kw['dont_load']
util.warn_deprecated('dont_load=True has been renamed to '
'load=False.')
_recursive = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
_object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive=_recursive)
finally:
self.autoflush = autoflush
def _merge(self, state, state_dict, load=True, _recursive=None):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False.")
key = mapper._identity_key_from_state(state)
if key in self.identity_map:
merged = self.identity_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False.")
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif not _none_set.issubset(key[1]) or \
(mapper.allow_partial_pks and
not _none_set.issuperset(key[1])):
merged = self.query(mapper.class_).get(key[1])
else:
merged = None
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
if existing_version is not attributes.PASSIVE_NO_RESULT and \
merged_version is not attributes.PASSIVE_NO_RESULT and \
existing_version != merged_version:
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
mapperutil.state_str(merged_state),
merged_version
))
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
for prop in mapper.iterate_properties:
prop.merge(self, state, state_dict,
merged_state, merged_dict,
load, _recursive)
if not load:
# remove any history
merged_state.commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
@classmethod
def identity_key(cls, *args, **kwargs):
return mapperutil.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the ``Session`` to which an object belongs."""
return object_session(instance)
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session" %
mapperutil.state_str(state))
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - it can't be registered "
"as pending" % mapperutil.state_str(state))
self._attach(state)
if state not in self._new:
self._new[state] = state.obj()
state.insert_order = len(self._new)
def _update_impl(self, state):
if (self.identity_map.contains_state(state) and
state not in self._deleted):
return
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
mapperutil.state_str(state))
if state.deleted:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. Use the make_transient() "
"function to send this object back to the transient state." %
mapperutil.state_str(state)
)
self._attach(state)
self._deleted.pop(state, None)
self.identity_map.add(state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def _delete_impl(self, state):
if state in self._deleted:
return
if state.key is None:
return
self._attach(state)
self._deleted[state] = state.obj()
self.identity_map.add(state)
def _attach(self, state):
if state.key and \
state.key in self.identity_map and \
not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError("Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (mapperutil.state_str(state), state.key))
if state.session_id and state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (mapperutil.state_str(state),
state.session_id, self.hash_key))
if state.session_id != self.hash_key:
state.session_id = self.hash_key
if self.dispatch.after_attach:
self.dispatch.after_attach(self, state.obj())
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this Session."""
return iter(list(self._new.values()) + self.identity_map.values())
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver..
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
operations int the flush.
objects
Optional; a list or tuple collection. Restricts the flush operation
to only these objects, rather than all pending changes.
Deprecated - this flag prevents the session from properly maintaining
accounting among inter-object relations and can cause invalid results.
"""
if objects:
util.warn_deprecated(
"The 'objects' argument to session.flush() is deprecated; "
"Please do not add objects to the session which should not "
"yet be persisted.")
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush(self, objects=None):
if (not self.identity_map.check_modified() and
not self._deleted and not self._new):
return
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(o)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = _state_mapper(state)._is_orphan(state) and state.has_identity
flush_context.register_object(state, isdelete=is_orphan)
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
flush_context.register_object(state, isdelete=True)
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True)
try:
flush_context.execute()
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
# useful assertions:
#if not objects:
# assert not self.identity_map._modified
#else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
transaction.rollback(_capture_exception=True)
raise
def is_modified(self, instance, include_collections=True,
passive=attributes.PASSIVE_OFF):
"""Return ``True`` if instance has modified attributes.
This method retrieves a history instance for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value.
``include_collections`` indicates if multivalued collections should be
included in the operation. Setting this to False is a way to detect
only local-column based properties (i.e. scalar columns or many-to-one
foreign keys) that would result in an UPDATE for this instance upon
flush.
The ``passive`` flag indicates if unloaded attributes and collections
should not be loaded in the course of performing this test.
Allowed values include :attr:`.PASSIVE_OFF`, :attr:`.PASSIVE_NO_INITIALIZE`.
A few caveats to this method apply:
* Instances present in the 'dirty' collection may result in a value
of ``False`` when tested with this method. This because while
the object may have received attribute set events, there may be
no net changes on its state.
* Scalar attributes may not have recorded the "previously" set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally only if the attribute
container has the "active_history" flag set to ``True``. This flag
is set typically for primary key attributes and scalar references
that are not a simple many-to-one.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
dict_ = state.dict
if passive is True:
passive = attributes.PASSIVE_NO_INITIALIZE
elif passive is False:
passive = attributes.PASSIVE_OFF
for attr in state.manager.attributes:
if \
(
not include_collections and
hasattr(attr.impl, 'get_collection')
) or not hasattr(attr.impl, 'get_history'):
continue
(added, unchanged, deleted) = \
attr.impl.get_history(state, dict_, passive=passive)
if added or deleted:
return True
return False
@property
def is_active(self):
"""True if this Session has an active transaction."""
return self.transaction and self.transaction.is_active
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the is_modified() method.
"""
return util.IdentitySet(
[state.obj()
for state in self._dirty_states
if state not in self._deleted])
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(self._deleted.values())
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(self._new.values())
_sessions = weakref.WeakValueDictionary()
def make_transient(instance):
"""Make the given instance 'transient'.
This will remove its association with any
session and additionally will remove its "identity key",
such that it's as though the object were newly constructed,
except retaining its values. It also resets the
"deleted" flag on the state if this object
had been explicitly deleted by its session.
Attributes which were "expired" or deferred at the
instance level are reverted to undefined, and
will not trigger any loads.
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_state(state)
# remove expired state and
# deferred callables
state.callables.clear()
if state.key:
del state.key
if state.deleted:
del state.deleted
def object_session(instance):
"""Return the ``Session`` to which instance belongs.
If the instance is not a mapped instance, an error is raised.
"""
try:
return _state_session(attributes.instance_state(instance))
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
def _state_session(state):
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
|
{
"content_hash": "d4619814e77bda39aba500697a3e06da",
"timestamp": "",
"source": "github",
"line_count": 1726,
"max_line_length": 100,
"avg_line_length": 38.137891077636155,
"alnum_prop": 0.6010391030899644,
"repo_name": "eunchong/build",
"id": "fe38f01be8314aa4bc3a394228d4f43777771799",
"size": "66059",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/sqlalchemy_0_7_1/sqlalchemy/orm/session.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
}
|
__doc__="""
Distributes the selected nodes horizontally.
"""
import GlyphsApp
Font = Glyphs.font
selectedLayer = Font.selectedLayers[0]
try:
try:
# until v2.1:
selection = selectedLayer.selection()
except:
# since v2.2:
selection = selectedLayer.selection
selectionXList = [ n.x for n in selection ]
leftMostX, rightMostX = min( selectionXList ), max( selectionXList )
diffX = abs(leftMostX-rightMostX)
Font.disableUpdateInterface()
increment = diffX / float( len(selection) - 1 )
sortedSelection = sorted( selection, key=lambda n: n.x)
for thisNodeIndex in range( len(selection) - 1 ):
sortedSelection[thisNodeIndex].x = leftMostX + ( thisNodeIndex * increment )
Font.enableUpdateInterface()
except Exception, e:
if selection == ():
print "Cannot distribute nodes: nothing selected in frontmost layer."
else:
print "Error. Cannot distribute nodes:", selection
print e
|
{
"content_hash": "ee0d1a6b7e656f85a7bb6d9aa19f7d2b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 25.305555555555557,
"alnum_prop": 0.7233809001097695,
"repo_name": "schriftgestalt/Mekka-Scripts",
"id": "56247ed79acd3001e4011bc8bef03ec36b27e57e",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Paths/Distribute Nodes x.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "526540"
}
],
"symlink_target": ""
}
|
import os
import sys
import shodan
import tools
import module_locater
import logging
import datetime
import getopt
import shodan_modules
import optparse
import whois_modules
import time
import dns.resolver
import dns_modules
SHODAN_API_KEY = "tENIC6XGeNrYJHt0xzGYN5NT7RaZAvq6"
scriptdir = module_locater.module_path()
myDate = datetime.datetime.now().strftime("%y-%m-%d")
myTime = datetime.datetime.now().strftime("%H:%M")
myDateTime = datetime.datetime.now().strftime("%y-%m-%d %H:%M")
###Logging Setup
##logger = logging.getLogger('Spoor')
##logdate = datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
##logfile = scriptdir + '\\Logs\\Spoor-' + logdate + '.log'
##hdlr = logging.FileHandler(logfile)
##formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
##hdlr.setFormatter(formatter)
##logger.addHandler(hdlr)
##logger.setLevel(logging.INFO)
header = """
***************************************
* *
* Spoor Network Enumeration Tool *
* *
***************************************
\n"""
#Main Menu function
def main_menu():
os.system('cls')
print(header)
print()
print ("Main Menu:")
print (" 1. SHODAN")
print (" 2. WHOIS")
print (" 3. NSLOOKUP/DNS Queries")
print (" 4. Exit Spoor")
function = int(input("Please choose a function: "))
if function == 1:
SHODAN_MENU()
return True
elif function == 2:
WHOIS_MENU()
return True
elif function == 3:
DNS_MENU()
return True
elif function == 4:
print ("Thanks for using Spoor. Happy hunting.")
sys.exit()
else:
print ("Unknown option.")
return False
#TODO: Find a way to use these False and True returns
def WHOIS_MENU():
os.system('cls')
print (header)
print()
print("WHOIS Menu:")
print(" 1. Search by site")
print(" 2. Return to Main Menu")
function = int(input("Please enter a WHOIS option: "))
if function == 1:
host = raw_input('Please enter the site address to search: ')
host_check = tools.query_yes_no("You entered \'%s\'. Is this correct? " % host)
if host_check is True:
whois_modules.WHOIS(host)
return False
else:
host = raw_input("Sorry about that. Please enter the correct site address to search on: ")
whois_modules.WHOIS(host)
return False
if function == 2:
main_menu
return False
def DNS_MENU():
os.system('cls')
print(header)
print()
print("DNS Menu:")
print(" 1. Search by host and record type")
print(" 2. Return to Main Menu")
function = int(input("Please enter a DNS option:"))
if function == 1:
host = raw_input("Please enter a host name to search for: ")
#TODO: Do I need a new function for each record type?
print("Please choose a DNS record type from the list below: ")
print(" 1. NS - Basic Name Server Lookup")
print(" 2. MX - Mail record lookup")
print(" 3. A - Basic reverse lookup, providing an IPv4 address")
print(" 4. AAAA - Basic reverse lookup, providing an IPv6 address")
print(" 5. TXT - Search for a text-only record.")
rec_choice = int(input("Please enter the record type to search for: "))
if rec_choice == 1:
recordtype = 'NS'
if rec_choice == 2:
recordtype = 'MX'
if rec_choice == 3:
recordtype = 'A'
if rec_choice == 4:
recordtype = 'AAAA'
if rec_choice == 5:
recordtype = 'TXT'
print("Searching for \'%s\' with a record type of \'%s\'..." % (host, recordtype))
dns_modules.DNS(host, recordtype)
raw_input("Press <ENTER> to continue...")
DNS_MENU()
return False
else:
main_menu()
return False
def SHODAN_MENU():
os.system('cls')
print(header)
print()
print("SHODAN Menu:")
print(" 1. Search by host")
print(" 2. Search by keyword")
print(" 3. Return to Main Menu")
function = int(input("Please enter a SHODAN search mode: "))
if function == 1:
keyword = raw_input('Please enter the host IP to search: ')
kw_correct = tools.query_yes_no("You entered %s. Is this corect? " % keyword)
if kw_correct is True:
shodan_modules.SHODAN_HOSTNAME(keyword)
return False
else:
keyword = raw_input('Sorry about that. Please enter the correct host IP to search: ')
shodan_modules.SHODAN_HOSTNAME(keyword)
return False
elif function == 2:
keyword = raw_input("Please enter a search term: ")
kw_correct = tools.query_yes_no("You entered %s. Is this corect? " % keyword)
if kw_correct is True:
shodan_modules.SHODAN_KEYWORD(keyword)
return False
else:
keyword = raw_input('Sorry about that. Please enter the correct host IP to search: ')
shodan_modules.SHODAN_KEYWORD(keyword)
return False
elif function == 3:
main_menu()
return False
else:
print("Unknown option")
return False
def main():
main_menu()
## if shodan_modules.key1:
## print shodan_modules.key1
## else:
## pass
## if shodan_modules.IP1:
## print shodan_modules.IP1
## else:
## pass
if __name__ == '__main__':
main()
|
{
"content_hash": "83b270bc6ac5fc51cb6bab12e67c7126",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 102,
"avg_line_length": 31.255555555555556,
"alnum_prop": 0.5657660860291504,
"repo_name": "dmartinez7500/Spoor",
"id": "7599c77d0ec5e6ce9f074c881ee3e9676e0aa4f1",
"size": "5882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Spoor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16545"
}
],
"symlink_target": ""
}
|
class StreamObject:
def __init__(self, data=None): self.data = data; self.accs = False
def is_accs(self): return self.accs
def get(self, accs=True): self.accs = accs; return self.data
def show(self, accs=True): print(self.get(accs))
class Stream:
def __init__(self): self.stream = []
def get_last(self): return self.stream[-1]
def get_unaccs(self):
for s in self.stream:
if not s.is_accs(): yield s
def show_last(self): self.get_last().show()
def show_unaccs(self):
for s in list(self.get_unaccs()): s.show()
def push(self, data): self.stream.append(StreamObject(data))
def get_input(self, prompt=None): self.push(raw_input(prompt))
|
{
"content_hash": "03fbbfc2c68dca9e4ef595c3b1658459",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 41.705882352941174,
"alnum_prop": 0.6332863187588152,
"repo_name": "chuckwoodjohn/py_console_a1.0",
"id": "91fe7832c478aa3b906b6a00ee7b2e4d230d987d",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "console_a10/stream.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2922"
}
],
"symlink_target": ""
}
|
import base64
import binascii
import requests
from jcsclient import exception
from jcsclient import utils
from jcsclient import requestify
def describe_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=False)
# Right now filters functionality is broken, it works only
# for cases like --filters "Name=abc,Values=def"
parser.add_argument('--filters', nargs='+', required=False)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def start_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def stop_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def reboot_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def terminate_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def describe_instance_types(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-type-ids', nargs='+', required=False)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def run_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-type-id', required=True)
parser.add_argument('--image-id', required=True)
parser.add_argument('--subnet-id', required=False)
parser.add_argument('--security-group-ids', nargs='+', required=False)
parser.add_argument('--key-name', required=False)
parser.add_argument('--instance-count', type=int, required=False)
parser.add_argument('--private-ip-address', required=False)
parser.add_argument('--block-device-mappings', nargs='+', required=False)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def decrypt_instance_password(password, private_key_file, passphrase):
key = utils.import_ssh_key(private_key_file, passphrase)
encrypted_data = base64.b64decode(base64.b64decode(password))
ciphertext = int(binascii.hexlify(encrypted_data), 16)
plaintext = key.decrypt(ciphertext)
decrypted_data = utils.long_to_bytes(plaintext)
unpadded_data = utils.pkcs1_unpad(decrypted_data)
return unpadded_data
def get_password_data(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-id', required=True)
processed, remaining = parser.parse_known_args(args)
utils.populate_params_from_cli_args(params, processed)
response = requestify.make_request(url, verb, headers, params)
parser = utils.get_argument_parser()
parser.add_argument('--private-key-file', required=False, default=None)
parser.add_argument('--key-passphrase', required=False, default=None)
processed = parser.parse_args(remaining)
processed = vars(processed)
private_key_file = processed.get('private_key_file')
passphrase = processed.get('key_passphrase')
response_json = utils.web_response_to_json(response)
try:
response_body = response_json['GetPasswordDataResponse']
encrypted_password = response_body['passwordData']
if not private_key_file or not encrypted_password:
return response
decrypted_password = decrypt_instance_password(encrypted_password,
private_key_file,
passphrase)
response_json['GetPasswordDataResponse']['passwordData'] = \
decrypted_password
return response_json
except KeyError as ke:
raise exception.UnknownOutputFormat()
|
{
"content_hash": "758665d08eef151343ba49d7e39cc341",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 77,
"avg_line_length": 43.54814814814815,
"alnum_prop": 0.6735839428474231,
"repo_name": "jiocloudservices/jcsclient",
"id": "ef725f7907737df94801e3043248f68889ec0d31",
"size": "7005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jcsclient/compute_api/instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "181299"
}
],
"symlink_target": ""
}
|
"""Implement training process including curriculum and hard negative mining."""
import argparse
import collections
import copy
from ..tqdm_utils import tqdm_for
from difflogic.dataset.utils import RandomlyIterDataset
from difflogic.thutils import monitor_gradrms
import jacinle.random as random
from jacinle.logging import get_logger
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_pbar
from jactorch.train.env import TrainerEnv
from jactorch.utils.meta import as_cuda
__all__ = ['TrainerBase', 'CurriculumTrainerBase', 'MiningTrainerBase']
logger = get_logger(__file__)
class TrainerBase(TrainerEnv):
"""The base Trainer class supports basic training and testing interfaces.
Implement the basic training and testing procedure. The training have multiple
epochs, with multiple iterations in each epoch. A list defined by
[begin:step:end] represents the argument (number of objects) for testing.
Args:
model: The model for both training and evaluation, the mode is turned by
calling model.eval() or model.train().
optimizer: The optimizer for the model when being optimized.
epochs: The number of epochs for training.
epoch_size: The number of iterations per epoch during training.
test_epoch_size: The number of iterations per epoch during testing.
test_number_begin: The begin number of the list.
test_number_step: The step size of the list.
test_number_end: The end number of the list.
"""
def __init__(self, model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end):
super().__init__(model, optimizer)
self.epochs = epochs
self.epoch_size = epoch_size
self.test_epoch_size = test_epoch_size
self.test_number_begin = test_number_begin
self.test_number_step = test_number_step
self.test_number_end = test_number_end
__hyperparams__ = ('epochs', 'epoch_size', 'test_epoch_size',
'test_number_begin', 'test_number_step', 'test_number_end')
__hyperparam_defaults__ = {'test_number_step': 0}
@classmethod
def _get_hyperparams(cls):
return TrainerBase.__hyperparams__
@classmethod
def make_trainer_parser(cls, parser, defaults, prefix=None):
for k, v in TrainerBase.__hyperparam_defaults__.items():
defaults.setdefault(k, v)
prefix = '--' if prefix is None else '--' + str(prefix) + '-'
if not isinstance(parser, argparse._ArgumentGroup):
parser = parser.add_argument_group('Trainer')
parser.add_argument(
prefix + 'epochs',
type=int,
default=defaults['epochs'],
metavar='N',
help='number of total epochs to run')
parser.add_argument(
prefix + 'epoch-size',
type=int,
default=defaults['epoch_size'],
metavar='N',
help='number of iterations per epoch')
parser.add_argument(
prefix + 'test-epoch-size',
type=int,
default=defaults['test_epoch_size'],
metavar='N',
help='number of iterations per test epoch')
parser.add_argument(
prefix + 'test-number-begin',
type=int,
default=defaults['test_number_begin'],
metavar='N',
help='begin number of nodes for test')
parser.add_argument(
prefix + 'test-number-step',
type=int,
default=defaults['test_number_step'],
metavar='N',
help='step number of nodes for test')
parser.add_argument(
prefix + 'test-number-end',
type=int,
default=defaults['test_number_end'],
metavar='N',
help='end number of nodes for test')
@classmethod
def from_args(cls, model, optimizer, args, prefix=None, **kwargs):
prefix = '' if prefix is None else str(prefix) + '_'
init_params = {k: getattr(args, prefix + k) for k in cls._get_hyperparams()}
init_params.update(kwargs)
return cls(model, optimizer, **init_params)
def _dump_meters(self, meters, mode):
"""Provide ways to dump the statistics (stored in meters)
for plotting or analysing.
"""
pass
def _prepare_dataset(self, epoch_size, mode):
"""Prepare dataset for getting training/testing data.
Args:
epoch_size: The number of iterations in each epoch.
mode: 'train' or 'test' for training or testing.
Returns:
None, this is a hook function before getting training/testing data.
"""
raise NotImplementedError()
def _get_train_data(self, index, meters):
"""Get training data can be directly fed into the train_step function.
Args:
index: The current iteration index in current epoch.
meters: a stats collector to collect information.
Returns:
feed_dict, A dict can be directly fed into the train_step function.
"""
raise NotImplementedError()
def _get_result(self, index, meters, mode):
"""Include two steps, get testing data from dataset & evaluate the model.
Args:
index: The current iteration index in current epoch.
meters: a stats collector to collect information.
mode: 'train' or 'test' or others.
Returns:
message: The message to be shown on tqdm progress bar.
extra_info: An extra variable to give extra information.
"""
raise NotImplementedError()
def _train_step(self, feed_dict, meters):
ret = self.step(feed_dict)
loss, monitors, output_dict, extras = ret
meters.update(monitor_gradrms(self.model))
meters.update(monitors)
meters.update(loss=loss)
return 'Train: loss={loss:.4f}'.format(loss=loss), ret
def _train_epoch(self, epoch_size):
model = self.model
meters = GroupMeters()
self._prepare_dataset(epoch_size, mode='train')
def train_func(index):
model.eval()
feed_dict = self._get_train_data(index, meters)
model.train()
message, _ = self._train_step(feed_dict, meters)
return message
# For $epoch_size times, do train_func with tqdm progress bar.
tqdm_for(epoch_size, train_func)
logger.info(
meters.format_simple(
'> Train Epoch {:5d}: '.format(self.current_epoch),
compressed=False))
self._dump_meters(meters, 'train')
return meters
def _test_epoch(self, epoch_size):
meters = GroupMeters()
self._prepare_dataset(epoch_size, mode='test')
def test_func(index):
message, _ = self._get_result(index, meters, mode='test')
return message
tqdm_for(epoch_size, test_func)
logger.info(meters.format_simple('> Evaluation: ', compressed=False))
self._dump_meters(meters, 'test')
return meters
def _early_stop(self, meters):
"""A hook function to enable early_stop checking."""
return False
def train(self):
self.early_stopped = False
for i in range(1, 1 + self.epochs):
self.current_epoch = i
meters = self._train_epoch(self.epoch_size)
if self._early_stop(meters):
self.early_stopped = True
break
return meters
def test(self):
self.model.eval()
results = []
self.test_number = self.test_number_begin
while self.test_number <= self.test_number_end:
meters = self._test_epoch(self.test_epoch_size)
results.append(meters)
if self.test_number_step <= 0:
break
self.test_number += self.test_number_step
return results
class CurriculumTrainerBase(TrainerBase):
"""A base trainer class supports curriculum learning w.r.t an integer argument.
The lessons in the curriculum are defined by an integer argument: The number
of object. The lessons are defined by a list of the form [start:step:graduate].
"""
def __init__(self, model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end,
curriculum_start, curriculum_step, curriculum_graduate,
enable_candidate, curriculum_thresh, curriculum_thresh_relax,
curriculum_force_upgrade_epochs, sample_array_capacity,
enhance_epochs):
super().__init__(model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end)
self.curriculum_start = curriculum_start
self.curriculum_step = curriculum_step
self.curriculum_graduate = curriculum_graduate
self.enable_candidate = enable_candidate
self.curriculum_thresh = curriculum_thresh
self.curriculum_thresh_relax = curriculum_thresh_relax
self.curriculum_force_upgrade_epochs = curriculum_force_upgrade_epochs
self.sample_array_capacity = sample_array_capacity
self.enhance_epochs = enhance_epochs
__hyperparams__ = ('curriculum_start', 'curriculum_step',
'curriculum_graduate', 'enable_candidate',
'curriculum_thresh', 'curriculum_thresh_relax',
'curriculum_force_upgrade_epochs', 'sample_array_capacity',
'enhance_epochs')
__hyperparam_defaults__ = {
'curriculum_step': 1,
'enable_candidate': True,
'curriculum_thresh': 1.0,
'curriculum_thresh_relax': 0.0,
'curriculum_force_upgrade_epochs': None,
'sample_array_capacity': 1,
'enhance_epochs': 0
}
@classmethod
def _get_hyperparams(cls):
return super()._get_hyperparams() + CurriculumTrainerBase.__hyperparams__
@classmethod
def make_trainer_parser(cls, parser, defaults, prefix=None):
super().make_trainer_parser(parser, defaults, prefix)
for k, v in CurriculumTrainerBase.__hyperparam_defaults__.items():
defaults.setdefault(k, v)
prefix = '--' if prefix is None else '--' + str(prefix) + '-'
if not isinstance(parser, argparse._ArgumentGroup):
parser = parser.add_argument_group('CurriculumTrainer')
parser.add_argument(
prefix + 'curriculum-start',
type=int,
default=defaults['curriculum_start'],
metavar='N',
help='starting number of nodes for curriculum')
parser.add_argument(
prefix + 'curriculum-step',
type=int,
default=defaults['curriculum_step'],
metavar='N',
help='number of nodes difference between lessons in curriculum')
parser.add_argument(
prefix + 'curriculum-graduate',
type=int,
default=defaults['curriculum_graduate'],
metavar='N',
help='graduate number of nodes for curriculum')
parser.add_argument(
prefix + 'enable-candidate',
type='bool',
default=defaults['enable_candidate'],
metavar='B',
help='enable candidate stage in curriculum')
parser.add_argument(
prefix + 'curriculum-thresh',
type=float,
default=defaults['curriculum_thresh'],
metavar='F',
help='threshold for curriculum lessons')
parser.add_argument(
prefix + 'curriculum-thresh-relax',
type=float,
default=defaults['curriculum_thresh_relax'],
metavar='F',
help='threshold = 1 - (graduate_number - current_number) * relax')
parser.add_argument(
prefix + 'curriculum-force-upgrade-epochs',
type=int,
default=defaults['curriculum_force_upgrade_epochs'],
metavar='N',
help='maximum number of epochs to force upgrade lesson')
parser.add_argument(
prefix + 'sample-array-capacity',
type=int,
default=defaults['sample_array_capacity'],
metavar='N',
help='the capacity of the sample array for numbers')
parser.add_argument(
prefix + 'enhance-epochs',
type=int,
default=defaults['enhance_epochs'],
metavar='N',
help='The number of training epochs even after graduation.')
def _get_accuracy(self, meters):
"""return the statistics to be compared with the threshold."""
raise NotImplementedError()
def _get_threshold(self):
return self.curriculum_thresh - self.curriculum_thresh_relax * \
(self.curriculum_graduate - self.current_number)
def _pass_lesson(self, meters):
"""Check whether the current performance is enough to pass current lesson."""
acc = self._get_accuracy(meters)
thresh = self._get_threshold()
if acc >= thresh:
return True
# Force upgrade to next lesson if used too much epochs.
t = self.curriculum_force_upgrade_epochs
if t is not None and self.current_epoch - self.last_upgrade_epoch >= t:
return True
return False
def _upgrade_lesson(self):
"""Upgrade to next lesson."""
self.nr_upgrades += 1
self.last_upgrade_epoch = self.current_epoch
if self.enable_candidate:
# When all lessons finished, it becomes candidate before graduated.
if self.current_number < self.curriculum_graduate:
self.current_number += self.curriculum_step
# sample_array records the lessons the model recently studied.
self.sample_array.append(self.current_number)
elif self.is_candidate:
self.is_graduated = True
else:
self.is_candidate = True
else:
if self.current_number < self.curriculum_graduate:
self.current_number += self.curriculum_step
self.sample_array.append(self.current_number)
else:
self.is_graduated = True
def _take_exam(self, train_meters=None):
"""Use training results as exam result, upgrade to next lesson if pass."""
if self._pass_lesson(train_meters):
self._upgrade_lesson()
def _train_epoch(self, epoch_size):
"""Add an exam session after each training epoch."""
meters = super()._train_epoch(epoch_size)
if not self.is_graduated:
self._take_exam(train_meters=copy.copy(meters))
return meters
def _early_stop(self, meters):
"""Early stop the training when the model graduated from the curriculum."""
return self.is_graduated and \
self.current_epoch - self.last_upgrade_epoch >= self.enhance_epochs
def _sample_number(self, mode):
"""Sample an integer argument from choices defined by an array."""
if mode == 'test':
return self.test_number
# review (sample training data) from recently studied lessons.
return random.choice(self.sample_array)
def train(self):
self.is_candidate = False
self.is_graduated = False
self.nr_upgrades = 0
self.last_upgrade_epoch = 0
self.current_number = self.curriculum_start
self.sample_array = collections.deque(maxlen=self.sample_array_capacity)
self.sample_array.append(self.current_number)
super().train()
return self.is_graduated
class MiningTrainerBase(CurriculumTrainerBase):
"""A trainer class supports both curriculum learning and hard negative mining.
Targeted on RL cases (with environment provided). Maintain two list of data
represents positive and negative ones. The environment instance is regarded as
positive if the agent can successfully accomplish the task.
Based on the curriculum schedule, there are periodically mining process (also
used as exams to determine the upgrade to next lesson or not). During the
mining process, random environment instances are being sampled, and collected
into positive and negative ones according to the outcome. Dur training, the
data are being balanced sampled from the positive and negative examples.
"""
pos_data = None
neg_data = None
def __init__(self, model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end,
curriculum_start, curriculum_step, curriculum_graduate,
enable_candidate, curriculum_thresh, curriculum_thresh_relax,
curriculum_force_upgrade_epochs, sample_array_capacity,
enhance_epochs, enable_mining, repeat_mining, candidate_mul,
mining_interval, mining_epoch_size, mining_dataset_size,
inherit_neg_data, disable_balanced_sample, prob_pos_data):
super().__init__(model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end,
curriculum_start, curriculum_step, curriculum_graduate,
enable_candidate, curriculum_thresh,
curriculum_thresh_relax, curriculum_force_upgrade_epochs,
sample_array_capacity, enhance_epochs)
self.enable_mining = enable_mining
self.repeat_mining = repeat_mining
self.candidate_mul = candidate_mul
self.mining_interval = mining_interval
self.mining_epoch_size = mining_epoch_size
self.mining_dataset_size = mining_dataset_size
self.inherit_neg_data = inherit_neg_data
self.disable_balanced_sample = disable_balanced_sample
self.prob_pos_data = prob_pos_data
__hyperparams__ = ('enable_mining', 'repeat_mining', 'candidate_mul',
'mining_interval', 'mining_epoch_size',
'mining_dataset_size', 'inherit_neg_data',
'disable_balanced_sample', 'prob_pos_data')
__hyperparam_defaults__ = {
'repeat_mining': True,
'candidate_mul': 2,
'inherit_neg_data': False,
'disable_balanced_sample': False,
'prob_pos_data': 0.5
}
@classmethod
def _get_hyperparams(cls):
return super()._get_hyperparams() + MiningTrainerBase.__hyperparams__
@classmethod
def make_trainer_parser(cls, parser, defaults, prefix=None):
super().make_trainer_parser(parser, defaults, prefix)
for k, v in MiningTrainerBase.__hyperparam_defaults__.items():
defaults.setdefault(k, v)
prefix = '--' if prefix is None else '--' + str(prefix) + '-'
if not isinstance(parser, argparse._ArgumentGroup):
parser = parser.add_argument_group('MiningTrainer')
parser.add_argument(
prefix + 'enable-mining',
type='bool',
default=defaults['enable_mining'],
metavar='B',
help='enable hard-env mining')
parser.add_argument(
prefix + 'repeat-mining',
type='bool',
default=defaults['repeat_mining'],
metavar='B',
help='repeat mining until failing on a lesson')
parser.add_argument(
prefix + 'candidate-mul',
type=int,
default=defaults['candidate_mul'],
metavar='N',
help='x times more mining iters when being candidate')
parser.add_argument(
prefix + 'mining-interval',
type=int,
default=defaults['mining_interval'],
metavar='N',
help='the interval(number of epochs) of the mining')
parser.add_argument(
prefix + 'mining-epoch-size',
type=int,
default=defaults['mining_epoch_size'],
metavar='N',
help='number of iterations per epoch of mining')
parser.add_argument(
prefix + 'mining-dataset-size',
type=int,
default=defaults['mining_dataset_size'],
metavar='N',
help='size of the dataset collected during mining')
parser.add_argument(
prefix + 'inherit-neg-data',
type='bool',
default=defaults['inherit_neg_data'],
metavar='B',
help='recompute the negative data from last mining')
parser.add_argument(
prefix + 'disable-balanced-sample',
type='bool',
default=defaults['disable_balanced_sample'],
metavar='B',
help='use random samples instead of balanced samples when enable mining'
)
parser.add_argument(
prefix + 'prob-pos-data',
type=float,
default=defaults['prob_pos_data'],
metavar='F',
help='the probability of use positive data during training')
def _get_player(self, number, mode):
"""Get an environment to be interact with, with nr_obj & mode specified."""
raise NotImplementedError()
def _balanced_sample(self, meters):
"""Balanced sample positive and negative data with $prob_pos_data."""
nr_pos, nr_neg = self.pos_data.size, self.neg_data.size
assert nr_pos + nr_neg > 0
if nr_neg == 0:
use_pos_data = True
elif nr_pos == 0:
use_pos_data = False
else:
use_pos_data = random.rand() < self.prob_pos_data
meters.update(pos_data_ratio=int(use_pos_data))
pool = self.pos_data if use_pos_data else self.neg_data
return pool.get()
def _get_number_and_player(self, meters, mode):
"""Sample both the number of objects and the environment."""
balanced_sample = mode == 'train' and self.enable_mining and (
not self.disable_balanced_sample and self.pos_data is not None)
if balanced_sample:
number, player = self._balanced_sample(meters)
else:
number = self._sample_number(mode)
player = self._get_player(number, mode)
if mode == 'train':
meters.update(train_number=number)
return number, player
def _get_result_given_player(self, index, meters, number, player, mode):
"""Compute the result given player, upon the mode.
Args:
index: Current episode id.
meters: Used to collect stats.
number: The number of objects/blocks.
player: Environment for player to interact.
mode: 'train'/'test'/'mining'/'inherit'
Returns('train' mode):
feed_dict: feed_dict for train_step
Returns(other modes):
message: The message shown on the progress bar.
result: necessary extra information, see also _extract_info
"""
raise NotImplementedError()
def _get_result(self, index, meters, mode):
number, player = self._get_number_and_player(meters, mode)
return self._get_result_given_player(index, meters, number, player, mode)
def _extract_info(self, extra):
"""Extract necessary information from extra variable.
Args:
extra: An extra variable returned by _get_result_given_player.
Returns:
succ: The result of the episode, success or not, to classify as pos/neg.
number: The number of objects/blocks
backup: The clone of the environment, for interacting multiple times.
"""
raise NotImplementedError()
def _get_train_data(self, index, meters):
return self._get_result(index, meters, mode='train')
def _inherit_neg_data(self, neg_data, old_neg_data, meters,
mining_dataset_size):
"""To avoid wasting already collect negative data, re-exam them."""
if not self.inherit_neg_data or \
(old_neg_data is None or old_neg_data.size == 0):
return
original_size = neg_data.size
old_neg_data.reset()
maximum_inherit_size = min(old_neg_data.size, mining_dataset_size)
def inherit_func(index):
number, player = old_neg_data.get()
message, result = self._get_result_given_player(
index, meters, number, player, mode='inherit')
positive, number, backup = self._extract_info(result)
if not positive:
neg_data.append((number, backup))
return message
tqdm_for(maximum_inherit_size, inherit_func)
logger.info(
meters.format_simple(
'> Inherit: new_size:{}, old_size:{}'.format(
neg_data.size - original_size, old_neg_data.size),
compressed=False))
def _mining_epoch(self, mining_epoch_size, mining_dataset_size):
"""Take exam, collect and update positive dataset and negative dataset"""
pos_data = RandomlyIterDataset()
neg_data = RandomlyIterDataset()
self.model.eval()
meters = GroupMeters()
with tqdm_pbar(total=mining_epoch_size) as pbar:
for i in range(mining_epoch_size):
message, result = self._get_result(i, meters, mode='mining')
positive, number, backup = self._extract_info(result)
dataset = pos_data if positive else neg_data
if dataset.size < mining_dataset_size:
dataset.append((number, backup))
pbar.set_description(message)
pbar.update()
# When both positive and negative dataset are full, break.
if pos_data.size >= mining_dataset_size and \
neg_data.size >= mining_dataset_size:
break
logger.info(meters.format_simple('> Mining: ', compressed=False))
self._inherit_neg_data(neg_data, self.neg_data, meters, mining_dataset_size)
self.pos_data = pos_data
self.neg_data = neg_data
self._dump_meters(meters, 'mining')
return meters
def _upgrade_lesson(self):
super()._upgrade_lesson()
if self.is_graduated:
self.pos_data, self.neg_data = None, None
def _take_exam(self, train_meters=None):
if not self.enable_mining:
super()._take_exam(train_meters)
return
# The mining process, as well as the examing time,
# only taken at a certain rate.
if self.need_mining or (self.mining_interval <=
self.current_epoch - self.last_mining_epoch):
self.last_mining_epoch = self.current_epoch
mining_epoch_size = self.mining_epoch_size
# The exam elapses longer when in candidate status.
if self.is_candidate:
mining_epoch_size *= self.candidate_mul
meters = self._mining_epoch(mining_epoch_size, self.mining_dataset_size)
# Use the performance during mining as the outcome for the exam.
if self._pass_lesson(meters):
self._upgrade_lesson()
if self.is_graduated or (not self.repeat_mining and self.need_mining):
self.need_mining = False
else:
# Can take exam consecutively if repeat_mining=True.
self.need_mining = True
self._take_exam()
else:
self.need_mining = False
def train(self):
self.need_mining = False
self.last_mining_epoch = 0
return super().train()
|
{
"content_hash": "13cd8e5ad91007e283d41b0e2c84fa06",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 81,
"avg_line_length": 36.46448863636363,
"alnum_prop": 0.6530715593471232,
"repo_name": "google/neural-logic-machines",
"id": "497cb27d8a4d6ff24ce10f951bed0ea470511ca3",
"size": "26272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "difflogic/train/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "24594"
},
{
"name": "Python",
"bytes": "178976"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.