| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
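The rows below are raw records from this table, one source file per row. A minimal sketch of iterating such a dump programmatically, assuming a Hugging Face `datasets`-style layout (the dataset id shown is a placeholder, not a real dataset):

```python
# Hypothetical example: iterate a code-corpus table with the columns above.
# "some-org/python-code-dump" is a placeholder id, not a real dataset.
from datasets import load_dataset

ds = load_dataset("some-org/python-code-dump", split="train")
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])
print(row["code"][:200])  # first 200 characters of the source file
```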
import os
import subprocess
from pyrm114 import *
# Train a two-category classifier, classify a sample, and write one result to a file.
p = pyrmClassifier(['a','b'])
p.train('a', 'ay lmao')
p.train('b', 'b rip')
p.classify('lmao')
with open('rip.txt', 'w') as f:
p.classify('lmao', f)
subprocess.call(['cat', 'rip.txt'])
os.remove('rip.txt')
print p.crm_files_exist()
p.reset()
print p.crm_files_exist()
|
errcHuang/pyRM114
|
tests/test.py
|
Python
|
mit
| 310
|
# DO NOT EDIT THIS FILE in the QGIS plugin directory.
# Edit the original library file in the qgpkg directory and
# execute `make` to update the QGIS plugin files.
from __future__ import print_function
import sys
import os
import sqlite3
import tempfile
import logging
from xml.etree import ElementTree as ET
from qgis.core import *
from qgis.utils import *
from PyQt4.QtXml import *
from PyQt4.QtCore import *
from StringIO import StringIO
from urlparse import urlparse
from qgpkg import QGpkg
logger = logging.getLogger('qgpkg')
# Debug code for Pycharm
#sys.path.append('/home/joana/Downloads/pycharm-2016.3.3/debug-eggs/pycharm-debug.egg')
#import pydevd
#pydevd.settrace('localhost', port=53100, stdoutToServer=True, stderrToServer=True)
class QGpkg_owc (QGpkg):
"""Read and write QGIS mapping information in a GeoPackage database file, using this spec:
https://github.com/pka/qgpkg/blob/master/ows_geopackage_extension.md
"""
def write(self, project_path):
self.log(logging.ERROR, u"Sorry, but it appears that writing into this geopackage extension was not implemented yet!")
return
def read(self, gpkg_path):
''' Read QGIS project from GeoPackage '''
iface.newProject(True)  # Clear the current project before opening
# Check if it's a GeoPackage Database
self.database_connect(gpkg_path)
if not self.check_gpkg(gpkg_path):
self.log(logging.ERROR, u"No valid GeoPackage selected.")
return
try:
self.c.execute('SELECT table_name FROM gpkg_contents')
except sqlite3.OperationalError:
self.log(logging.ERROR, u"Unable to read table Name.")
return
table_names = self.c.fetchall()
db_name = QFileInfo(gpkg_path).baseName()
# Load OWS Context
try:
self.c.execute('SELECT content FROM owc_context')
except sqlite3.OperationalError:
self.log(logging.ERROR, u"Unable to read table owc_context.")
return
context = self.c.fetchone()
if context is None:
self.log(logging.ERROR, u"No record found on table owc_context!")
return
# Everything is read from the context
self.loadContext(context[0], gpkg_path)
# TODO: read resources
def loadContext(self, context, gpkg_path):
"""Parses and applies the information on OWC_context.
Args:
context: The contents of owc_context table.
gpkg_path: The path of the gpkg file.
"""
it = ET.iterparse(StringIO(context))
for _, el in it:
if '}' in el.tag:
el.tag = el.tag.split('}', 1)[1] # strip all namespaces
root = it.root
# Missing mandatory elements
# spec reference
spec_elem = root.find("specReference")
# if spec_elem is None:
# self.log(logging.ERROR, u"Could not parse project spec feference.")
# return
# Language
lang_elem = root.find("language")
# if lang_elem is None:
# self.log(logging.ERROR, u"Could not parse project language.")
# return
# Parse project id (mandatory)
id_elem = root.find("id")
if id_elem is None:
self.log(logging.ERROR, u"Could not parse project id.")
return
# Parse project title (mandatory)
title_elem = root.find("title")
if title_elem is None:
self.log(logging.ERROR, u"Could not parse project title.")
return
QgsProject.instance().setTitle(title_elem.text)
# Parse bbox, if it exists (not owc)
where_elem = root.find("where")
if where_elem is not None:
self.loadBBbox(where_elem)
# OWC (optional) elements
# Parse abstract (optional)
abstract_elem = root.find("abstract")
# Parse update date (updateDate?) (optional)
update_elem = root.find("update")
# Parse author (optional)
author_elem = root.find("author")
# TODO: parse comma separated list
# Parse publisher (optional)
publisher_elem = root.find("publisher")
# Parse creator (optional)
creator_elem = root.find("creator")
# Parse rights (optional)
rights_elem = root.find("rights")
# Parse area of interest (optional)
aio_elem = root.find("areaOfInterest")
# TODO: parse GM_Envelope
# Parse time interval of interest (optional)
time_elem = root.find("timeIntervalOfInterest")
# Parse keyword (optional)
keyword_elem = root.find("keyword")
# Parse context metadata (optional)
metadata_elem = root.find("contextMetadata")
entry_elems = root.findall("entry") # owc:resource?
if entry_elems:  # findall returns a list, never None
entry_elems.reverse()
# Load every entry
for entry_elem in entry_elems:
self.loadOWCLayer(gpkg_path, entry_elem)
def loadOWCLayer(self, gpkg_path, entry_elem):
"""Parses layer information from an entry, on OWC_context, and uses it to load and style the layer.
Args:
gpkg_path: The geopackage path.
entry_elem: The entry xml node.
"""
# Id: is it called code?
id_elem = entry_elem.find("id")
if id_elem is None:
self.log(logging.ERROR, u"Could not parse layer uri.")
return
# Parse the id as an RFC 3986 URI and check that it has a valid scheme
parsed_url = urlparse(id_elem.text)
if not parsed_url.scheme:  # urlparse returns '' (not None) for a missing scheme
self.log(logging.ERROR, u"Invalid layer uri.")
return
# Mandatory
title_elem = entry_elem.find("title")
if title_elem is None:
self.log(logging.ERROR, u"Could not parse layer title.")
return
# TODO: make offering more general, to support other types of data formats (e.g.: wms)
offering_elem = entry_elem.find("offering")
if offering_elem is not None:
# Mandatory
content_elem = offering_elem.find("content")
if content_elem is None:
# 'name' is only parsed below, so report the layer by its title here
self.log(logging.ERROR, u"Failed to parse content for layer '" + title_elem.text + "'!")
return
href = content_elem.get("href")
name = self.find_between(href, "#table=")
if not name:  # find_between returns '' (not None) when nothing matches
self.log(logging.ERROR, u"Could not parse table name.")
return
layer = self.loadLayer(gpkg_path, name, title_elem.text)
if layer is None or not layer.isValid():
self.log(logging.ERROR, u"Layer '" + name + "' failed to load!")
return
# layer.setShortName(name)
# Check visibility (mandatory); guard against a missing category element
category_elem = entry_elem.find("category")
visibility = category_elem.get("term") if category_elem is not None else None
if visibility is None:
self.log(logging.ERROR, u"Failed to read visibility for '" + name + "' layer!")
return
iface.legendInterface().setLayerVisible(layer, visibility.lower() == 'true')
# Read style (optional)
style_elem = offering_elem.find("styleSet")
if style_elem is not None:
self.loadOWCStyle(style_elem, title_elem.text)
# Read other OWC (optional) elements ##################
# Parse abstract (optional)
abstract_elem = entry_elem.find("abstract")
if abstract_elem is not None:
layer.setAbstract(abstract_elem.text)
# Parse update date (optional): shouldn't this be OWC:updateDate ?
date_elem = entry_elem.find("updated")
# Parse author (optional)
author_elem = entry_elem.find("author")
if author_elem is not None:
layer.setAttribution(author_elem.text)
# Parse publisher (optional)
publisher_elem = entry_elem.find("publisher")
# Parse rights (optional)
rights_elem = entry_elem.find("rights")
# Parse geospatial extent (optional)
ext_elem = entry_elem.find("geospatialExtent")
# TODO: parse GM_envelope
# Parse temporal extent (optional)
temp_elem = entry_elem.find("temporalExtent")
# TODO: parse TM_GeometricPrimitive
# Parse content description (optional)
desc_elem = entry_elem.find("contentDescription")
# Parse preview (optional)
prev_elem = entry_elem.find("preview")
# TODO: validate uri
# Parse content reference (optional)
ref_elem = entry_elem.find("contentByRef")
# TODO: validate uri
# Parse status (optional)
active_elem = entry_elem.find("active")
# TODO: validate boolean
# Parse keyword (optional)
keyword_elem = entry_elem.find("keyword")
if keyword_elem is not None:
keywords = [keyword_elem.text]
layer.setKeywordList(keywords)
# Parse minimum scale denominator (optional)
minScale_elem = entry_elem.find("minScaleDenominator")
if minScale_elem is not None:
layer.setMinimumScale(float(minScale_elem.text))
# Parse maximum scale denominator (optional)
maxScale_elem = entry_elem.find("maxScaleDenominator")
if maxScale_elem is not None:
layer.setMaximumScale(float(maxScale_elem.text))
# Parse resource metadata (optional)
metadata_elem = entry_elem.find("resourceMetadata")
# Parse folder (optional)
folder_elem = entry_elem.find("folder")
def loadOWCStyle(self, style_elem, layer_title):
"""Parses and applies style information from a styleSet, on OWC_context.
Args:
style_elem: The styleSet xml node.
layer_title: The title of the layer to which we want to apply the style.
"""
# Mandatory: given name
stylename_elem = style_elem.find("name")
if stylename_elem is None:
self.log(logging.ERROR, u"Could not parse style name.")
return
# parse title (optional)
title_elem = style_elem.find("title")
# parse content (mandatory)
content_elem = style_elem.find("content")
if content_elem is None:
self.log(logging.ERROR, u"Could not parse style content.")
return
href = content_elem.get("href")
pref1 = "#table="
pref2 = "&name="
style_table = self.find_between(href, pref1, pref2)
stylename = self.find_between(href, pref2)
if not stylename or stylename != stylename_elem.text:
self.log(logging.ERROR, u"Could not parse style name.")
return
mime_type = content_elem.get("type")  # avoid shadowing the builtin 'type'
if mime_type != "application/sld+xml":
self.log(logging.ERROR, u"Currently only styles in SLD/XML format are supported.")
return
self.loadStyle(stylename, style_table, layer_title)
# Load layers from gpkg
def loadLayer(self, gpkg_path, layername, title):
"""Loads a layer from a geopackage, and it sets its title.
Args:
gpkg_path: The gpkg path.
layername: The layer name, within the geopackage.
title: The title to be given to the layer.
Returns:
A handle to the loaded layer.
"""
return iface.addVectorLayer(gpkg_path + "|layername=" + layername, title, "ogr")
def loadStyle(self, style_name, table_name, given_name):
"""Load named style from a table.
Args:
style_name: The style name, as it is referenced in the style table.
table_name: The name of the table where the style is stored (shouldn't it be a convention?).
given_name: The layer title.
"""
try:
self.c.execute("SELECT content FROM owc_style where name like'" + style_name + "'")
except sqlite3.OperationalError:
self.log(logging.ERROR, u"Could not find style "
+ style_name)
return
styles = self.c.fetchone()
if styles is None:
self.log(logging.ERROR, u"Could not find any styles "
u"named " + style_name)
return
style = styles[0]
layerList = QgsMapLayerRegistry.instance().mapLayersByName(given_name)
if not layerList:  # mapLayersByName returns an empty list, never None
self.log(logging.ERROR, u"We could not find a loaded layer "
"called " + given_name + ". Something is not right!")
return
layer = layerList[0]
f = QTemporaryFile()
if f.open():
f.write(style)
f.close()
ret = layer.loadSldStyle(f.fileName())
# TODO: add style to default styles?
if ret[1] is True:
self.log(logging.DEBUG, "Style '" + style_name + "' loaded")
else:
self.log(logging.ERROR, "Style '" + style_name + "' not loaded: " + ret[0])
f.remove()
else:
# This branch is reached when the temporary file cannot be opened,
# not when the style is missing (that case is handled above)
self.log(logging.ERROR, u"Could not open a temporary file to apply style "
+ style_name + ". Something is not right!")
return
def loadBBbox(self, where_elem):
"""Parses and applies bbox.
Args:
where_elem: The where xml node.
"""
env_elem = where_elem.find("Envelope")
if env_elem is None:
self.log(logging.ERROR, u"Could not parse envelope.")
return
lower_elem = env_elem.find("lowerCorner")
if lower_elem is None:
self.log(logging.ERROR, u"Could not parse lower corner.")
return
lc = lower_elem.text.split()
if len(lc) != 2:
self.log(logging.ERROR, u"Wrong number of entries in lower corner.")
return
upper_elem = env_elem.find("upperCorner")
if upper_elem is None:
self.log(logging.ERROR, u"Could not parse lower corner.")
return
uc = upper_elem.text.split()
if len(uc) != 2:
self.log(logging.ERROR, u"Wrong number of entries in upper corner.")
return
# TODO: review this implementation
# str = (self.find_between(context, "<georss:where>", "</georss:where>")).replace('\n', '').encode('ascii',
# 'ignore')
# d = QDomDocument()
# d.setContent("<?xml version=\"1.0\" encoding=\"utf-8\"?>" + str)
# docElem = d.documentElement()
# extent = QgsOgcUtils.rectangleFromGMLEnvelope(docElem.firstChild())
# TODO: what happens to srs and dimension?
extent = QgsRectangle(float(lc[0]), float(lc[1]), float(uc[0]), float(uc[1]))
iface.mapCanvas().setExtent(extent)
iface.mapCanvas().refresh()
def find_between(self, s, first, last=None):
"""Extracts a substring from a string, between one, or two substrings.
If the last parameter is empty, it will extract everything after the first substring.
Args:
s: The string we want to parse.
first: The first substring.
last: The last substring (optional).
Returns:
The extracted substring.
"""
try:
start = s.index(first) + len(first)
if last is None:
end = len(s)
else:
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
|
pka/qgpkg
|
qgisgpkg/qgpkg_owc.py
|
Python
|
mit
| 15,628
|
import os.path
import pickle
import sys
import unittest.mock
from pathlib import Path
from textwrap import dedent
from types import ModuleType
from typing import Generator
import py
import pytest
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pathlib import bestrelpath
from _pytest.pathlib import commonpath
from _pytest.pathlib import ensure_deletable
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import get_extended_length_path_str
from _pytest.pathlib import get_lock_path
from _pytest.pathlib import import_path
from _pytest.pathlib import ImportPathMismatchError
from _pytest.pathlib import maybe_delete_a_numbered_dir
from _pytest.pathlib import resolve_package_path
from _pytest.pathlib import symlink_or_skip
from _pytest.pathlib import visit
from _pytest.tmpdir import TempPathFactory
class TestFNMatcherPort:
"""Test that our port of py.common.FNMatcher (fnmatch_ex) produces the
same results as the original py.path.local.fnmatch method."""
@pytest.fixture(params=["pathlib", "py.path"])
def match(self, request):
if request.param == "py.path":
def match_(pattern, path):
return py.path.local(path).fnmatch(pattern)
else:
assert request.param == "pathlib"
def match_(pattern, path):
return fnmatch_ex(pattern, path)
return match_
if sys.platform == "win32":
drv1 = "c:"
drv2 = "d:"
else:
drv1 = "/c"
drv2 = "/d"
@pytest.mark.parametrize(
"pattern, path",
[
("*.py", "foo.py"),
("*.py", "bar/foo.py"),
("test_*.py", "foo/test_foo.py"),
("tests/*.py", "tests/foo.py"),
(drv1 + "/*.py", drv1 + "/foo.py"),
(drv1 + "/foo/*.py", drv1 + "/foo/foo.py"),
("tests/**/test*.py", "tests/foo/test_foo.py"),
("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py"),
("tests/**/doc/**/test*.py", "tests/foo/doc/bar/test_foo.py"),
],
)
def test_matching(self, match, pattern, path):
assert match(pattern, path)
def test_matching_abspath(self, match):
abspath = os.path.abspath(os.path.join("tests/foo.py"))
assert match("tests/foo.py", abspath)
@pytest.mark.parametrize(
"pattern, path",
[
("*.py", "foo.pyc"),
("*.py", "foo/foo.pyc"),
("tests/*.py", "foo/foo.py"),
(drv1 + "/*.py", drv2 + "/foo.py"),
(drv1 + "/foo/*.py", drv2 + "/foo/foo.py"),
("tests/**/test*.py", "tests/foo.py"),
("tests/**/test*.py", "foo/test_foo.py"),
("tests/**/doc/test*.py", "tests/foo/bar/doc/foo.py"),
("tests/**/doc/test*.py", "tests/foo/bar/test_foo.py"),
],
)
def test_not_matching(self, match, pattern, path):
assert not match(pattern, path)
class TestImportPath:
"""
Most of the tests here were copied from py lib's tests for "py.path.local.pyimport".
Having our own pyimport-like function is in line with removing the py.path dependency in the future.
"""
@pytest.fixture(scope="session")
def path1(self, tmp_path_factory: TempPathFactory) -> Generator[Path, None, None]:
path = tmp_path_factory.mktemp("path")
self.setuptestfs(path)
yield path
assert path.joinpath("samplefile").exists()
def setuptestfs(self, path: Path) -> None:
# print "setting up test fs for", repr(path)
samplefile = path / "samplefile"
samplefile.write_text("samplefile\n")
execfile = path / "execfile"
execfile.write_text("x=42")
execfilepy = path / "execfile.py"
execfilepy.write_text("x=42")
d = {1: 2, "hello": "world", "answer": 42}
path.joinpath("samplepickle").write_bytes(pickle.dumps(d, 1))
sampledir = path / "sampledir"
sampledir.mkdir()
sampledir.joinpath("otherfile").touch()
otherdir = path / "otherdir"
otherdir.mkdir()
otherdir.joinpath("__init__.py").touch()
module_a = otherdir / "a.py"
module_a.write_text("from .b import stuff as result\n")
module_b = otherdir / "b.py"
module_b.write_text('stuff="got it"\n')
module_c = otherdir / "c.py"
module_c.write_text(
dedent(
"""
import py;
import otherdir.a
value = otherdir.a.result
"""
)
)
module_d = otherdir / "d.py"
module_d.write_text(
dedent(
"""
import py;
from otherdir import a
value2 = a.result
"""
)
)
def test_smoke_test(self, path1: Path) -> None:
obj = import_path(path1 / "execfile.py")
assert obj.x == 42 # type: ignore[attr-defined]
assert obj.__name__ == "execfile"
def test_renamed_dir_creates_mismatch(
self, tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
tmp_path.joinpath("a").mkdir()
p = tmp_path.joinpath("a", "test_x123.py")
p.touch()
import_path(p)
tmp_path.joinpath("a").rename(tmp_path.joinpath("b"))
with pytest.raises(ImportPathMismatchError):
import_path(tmp_path.joinpath("b", "test_x123.py"))
# Errors can be ignored.
monkeypatch.setenv("PY_IGNORE_IMPORTMISMATCH", "1")
import_path(tmp_path.joinpath("b", "test_x123.py"))
# PY_IGNORE_IMPORTMISMATCH=0 does not ignore error.
monkeypatch.setenv("PY_IGNORE_IMPORTMISMATCH", "0")
with pytest.raises(ImportPathMismatchError):
import_path(tmp_path.joinpath("b", "test_x123.py"))
def test_messy_name(self, tmp_path: Path) -> None:
# http://bitbucket.org/hpk42/py-trunk/issue/129
path = tmp_path / "foo__init__.py"
path.touch()
module = import_path(path)
assert module.__name__ == "foo__init__"
def test_dir(self, tmp_path: Path) -> None:
p = tmp_path / "hello_123"
p.mkdir()
p_init = p / "__init__.py"
p_init.touch()
m = import_path(p)
assert m.__name__ == "hello_123"
m = import_path(p_init)
assert m.__name__ == "hello_123"
def test_a(self, path1: Path) -> None:
otherdir = path1 / "otherdir"
mod = import_path(otherdir / "a.py")
assert mod.result == "got it" # type: ignore[attr-defined]
assert mod.__name__ == "otherdir.a"
def test_b(self, path1: Path) -> None:
otherdir = path1 / "otherdir"
mod = import_path(otherdir / "b.py")
assert mod.stuff == "got it" # type: ignore[attr-defined]
assert mod.__name__ == "otherdir.b"
def test_c(self, path1: Path) -> None:
otherdir = path1 / "otherdir"
mod = import_path(otherdir / "c.py")
assert mod.value == "got it" # type: ignore[attr-defined]
def test_d(self, path1: Path) -> None:
otherdir = path1 / "otherdir"
mod = import_path(otherdir / "d.py")
assert mod.value2 == "got it" # type: ignore[attr-defined]
def test_import_after(self, tmp_path: Path) -> None:
tmp_path.joinpath("xxxpackage").mkdir()
tmp_path.joinpath("xxxpackage", "__init__.py").touch()
mod1path = tmp_path.joinpath("xxxpackage", "module1.py")
mod1path.touch()
mod1 = import_path(mod1path)
assert mod1.__name__ == "xxxpackage.module1"
from xxxpackage import module1
assert module1 is mod1
def test_check_filepath_consistency(
self, monkeypatch: MonkeyPatch, tmp_path: Path
) -> None:
name = "pointsback123"
p = tmp_path.joinpath(name + ".py")
p.touch()
for ending in (".pyc", ".pyo"):
mod = ModuleType(name)
pseudopath = tmp_path.joinpath(name + ending)
pseudopath.touch()
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
newmod = import_path(p)
assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name)
pseudopath = tmp_path.joinpath(name + "123.py")
pseudopath.touch()
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
with pytest.raises(ImportPathMismatchError) as excinfo:
import_path(p)
modname, modfile, orig = excinfo.value.args
assert modname == name
assert modfile == str(pseudopath)
assert orig == p
assert issubclass(ImportPathMismatchError, ImportError)
def test_issue131_on__init__(self, tmp_path: Path) -> None:
# __init__.py files may be namespace packages, and thus the
# __file__ of an imported module may not be ourselves
# see issue
tmp_path.joinpath("proja").mkdir()
p1 = tmp_path.joinpath("proja", "__init__.py")
p1.touch()
tmp_path.joinpath("sub", "proja").mkdir(parents=True)
p2 = tmp_path.joinpath("sub", "proja", "__init__.py")
p2.touch()
m1 = import_path(p1)
m2 = import_path(p2)
assert m1 == m2
def test_ensuresyspath_append(self, tmp_path: Path) -> None:
root1 = tmp_path / "root1"
root1.mkdir()
file1 = root1 / "x123.py"
file1.touch()
assert str(root1) not in sys.path
import_path(file1, mode="append")
assert str(root1) == sys.path[-1]
assert str(root1) not in sys.path[:-1]
def test_invalid_path(self, tmp_path: Path) -> None:
with pytest.raises(ImportError):
import_path(tmp_path / "invalid.py")
@pytest.fixture
def simple_module(self, tmp_path: Path) -> Path:
fn = tmp_path / "mymod.py"
fn.write_text(
dedent(
"""
def foo(x): return 40 + x
"""
)
)
return fn
def test_importmode_importlib(self, simple_module: Path) -> None:
"""`importlib` mode does not change sys.path."""
module = import_path(simple_module, mode="importlib")
assert module.foo(2) == 42 # type: ignore[attr-defined]
assert str(simple_module.parent) not in sys.path
def test_importmode_twice_is_different_module(self, simple_module: Path) -> None:
"""`importlib` mode always returns a new module."""
module1 = import_path(simple_module, mode="importlib")
module2 = import_path(simple_module, mode="importlib")
assert module1 is not module2
def test_no_meta_path_found(
self, simple_module: Path, monkeypatch: MonkeyPatch
) -> None:
"""Even without any meta_path should still import module."""
monkeypatch.setattr(sys, "meta_path", [])
module = import_path(simple_module, mode="importlib")
assert module.foo(2) == 42 # type: ignore[attr-defined]
# mode='importlib' fails if no spec is found to load the module
import importlib.util
monkeypatch.setattr(
importlib.util, "spec_from_file_location", lambda *args: None
)
with pytest.raises(ImportError):
import_path(simple_module, mode="importlib")
def test_resolve_package_path(tmp_path: Path) -> None:
pkg = tmp_path / "pkg1"
pkg.mkdir()
(pkg / "__init__.py").touch()
(pkg / "subdir").mkdir()
(pkg / "subdir/__init__.py").touch()
assert resolve_package_path(pkg) == pkg
assert resolve_package_path(pkg.joinpath("subdir", "__init__.py")) == pkg
def test_package_unimportable(tmp_path: Path) -> None:
pkg = tmp_path / "pkg1-1"
pkg.mkdir()
pkg.joinpath("__init__.py").touch()
subdir = pkg.joinpath("subdir")
subdir.mkdir()
pkg.joinpath("subdir/__init__.py").touch()
assert resolve_package_path(subdir) == subdir
xyz = subdir.joinpath("xyz.py")
xyz.touch()
assert resolve_package_path(xyz) == subdir
assert not resolve_package_path(pkg)
def test_access_denied_during_cleanup(tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
"""Ensure that deleting a numbered dir does not fail because of OSErrors (#4262)."""
path = tmp_path / "temp-1"
path.mkdir()
def renamed_failed(*args):
raise OSError("access denied")
monkeypatch.setattr(Path, "rename", renamed_failed)
lock_path = get_lock_path(path)
maybe_delete_a_numbered_dir(path)
assert not lock_path.is_file()
def test_long_path_during_cleanup(tmp_path: Path) -> None:
"""Ensure that deleting long path works (particularly on Windows (#6775))."""
path = (tmp_path / ("a" * 250)).resolve()
if sys.platform == "win32":
# make sure that the full path is > 260 characters without any
# component being over 260 characters
assert len(str(path)) > 260
extended_path = "\\\\?\\" + str(path)
else:
extended_path = str(path)
os.mkdir(extended_path)
assert os.path.isdir(extended_path)
maybe_delete_a_numbered_dir(path)
assert not os.path.isdir(extended_path)
def test_get_extended_length_path_str() -> None:
assert get_extended_length_path_str(r"c:\foo") == r"\\?\c:\foo"
assert get_extended_length_path_str(r"\\share\foo") == r"\\?\UNC\share\foo"
assert get_extended_length_path_str(r"\\?\UNC\share\foo") == r"\\?\UNC\share\foo"
assert get_extended_length_path_str(r"\\?\c:\foo") == r"\\?\c:\foo"
def test_suppress_error_removing_lock(tmp_path: Path) -> None:
"""ensure_deletable should be resilient if lock file cannot be removed (#5456, #7491)"""
path = tmp_path / "dir"
path.mkdir()
lock = get_lock_path(path)
lock.touch()
mtime = lock.stat().st_mtime
with unittest.mock.patch.object(Path, "unlink", side_effect=OSError) as m:
assert not ensure_deletable(
path, consider_lock_dead_if_created_before=mtime + 30
)
assert m.call_count == 1
assert lock.is_file()
with unittest.mock.patch.object(Path, "is_file", side_effect=OSError) as m:
assert not ensure_deletable(
path, consider_lock_dead_if_created_before=mtime + 30
)
assert m.call_count == 1
assert lock.is_file()
# check now that we can remove the lock file in normal circumstances
assert ensure_deletable(path, consider_lock_dead_if_created_before=mtime + 30)
assert not lock.is_file()
def test_bestrelpath() -> None:
curdir = Path("/foo/bar/baz/path")
assert bestrelpath(curdir, curdir) == "."
assert bestrelpath(curdir, curdir / "hello" / "world") == "hello" + os.sep + "world"
assert bestrelpath(curdir, curdir.parent / "sister") == ".." + os.sep + "sister"
assert bestrelpath(curdir, curdir.parent) == ".."
assert bestrelpath(curdir, Path("hello")) == "hello"
def test_commonpath() -> None:
path = Path("/foo/bar/baz/path")
subpath = path / "sampledir"
assert commonpath(path, subpath) == path
assert commonpath(subpath, path) == path
assert commonpath(Path(str(path) + "suffix"), path) == path.parent
assert commonpath(path, path.parent.parent) == path.parent.parent
def test_visit_ignores_errors(tmp_path: Path) -> None:
symlink_or_skip("recursive", tmp_path / "recursive")
tmp_path.joinpath("foo").write_bytes(b"")
tmp_path.joinpath("bar").write_bytes(b"")
assert [
entry.name for entry in visit(str(tmp_path), recurse=lambda entry: False)
] == ["bar", "foo"]
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows only")
def test_samefile_false_negatives(tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
"""
import_file() should not raise ImportPathMismatchError if the paths are exactly
equal on Windows. It seems directories mounted as UNC paths make os.path.samefile
return False, even when they are clearly equal.
"""
module_path = tmp_path.joinpath("my_module.py")
module_path.write_text("def foo(): return 42")
monkeypatch.syspath_prepend(tmp_path)
with monkeypatch.context() as mp:
# Forcibly make os.path.samefile() return False here to ensure we are comparing
# the paths too. Using a context to narrow the patch as much as possible given
# this is an important system function.
mp.setattr(os.path, "samefile", lambda x, y: False)
module = import_path(module_path)
assert getattr(module, "foo")() == 42
|
nicoddemus/pytest
|
testing/test_pathlib.py
|
Python
|
mit
| 16,530
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from unittest.case import TestCase
except ImportError:
# Running unit tests in a production environment
from unittest2.case import TestCase
import mock
import os
import re
import time
import uuid
from datetime import datetime
from functools import partial
from itertools import izip
from netaddr import IPNetwork
from random import randint
from oslo_serialization import jsonutils
import sqlalchemy as sa
import web
from webtest import app
import nailgun
from nailgun import consts
from nailgun.errors import errors
from nailgun.settings import settings
from nailgun.db import db
from nailgun.db import flush
from nailgun.db import syncdb
from nailgun.logger import logger
from nailgun.db.sqlalchemy.fixman import load_fake_deployment_tasks
from nailgun.db.sqlalchemy.fixman import load_fixture
from nailgun.db.sqlalchemy.fixman import upload_fixture
from nailgun.db.sqlalchemy.models import NodeAttributes
from nailgun.db.sqlalchemy.models import NodeNICInterface
from nailgun.db.sqlalchemy.models import Notification
from nailgun.db.sqlalchemy.models import Task
# here come objects
from nailgun.objects import Cluster
from nailgun.objects import MasterNodeSettings
from nailgun.objects import Node
from nailgun.objects import NodeGroup
from nailgun.objects import Plugin
from nailgun.objects import Release
from nailgun.app import build_app
from nailgun.consts import NETWORK_INTERFACE_TYPES
from nailgun.middleware.connection_monitor import ConnectionMonitorMiddleware
from nailgun.middleware.keystone import NailgunFakeKeystoneAuthMiddleware
from nailgun.network.manager import NetworkManager
from nailgun.network.template import NetworkTemplate
from nailgun.utils import reverse
class TimeoutError(Exception):
pass
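# web.py db driver used by the test app below (see build_app(db_driver=...)):
# rolls the session back when a request errors out and commits it otherwise.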
def test_db_driver(handler):
try:
return handler()
except web.HTTPError:
if str(web.ctx.status).startswith(("4", "5")):
db.rollback()
raise
except Exception:
db.rollback()
raise
finally:
db.commit()
# we do not remove session in tests
class EnvironmentManager(object):
def __init__(self, app, session=None):
self.db = session or db()
self.app = app
self.tester = TestCase
self.tester.runTest = lambda a: None
self.tester = self.tester()
self.here = os.path.abspath(os.path.dirname(__file__))
self.fixture_dir = os.path.join(self.here, "..", "fixtures")
self.default_headers = {
"Content-Type": "application/json"
}
self.releases = []
self.clusters = []
self.nodes = []
self.plugins = []
self.network_manager = NetworkManager
def create(self, **kwargs):
release_data = kwargs.pop('release_kwargs', {"api": False})
cluster_data = kwargs.pop('cluster_kwargs', {})
if 'release_id' not in cluster_data:
cluster_data['release_id'] = self.create_release(**release_data).id
cluster = self.create_cluster(
**cluster_data
)
for node_kwargs in kwargs.pop('nodes_kwargs', []):
if "cluster_id" not in node_kwargs:
if isinstance(cluster, dict):
node_kwargs["cluster_id"] = cluster["id"]
else:
node_kwargs["cluster_id"] = cluster.id
node_kwargs.setdefault("api", False)
if "pending_roles" not in node_kwargs:
node_kwargs.setdefault("roles", ["controller"])
self.create_node(
**node_kwargs
)
return cluster
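# Typical use in a test (a sketch; the kwargs mirror how create() forwards
# them to create_release/create_cluster/create_node):
#   self.env.create(
#       cluster_kwargs={'name': 'my-cluster'},
#       nodes_kwargs=[{'roles': ['controller']}, {'roles': ['compute']}])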
def create_release(self, api=False, **kwargs):
os_name = kwargs.get(  # don't shadow the 'os' module imported above
'operating_system', consts.RELEASE_OS.centos)
version = kwargs.get(
'version', '{0}-6.1'.format(randint(0, 100000000)))
# NOTE(ikalnitsky): to avoid reading openstack.yaml every time,
# we read it once and then look up the needed release.
releases = self.read_fixtures(('openstack',))
release_data = next((
r for r in releases if r['fields']['operating_system'] == os_name),
releases[0])
release_data = release_data['fields']
release_data.update({
'name': u"release_name_" + version,
'version': version,
'state': consts.RELEASE_STATES.available,
'description': u"release_desc" + version,
})
if kwargs.get('deployment_tasks') is None:
kwargs['deployment_tasks'] = \
load_fake_deployment_tasks(apply_to_db=False)
release_data.update(kwargs)
if api:
resp = self.app.post(
reverse('ReleaseCollectionHandler'),
params=jsonutils.dumps(release_data),
headers=self.default_headers
)
self.tester.assertEqual(resp.status_code, 201)
release = resp.json_body
self.releases.append(
self.db.query(Release).get(release['id'])
)
else:
release = Release.create(release_data)
db().commit()
self.releases.append(release)
return release
def get_role(self, release_id, role_name, expect_errors=False):
return self.app.get(
reverse(
'RoleHandler',
{'role_name': role_name, 'release_id': release_id}),
headers=self.default_headers,
expect_errors=expect_errors
)
def update_role(self, release_id, role_name, data, expect_errors=False):
return self.app.put(
reverse(
'RoleHandler',
{'role_name': role_name, 'release_id': release_id}),
jsonutils.dumps(data),
headers=self.default_headers,
expect_errors=expect_errors
)
def delete_role(self, release_id, role_name, expect_errors=False):
return self.app.delete(
reverse(
'RoleHandler',
{'role_name': role_name, 'release_id': release_id}),
headers=self.default_headers,
expect_errors=expect_errors
)
def create_role(self, release_id, data, expect_errors=False):
return self.app.post(
reverse('RoleCollectionHandler', {'release_id': release_id}),
jsonutils.dumps(data),
headers=self.default_headers,
expect_errors=expect_errors
)
def create_cluster(self, api=True, exclude=None, **kwargs):
cluster_data = {
'name': 'cluster-api-' + str(randint(0, 1000000)),
}
editable_attributes = kwargs.pop(
'editable_attributes', None)
if kwargs:
cluster_data.update(kwargs)
if 'release_id' not in cluster_data:
cluster_data['release_id'] = self.create_release(api=False).id
if exclude and isinstance(exclude, list):
for ex in exclude:
try:
del cluster_data[ex]
except KeyError as err:
logger.warning(err)
if api:
resp = self.app.post(
reverse('ClusterCollectionHandler'),
jsonutils.dumps(cluster_data),
headers=self.default_headers,
expect_errors=True
)
self.tester.assertEqual(resp.status_code, 201, resp.body)
cluster = resp.json_body
cluster_db = Cluster.get_by_uid(cluster['id'])
else:
cluster = Cluster.create(cluster_data)
cluster_db = cluster
db().commit()
self.clusters.append(cluster_db)
if editable_attributes:
Cluster.patch_attributes(cluster_db,
{'editable': editable_attributes})
return cluster
def create_node(
self, api=False,
exclude=None, expect_http=201,
expected_error=None,
**kwargs):
# TODO(alekseyk) Simplify 'interfaces' and 'mac' manipulation logic
metadata = kwargs.get('meta', {})
default_metadata = self.default_metadata()
default_metadata.update(metadata)
mac = kwargs.get('mac', self.generate_random_mac())
if default_metadata['interfaces']:
if not metadata or 'interfaces' not in metadata:
default_metadata['interfaces'][0]['mac'] = mac
default_metadata['interfaces'][0]['pxe'] = True
for iface in default_metadata['interfaces'][1:]:
if 'mac' in iface:
iface['mac'] = self.generate_random_mac()
else:
for iface in default_metadata['interfaces']:
if iface.get('pxe'):
if not iface.get('mac'):
iface['mac'] = mac
elif 'mac' not in kwargs:
mac = iface['mac']
if iface.get('mac') == mac:
break
else:
default_metadata['interfaces'][0]['mac'] = mac
default_metadata['interfaces'][0]['pxe'] = True
node_data = {
'mac': mac,
'status': 'discover',
'ip': '10.20.0.130',
'meta': default_metadata
}
if kwargs:
meta = kwargs.pop('meta', None)
node_data.update(kwargs)
if meta:
kwargs['meta'] = meta
if exclude and isinstance(exclude, list):
for ex in exclude:
try:
del node_data[ex]
except KeyError as err:
logger.warning(err)
if api:
resp = self.app.post(
reverse('NodeCollectionHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers,
expect_errors=True
)
self.tester.assertEqual(resp.status_code, expect_http, resp.body)
if expected_error:
self.tester.assertEqual(
resp.json_body["message"],
expected_error
)
if str(expect_http)[0] != "2":
return None
self.tester.assertEqual(resp.status_code, expect_http)
node = resp.json_body
node_db = Node.get_by_uid(node['id'])
if 'interfaces' not in node_data['meta'] \
or not node_data['meta']['interfaces']:
self._set_interfaces_if_not_set_in_meta(
node_db.id,
kwargs.get('meta', None))
self.nodes.append(node_db)
else:
node = Node.create(node_data)
db().commit()
self.nodes.append(node)
return node
def create_nodes(self, count, *args, **kwargs):
"""Helper to generate specific number of nodes."""
return [self.create_node(*args, **kwargs) for _ in xrange(count)]
def create_nodes_w_interfaces_count(self,
nodes_count, if_count=2, **kwargs):
"""Create nodes_count nodes with if_count interfaces each.
Default random MAC is generated for each interface.
"""
nodes = []
for i in range(nodes_count):
meta = self.default_metadata()
if_list = [
{
"name": "eth{0}".format(i),
"mac": self.generate_random_mac(),
}
for i in range(if_count)]
if_list[0]['pxe'] = True
self.set_interfaces_in_meta(meta, if_list)
nodes.append(self.create_node(meta=meta, mac=if_list[0]['mac'],
**kwargs))
return nodes
def create_task(self, **kwargs):
task = Task(**kwargs)
self.db.add(task)
self.db.commit()
return task
def create_attributes(self):
return NodeAttributes()
def create_notification(self, **kwargs):
notif_data = {
"topic": "discover",
"message": "Test message",
"status": "unread",
"datetime": datetime.now()
}
if kwargs:
notif_data.update(kwargs)
notification = Notification()
notification.cluster_id = notif_data.get("cluster_id")
for f, v in notif_data.iteritems():
setattr(notification, f, v)
self.db.add(notification)
self.db.commit()
return notification
def create_node_group(self, api=True, **kwargs):
ng_data = {
'cluster_id': self.clusters[0].id,
'name': 'test_ng'
}
if kwargs:
ng_data.update(kwargs)
if api:
resp = self.app.post(
reverse('NodeGroupCollectionHandler'),
jsonutils.dumps(ng_data),
headers=self.default_headers,
expect_errors=False
)
ng = resp
else:
ng = NodeGroup.create(ng_data)
db().commit()
return ng
def create_plugin(self, api=False, cluster=None, **kwargs):
plugin_data = self.get_default_plugin_metadata()
plugin_data.update(kwargs)
if api:
resp = self.app.post(
reverse('PluginCollectionHandler'),
jsonutils.dumps(plugin_data),
headers=self.default_headers,
expect_errors=False
)
plugin = Plugin.get_by_uid(resp.json_body['id'])
self.plugins.append(plugin)
else:
plugin = Plugin.create(plugin_data)
self.plugins.append(plugin)
# Enable plugin for specific cluster
if cluster:
cluster.plugins.append(plugin)
return plugin
def default_metadata(self):
item = self.find_item_by_pk_model(
self.read_fixtures(("sample_environment",)),
1, 'nailgun.node')
return item.get('fields').get('meta', {})
def generate_random_mac(self):
mac = [randint(0x00, 0x7f) for _ in xrange(6)]
return ':'.join(map(lambda x: "%02x" % x, mac)).lower()
def generate_interfaces_in_meta(self, amount):
nics = []
for i in xrange(amount):
nics.append(
{
'name': 'eth{0}'.format(i),
'mac': self.generate_random_mac(),
'current_speed': 100,
'max_speed': 1000,
'offloading_modes': [
{
'name': 'enabled_offloading_mode',
'state': True,
"sub": [
{
'name': 'disabled_offloading_sub_mode',
'state': False,
"sub": []
}
]
},
{
'name': 'disabled_offloading_mode',
'state': False,
"sub": []
}
]
}
)
self.set_admin_ip_for_for_single_interface(nics)
return {'interfaces': nics}
def _set_interfaces_if_not_set_in_meta(self, node_id, meta):
if not meta or 'interfaces' not in meta:
self._add_interfaces_to_node(node_id)
def _create_interfaces_from_meta(self, node):
# Create interfaces from meta
for interface in node.meta['interfaces']:
interface = NodeNICInterface(
mac=interface.get('mac'),
name=interface.get('name'),
ip_addr=interface.get('ip'),
netmask=interface.get('netmask')
)
self.db.add(interface)
node.nic_interfaces.append(interface)
self.db.flush()
# If node in a cluster then assign networks for all interfaces
if node.cluster_id:
self.network_manager.assign_networks_by_default(node)
# At least one interface should have
# the same MAC as the node itself
if node.nic_interfaces and not \
filter(lambda i: node.mac == i.mac, node.nic_interfaces):
node.nic_interfaces[0].mac = node.mac
self.db.commit()
def _add_interfaces_to_node(self, node_id, count=1):
interfaces = []
node = self.db.query(Node.model).get(node_id)
networks_to_assign = \
list(node.cluster.network_groups) if node.cluster else []
for i in xrange(count):
interface = NodeNICInterface(
node_id=node_id,
name='eth{0}'.format(i),
mac=self.generate_random_mac(),
current_speed=100,
max_speed=1000,
assigned_networks=networks_to_assign
)
self.db.add(interface)
self.db.commit()
interfaces.append(interface)
# assign all networks to first NIC
networks_to_assign = []
return interfaces
def set_admin_ip_for_for_single_interface(self, interfaces):
"""Set admin ip for single interface if it not setted yet."""
ips = [interface.get('ip') for interface in interfaces]
admin_ips = [
ip for ip in ips
if self.network_manager.is_ip_belongs_to_admin_subnet(ip)]
if not admin_ips:
admin_cidr = self.network_manager.get_admin_network_group().cidr
interfaces[0]['ip'] = str(IPNetwork(admin_cidr).ip)
def set_interfaces_in_meta(self, meta, interfaces):
"""Set interfaces in metadata."""
meta['interfaces'] = interfaces
self.set_admin_ip_for_for_single_interface(meta['interfaces'])
return meta['interfaces']
def get_default_roles(self):
return list(self.get_default_roles_metadata().keys())
def get_default_volumes_metadata(self):
return self.read_fixtures(
('openstack',))[0]['fields']['volumes_metadata']
def get_default_roles_metadata(self):
return self.read_fixtures(
('openstack',))[0]['fields']['roles_metadata']
def get_default_networks_metadata(self):
return self.read_fixtures(
('openstack',))[0]['fields']['networks_metadata']
def get_default_attributes_metadata(self):
return self.read_fixtures(
['openstack'])[0]['fields']['attributes_metadata']
def get_default_plugin_env_config(self, **kwargs):
return {
'attributes': {
'{0}_text'.format(kwargs.get('plugin_name', 'plugin_name')): {
'value': kwargs.get('value', 'value'),
'type': kwargs.get('type', 'text'),
'description': kwargs.get('description', 'description'),
'weight': kwargs.get('weight', 25),
'label': kwargs.get('label', 'label')}}}
def get_default_plugin_node_roles_config(self, **kwargs):
node_roles = {
'testing_plugin': {
'name': 'Some plugin role',
'description': 'Some description'
}
}
node_roles.update(kwargs)
return node_roles
def get_default_plugin_volumes_config(self, **kwargs):
volumes = {
'volumes_roles_mapping': {
'testing_plugin': [
{'allocate_size': 'min', 'id': 'os'},
{'allocate_size': 'all', 'id': 'test_volume'}
]
},
'volumes': [
{'id': 'test_volume', 'type': 'vg'}
]
}
volumes.update(kwargs)
return volumes
def get_default_network_roles_config(self, **kwargs):
network_roles = [
{
'id': 'test_network_role',
'default_mapping': 'public',
'properties': {
'subnet': 'true',
'gateway': 'false',
'vip': [
{'name': 'test_vip_1', 'shared': False},
{'name': 'test_vip_2', 'shared': False}
]
}
}
]
network_roles[0].update(kwargs)
return network_roles
def get_default_plugin_deployment_tasks(self, **kwargs):
deployment_tasks = [
{
'id': 'role-name',
'type': 'group',
'role': ['role-name'],
'requires': ['controller'],
'required_for': ['deploy_end'],
'parameters': {
'strategy': {
'type': 'parallel'
}
}
}
]
deployment_tasks[0].update(kwargs)
return deployment_tasks
def get_default_plugin_tasks(self, **kwargs):
default_tasks = [
{
'role': '[test_role]',
'stage': 'post_deployment',
'type': 'puppet',
'parameters': {
'puppet_manifest': '/etc/puppet/modules/test_manigest.pp',
'puppet_modules': '/etc/puppet/modules',
'timeout': 720
}
}
]
default_tasks[0].update(kwargs)
return default_tasks
def get_default_plugin_metadata(self, **kwargs):
sample_plugin = {
'version': '0.1.0',
'name': 'testing_plugin',
'title': 'Test plugin',
'package_version': '1.0.0',
'description': 'Enable to use plugin X for Neutron',
'fuel_version': ['6.0'],
'groups': [],
'licenses': ['License 1'],
'authors': ['Author1'],
'homepage': 'http://some-plugin-url.com/',
'releases': [
{'repository_path': 'repositories/ubuntu',
'version': '2014.2-6.0', 'os': 'ubuntu',
'mode': ['ha', 'multinode'],
'deployment_scripts_path': 'deployment_scripts/'},
{'repository_path': 'repositories/centos',
'version': '2014.2-6.0', 'os': 'centos',
'mode': ['ha', 'multinode'],
'deployment_scripts_path': 'deployment_scripts/'}]}
sample_plugin.update(kwargs)
return sample_plugin
def get_default_vmware_attributes_metadata(self):
return self.read_fixtures(
['openstack'])[0]['fields']['vmware_attributes_metadata']
def upload_fixtures(self, fxtr_names):
for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
with open(fxtr_path, "r") as fxtr_file:
upload_fixture(fxtr_file)
def read_fixtures(self, fxtr_names):
data = []
for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
with open(fxtr_path, "r") as fxtr_file:
try:
data.extend(load_fixture(fxtr_file))
except Exception as exc:
logger.error(
'Error "%s" occurred while loading '
'fixture %s' % (exc, fxtr_path)
)
return data
def fxtr_paths_by_names(self, fxtr_names):
for fxtr in fxtr_names:
for ext in ['json', 'yaml']:
fxtr_path = os.path.join(
self.fixture_dir,
"%s.%s" % (fxtr, ext)
)
if os.path.exists(fxtr_path):
logger.debug(
"Fixture file is found, yielding path: %s",
fxtr_path
)
yield fxtr_path
break
else:
logger.warning(
"Fixture file was not found: %s",
fxtr
)
def find_item_by_pk_model(self, data, pk, model):
for item in data:
if item.get('pk') == pk and item.get('model') == model:
return item
def launch_provisioning_selected(self, nodes_uids=None):
if self.clusters:
if not nodes_uids:
nodes_uids = [n.uid for n in self.clusters[0].nodes]
action_url = reverse(
'ProvisionSelectedNodes',
kwargs={'cluster_id': self.clusters[0].id}
) + '?nodes={0}'.format(','.join(nodes_uids))
resp = self.app.put(
action_url,
'{}',
headers=self.default_headers,
expect_errors=True
)
self.tester.assertEqual(202, resp.status_code)
response = resp.json_body
return self.db.query(Task).filter_by(
uuid=response['uuid']
).first()
else:
raise NotImplementedError(
"Nothing to provision - try creating cluster"
)
def launch_deployment(self):
if self.clusters:
resp = self.app.put(
reverse(
'ClusterChangesHandler',
kwargs={'cluster_id': self.clusters[0].id}),
headers=self.default_headers)
return self.db.query(Task).filter_by(
uuid=resp.json_body['uuid']
).first()
else:
raise NotImplementedError(
"Nothing to deploy - try creating cluster"
)
def stop_deployment(self):
if self.clusters:
resp = self.app.put(
reverse(
'ClusterStopDeploymentHandler',
kwargs={'cluster_id': self.clusters[0].id}),
expect_errors=True,
headers=self.default_headers)
return self.db.query(Task).filter_by(
uuid=resp.json_body['uuid']
).first()
else:
raise NotImplementedError(
"Nothing to stop - try creating cluster"
)
def reset_environment(self, expect_http=202):
if self.clusters:
resp = self.app.put(
reverse(
'ClusterResetHandler',
kwargs={'cluster_id': self.clusters[0].id}),
expect_errors=True,
headers=self.default_headers)
self.tester.assertEqual(resp.status_code, expect_http)
if not str(expect_http).startswith("2"):
return resp.body
return self.db.query(Task).filter_by(
uuid=resp.json_body['uuid']
).first()
else:
raise NotImplementedError(
"Nothing to reset - try creating cluster"
)
def delete_environment(self, expect_http=202):
if self.clusters:
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': self.clusters[0].id}),
expect_errors=True,
headers=self.default_headers)
self.tester.assertEqual(resp.status_code, expect_http)
if not str(expect_http).startswith("2"):
return resp.body
return self.db.query(Task).filter_by(
name=consts.TASK_NAMES.cluster_deletion
).first()
else:
raise NotImplementedError(
"Nothing to delete - try creating cluster"
)
def update_environment(self, pending_release_id=None, expect_http=202):
if self.clusters:
if not pending_release_id:
pending_release_id = self.clusters[0].release_id
self.clusters[0].pending_release_id = pending_release_id
self.db.commit()
resp = self.app.put(
reverse(
'ClusterUpdateHandler',
kwargs={'cluster_id': self.clusters[0].id}),
expect_errors=True,
headers=self.default_headers)
self.tester.assertEqual(expect_http, resp.status_code)
if not str(expect_http).startswith("2"):
return resp.body
return self.db.query(Task).filter_by(
name=consts.TASK_NAMES.update
).first()
else:
raise NotImplementedError(
"Nothing to update - try creating cluster"
)
def launch_verify_networks(self, data=None, expect_errors=False):
if self.clusters:
net_urls = {
"nova_network": {
"config": "NovaNetworkConfigurationHandler",
"verify": "NovaNetworkConfigurationVerifyHandler"
},
"neutron": {
"config": "NeutronNetworkConfigurationHandler",
"verify": "NeutronNetworkConfigurationVerifyHandler"
}
}
provider = self.clusters[0].net_provider
if data:
nets = jsonutils.dumps(data)
else:
resp = self.app.get(
reverse(
net_urls[provider]["config"],
kwargs={'cluster_id': self.clusters[0].id}
),
headers=self.default_headers
)
self.tester.assertEqual(200, resp.status_code)
nets = resp.body
resp = self.app.put(
reverse(
net_urls[provider]["verify"],
kwargs={'cluster_id': self.clusters[0].id}),
nets,
headers=self.default_headers,
expect_errors=expect_errors,
)
if expect_errors:
return resp
else:
task_uuid = resp.json_body['uuid']
return self.db.query(Task).filter_by(uuid=task_uuid).first()
else:
raise NotImplementedError(
"Nothing to verify - try creating cluster"
)
def make_bond_via_api(self, bond_name, bond_mode, nic_names, node_id=None,
bond_properties=None, interface_properties=None):
if not node_id:
node_id = self.nodes[0]["id"]
resp = self.app.get(
reverse("NodeNICsHandler",
kwargs={"node_id": node_id}),
headers=self.default_headers)
self.tester.assertEqual(resp.status_code, 200)
data = resp.json_body
nics = self.db.query(NodeNICInterface).filter(
NodeNICInterface.name.in_(nic_names)
).filter(
NodeNICInterface.node_id == node_id
)
self.tester.assertEqual(nics.count(), len(nic_names))
assigned_nets, slaves = [], []
for nic in data:
if nic['name'] in nic_names:
assigned_nets.extend(nic['assigned_networks'])
slaves.append({'name': nic['name']})
nic['assigned_networks'] = []
bond_dict = {
"name": bond_name,
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": bond_mode,
"slaves": slaves,
"assigned_networks": assigned_nets
}
if bond_properties:
bond_dict["bond_properties"] = bond_properties
if interface_properties:
bond_dict["interface_properties"] = interface_properties
data.append(bond_dict)
resp = self.node_nics_put(node_id, data)
self.tester.assertEqual(resp.status_code, 200)
def refresh_nodes(self):
for n in self.nodes[:]:
try:
self.db.add(n)
self.db.refresh(n)
except Exception:
self.nodes.remove(n)
self.db.flush()
def refresh_clusters(self):
for n in self.clusters[:]:
try:
self.db.refresh(n)
except Exception:
self.nodes.remove(n)
def _wait_task(self, task, timeout, message):
timer = time.time()
while task.status == 'running':
self.db.refresh(task)
if time.time() - timer > timeout:
raise Exception(
"Task '{0}' seems to be hanged".format(
task.name
)
)
time.sleep(1)
self.tester.assertEqual(task.progress, 100)
if isinstance(message, type(re.compile("regexp"))):
self.tester.assertIsNotNone(re.match(message, task.message))
elif isinstance(message, str):
self.tester.assertEqual(task.message, message)
def wait_ready(self, task, timeout=60, message=None):
self._wait_task(task, timeout, message)
self.tester.assertEqual(task.status, 'ready')
def wait_error(self, task, timeout=60, message=None):
self._wait_task(task, timeout, message)
self.tester.assertEqual(task.status, 'error')
def wait_for_nodes_status(self, nodes, status):
def check_statuses():
self.refresh_nodes()
nodes_with_status = filter(
lambda x: x.status in status,
nodes)
return len(nodes) == len(nodes_with_status)
self.wait_for_true(
check_statuses,
error_message='Something is wrong with the node statuses')
def wait_for_true(self, check, args=[], kwargs={},
timeout=60, error_message='Timeout error'):
start_time = time.time()
while True:
result = check(*args, **kwargs)
if result:
return result
if time.time() - start_time > timeout:
raise TimeoutError(error_message)
time.sleep(0.1)
def _api_get(self, method, instance_id, expect_errors=False):
return self.app.get(
reverse(method,
kwargs=instance_id),
headers=self.default_headers,
expect_errors=expect_errors)
def _api_put(self, method, instance_id, data, expect_errors=False):
return self.app.put(
reverse(method,
kwargs=instance_id),
jsonutils.dumps(data),
headers=self.default_headers,
expect_errors=expect_errors)
def nova_networks_get(self, cluster_id, expect_errors=False):
return self._api_get('NovaNetworkConfigurationHandler',
{'cluster_id': cluster_id},
expect_errors)
def nova_networks_put(self, cluster_id, networks, expect_errors=False):
return self._api_put('NovaNetworkConfigurationHandler',
{'cluster_id': cluster_id},
networks,
expect_errors)
def neutron_networks_get(self, cluster_id, expect_errors=False):
return self._api_get('NeutronNetworkConfigurationHandler',
{'cluster_id': cluster_id},
expect_errors)
def neutron_networks_put(self, cluster_id, networks, expect_errors=False):
return self._api_put('NeutronNetworkConfigurationHandler',
{'cluster_id': cluster_id},
networks,
expect_errors)
def cluster_changes_put(self, cluster_id, expect_errors=False):
return self._api_put('ClusterChangesHandler',
{'cluster_id': cluster_id},
[],
expect_errors)
def node_nics_get(self, node_id, expect_errors=False):
return self._api_get('NodeNICsHandler',
{'node_id': node_id},
expect_errors)
def node_nics_put(self, node_id, interfaces, expect_errors=False):
return self._api_put('NodeNICsHandler',
{'node_id': node_id},
interfaces,
expect_errors)
def node_collection_nics_put(self, nodes,
expect_errors=False):
return self._api_put('NodeCollectionNICsHandler',
{},
nodes,
expect_errors)
def _create_network_group(self, expect_errors=False, cluster=None,
**kwargs):
if not cluster:
cluster = self.clusters[0]
ng = {
"release": cluster.release.id,
"name": "external",
"vlan_start": 50,
"cidr": "10.3.0.0/24",
"gateway": "10.3.0.1",
"group_id": Cluster.get_default_group(cluster).id,
"meta": {
"notation": consts.NETWORK_NOTATION.cidr,
"map_priority": 2}
}
ng.update(kwargs)
resp = self.app.post(
reverse('NetworkGroupCollectionHandler'),
jsonutils.dumps(ng),
headers=self.default_headers,
expect_errors=expect_errors,
)
return resp
def _update_network_group(self, ng_data, expect_errors=False):
return self.app.put(
reverse(
'NetworkGroupHandler',
kwargs={'obj_id': ng_data['id']}
),
jsonutils.dumps(ng_data),
headers=self.default_headers,
expect_errors=expect_errors
)
class BaseTestCase(TestCase):
fixtures = ['admin_network', 'master_node_settings']
def __init__(self, *args, **kwargs):
super(BaseTestCase, self).__init__(*args, **kwargs)
self.default_headers = {
"Content-Type": "application/json"
}
@classmethod
def setUpClass(cls):
cls.app = app.TestApp(
build_app(db_driver=test_db_driver).wsgifunc(
ConnectionMonitorMiddleware)
)
syncdb()
@classmethod
def _set_up_check_repo_patcher(cls):
resp_mock = mock.Mock()
resp_mock.status_code = 200
resp_mock.url = ''
responses_mock = mock.Mock(return_value=[resp_mock])
cls.repo_check_patcher = mock.patch(
('nailgun.task.task.CheckRepositoryConnectionFromMasterNodeTask'
'._get_responses'),
new=responses_mock
)
cls.repo_check_patcher.start()
def setUp(self):
self.db = db
flush()
self.env = EnvironmentManager(app=self.app, session=self.db)
self.env.upload_fixtures(self.fixtures)
def tearDown(self):
self.db.remove()
def assertNotRaises(self, exception, method, *args, **kwargs):
try:
method(*args, **kwargs)
except exception:
self.fail('Exception "{0}" raised.'.format(exception))
def assertRaisesWithMessage(self, exc, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as inst:
self.assertIsInstance(inst, exc)
self.assertEqual(inst.message, msg)
else:
# TestCase has no assertFail(); fail explicitly when nothing is raised
self.fail('Exception "{0}" was not raised.'.format(exc))
def assertValidJSON(self, data):
self.assertNotRaises(ValueError, jsonutils.loads, data)
def datadiff(self, node1, node2, path=None, ignore_keys=[],
compare_sorted=False):
if path is None:
path = []
def fail(msg, failed_path):
self.fail('Path "{0}": {1}'.format("->".join(failed_path), msg))
if not isinstance(node1, dict) or not isinstance(node2, dict):
if isinstance(node1, (list, tuple)):
newpath = path[:]
if compare_sorted:
node1 = sorted(node1)
node2 = sorted(node2)
for i, keys in enumerate(izip(node1, node2)):
newpath.append(str(i))
self.datadiff(keys[0], keys[1], newpath, ignore_keys,
compare_sorted)
newpath.pop()
elif node1 != node2:
err = "Values differ: {0} != {1}".format(
str(node1),
str(node2)
)
fail(err, path)
else:
newpath = path[:]
if len(node1) != len(node2):
fail('Nodes have different keys number: {0} != {1}'.format(
len(node1), len(node2)), path)
for key1, key2 in zip(
sorted(node1),
sorted(node2)
):
if key1 != key2:
err = "Keys differ: {0} != {1}".format(
str(key1),
str(key2)
)
fail(err, path)
if key1 in ignore_keys:
continue
newpath.append(key1)
self.datadiff(node1[key1], node2[key2], newpath, ignore_keys,
compare_sorted)
newpath.pop()
class BaseIntegrationTest(BaseTestCase):
def tearDown(self):
self._wait_for_threads()
super(BaseIntegrationTest, self).tearDown()
@classmethod
def setUpClass(cls):
super(BaseIntegrationTest, cls).setUpClass()
nailgun.task.task.logs_utils.prepare_syslog_dir = mock.Mock()
cls._set_up_check_repo_patcher()
@classmethod
def tearDownClass(cls):
cls.repo_check_patcher.stop()
super(BaseIntegrationTest, cls).tearDownClass()
def _wait_for_threads(self):
# wait for fake task thread termination
import threading
for thread in threading.enumerate():
if thread is not threading.currentThread():
if hasattr(thread, "rude_join"):
timer = time.time()
timeout = 25
thread.rude_join(timeout)
if time.time() - timer > timeout:
raise Exception(
'{0} seconds is not enough'
' - possible hanging'.format(
timeout
)
)
class BaseAuthenticationIntegrationTest(BaseIntegrationTest):
@classmethod
def setUpClass(cls):
super(BaseAuthenticationIntegrationTest, cls).setUpClass()
cls.app = app.TestApp(build_app(db_driver=test_db_driver).wsgifunc(
ConnectionMonitorMiddleware, NailgunFakeKeystoneAuthMiddleware))
syncdb()
def get_auth_token(self):
resp = self.app.post(
'/keystone/v2.0/tokens',
jsonutils.dumps({
'auth': {
'tenantName': 'admin',
'passwordCredentials': {
'username': settings.FAKE_KEYSTONE_USERNAME,
'password': settings.FAKE_KEYSTONE_PASSWORD,
},
},
})
)
return resp.json['access']['token']['id'].encode('utf-8')
class BaseUnitTest(TestCase):
pass
def fake_tasks(fake_rpc=True,
mock_rpc=True,
tick_count=100,
tick_interval=0,
**kwargs):
def wrapper(func):
func = mock.patch(
'nailgun.task.task.settings.FAKE_TASKS',
True
)(func)
func = mock.patch(
'nailgun.task.fake.settings.FAKE_TASKS_TICK_COUNT',
tick_count
)(func)
func = mock.patch(
'nailgun.task.fake.settings.FAKE_TASKS_TICK_INTERVAL',
tick_interval
)(func)
if fake_rpc:
func = mock.patch(
'nailgun.task.task.rpc.cast',
partial(
nailgun.task.task.fake_cast,
**kwargs
)
)(func)
elif mock_rpc:
func = mock.patch(
'nailgun.task.task.rpc.cast',
**kwargs
)(func)
return func
return wrapper
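# Editor's sketch (hypothetical test, not part of the original suite):
# fake_tasks patches the FAKE_TASKS settings and rpc.cast so a decorated test
# runs the fake task workflow instead of a real RPC backend, e.g.:
#
#     class TestFakeDeployment(BaseIntegrationTest):
#         @fake_tasks(tick_interval=1, tick_count=50)
#         def test_deploy_cluster(self):
#             task = self.env.launch_deployment()  # assumed env helper
#             self.env.wait_ready(task)            # assumed env helper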
# this function is for development and troubleshooting purposes
def datadiff(data1, data2, branch, p=True):
def iterator(data1, data2):
if isinstance(data1, (list,)) and isinstance(data2, (list,)):
return xrange(max(len(data1), len(data2)))
elif isinstance(data1, (dict,)) and isinstance(data2, (dict,)):
return (set(data1.keys()) | set(data2.keys()))
else:
raise TypeError
diff = []
if data1 != data2:
try:
it = iterator(data1, data2)
except Exception:
return [(branch, data1, data2)]
for k in it:
newbranch = branch[:]
newbranch.append(k)
if p:
print("Comparing branch: %s" % newbranch)
try:
try:
v1 = data1[k]
except (KeyError, IndexError):
if p:
print("data1 seems does not have key = %s" % k)
diff.append((newbranch, None, data2[k]))
continue
try:
v2 = data2[k]
except (KeyError, IndexError):
if p:
print("data2 seems does not have key = %s" % k)
diff.append((newbranch, data1[k], None))
continue
except Exception:
if p:
print("data1 and data2 cannot be compared on "
"branch: %s" % newbranch)
                diff.append((newbranch, data1, data2))
                return diff
else:
if v1 != v2:
if p:
print("data1 and data2 do not match "
"each other on branch: %s" % newbranch)
# print("data1 = %s" % data1)
print("v1 = %s" % v1)
# print("data2 = %s" % data2)
print("v2 = %s" % v2)
diff.extend(datadiff(v1, v2, newbranch))
return diff
def reflect_db_metadata():
meta = sa.MetaData()
meta.reflect(bind=db.get_bind())
return meta
def get_nodegroup_network_schema_template(template, group_name):
custom_template = template['adv_net_template'][group_name]
custom_template_obj = NetworkTemplate(jsonutils.dumps(custom_template))
node_custom_template = custom_template_obj.safe_substitute(
custom_template['nic_mapping']['default'])
return jsonutils.loads(node_custom_template)['network_scheme']
class BaseAlembicMigrationTest(TestCase):
def setUp(self):
super(BaseAlembicMigrationTest, self).setUp()
self.meta = reflect_db_metadata()
def tearDown(self):
db.remove()
super(BaseAlembicMigrationTest, self).tearDown()
class BaseMasterNodeSettignsTest(BaseIntegrationTest):
def setUp(self):
super(BaseMasterNodeSettignsTest, self).setUp()
self.create_master_node_settings()
master_node_settings_template = {
"settings": {
"statistics": {
"send_anonymous_statistic": {
"type": "checkbox",
"value": True,
"label": "statistics.setting_labels."
"send_anonymous_statistic",
"weight": 10
},
"send_user_info": {
"type": "checkbox",
"value": False,
"label": "statistics.setting_labels.send_user_info",
"weight": 20,
"restrictions": [
"fuel_settings:statistics."
"send_anonymous_statistic.value == false",
{
"condition":
"not ('mirantis' in version:feature_groups)",
"action": "hide"
}
]
},
"name": {
"type": "text",
"value": "",
"label": "statistics.setting_labels.name",
"weight": 30,
"regex": {
"source": "\S",
"error": "statistics.errors.name"
},
"restrictions": [
"fuel_settings:statistics."
"send_anonymous_statistic.value == false",
"fuel_settings:statistics."
"send_user_info.value == false",
{
"condition":
"not ('mirantis' in version:feature_groups)",
"action": "hide"
}
]
},
"email": {
"type": "text",
"value": "",
"label": "statistics.setting_labels.email",
"weight": 40,
"regex": {
"source": "\S",
"error": "statistics.errors.email"
},
"restrictions": [
"fuel_settings:statistics."
"send_anonymous_statistic.value == false",
"fuel_settings:statistics."
"send_user_info.value == false",
{
"condition":
"not ('mirantis' in version:feature_groups)",
"action": "hide"
}
]
},
"company": {
"type": "text",
"value": "",
"label": "statistics.setting_labels.company",
"weight": 50,
"regex": {
"source": "\S",
"error": "statistics.errors.company"
},
"restrictions": [
"fuel_settings:statistics."
"send_anonymous_statistic.value == false",
"fuel_settings:statistics."
"send_user_info.value == false",
{
"condition":
"not ('mirantis' in version:feature_groups)",
"action": "hide"
}
]
},
"user_choice_saved": {
"type": "hidden",
"value": False
}
}
}
}
def create_master_node_settings(self):
self.master_node_settings = {
'master_node_uid': str(uuid.uuid4()),
}
self.master_node_settings.update(self.master_node_settings_template)
MasterNodeSettings.create(self.master_node_settings)
self.db.commit()
class BaseValidatorTest(TestCase):
"""JSON-schema validation policy:
1) All required properties are present;
2) No additional properties allowed;
3) Item has correct type.
"""
validator = None
def serialize(self, data):
"""Serialize object to a string.
:param data: object being serialized
:return: stringified JSON-object
"""
return jsonutils.dumps(data)
def get_invalid_data_context(self, data, *args):
"""Returns context object of raised InvalidData exception.
:return: context of 'errors.InvalidData'
"""
serialized_data = self.serialize(data)
with self.assertRaises(errors.InvalidData) as context:
self.validator(serialized_data, *args)
return context
def assertRaisesAdditionalProperty(self, obj, key):
context = self.get_invalid_data_context(obj)
self.assertIn(
"Additional properties are not allowed".format(key),
context.exception.message)
self.assertIn(
"'{0}' was unexpected".format(key),
context.exception.message)
def assertRaisesRequiredProperty(self, obj, key):
context = self.get_invalid_data_context(obj)
self.assertIn(
"Failed validating 'required' in schema",
context.exception.message)
self.assertIn(
"'{0}' is a required property".format(key),
context.exception.message)
def assertRaisesInvalidType(self, obj, value, expected_value):
context = self.get_invalid_data_context(obj)
self.assertIn(
"Failed validating 'type' in schema",
context.exception.message)
self.assertIn(
"{0} is not of type {1}".format(value, expected_value),
context.exception.message)
def assertRaisesInvalidAnyOf(self, obj, passed_value, instance):
context = self.get_invalid_data_context(obj)
self.assertIn(
"Failed validating 'anyOf' in schema",
context.exception.message)
err_msg = "{0} is not valid under any of the given schemas"
self.assertIn(
err_msg.format(passed_value),
context.exception.message)
self.assertIn(
"On instance{0}".format(instance),
context.exception.message)
def assertRaisesInvalidEnum(self, obj, value, expected_value):
context = self.get_invalid_data_context(obj)
self.assertIn(
"Failed validating 'enum' in schema",
context.exception.message)
self.assertIn(
"{0} is not one of {1}".format(value, expected_value),
context.exception.message)
def assertRaisesTooLong(self, obj, stringified_values):
context = self.get_invalid_data_context(obj)
self.assertIn(
"{0} is too long".format(stringified_values),
context.exception.message)
def assertRaisesTooShort(self, obj, stringified_values):
context = self.get_invalid_data_context(obj)
self.assertIn(
"{0} is too short".format(stringified_values),
context.exception.message)
def assertRaisesNonUnique(self, obj, stringified_values):
context = self.get_invalid_data_context(obj)
self.assertIn(
"{0} has non-unique elements".format(stringified_values),
context.exception.message)
def assertRaisesLessThanMinimum(self, obj, stringified_values):
context = self.get_invalid_data_context(obj)
self.assertIn(
"{0} is less than the minimum".format(stringified_values),
context.exception.message)
def assertRaisesGreaterThanMaximum(self, obj, stringified_values):
context = self.get_invalid_data_context(obj)
self.assertIn(
"{0} is greater than the maximum".format(stringified_values),
context.exception.message)
def assertRaisesNotMatchPattern(self, obj, stringified_values):
context = self.get_invalid_data_context(obj)
self.assertIn(
"Failed validating 'pattern' in schema",
context.exception.message)
self.assertIn(
"{0} does not match".format(stringified_values),
context.exception.message)
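    # Editor's sketch (hypothetical subclass, not part of this module): a
    # concrete test sets `validator` to a jsonschema-backed callable and
    # reuses the helpers above, e.g.:
    #
    #     class TestMyValidator(BaseValidatorTest):
    #         validator = staticmethod(MyValidator.validate)  # assumed validator
    #
    #         def test_rejects_unknown_key(self):
    #             self.assertRaisesAdditionalProperty({"bogus": 1}, "bogus")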
|
SmartInfrastructures/fuel-web-dev
|
nailgun/nailgun/test/base.py
|
Python
|
apache-2.0
| 56,661
|
# -*- coding: utf-8 -*-
"""
The module :mod:`openerp.tests.common` provides unittest2 test cases and a few
helpers and classes to write tests.
"""
import errno
import glob
import json
import logging
import os
import select
import subprocess
import threading
import time
import unittest2
import urllib2
import xmlrpclib
from contextlib import contextmanager
from datetime import datetime, timedelta
import werkzeug
import openerp
from openerp import api
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
# The openerp library is supposed already configured.
ADDONS_PATH = openerp.tools.config['addons_path']
HOST = '127.0.0.1'
PORT = openerp.tools.config['xmlrpc_port']
# Useless constant, tests are aware of the content of demo data
ADMIN_USER_ID = openerp.SUPERUSER_ID
def get_db_name():
db = openerp.tools.config['db_name']
# If the database name is not provided on the command-line,
# use the one on the thread (which means if it is provided on
# the command-line, this will break when installing another
# database from XML-RPC).
if not db and hasattr(threading.current_thread(), 'dbname'):
return threading.current_thread().dbname
return db
def at_install(flag):
""" Sets the at-install state of a test, the flag is a boolean specifying
whether the test should (``True``) or should not (``False``) run during
module installation.
By default, tests are run right after installing the module, before
starting the installation of the next module.
"""
def decorator(obj):
obj.at_install = flag
return obj
return decorator
def post_install(flag):
""" Sets the post-install state of a test. The flag is a boolean
specifying whether the test should or should not run after a set of
module installations.
By default, tests are *not* run after installation of all modules in the
current installation set.
"""
def decorator(obj):
obj.post_install = flag
return obj
return decorator
class BaseCase(unittest2.TestCase):
"""
Subclass of TestCase for common OpenERP-specific code.
This class is abstract and expects self.registry, self.cr and self.uid to be
initialized by subclasses.
"""
def cursor(self):
return self.registry.cursor()
def ref(self, xid):
""" Returns database ID for the provided :term:`external identifier`,
shortcut for ``get_object_reference``
:param xid: fully-qualified :term:`external identifier`, in the form
:samp:`{module}.{identifier}`
:raise: ValueError if not found
:returns: registered id
"""
assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
module, xid = xid.split('.')
_, id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module, xid)
return id
def browse_ref(self, xid):
""" Returns a record object for the provided
:term:`external identifier`
:param xid: fully-qualified :term:`external identifier`, in the form
:samp:`{module}.{identifier}`
:raise: ValueError if not found
:returns: :class:`~openerp.models.BaseModel`
"""
assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
module, xid = xid.split('.')
return self.registry('ir.model.data').get_object(self.cr, self.uid, module, xid)
@contextmanager
def _assertRaises(self, exception):
""" Context manager that clears the environment upon failure. """
with super(BaseCase, self).assertRaises(exception) as cm:
with self.env.clear_upon_failure():
yield cm
def assertRaises(self, exception, func=None, *args, **kwargs):
if func:
with self._assertRaises(exception):
func(*args, **kwargs)
else:
return self._assertRaises(exception)
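    # Editor's sketch: this override keeps unittest's two calling conventions
    # while clearing the ORM environment upon failure, e.g.:
    #
    #     self.assertRaises(ValueError, int, "not a number")  # callable form
    #     with self.assertRaises(ValueError):                 # context form
    #         int("not a number")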
class TransactionCase(BaseCase):
""" TestCase in which each test method is run in its own transaction,
and with its own cursor. The transaction is rolled back and the cursor
is closed after each test.
"""
def setUp(self):
self.registry = RegistryManager.get(get_db_name())
#: current transaction's cursor
self.cr = self.cursor()
self.uid = openerp.SUPERUSER_ID
#: :class:`~openerp.api.Environment` for the current test case
self.env = api.Environment(self.cr, self.uid, {})
def tearDown(self):
# rollback and close the cursor, and reset the environments
self.env.reset()
self.cr.rollback()
self.cr.close()
class SingleTransactionCase(BaseCase):
""" TestCase in which all test methods are run in the same transaction,
the transaction is started with the first test method and rolled back at
the end of the last.
"""
@classmethod
def setUpClass(cls):
cls.registry = RegistryManager.get(get_db_name())
cls.cr = cls.registry.cursor()
cls.uid = openerp.SUPERUSER_ID
cls.env = api.Environment(cls.cr, cls.uid, {})
@classmethod
def tearDownClass(cls):
# rollback and close the cursor, and reset the environments
cls.env.reset()
cls.cr.rollback()
cls.cr.close()
class RedirectHandler(urllib2.HTTPRedirectHandler):
"""
HTTPRedirectHandler is predicated upon HTTPErrorProcessor being used and
    works by intercepting 3xx "errors".
    Inherit from it to handle 3xx non-error responses instead, as we're not
    using the error processor.
"""
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if 300 <= code < 400:
return self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HttpCase(TransactionCase):
""" Transactional HTTP TestCase with url_open and phantomjs helpers.
"""
def __init__(self, methodName='runTest'):
super(HttpCase, self).__init__(methodName)
# v8 api with correct xmlrpc exception handling.
self.xmlrpc_url = url_8 = 'http://%s:%d/xmlrpc/2/' % (HOST, PORT)
self.xmlrpc_common = xmlrpclib.ServerProxy(url_8 + 'common')
self.xmlrpc_db = xmlrpclib.ServerProxy(url_8 + 'db')
self.xmlrpc_object = xmlrpclib.ServerProxy(url_8 + 'object')
def setUp(self):
super(HttpCase, self).setUp()
self.registry.enter_test_mode()
# setup a magic session_id that will be rollbacked
self.session = openerp.http.root.session_store.new()
self.session_id = self.session.sid
self.session.db = get_db_name()
openerp.http.root.session_store.save(self.session)
# setup an url opener helper
self.opener = urllib2.OpenerDirector()
self.opener.add_handler(urllib2.UnknownHandler())
self.opener.add_handler(urllib2.HTTPHandler())
self.opener.add_handler(urllib2.HTTPSHandler())
self.opener.add_handler(urllib2.HTTPCookieProcessor())
self.opener.add_handler(RedirectHandler())
self.opener.addheaders.append(('Cookie', 'session_id=%s' % self.session_id))
def tearDown(self):
self.registry.leave_test_mode()
super(HttpCase, self).tearDown()
def url_open(self, url, data=None, timeout=10):
if url.startswith('/'):
url = "http://localhost:%s%s" % (PORT, url)
return self.opener.open(url, data, timeout)
def authenticate(self, user, password):
if user is not None:
url = '/login?%s' % werkzeug.urls.url_encode({'db': get_db_name(),'login': user, 'key': password})
auth = self.url_open(url)
assert auth.getcode() < 400, "Auth failure %d" % auth.getcode()
def phantom_poll(self, phantom, timeout):
""" Phantomjs Test protocol.
Use console.log in phantomjs to output test results:
- for a success: console.log("ok")
- for an error: console.log("error")
Other lines are relayed to the test log.
"""
t0 = datetime.now()
td = timedelta(seconds=timeout)
buf = bytearray()
while True:
# timeout
self.assertLess(datetime.now() - t0, td,
"PhantomJS tests should take less than %s seconds" % timeout)
# read a byte
try:
ready, _, _ = select.select([phantom.stdout], [], [], 0.5)
except select.error, e:
# In Python 2, select.error has no relation to IOError or
# OSError, and no errno/strerror/filename, only a pair of
# unnamed arguments (matching errno and strerror)
err, _ = e.args
if err == errno.EINTR:
continue
raise
if ready:
s = phantom.stdout.read(1)
if not s:
break
buf.append(s)
# process lines
if '\n' in buf:
line, buf = buf.split('\n', 1)
line = str(line)
# relay everything from console.log, even 'ok' or 'error...' lines
_logger.info("phantomjs: %s", line)
if line == "ok":
break
if line.startswith("error"):
line_ = line[6:]
                    # when an error occurs the execution stack may be sent as JSON
try:
line_ = json.loads(line_)
except ValueError:
pass
self.fail(line_ or "phantomjs test failed")
def phantom_run(self, cmd, timeout):
_logger.info('phantom_run executing %s', ' '.join(cmd))
ls_glob = os.path.expanduser('~/.qws/share/data/Ofi Labs/PhantomJS/http_localhost_%s.*'%PORT)
for i in glob.glob(ls_glob):
_logger.info('phantomjs unlink localstorage %s', i)
os.unlink(i)
try:
phantom = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None)
except OSError:
raise unittest2.SkipTest("PhantomJS not found")
try:
self.phantom_poll(phantom, timeout)
finally:
# kill phantomjs if phantom.exit() wasn't called in the test
if phantom.poll() is None:
phantom.terminate()
phantom.wait()
self._wait_remaining_requests()
_logger.info("phantom_run execution finished")
def _wait_remaining_requests(self):
t0 = int(time.time())
for thread in threading.enumerate():
if thread.name.startswith('openerp.service.http.request.'):
while thread.isAlive():
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
t1 = int(time.time())
if t0 != t1:
_logger.info('remaining requests')
openerp.tools.misc.dumpstacks()
t0 = t1
def phantom_jsfile(self, jsfile, timeout=60, **kw):
options = {
'timeout' : timeout,
'port': PORT,
'db': get_db_name(),
'session_id': self.session_id,
}
options.update(kw)
phantomtest = os.path.join(os.path.dirname(__file__), 'phantomtest.js')
# phantom.args[0] == phantomtest path
# phantom.args[1] == options
cmd = [
'phantomjs',
jsfile, phantomtest, json.dumps(options)
]
self.phantom_run(cmd, timeout)
def phantom_js(self, url_path, code, ready="window", login=None, timeout=60, **kw):
""" Test js code running in the browser
        - optionally log in as 'login'
- load page given by url_path
- wait for ready object to be available
- eval(code) inside the page
        To signal success, the test should do:
            console.log('ok')
        To signal failure:
            console.log('error')
        If neither happens before the timeout, the test fails.
"""
options = {
'port': PORT,
'db': get_db_name(),
'url_path': url_path,
'code': code,
'ready': ready,
'timeout' : timeout,
'login' : login,
'session_id': self.session_id,
}
options.update(kw)
options.setdefault('password', options.get('login'))
phantomtest = os.path.join(os.path.dirname(__file__), 'phantomtest.js')
cmd = ['phantomjs', phantomtest, json.dumps(options)]
self.phantom_run(cmd, timeout)
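    # Editor's sketch of a hypothetical call (not from the original module):
    #
    #     self.phantom_js('/web', "console.log('ok')", login='admin')
    #
    # The page is loaded with the test session, `code` is evaluated once
    # `ready` is truthy, and the console protocol above decides pass/fail.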
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
numerigraphe/odoo
|
openerp/tests/common.py
|
Python
|
agpl-3.0
| 13,126
|
"""
dj-stripe SubscriptionSchedule model tests.
"""
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from djstripe.enums import SubscriptionScheduleStatus
from djstripe.models import SubscriptionSchedule
from . import (
FAKE_CUSTOMER_II,
FAKE_SUBSCRIPTION_SCHEDULE,
AssertStripeFksMixin,
datetime_to_unix,
)
class SubscriptionScheduleTest(AssertStripeFksMixin, TestCase):
def setUp(self):
user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
self.customer = FAKE_CUSTOMER_II.create_for_user(user)
self.default_expected_blank_fks = {
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
"djstripe.SubscriptionSchedule.released_subscription",
}
@patch(
"stripe.Customer.retrieve",
return_value=deepcopy(FAKE_CUSTOMER_II),
autospec=True,
)
def test_sync_from_stripe_data(self, customer_retrieve_mock):
canceled_schedule_fake = deepcopy(FAKE_SUBSCRIPTION_SCHEDULE)
canceled_schedule_fake["canceled_at"] = 1624553655
canceled_schedule_fake["status"] = SubscriptionScheduleStatus.canceled
schedule = SubscriptionSchedule.sync_from_stripe_data(canceled_schedule_fake)
self.assert_fks(schedule, expected_blank_fks=self.default_expected_blank_fks)
self.assertEqual(datetime_to_unix(schedule.canceled_at), 1624553655)
@patch(
"stripe.Customer.retrieve",
return_value=deepcopy(FAKE_CUSTOMER_II),
autospec=True,
)
def test___str__(self, customer_retrieve_mock):
schedule = SubscriptionSchedule.sync_from_stripe_data(
deepcopy(FAKE_SUBSCRIPTION_SCHEDULE)
)
self.assertEqual(f"<id={FAKE_SUBSCRIPTION_SCHEDULE['id']}>", str(schedule))
self.assert_fks(schedule, expected_blank_fks=self.default_expected_blank_fks)
|
pydanny/dj-stripe
|
tests/test_subscription_schedule.py
|
Python
|
bsd-3-clause
| 2,029
|
# Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.connection import *
from facette.utils import *
from facette.v1.collection import Collection
import json
class Collections:
def __init__(self, c):
self.root = "/api/v1/library/collections/"
self.c = c
def list(self, exclude=None, filter=None, limit=None, offset=None, parent=None):
payload = {}
payload_add(payload, 'exclude', exclude)
payload_add(payload, 'filter', filter)
payload_add(payload, 'limit', limit)
payload_add(payload, 'offset', offset)
payload_add(payload, 'parent', parent)
code, js = self.c.get(self.root, payload)
collections = []
if code == RESULT_OK:
for x in js:
c = Collection(x)
collections.append(c)
return collections
def get(self, name):
code, js = self.c.get(self.root + name)
c = None
if code == RESULT_OK:
c = Collection(js)
return c
def add(self, c):
payload = str(c)
code, js = self.c.post(self.root, payload)
return facette_http_status(code, RESULT_CREATED, js)
def update(self, id, c):
payload = str(c)
code, js = self.c.put(self.root + id, payload)
return facette_http_status(code, RESULT_OK, js)
def delete(self, id):
code, js = self.c.delete(self.root + id)
return facette_http_status(code, RESULT_OK, js)
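# Editor's sketch (assumed connection object; see facette.connection):
#
#     c = Connection("http://localhost:12003")  # hypothetical server URL
#     collections = Collections(c)
#     for col in collections.list(filter="glob:web*", limit=10):
#         print(col)
#     code = collections.delete("some-collection-id")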
|
OpenTouch/python-facette
|
src/facette/v1/collections.py
|
Python
|
apache-2.0
| 2,084
|
from auth import *
from documents import *
from notes import *
from topics import *
|
CENDARI/editorsnotes
|
editorsnotes/main/models/__init__.py
|
Python
|
agpl-3.0
| 84
|
from django.core.management.base import BaseCommand
from attachments.models import Attachment, get_file_suffix, \
is_img
class Command(BaseCommand):
help = "update attachment's base info."
def handle(self, **options):
attachments = Attachment.objects.all()
for o in attachments:
if o.file.storage.exists(o.file.name):
o.suffix = get_file_suffix(o.file.name)
o.is_img = is_img(o.suffix)
o.file_size = o.file.size
o.save()
else:
o.file_size = -1
o.save()
|
pombredanne/django-lb-attachments
|
attachments/management/commands/update_attachments.py
|
Python
|
bsd-3-clause
| 612
|
import numpy as np
import copy
from pybullet_utils.logger import Logger
import inspect as inspect
from pybullet_envs.deep_mimic.env.env import Env
import pybullet_utils.math_util as MathUtil
class ReplayBuffer(object):
TERMINATE_KEY = 'terminate'
PATH_START_KEY = 'path_start'
PATH_END_KEY = 'path_end'
def __init__(self, buffer_size):
assert buffer_size > 0
self.buffer_size = buffer_size
self.total_count = 0
self.buffer_head = 0
self.buffer_tail = MathUtil.INVALID_IDX
self.num_paths = 0
self._sample_buffers = dict()
self.buffers = None
self.clear()
return
def sample(self, n):
curr_size = self.get_current_size()
assert curr_size > 0
idx = np.empty(n, dtype=int)
# makes sure that the end states are not sampled
for i in range(n):
while True:
curr_idx = np.random.randint(0, curr_size, size=1)[0]
curr_idx += self.buffer_tail
curr_idx = np.mod(curr_idx, self.buffer_size)
if not self.is_path_end(curr_idx):
break
idx[i] = curr_idx
return idx
def sample_filtered(self, n, key):
assert key in self._sample_buffers
curr_buffer = self._sample_buffers[key]
idx = curr_buffer.sample(n)
return idx
def count_filtered(self, key):
curr_buffer = self._sample_buffers[key]
return curr_buffer.count
def get(self, key, idx):
return self.buffers[key][idx]
def get_all(self, key):
return self.buffers[key]
def get_idx_filtered(self, key):
assert key in self._sample_buffers
curr_buffer = self._sample_buffers[key]
idx = curr_buffer.slot_to_idx[:curr_buffer.count]
return idx
def get_path_start(self, idx):
return self.buffers[self.PATH_START_KEY][idx]
def get_path_end(self, idx):
return self.buffers[self.PATH_END_KEY][idx]
def get_pathlen(self, idx):
is_array = isinstance(idx, np.ndarray) or isinstance(idx, list)
if not is_array:
idx = [idx]
n = len(idx)
start_idx = self.get_path_start(idx)
end_idx = self.get_path_end(idx)
pathlen = np.empty(n, dtype=int)
for i in range(n):
curr_start = start_idx[i]
curr_end = end_idx[i]
if curr_start < curr_end:
curr_len = curr_end - curr_start
else:
curr_len = self.buffer_size - curr_start + curr_end
pathlen[i] = curr_len
if not is_array:
pathlen = pathlen[0]
return pathlen
def is_valid_path(self, idx):
start_idx = self.get_path_start(idx)
valid = start_idx != MathUtil.INVALID_IDX
return valid
def store(self, path):
start_idx = MathUtil.INVALID_IDX
n = path.pathlength()
if (n > 0):
assert path.is_valid()
if path.check_vals():
if self.buffers is None:
self._init_buffers(path)
idx = self._request_idx(n + 1)
self._store_path(path, idx)
self._add_sample_buffers(idx)
self.num_paths += 1
self.total_count += n + 1
start_idx = idx[0]
else:
Logger.print2('Invalid path data value detected')
return start_idx
def clear(self):
self.buffer_head = 0
self.buffer_tail = MathUtil.INVALID_IDX
self.num_paths = 0
for key in self._sample_buffers:
self._sample_buffers[key].clear()
return
def get_next_idx(self, idx):
next_idx = np.mod(idx + 1, self.buffer_size)
return next_idx
def is_terminal_state(self, idx):
terminate_flags = self.buffers[self.TERMINATE_KEY][idx]
terminate = terminate_flags != Env.Terminate.Null.value
is_end = self.is_path_end(idx)
terminal_state = np.logical_and(terminate, is_end)
return terminal_state
def check_terminal_flag(self, idx, flag):
terminate_flags = self.buffers[self.TERMINATE_KEY][idx]
terminate = terminate_flags == flag.value
return terminate
def is_path_end(self, idx):
is_end = self.buffers[self.PATH_END_KEY][idx] == idx
return is_end
def add_filter_key(self, key):
assert self.get_current_size() == 0
if key not in self._sample_buffers:
self._sample_buffers[key] = SampleBuffer(self.buffer_size)
return
def get_current_size(self):
if self.buffer_tail == MathUtil.INVALID_IDX:
return 0
elif self.buffer_tail < self.buffer_head:
return self.buffer_head - self.buffer_tail
else:
return self.buffer_size - self.buffer_tail + self.buffer_head
def _check_flags(self, key, flags):
return (flags & key) == key
def _add_sample_buffers(self, idx):
flags = self.buffers['flags']
for key in self._sample_buffers:
curr_buffer = self._sample_buffers[key]
filter_idx = [
i for i in idx if (self._check_flags(key, flags[i]) and not self.is_path_end(i))
]
curr_buffer.add(filter_idx)
return
def _free_sample_buffers(self, idx):
for key in self._sample_buffers:
curr_buffer = self._sample_buffers[key]
curr_buffer.free(idx)
return
def _init_buffers(self, path):
self.buffers = dict()
self.buffers[self.PATH_START_KEY] = MathUtil.INVALID_IDX * np.ones(self.buffer_size, dtype=int)
self.buffers[self.PATH_END_KEY] = MathUtil.INVALID_IDX * np.ones(self.buffer_size, dtype=int)
for key in dir(path):
val = getattr(path, key)
if not key.startswith('__') and not inspect.ismethod(val):
if key == self.TERMINATE_KEY:
self.buffers[self.TERMINATE_KEY] = np.zeros(shape=[self.buffer_size], dtype=int)
else:
val_type = type(val[0])
is_array = val_type == np.ndarray
if is_array:
shape = [self.buffer_size, val[0].shape[0]]
dtype = val[0].dtype
else:
shape = [self.buffer_size]
dtype = val_type
self.buffers[key] = np.zeros(shape, dtype=dtype)
return
def _request_idx(self, n):
assert n + 1 < self.buffer_size # bad things can happen if path is too long
remainder = n
idx = []
start_idx = self.buffer_head
while remainder > 0:
end_idx = np.minimum(start_idx + remainder, self.buffer_size)
remainder -= (end_idx - start_idx)
free_idx = list(range(start_idx, end_idx))
self._free_idx(free_idx)
idx += free_idx
start_idx = 0
self.buffer_head = (self.buffer_head + n) % self.buffer_size
return idx
def _free_idx(self, idx):
assert (idx[0] <= idx[-1])
n = len(idx)
if self.buffer_tail != MathUtil.INVALID_IDX:
update_tail = idx[0] <= idx[-1] and idx[0] <= self.buffer_tail and idx[-1] >= self.buffer_tail
update_tail |= idx[0] > idx[-1] and (idx[0] <= self.buffer_tail or
idx[-1] >= self.buffer_tail)
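      # Editor's note: update_tail is True when the freed range
      # [idx[0], idx[-1]] overlaps the current tail, covering both the
      # contiguous case and the wrapped-around case of the ring buffer.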
if update_tail:
i = 0
while i < n:
curr_idx = idx[i]
if self.is_valid_path(curr_idx):
start_idx = self.get_path_start(curr_idx)
end_idx = self.get_path_end(curr_idx)
pathlen = self.get_pathlen(curr_idx)
if start_idx < end_idx:
self.buffers[self.PATH_START_KEY][start_idx:end_idx + 1] = MathUtil.INVALID_IDX
self._free_sample_buffers(list(range(start_idx, end_idx + 1)))
else:
self.buffers[self.PATH_START_KEY][start_idx:self.buffer_size] = MathUtil.INVALID_IDX
self.buffers[self.PATH_START_KEY][0:end_idx + 1] = MathUtil.INVALID_IDX
self._free_sample_buffers(list(range(start_idx, self.buffer_size)))
self._free_sample_buffers(list(range(0, end_idx + 1)))
self.num_paths -= 1
i += pathlen + 1
self.buffer_tail = (end_idx + 1) % self.buffer_size
else:
i += 1
else:
self.buffer_tail = idx[0]
return
def _store_path(self, path, idx):
n = path.pathlength()
for key, data in self.buffers.items():
if key != self.PATH_START_KEY and key != self.PATH_END_KEY and key != self.TERMINATE_KEY:
val = getattr(path, key)
val_len = len(val)
assert val_len == n or val_len == n + 1
data[idx[:val_len]] = val
self.buffers[self.TERMINATE_KEY][idx] = path.terminate.value
self.buffers[self.PATH_START_KEY][idx] = idx[0]
self.buffers[self.PATH_END_KEY][idx] = idx[-1]
return
class SampleBuffer(object):
def __init__(self, size):
self.idx_to_slot = np.empty(shape=[size], dtype=int)
self.slot_to_idx = np.empty(shape=[size], dtype=int)
self.count = 0
self.clear()
return
def clear(self):
self.idx_to_slot.fill(MathUtil.INVALID_IDX)
self.slot_to_idx.fill(MathUtil.INVALID_IDX)
self.count = 0
return
def is_valid(self, idx):
return self.idx_to_slot[idx] != MathUtil.INVALID_IDX
def get_size(self):
return self.idx_to_slot.shape[0]
def add(self, idx):
for i in idx:
if not self.is_valid(i):
new_slot = self.count
assert new_slot >= 0
self.idx_to_slot[i] = new_slot
self.slot_to_idx[new_slot] = i
self.count += 1
return
def free(self, idx):
for i in idx:
if self.is_valid(i):
slot = self.idx_to_slot[i]
last_slot = self.count - 1
last_idx = self.slot_to_idx[last_slot]
self.idx_to_slot[last_idx] = slot
self.slot_to_idx[slot] = last_idx
self.idx_to_slot[i] = MathUtil.INVALID_IDX
self.slot_to_idx[last_slot] = MathUtil.INVALID_IDX
self.count -= 1
return
def sample(self, n):
if self.count > 0:
slots = np.random.randint(0, self.count, size=n)
idx = self.slot_to_idx[slots]
else:
idx = np.empty(shape=[0], dtype=int)
return idx
def check_consistency(self):
valid = True
if self.count < 0:
valid = False
if valid:
for i in range(self.get_size()):
if self.is_valid(i):
s = self.idx_to_slot[i]
if self.slot_to_idx[s] != i:
valid = False
break
s2i = self.slot_to_idx[i]
if s2i != MathUtil.INVALID_IDX:
i2s = self.idx_to_slot[s2i]
if i2s != i:
valid = False
break
count0 = np.sum(self.idx_to_slot == MathUtil.INVALID_IDX)
count1 = np.sum(self.slot_to_idx == MathUtil.INVALID_IDX)
valid &= count0 == count1
return valid
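# Editor's sketch of the intended flow (the `path` object is produced by the
# RL rollout code and is assumed here):
#
#     buf = ReplayBuffer(buffer_size=4096)
#     start = buf.store(path)            # INVALID_IDX if the path was rejected
#     if start != MathUtil.INVALID_IDX:
#         idx = buf.sample(32)           # indices; path-end states excluded
#         states = buf.get('states', idx)  # assumes paths carry a 'states' field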
|
MadManRises/Madgine
|
shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/deep_mimic/learning/replay_buffer.py
|
Python
|
mit
| 10,316
|
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'PeptideShaker-1.16.23.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where the first three elements are lists of strings.
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
java = java_executable()
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
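# Editor's note (illustrative invocation, not part of the wrapper):
#
#     peptide-shaker.py -Xmx4g --exec_dir=/tmp/ps eu.isas.peptideshaker.cmd.PeptideShakerCLI ...
#
# -Xm*/-D*/-XX* options are routed to the JVM; an argument starting with 'eu'
# switches from '-jar' to '-cp' invocation, as implemented in main() above.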
|
dmaticzka/bioconda-recipes
|
recipes/peptide-shaker/peptide-shaker.py
|
Python
|
mit
| 3,272
|
#!/usr/bin/env python
import shogun as sg
parameter_list=[[".", "features_string_char.py"]]
def features_string_file (directory, fname):
from shogun import StringCharFeatures, RAWBYTE
# load features from directory
f=StringCharFeatures(RAWBYTE)
f.load_from_directory(directory)
#and output several stats
#print("max string length", f.get_max_vector_length())
#print("number of strings", f.get_num_vectors())
#print("length of first string", f.get_vector_length(0))
#print("str[0,0:3]", f.get_feature(0,0), f.get_feature(0,1), f.get_feature(0,2))
#print("len(str[0])", f.get_vector_length(0))
#print("str[0]", f.get_feature_vector(0))
#or load features from file (one string per line)
fil=sg.read_csv(fname)
f.load(fil)
#print(f.get_features())
#or load fasta file
#f.load_fasta('fasta.fa')
#print(f.get_features())
return f.get_string_list(), f
if __name__=='__main__':
    print('StringCharFeatures')
features_string_file(*parameter_list[0])
|
shogun-toolbox/shogun
|
examples/undocumented/python/features_string_file.py
|
Python
|
bsd-3-clause
| 966
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import absolute_import
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('fixes', parent_package, top_path)
config.add_subpackage('numpy')
config.add_subpackage('numpy.testing')
config.add_subpackage('nibabel')
config.add_subpackage('scipy')
config.add_subpackage('scipy.ndimage')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
alexis-roche/nipy
|
nipy/fixes/setup.py
|
Python
|
bsd-3-clause
| 656
|
# author : Etienne THIERY
from matgen import *
import random
import numpy
def test_symmetricPositiveDefinite():
for i in range(10):
print(".", end="", flush=True)
size = random.randint(400, 500)
maxVal = random.randint(0, 1000)
M = symmetricPositiveDefinite(size, maxVal)
if not (isSymmetric(M) and isDefinitePositive(M)):
return False
return True
def test_symmetricSparsePositiveDefinite():
for i in range(10):
print(".", end="", flush=True)
size = random.randint(400, 500)
maxVal = random.randint(0, 1000)
nbZeros = random.randint(0, size*(size-1))
M = symmetricSparsePositiveDefinite(size, nbZeros, maxVal)
if not (isSymmetric(M) and isDefinitePositive(M) and abs(numberOfZeros(M)-nbZeros) <= 1):
return False
return True
def numberOfZeros(M):
count = 0
for line in M:
for coeff in line:
if coeff == 0:
count+=1
return count
def printTest(test_func):
print("Testing " + test_func.__name__[5:] + " : ", end="", flush=True)
print(("" if test_func() else "un") + "expected behaviour", flush=True)
printTest(test_symmetricPositiveDefinite)
printTest(test_symmetricSparsePositiveDefinite)
|
ethiery/heat-solver
|
trunk/test_matgen.py
|
Python
|
mit
| 1,289
|
import logging
from collections import OrderedDict
from functools import wraps
from gevent import spawn
import zmq.green as zmq
from frankdux.types import Descriptor, Int, Float, String, Bytes, TypeRegistry
from .exceptions import ArgumentCountException
from .encoding import MessageEncoder
from frankdux.codegen import CodeGen
class Function(object):
name = None
types = None # OrderedDict of key=type pairs, str:type
return_type = None
func = None
def __init__(self, name, types, return_type, func):
self.name = name
self.types = types
        # make sure this is a list of dicts
self.return_type = return_type
self.func = func
def __call__(self, **kwargs):
return self.func(**kwargs)
class FrankDux(object):
# dict: [function_name] = Function()
registry = None
# zeromq server context
context = None
# TypeRegistry instance
type_registry = None
def __init__(self):
self.registry = {} # str: Function
self.encoder = TypeRegistry()
self.encoder.add_type(Int)
self.encoder.add_type(Float)
def register(self, *args, **kwargs):
"""
Registers a function to be available for RPC
Specify arguments followed by return type
Usage:
# how to set default values?
@app.register(int, returns=bool)
def greater_than_zero(a):
return a > 0
:param args:
:return:
"""
logging.debug("Registering function with args:")
# import ipdb; ipdb.set_trace()
# type upgrades
args = map(upgrade_type, args)
def new_func(func):
# register the function here
returns = kwargs.get("returns", None)
# make sure param counts match
if len(func.func_code.co_varnames) != len(args):
raise ArgumentCountException()
name = func.func_name
# pull out the arg types & match to the names
# str:type
zipped = OrderedDict(zip(func.func_code.co_varnames, args))
@wraps(func)
def new_rpc(*new_args, **new_kwargs):
# check types
# get a list of the default args
# make sure types are optional
arguments = self.validate_args(zipped, new_kwargs)
result = func(**arguments)
# TODO type check return type
return result
f = Function(name=name, types=zipped,
return_type=returns, func=new_rpc)
self.registry[name] = f
logging.debug("Created func: %s %s %s", func, args, kwargs)
return new_rpc
return new_func
def call(self, func, **kwargs):
pass
def __getitem__(self, item):
return self.registry[item]
def validate_args(self, typemap, kwargs):
"""
        Checks each of the elements in kwargs and returns a dictionary of
        k/v pairs, with None set as the default for all keys that aren't
        set explicitly.
:param typemap: dict of key:type
:param kwargs: dict of key:value
:return: key:value, type checked
"""
# if len(typemap) != len(kwargs):
# raise ArgumentCountException()
result = {}
for k, v in typemap.iteritems():
tmp = kwargs.get(k, None)
if tmp is None or v._validate(tmp):
result[k] = tmp
else:
raise TypeError
return result
def run(self, port=5000):
# You have made it to the Kumite!
# Run FrankDux on some port
# probably need to use ZeroMQ Router/Dealer w/ device
self.context = zmq.Context()
# incoming requests
incoming = self.context.socket(zmq.ROUTER)
incoming.bind("tcp://*:{}".format(port))
logging.info("Creating dealer for workers")
workers = self.context.socket(zmq.DEALER)
workers.bind("inproc://workers")
# spawn workers
for i in range(20):
spawn(self.worker, i)
zmq.device(zmq.QUEUE, incoming, workers)
logging.info("Finishing up")
def worker(self, i):
logging.info("Starting worker %d", i)
sock = self.context.socket(zmq.REP)
sock.connect("inproc://workers")
while True:
logging.info("Worker %s waiting for incoming message", i)
msg = sock.recv()
logging.info("Worker %s received message %s", i, msg)
sock.send("OK")
def generate_client_libraries(self, output_dir, language):
code = CodeGen(self, output_dir, language=language)
code.write()
def decode_request(self, data):
pass
def upgrade_type(t):
if issubclass(t, Descriptor):
return t
types = {
int: Int,
float: Float,
str: String,
}
try:
return types[t]
except:
raise
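# Editor's sketch: upgrade_type maps plain Python types onto frankdux
# descriptor types before validation, so the docstring example above could
# equivalently be written as (hypothetical):
#
#     @app.register(Int, returns=bool)
#     def greater_than_zero(a):
#         return a > 0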
|
rustyrazorblade/FrankDux
|
frankdux/__init__.py
|
Python
|
bsd-2-clause
| 5,024
|
from typing import Tuple
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{last_name}}",
"{{last_name}}",
)
catch_phrase_formats = ("{{catch_phrase_noun}} {{catch_phrase_verb}} {{catch_phrase_attribute}}",)
nouns = (
"la sécurité",
"le plaisir",
"le confort",
"la simplicité",
"l'assurance",
"l'art",
"le pouvoir",
"le droit",
"la possibilité",
"l'avantage",
"la liberté",
)
verbs = (
"de rouler",
"d'avancer",
"d'évoluer",
"de changer",
"d'innover",
"de louer",
"d'atteindre vos buts",
"de concrétiser vos projets",
)
attributes = (
"de manière efficace",
"plus rapidement",
"plus facilement",
"plus simplement",
"en toute tranquilité",
"avant-tout",
"autrement",
"naturellement",
"à la pointe",
"sans soucis",
"à l'état pur",
"à sa source",
"de manière sûre",
"en toute sécurité",
)
company_suffixes: Tuple[str, ...] = (
"SA",
"S.A.",
"SARL",
"S.A.R.L.",
"S.A.S.",
"et Fils",
)
siren_format = "### ### ###"
def catch_phrase_noun(self) -> str:
"""
Returns a random catch phrase noun.
"""
return self.random_element(self.nouns)
def catch_phrase_attribute(self) -> str:
"""
Returns a random catch phrase attribute.
"""
return self.random_element(self.attributes)
def catch_phrase_verb(self) -> str:
"""
Returns a random catch phrase verb.
"""
return self.random_element(self.verbs)
def catch_phrase(self) -> str:
"""
:example: 'integrate extensible convergence'
"""
catch_phrase = ""
while True:
pattern: str = self.random_element(self.catch_phrase_formats)
catch_phrase = self.generator.parse(pattern)
catch_phrase = catch_phrase[0].upper() + catch_phrase[1:]
if self._is_catch_phrase_valid(catch_phrase):
break
return catch_phrase
# An array containing string which should not appear twice in a catch phrase
words_which_should_not_appear_twice = ("sécurité", "simpl")
def _is_catch_phrase_valid(self, catch_phrase: str) -> bool:
"""
        Validates a French catch phrase.
:param catch_phrase: The catch phrase to validate.
"""
for word in self.words_which_should_not_appear_twice:
# Fastest way to check if a piece of word does not appear twice.
begin_pos = catch_phrase.find(word)
end_pos = catch_phrase.find(word, begin_pos + 1)
if begin_pos != -1 and begin_pos != end_pos:
return False
return True
def siren(self) -> str:
"""
Generates a siren number (9 digits).
"""
return self.numerify(self.siren_format)
def siret(self, max_sequential_digits: int = 2) -> str:
"""
Generates a siret number (14 digits).
        It is in fact the concatenation of a siren number (9 digits),
        a sequential number (4 digits) and a control number (1 digit).
If $max_sequential_digits is invalid, it is set to 2.
        :param max_sequential_digits: The maximum number of digits for the sequential number (> 0 and <= 4).
"""
if max_sequential_digits > 4 or max_sequential_digits <= 0:
max_sequential_digits = 2
sequential_number = str(self.random_number(max_sequential_digits)).zfill(4)
return self.numerify(self.siren() + " " + sequential_number + "#")
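# Editor's sketch (standard Faker usage, shown for illustration):
#
#     from faker import Faker
#     fake = Faker("fr_FR")
#     fake.catch_phrase()  # e.g. "Le confort de rouler plus simplement"
#     fake.siren()         # 9 digits, "### ### ###" pattern
#     fake.siret()         # siren + sequential number + control digit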
|
joke2k/faker
|
faker/providers/company/fr_FR/__init__.py
|
Python
|
mit
| 3,985
|
import threading
import time
import sublime
import sublime_plugin
from ..show_error import show_error
from .existing_packages_command import ExistingPackagesCommand
from ..thread_progress import ThreadProgress
from ..package_disabler import PackageDisabler
from ..package_manager import PackageManager
class RemovePackageCommand(sublime_plugin.WindowCommand,
ExistingPackagesCommand, PackageDisabler):
"""
A command that presents a list of installed packages, allowing the user to
select one to remove
"""
def __init__(self, window):
"""
:param window:
An instance of :class:`sublime.Window` that represents the Sublime
Text window to show the list of installed packages in.
"""
self.window = window
self.manager = PackageManager()
def run(self):
self.package_list = self.make_package_list('remove')
if not self.package_list:
show_error('There are no packages that can be removed.')
return
self.window.show_quick_panel(self.package_list, self.on_done)
def on_done(self, picked):
"""
Quick panel user selection handler - deletes the selected package
:param picked:
An integer of the 0-based package name index from the presented
list. -1 means the user cancelled.
"""
if picked == -1:
return
package = self.package_list[picked][0]
self.disable_packages(package, 'remove')
thread = RemovePackageThread(self.manager, package)
thread.start()
ThreadProgress(thread, 'Removing package %s' % package,
'Package %s successfully removed' % package)
class RemovePackageThread(threading.Thread, PackageDisabler):
"""
A thread to run the remove package operation in so that the Sublime Text
UI does not become frozen
"""
def __init__(self, manager, package):
self.manager = manager
self.package = package
threading.Thread.__init__(self)
def run(self):
# Let the package disabling take place
time.sleep(0.7)
self.result = self.manager.remove_package(self.package)
def unignore_package():
self.reenable_package(self.package, 'remove')
sublime.set_timeout(unignore_package, 200)
|
koery/win-sublime
|
Data/Packages/Package Control/package_control/commands/remove_package_command.py
|
Python
|
mit
| 2,356
|
from flask import Flask, jsonify, abort, request
app = Flask(__name__)
vnf_nodes = [
{
'vnf_id': 1,
'vnf_name': u'vnf01',
'vnf_desc': u'test1',
'vnf_load': u'R17'
},
{
'vnf_id': 2,
'vnf_name': u'vnf02',
'vnf_desc': u'test2',
'vnf_load': u'R18'
},
{
'vnf_id': 3,
'vnf_name': u'vnf03',
'vnf_desc': u'test3',
'vnf_load': u'R19'
}
]
@app.route('/vnfs', methods=['GET'])
def get_vnfs():
return jsonify({'vnf_nodes': vnf_nodes})
@app.route('/vnfs/<int:vnf_id>', methods=['GET'])
def get_vnf(vnf_id):
v_node = filter(lambda t: t['vnf_id'] == vnf_id, vnf_nodes)
if len(v_node) == 0:
abort(404)
return jsonify({'v_node': v_node[0]})
@app.route('/vnfs', methods=['POST'])
def create_vnf():
    if (not request.json or 'vnf_id' not in request.json
            or 'vnf_name' not in request.json
            or 'vnf_desc' not in request.json
            or 'vnf_load' not in request.json):
abort(400)
v_node = {
'vnf_id': request.json['vnf_id'],
'vnf_name': request.json['vnf_name'],
'vnf_desc': request.json['vnf_desc'],
'vnf_load': request.json['vnf_load']
}
vnf_nodes.append(v_node)
for v_node in vnf_nodes:
print v_node
return jsonify({'v_node': v_node}), 201
@app.route('/vnfs/<int:vnf_id>', methods=['DELETE'])
def delete_vnf(vnf_id):
v_node = filter(lambda t: t['vnf_id'] == vnf_id, vnf_nodes)
if len(v_node) == 0:
abort(404)
vnf_nodes.remove(v_node[0])
    # a view must return a response body; a bare int is not valid in Flask
    return '', 204
if __name__ == '__main__':
app.run(host='0.0.0.0')
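# Editor's note (illustrative requests against the routes defined above):
#
#     curl http://localhost:5000/vnfs
#     curl http://localhost:5000/vnfs/1
#     curl -X POST -H "Content-Type: application/json" \
#          -d '{"vnf_id": 4, "vnf_name": "vnf04", "vnf_desc": "t4", "vnf_load": "R20"}' \
#          http://localhost:5000/vnfs
#     curl -X DELETE http://localhost:5000/vnfs/4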
|
IT-SeanWANG/CodeJam
|
2016_1st/Refer1_Q3.py
|
Python
|
apache-2.0
| 1,621
|
# Copyright 2016-2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from rest_framework import status
from django.test import TestCase
from django.test import Client
from lcm.pub.utils import restcall
from lcm.pub.utils import fileutil
from lcm.pub.nfvi.vim.vimadaptor import VimAdaptor
from lcm.pub.database.models import NfPackageModel, VnfPackageFileModel, NfInstModel
from lcm.pub.database.models import JobStatusModel, JobModel
from lcm.packages.nf_package import NfOnBoardingThread, NfPkgDeletePendingThread
from lcm.packages.nf_package import NfPkgDeleteThread
from lcm.packages import nf_package
from lcm.pub.nfvi.vim.const import VIM_OPENSTACK
class TestNfPackage(TestCase):
def setUp(self):
self.client = Client()
NfPackageModel.objects.filter().delete()
VnfPackageFileModel.objects.filter().delete()
NfInstModel.objects.filter().delete()
JobModel.objects.filter().delete()
JobStatusModel.objects.filter().delete()
self.vnfd_raw_data = {
"rawData":{
"instance":{
"metadata":{
"is_shared":False,
"plugin_info":"vbrasplugin_1.0",
"vendor":"zte",
"request_reclassification":False,
"name":"vbras",
"version":1,
"vnf_type":"vbras",
"cross_dc":False,
"vnfd_version":"1.0.0",
"id":"zte_vbras_1.0",
"nsh_aware":True
},
"nodes":[
{
"id":"aaa_dnet_cp_0xu2j5sbigxc8h1ega3if0ld1",
"type_name":"tosca.nodes.nfv.ext.zte.CP",
"template_name":"aaa_dnet_cp",
"properties":{
"bandwidth":{
"type_name":"integer",
"value":0
},
"direction":{
"type_name":"string",
"value":"bidirectional"
},
"vnic_type":{
"type_name":"string",
"value":"normal"
},
"sfc_encapsulation":{
"type_name":"string",
"value":"mac"
},
"order":{
"type_name":"integer",
"value":2
}
},
"relationships":[
{
"name":"guest_os",
"source_requirement_index":0,
"target_node_id":"AAA_image_d8aseebr120nbm7bo1ohkj194",
"target_capability_name":"feature"
}
]
},
{
"id":"LB_Image_oj5l2ay8l2g6vcq6fsswzduha",
"type_name":"tosca.nodes.nfv.ext.ImageFile",
"template_name":"LB_Image",
"properties":{
"disk_format":{
"type_name":"string",
"value":"qcow2"
},
"file_url":{
"type_name":"string",
"value":"/SoftwareImages/image-lb"
},
"name":{
"type_name":"string",
"value":"image-lb"
}
}
}
]
},
"model":{
"metadata":{
"is_shared":False,
"plugin_info":"vbrasplugin_1.0",
"vendor":"zte",
"request_reclassification":False,
"name":"vbras",
"version":1,
"vnf_type":"vbras",
"cross_dc":False,
"vnfd_version":"1.0.0",
"id":"zte_vbras_1.0",
"nsh_aware":True
},
"node_templates":[
{
"name":"aaa_dnet_cp",
"type_name":"tosca.nodes.nfv.ext.zte.CP",
"default_instances":1,
"min_instances":0,
"properties":{
"bandwidth":{
"type_name":"integer",
"value":0
}
},
"requirement_templates":[
{
"name":"virtualbinding",
"target_node_template_name":"AAA",
"target_capability_name":"virtualbinding"
}
]
}
]
}
}
}
def tearDown(self):
pass
def assert_job_result(self, job_id, job_progress, job_detail):
jobs = JobStatusModel.objects.filter(
jobid=job_id,
progress=job_progress,
descp=job_detail)
self.assertEqual(1, len(jobs))
@mock.patch.object(NfOnBoardingThread, 'run')
def test_nf_pkg_on_boarding_normal(self, mock_run):
resp = self.client.post("/openoapi/nslcm/v1/vnfpackage", {
"csarId": "1",
"vimIds": ["1"]
}, format='json')
self.assertEqual(resp.status_code, status.HTTP_202_ACCEPTED)
@mock.patch.object(restcall, 'call_req')
def test_nf_pkg_on_boarding_when_on_boarded(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({"onBoardState": "onBoarded"}), '200']
NfOnBoardingThread(csar_id="1",
vim_ids=["1"],
lab_vim_id="",
job_id="2").run()
self.assert_job_result("2", 255, "CSAR(1) already onBoarded.")
@mock.patch.object(restcall, 'call_req')
def test_nf_pkg_on_boarding_when_on_boarding(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"onBoardState": "non-onBoarded",
"processState": "onBoarding"
}), '200']
NfOnBoardingThread(csar_id="2",
vim_ids=["1"],
lab_vim_id="",
job_id="3").run()
self.assert_job_result("3", 255, "CSAR(2) is onBoarding now.")
@mock.patch.object(restcall, 'call_req')
def test_nf_on_boarding_when_nfd_already_exists(self, mock_call_req):
mock_vals = {
"/openoapi/catalog/v1/csars/2":
[0, json.JSONEncoder().encode({
"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200']}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfPackageModel(uuid="1", nfpackageid="2", vnfdid="zte_vbras_1.0").save()
NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="4").run()
self.assert_job_result("4", 255, "NFD(zte_vbras_1.0) already exists.")
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(fileutil, 'download_file_from_http')
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'create_image')
@mock.patch.object(VimAdaptor, 'get_image')
def test_nf_on_boarding_when_successfully(self, mock_get_image, mock_create_image,
mock__init__, mock_download_file_from_http, mock_call_req):
mock_download_file_from_http.return_value = True, "/root/package"
mock_vals = {
"/openoapi/catalog/v1/csars/2":
[0, json.JSONEncoder().encode({
"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'],
"/openoapi/catalog/v1/csars/2/files?relativePath=/SoftwareImages/image-lb":
[0, json.JSONEncoder().encode({
"csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'],
"/openoapi/extsys/v1/vims":
[0, json.JSONEncoder().encode([{
"vimId": "1", "type": VIM_OPENSTACK,
"url": "/root/package", "userName": "tom",
"password": "tom", "tenant": "10"}]), '200'],
"/openoapi/catalog/v1/csars/2?onBoardState=onBoarded": [0, '{}', 200],
"/openoapi/catalog/v1/csars/2?operationalState=Enabled": [0, '{}', 200],
"/openoapi/catalog/v1/csars/2?processState=normal": [0, '{}', 200]}
mock_create_image.return_value = [0, {"id": "30", "name": "jerry", "res_type": 0}]
mock__init__.return_value = None
mock_get_image.return_value = [0, {"id": "30", "name": "jerry", "size": "60", "status": "active"}]
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="4").run()
self.assert_job_result("4", 100, "CSAR(2) onBoarding successfully.")
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(fileutil, 'download_file_from_http')
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'create_image')
@mock.patch.object(VimAdaptor, 'get_image')
def test_nf_on_boarding_when_timeout(self, mock_get_image, mock_create_image,
mock__init__, mock_download_file_from_http, mock_call_req):
nf_package.MAX_RETRY_TIMES = 2
nf_package.SLEEP_INTERVAL_SECONDS = 1
mock_download_file_from_http.return_value = True, "/root/package"
mock_vals = {
"/openoapi/catalog/v1/csars/3":
[0, json.JSONEncoder().encode({"onBoardState": "onBoardFailed",
"processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'],
"/openoapi/catalog/v1/csars/3/files?relativePath=/SoftwareImages/image-lb":
[0, json.JSONEncoder().encode({
"csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'],
"/openoapi/catalog/v1/csars/3?processState=onBoardFailed": [0, '{}', 200],
"/openoapi/extsys/v1/vims":
[0, json.JSONEncoder().encode([{
"vimId": "1", "type": VIM_OPENSTACK,
"url": "/root/package", "userName": "tom",
"password": "tom", "tenant": "10"}]), 200]}
mock_create_image.return_value = [0, {"id": "30", "name": "jerry", "res_type": 0}]
mock__init__.return_value = None
mock_get_image.return_value = [0, {"id": "30", "name": "jerry", "size": "60", "status": "0"}]
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfOnBoardingThread(csar_id="3", vim_ids=["1"], lab_vim_id="", job_id="6").run()
self.assert_job_result("6", 255, "Failed to create image:timeout(2 seconds.)")
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(fileutil, 'download_file_from_http')
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'create_image')
def test_nf_on_boarding_when_failed_to_create_image(self, mock_create_image,
mock__init__, mock_download_file_from_http, mock_call_req):
mock_download_file_from_http.return_value = True, "/root/package"
mock_vals = {
"/openoapi/catalog/v1/csars/5":
[0, json.JSONEncoder().encode({
"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'],
"/openoapi/catalog/v1/csars/5/files?relativePath=/SoftwareImages/image-lb":
[0, json.JSONEncoder().encode({
"csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'],
"/openoapi/catalog/v1/csars/5?processState=onBoardFailed": [0, '{}', 200],
"/openoapi/extsys/v1/vims":
[0, json.JSONEncoder().encode([{
"vimId": "1", "type": VIM_OPENSTACK,
"url": "/root/package", "userName": "tom",
"password": "tom", "tenant": "10"}]), '200']}
mock_create_image.return_value = [1, 'Unsupported image format.']
mock__init__.return_value = None
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfOnBoardingThread(csar_id="5", vim_ids=["1"], lab_vim_id="", job_id="8").run()
self.assert_job_result("8", 255, "Failed to create image:Unsupported image format.")
#########################################################################
@mock.patch.object(restcall, 'call_req')
def test_get_csar_successfully(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"name": "1", "provider": "2", "version": "3", "operationalState": "4",
"usageState": "5", "onBoardState": "6", "processState": "7",
"deletionPending": "8", "downloadUri": "9", "createTime": "10",
"modifyTime": "11", "format": "12", "size": "13"
}), '200']
NfPackageModel(uuid="1", vnfdid="001", vendor="vendor",
vnfdversion="1.2.0", vnfversion="1.1.0", nfpackageid="13").save()
VnfPackageFileModel(id="1", filename="filename", imageid="00001",
vimid="1", vimuser="001", tenant="12", status="1", vnfpid="13").save()
NfInstModel(nfinstid="1", mnfinstid="001", nf_name="name", package_id="13").save()
resp = self.client.get("/openoapi/nslcm/v1/vnfpackage/13")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
expect_data = {
"csarId": '13',
"packageInfo": {
"vnfdId": "001",
"vnfdProvider": "vendor",
"vnfdVersion": "1.2.0",
"vnfVersion": "1.1.0",
"name": "1",
"provider": "2",
"version": "3",
"operationalState": "4",
"usageState": "5",
"onBoardState": "6",
"processState": "7",
"deletionPending": "8",
"downloadUri": "9",
"createTime": "10",
"modifyTime": "11",
"format": "12",
"size": "13"},
"imageInfo": [{
"index": "0",
"fileName": "filename",
"imageId": "00001",
"vimId": "1",
"vimUser": "001",
"tenant": "12",
"status": "1"}],
"vnfInstanceInfo": [{
"vnfInstanceId": "1",
"vnfInstanceName": "name"}]}
self.assertEqual(expect_data, resp.data)
#########################################################################
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_successfully(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"processState": "deleting"}), "200"]
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "Delete pending CSAR(1) successfully.")
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_deleting(self, mock_call_req):
NfPackageModel(uuid="01", nfpackageid="1").save()
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"processState": "deleting"}), "200"]
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "CSAR(1) is deleting now.")
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_not_deletion_pending(self, mock_call_req):
NfPackageModel(uuid="01", nfpackageid="1").save()
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"deletionPending": "false"}), "200"]
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "CSAR(1) need not to be deleted.")
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_in_using(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"processState": "normal"}), "200"]
NfPackageModel(uuid="01", nfpackageid="1").save()
NfInstModel(nfinstid="01", package_id="1").save()
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "CSAR(1) is in using, cannot be deleted.")
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'delete_image')
@mock.patch.object(restcall, 'call_req')
    def test_delete_csar_when_exception(self, mock_call_req, mock_delete_image, mock_init_):
mock_vals = {
("/openoapi/catalog/v1/csars/1", "DELETE"):
[1, "{}", "400"],
("/openoapi/catalog/v1/csars/1?processState=deleting", "PUT"):
[0, "{}", "200"],
("/openoapi/catalog/v1/csars/1?processState=deleteFailed", "PUT"):
[0, "{}", "200"],
("/openoapi/catalog/v1/csars/1", "GET"):
[0, json.JSONEncoder().encode({"processState": "normal"}), "200"],
("/openoapi/extsys/v1/vims", "GET"):
[0, json.JSONEncoder().encode([{"vimId": "002",
"url": "url_test",
"userName": "test01",
"password": "123456",
"tenant": "test"}]), "200"]}
mock_delete_image.return_value = [0, "", '200']
def side_effect(*args):
return mock_vals[(args[4], args[5])]
mock_call_req.side_effect = side_effect
mock_init_.return_value = None
VnfPackageFileModel(vnfpid="1", imageid="001", vimid="002").save()
NfPackageModel(uuid="01", nfpackageid="1").save()
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 255, "Failed to delete CSAR(1) from catalog.")
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'delete_image')
@mock.patch.object(restcall, 'call_req')
def test_delete_csar_when_successfully(self, mock_call_req, mock_delete_image, mock_init_):
mock_vals = {
("/openoapi/catalog/v1/csars/1", "DELETE"):
[0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"],
("/openoapi/catalog/v1/csars/1?processState=deleting", "PUT"):
[0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"],
("/openoapi/catalog/v1/csars/1?processState=deleteFailed", "PUT"):
[0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"],
("/openoapi/catalog/v1/csars/1", "GET"):
[0, json.JSONEncoder().encode({"notProcessState": "notProcessState"}), "200"],
("/openoapi/extsys/v1/vims", "GET"):
[0, json.JSONEncoder().encode([{
"vimId": "002",
"url": "url_test",
"userName": "test01",
"password": "123456",
"tenant": "test"}]), "200"]}
mock_delete_image.return_value = [0, json.JSONEncoder().encode({"test": "test"}), '200']
def side_effect(*args):
return mock_vals[(args[4], args[5])]
mock_call_req.side_effect = side_effect
mock_init_.return_value = None
VnfPackageFileModel(vnfpid="1", imageid="001", vimid="002").save()
NfPackageModel(uuid="01", nfpackageid="1").save()
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "Delete CSAR(1) successfully.")
#########################################################################
@mock.patch.object(restcall, 'call_req')
def test_delete_nf_pkg_when_deleting(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({"processState": "deleting"}), '200']
NfPkgDeleteThread(csar_id="1", job_id="2").run()
self.assert_job_result("2", 100, "CSAR(1) is deleting now.")
def test_get_nf_csars_normal(self):
NfPackageModel(uuid="01", nfpackageid="1", vnfdid="2").save()
resp = self.client.get("/openoapi/nslcm/v1/vnfpackage")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(1, len(resp.data["csars"]))
self.assertEqual("1", resp.data["csars"][0]["csarId"])
self.assertEqual("2", resp.data["csars"][0]["vnfdId"])
|
open-o/nfvo
|
lcm/lcm/packages/tests/test_nf.py
|
Python
|
apache-2.0
| 23,203
|
from pytest import mark
from tests import factories
from tests import common_tools
from tests.common_tools import (
make_user,
create_talk_for_user,
get_default_conference,
template_used)
from conference import user_panel
from conference import models
STREAMS_1 = [
{
"title": "Holy Grail",
"fare_codes": ["TRCC", "TRCP", "TRSC", "TRSP", "TRVC", "TRVP"],
"url": "https://www.youtube.com/embed/EEIk7gwjgIM"
}
]
def create_streamset():
get_default_conference()
stream_set = factories.StreamSetFactory(
        streams=repr(STREAMS_1).replace('\'', '"')
)
stream_set.save()
@mark.django_db
def test_streamset(user_client):
create_streamset()
@mark.django_db
def test_streamset_without_ticket(user_client):
create_streamset()
# User without ticket
data = user_panel.get_streams_for_current_conference(user_client.user)
#print (data)
assert not data['streams']
assert 'reload_timeout_seconds' in data
@mark.django_db
def test_streamset_with_ticket(user_client):
create_streamset()
# User with view-only ticket
common_tools.setup_conference_with_typical_fares()
fare = models.Fare.objects.get(code='TRVC')
ticket = common_tools.create_valid_ticket_for_user_and_fare(
user_client.user, fare=fare)
ticket.save()
data = user_panel.get_streams_for_current_conference(user_client.user)
#print (data)
assert len(data['streams']) == 1
tracks = data['streams'][0]
assert tracks['title'] == 'Holy Grail'
assert tracks['url'] == 'https://www.youtube.com/embed/EEIk7gwjgIM'
assert 'reload_timeout_seconds' in data
assert data['reload_timeout_seconds'] > 3600 # factory sets the end_date to now + 1 hour
|
EuroPython/epcon
|
tests/test_streamset.py
|
Python
|
bsd-2-clause
| 1,750
|
#!/usr/bin/python
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''run_server.py - An example server using the python socket implementation of
the Google Protocol Buffers.
This module is an executable script demonstrating the usage of the python socket
implementation of the Google Protocol Buffers. This script starts a socket
server running on localhost:8090. Once running, the run_client.py script can
be used to test the TimeService.getTime remote procedure call (RPC).
Authors: Martin Norbury (mnorbury@lcogt.net)
Eric Saunders (esaunders@lcogt.net)
May 2009
'''
# Add main protobuf module to classpath
import sys
sys.path.append('../../main')
from protobuf.server import SocketRpcServer
import time_pb2 as proto
import time
import logging
log = logging.getLogger(__name__)
port = 8090
class TimeService(proto.TimeService):
'''An example service implementation.'''
def getTime(self,controller,request,done):
'''Get the current time and return as a response message via the
callback routine provide.'''
log.info('Called TestMethod')
# Create response message
response = proto.TimeResponse()
response.str_time = time.asctime()
# Call provided callback with response message
done.run(response)
if __name__=='__main__':
logging.basicConfig(level=logging.DEBUG)
# Create service
service = TimeService()
server = SocketRpcServer(port)
server.registerService(service)
server.run()
|
nowelium/protobuf-socket-rpc
|
python/src/example/time/run_server.py
|
Python
|
mit
| 2,616
|
from sqlalchemy import *
from migrate import *
from floof.model.types import Timezone
from sqlalchemy.ext.declarative import declarative_base
TableBase = declarative_base()
# Modified tables
class User(TableBase):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, nullable=False)
resource_id = Column(Integer, ForeignKey('resources.id'), nullable=False)
name = Column(Unicode(24), nullable=False, index=True, unique=True)
email = Column(Unicode(255))
display_name = Column(Unicode(24), nullable=True)
has_trivial_display_name = Column(Boolean, nullable=False, default=False, server_default=u'f')
timezone = Column(Timezone, nullable=True)
role_id = Column(Integer, ForeignKey('roles.id'), nullable=False)
auth_method = Column(Enum(
u'cert_only',
u'openid_only',
u'cert_or_openid',
u'cert_and_openid',
name='user_auth_method'), nullable=False, default=u'openid_only')
def upgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
User.__table__.c.email.create()
### # XXX: I think I'm missing something here, because I seem to need this
### # .alter() to get the column creation to stick. Eh?
### User.__table__.c.email.alter()
def downgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
User.__table__.c.email.drop()
|
eevee/floof
|
migration/versions/024_Add_user_email_addresses.py
|
Python
|
isc
| 1,361
|
from os.path import exists, isdir
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
PROMPT = "> "
def chunks(l, n):
# noinspection PyArgumentList
for i in range(0, len(l), n):
yield l[i:i + n]
# noinspection PyUnusedLocal
def good_quit(screen):
quit()
def addstr(screen, string, *args, newline=True):
yx = screen.getyx()
if yx[0] == 0 and yx[1] == 0: newline = False
screen.addstr(yx[0] + (1 if newline else 0), 0 if newline else yx[1], string, *args)
def get_str(screen, prompt=PROMPT, header=None, default=None, clear=False):
if not header: header = []
if clear: screen.clear()
for item in header:
if item is None: continue
if isinstance(item, tuple):
addstr(screen, "{}".format(item[0]), item[1])
else:
addstr(screen, "{}".format(item))
addstr(screen, prompt)
screen.refresh()
data = screen.getstr().decode("utf-8")
if len(data) < 1 and default: return default
return data
def get_dir(screen, header=None, prompt=PROMPT, clear=False, default=None):
if not header: header = []
error = None
wanted_dir = None
while True:
wanted_dir = get_str(screen, header=header + [error], prompt=prompt, clear=clear, default=default)
if exists(wanted_dir) and isdir(wanted_dir): break
if not exists(wanted_dir):
error = "That directory didn't exist!"
elif not isdir(wanted_dir):
error = "That is not a directory."
return wanted_dir
def convert(old_plugin, conversion):
royalcommands = {}
    # Iterate over a snapshot: the loop body mutates `conversion` via pop/insert.
    for k, v in list(conversion.items()):
if "." in k:
conversion.pop(k)
conversion[".".join(k.split(".")[1:])] = v
if k.split(".")[0] not in old_plugin: continue
royalcommands = dict(
list(royalcommands.items()) + list(convert(old_plugin[k.split(".")[0]], conversion).items()))
continue
if k not in old_plugin: continue
if "." in v:
conversion[k] = ".".join(v.split(".")[1:])
royalcommands[v.split(".")[0]] = convert({k: old_plugin[k]}, conversion)
# royalcommands[v.split(".")[0]] = convert(old_plugin, conversion)
continue
royalcommands[v] = old_plugin[k]
return royalcommands
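# Flat-key sketch (illustration, not part of the original module): with no
# dotted keys involved, each old plugin key named in `conversion` is simply
# renamed to its mapped name, e.g.
#   convert({"old": 1}, {"old": "new"})  ->  {"new": 1}
# Dotted keys on either side descend into (or build up) nested dictionaries.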
def convert_backwards(royalcommands, conversion):
new_plugin = {}
conversion = {v: k for k, v in conversion.items()}
    # Iterate over a snapshot: the loop body mutates `conversion` via pop/insert.
    for k, v in list(conversion.items()):
if "." in k:
conversion.pop(k)
conversion[".".join(k.split(".")[1:])] = v
new_plugin = dict(
list(new_plugin.items()) + list(
convert_backwards(royalcommands.get(k.split(".")[0], {}), conversion).items()))
continue
if k not in royalcommands: continue
if "." in v:
conversion[k] = ".".join(v.split(".")[1:])
new_plugin[v.split(".")[0]] = convert_backwards({k: royalcommands[k]},
{v: k for k, v in conversion.items()})
continue
new_plugin[v] = royalcommands[k]
return new_plugin
class CaseInsensitiveDict(dict):
def __setitem__(self, key, value):
super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(CaseInsensitiveDict, self).__getitem__(key.lower())
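# Usage sketch (illustration only):
#   d = CaseInsensitiveDict()
#   d["Foo"] = 1
#   d["FOO"]   # -> 1; keys are lower-cased on both set and get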
|
RoyalDev/RoyalCommands-converter
|
converter/__init__.py
|
Python
|
gpl-3.0
| 3,485
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line.
:type resource_files: list of :class:`ResourceFile
<azure.batch.models.ResourceFile>`
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param user_identity: The user identity under which the start task runs.
If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and scheduling error detail. If false,
the Batch service will not wait for the start task to complete. In this
case, other tasks can start executing on the compute node while the start
task is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None):
self.command_line = command_line
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
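# Instantiation sketch (hypothetical values; command_line is the only
# required argument per _validation above):
#   task = StartTask(command_line='/bin/sh -c "echo ready"',
#                    max_task_retry_count=3, wait_for_success=True)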
|
v-iam/azure-sdk-for-python
|
azure-batch/azure/batch/models/start_task.py
|
Python
|
mit
| 4,412
|
import logging
from SWDProtocol import *
from SWDErrors import *
# Refs:
# "Serial Wire Debug and the CoreSightTM Debug and Trace Architecture"
class SWDAdapterBase(object):
"Base abstract class for SWD adapter hardware"
def __init__(self):
self.log = logging.getLogger("comm")
#
# Mandatory interface - these must be implemented by hardware
#
    def writeBits(self, val, num):
        "Write 1-8 bits to SWD"
        raise NotImplementedError
    def readBits(self, num):
        "Read 1-8 bits from SWD"
        raise NotImplementedError
#
# Extended interface - these can be expressed in terms of the
# mandatory interface above (such default implementation is provided),
# but supporting some of the operations in hardware will make it
# "hardware accelerated".
#
def writeByte(self, val):
self.writeBits(val, 8)
self.log.debug("Wrote byte %#x", val)
def readByte(self):
return self.readBits(8)
def writeWord(self, val):
self.writeByte(val & 0xff)
val >>= 8
self.writeByte(val & 0xff)
val >>= 8
self.writeByte(val & 0xff)
val >>= 8
self.writeByte(val & 0xff)
def readWord(self):
val = self.readByte()
val |= self.readByte() << 8
val |= self.readByte() << 16
val |= self.readByte() << 24
return val
def turnClk(self):
"Turn a clock cycle - required when changing comm direction."
self.readBits(1)
def readAck(self):
        # ACK is transmitted LSB first, so we read it bit-reversed
return self.readBits(3)
def writeWordParity(self, val):
par = self.calcParity(val)
self.writeWord(val)
self.writeBits(par, 1)
self.log.debug("Written word %#x with parity %d", val, par)
def readWordParity(self):
val = self.readWord()
par = self.readBits(1)
if par != self.calcParity(val):
raise SWDParityError()
self.log.debug("Read word %#x with parity %d", val, par)
return val
def writeSWD(self, opcode, val):
self.writeByte(opcode)
self.turnClk()
ack = self.readAck()
self.turnClk()
if ack != ACK_OK:
self.handleAck(ack)
self.writeWordParity(val)
def readSWD(self, opcode):
self.writeByte(opcode)
self.turnClk()
ack = self.readAck()
if ack != ACK_OK:
self.turnClk()
self.handleAck(ack)
val = self.readWordParity()
self.turnClk()
return val
def handleAck(self, ack):
if ack == ACK_WAIT:
raise SWDWaitError(ack)
elif ack == ACK_FAULT:
raise SWDFaultError(ack)
elif ack == ACK_NOTPRESENT:
raise SWDNotPresentError(ack)
else:
raise SWDProtocolError(ack)
def resetSWD(self):
# "It consists of a sequence of 50 clock cycles with data = 1"
# We send 64 bits
self.writeWord(0xffffffff)
self.writeWord(0xffffffff)
        # Unclear why exactly this is needed
self.writeByte(0)
# "After the host has transmitted a line request sequence to the
# SW-DP, it must read the IDCODE register."
def makeOpcode(self, rw, APnDP, addr):
opcode = 0x81 # Framing
opcode |= rw | APnDP | addr
if self.calcParity(opcode):
opcode |= OP_PARITY
return opcode
def readCmd(self, APnDP, addr):
return self.readSWD(self.makeOpcode(OP_READ, APnDP, addr))
def writeCmd(self, APnDP, addr, val):
self.writeSWD(self.makeOpcode(OP_WRITE, APnDP, addr), val)
def JTAG2SWD(self):
"Initialize SWD-over-JTAG."
# Reset JTAG
self.writeWord(0xffffffff)
# self.writeWord(0xffffffff)
# Activate SWD interface
self.writeByte(0x9e)
self.writeByte(0xE7)
return self.resetSWD()
@staticmethod
def calcParity(val):
count = 0
while val:
val &= val - 1
count += 1
return count & 1
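# Parity sketch: calcParity counts set bits with Kernighan's trick
# (val &= val - 1 clears the lowest set bit each pass), so e.g.
#   SWDAdapterBase.calcParity(0b1011) == 1   # three set bits -> odd parity
#   SWDAdapterBase.calcParity(0b1010) == 0   # two set bits  -> even parity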
|
heartscrytech/PySWD
|
SWDAdapterBase.py
|
Python
|
bsd-3-clause
| 4,081
|
import fdr
|
akhambhati/Echobase
|
Echobase/Statistics/FDR/__init__.py
|
Python
|
gpl-3.0
| 11
|
#######
# Objects
#######
class Persona:
    # class attributes
    edad=0
    genero=""
    nombre=""
    altura=0
    # class methods
    def caminar(self):
        print("I'm walking!!")
    def saludar(self):
        print("Hi!! I'm %s"%self.nombre)
    def comer(self,comida):
        print("I like ",comida)
    def cumpleaños(self):
        self.edad+=1
class Persona2(Persona):
    def __init__(self,nombre,edad,genero):
        self.nombre=nombre
        self.edad=edad
        self.genero=genero
        print("A person has been created ")
class Persona3():
    def __init__(self,nombre,apellido,edad,genero,dinero):
        self.nombre=nombre
        self.apellido=apellido
        self.edad=edad
        self.genero=genero
        self.dinero=dinero
        print("Hi, I'm %s %s"%(self.nombre,self.apellido))
        print("I have $ ",self.dinero)
    def prestarDinero(self,persona,monto):
        if(self.dinero>=monto):
            persona.dinero+=monto
            self.dinero-=monto
        else:
            print("Not right now, kid")
luis=Persona3("Luis","Torres",22,"M",100)
print("Luis's money",luis.dinero)
pablo=Persona3("Pablo","Gonzales",33,"M",0)
print("Pablo's money",pablo.dinero)
pablo.prestarDinero(luis,1000)
luis.prestarDinero(pablo,50)
print("Luis's money",luis.dinero)
print("Pablo's money",pablo.dinero)
# Instantiate a class
juan=Persona()
alan=Persona2("alan",22,"M")
paco=Persona2("Paco",10,"M")
maria=Persona2("Maria",12,"F")
juan.edad=22
juan.genero="M"
juan.nombre="Juan"
juan.altura=2.2
juan.saludar()
Persona.saludar(juan)
print("Juan's age ",juan.edad)
juan.comer("tamales")
juan.cumpleaños()
print("Juan's age ",juan.edad)
alan.saludar()
|
allanstone/cursoSemestralPython
|
SextaClase/objetos.py
|
Python
|
mit
| 1,595
|
import unittest,sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from PSFile import PSFile
class TestPSFile(unittest.TestCase):
def testInit(self):
fn = 'ps_r0_20140815-121539.h5'
psf = PSFile(fn)
print "fn=",psf.openfile
for i,freq in enumerate(psf.freq):
print "i=",i," freq=",freq
if __name__ == '__main__':
unittest.main()
|
bmazin/SDR
|
Setup/test/TestPSFile.py
|
Python
|
gpl-2.0
| 400
|
'''
@author: Rahul Tanwani
@summary: Contains base test case for reusable test methods.
'''
import json
from django.test import TestCase
from batch_requests.settings import br_settings as settings
class TestBase(TestCase):
'''
Base class for all reusable test methods.
'''
def assert_reponse_compatible(self, ind_resp, batch_resp):
'''
Assert if the response of independent request is compatible with
batch response.
'''
# Remove duration header to compare.
if settings.ADD_DURATION_HEADER:
del batch_resp['headers'][settings.DURATION_HEADER_NAME]
self.assertDictEqual(ind_resp, batch_resp, "Compatibility is broken!")
def headers_dict(self, headers):
'''
Converts the headers from the response in to a dict.
'''
return dict(headers.values())
def prepare_response(self, status_code, body, headers):
'''
Returns a dict of all the parameters.
'''
return {"status_code": status_code, "body": body, "headers": self.headers_dict(headers)}
def _batch_request(self, method, path, data, headers={}):
'''
Prepares a batch request.
'''
return {"url": path, "method": method, "headers": headers, "body": data}
def make_a_batch_request(self, method, url, body, headers={}):
'''
Makes a batch request using django client.
'''
return self.client.post("/api/v1/batch/", json.dumps([self._batch_request(method, url, body, headers)]),
content_type="application/json")
def make_multiple_batch_request(self, requests):
'''
Makes multiple batch request using django client.
'''
batch_requests = [self._batch_request(method, path, data, headers) for method, path, data, headers in requests]
return self.client.post("/api/v1/batch/", json.dumps(batch_requests),
content_type="application/json")
|
tanwanirahul/django-batch-requests
|
tests/test_base.py
|
Python
|
mit
| 2,053
|
# Copyright 2022 Mark Bolhuis <mark@bolhuis.dev>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from . import ExtensionModule, ModuleReturnValue
from ..build import CustomTarget
from ..interpreter.type_checking import NoneType, in_set_validator
from ..interpreterbase import FeatureNew, typed_pos_args, typed_kwargs, KwargInfo
from ..mesonlib import File, MesonException, MachineChoice
class WaylandModule(ExtensionModule):
@FeatureNew('wayland module', '0.62.0')
def __init__(self, interpreter):
super().__init__(interpreter)
self.protocols_dep = None
self.pkgdatadir = None
self.scanner_bin = None
self.methods.update({
'scan_xml': self.scan_xml,
'find_protocol': self.find_protocol,
})
@typed_pos_args('wayland.scan_xml', varargs=(str, File), min_varargs=1)
@typed_kwargs(
'wayland.scan_xml',
KwargInfo('public', bool, default=False),
KwargInfo('client', bool, default=True),
KwargInfo('server', bool, default=False),
)
def scan_xml(self, state, args, kwargs):
if self.scanner_bin is None:
self.scanner_bin = state.find_program('wayland-scanner', for_machine=MachineChoice.BUILD)
scope = 'public' if kwargs['public'] else 'private'
sides = [i for i in ['client', 'server'] if kwargs[i]]
if not sides:
            raise MesonException('At least one of the client or server keyword arguments must be set to true.')
xml_files = self.interpreter.source_strings_to_files(args[0])
targets = []
for xml_file in xml_files:
name = os.path.splitext(os.path.basename(xml_file.fname))[0]
code = CustomTarget(
f'{name}-protocol',
state.subdir,
state.subproject,
[self.scanner_bin, f'{scope}-code', '@INPUT@', '@OUTPUT@'],
[xml_file],
[f'{name}-protocol.c'],
backend=state.backend,
)
targets.append(code)
for side in sides:
header = CustomTarget(
f'{name}-{side}-protocol',
state.subdir,
state.subproject,
[self.scanner_bin, f'{side}-header', '@INPUT@', '@OUTPUT@'],
[xml_file],
[f'{name}-{side}-protocol.h'],
backend=state.backend,
)
targets.append(header)
return ModuleReturnValue(targets, targets)
@typed_pos_args('wayland.find_protocol', str)
@typed_kwargs(
'wayland.find_protocol',
KwargInfo('state', str, default='stable', validator=in_set_validator({'stable', 'staging', 'unstable'})),
KwargInfo('version', (int, NoneType)),
)
def find_protocol(self, state, args, kwargs):
base_name = args[0]
xml_state = kwargs['state']
version = kwargs['version']
if xml_state != 'stable' and version is None:
raise MesonException(f'{xml_state} protocols require a version number.')
if xml_state == 'stable' and version is not None:
raise MesonException('stable protocols do not require a version number.')
if self.protocols_dep is None:
self.protocols_dep = self.interpreter.func_dependency(state.current_node, ['wayland-protocols'], {})
if self.pkgdatadir is None:
self.pkgdatadir = self.protocols_dep.get_variable(pkgconfig='pkgdatadir', internal='pkgdatadir')
if xml_state == 'stable':
xml_name = f'{base_name}.xml'
elif xml_state == 'staging':
xml_name = f'{base_name}-v{version}.xml'
else:
xml_name = f'{base_name}-unstable-v{version}.xml'
path = os.path.join(self.pkgdatadir, xml_state, base_name, xml_name)
if not os.path.exists(path):
raise MesonException(f'The file {path} does not exist.')
return File.from_absolute_file(path)
def initialize(interpreter):
return WaylandModule(interpreter)
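# meson.build usage sketch (hypothetical snippet, not part of this file):
#   wl_mod = import('unstable-wayland')
#   xml = wl_mod.find_protocol('xdg-shell')
#   protocol_targets = wl_mod.scan_xml(xml, client: true, server: false)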
|
mesonbuild/meson
|
mesonbuild/modules/unstable_wayland.py
|
Python
|
apache-2.0
| 4,615
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
import os
import edi.lib.helpers
from codecs import open
from edi.lib.sshkeyhelpers import get_user_ssh_pub_keys
from tests.libtesting.helpers import get_command, get_sub_command
from edi.lib import mockablerun
import subprocess
fake_config = """
cow moo
identityfile ~/bingo
identityfile ~/bongo
identityfile ~/baz
myidentityfile ~/blabla
dog bark
"""
def create_file(tmpdir, file_name):
file_path = os.path.join(str(tmpdir), file_name)
with open(file_path, mode='w') as file:
file.write(file_name)
def fake_ssh_environment(monkeypatch, tmpdir):
def fake_which_ssh(*_):
return os.path.join(os.sep, 'usr', 'bin', 'ssh')
monkeypatch.setattr(edi.lib.helpers, 'which', fake_which_ssh)
def intercept_command_run(*popenargs, **kwargs):
if get_command(popenargs) == 'ssh' and get_sub_command(popenargs) == '-G':
return subprocess.CompletedProcess("fakerun", 0, stdout=fake_config)
elif get_command(popenargs) == "getent" and get_sub_command(popenargs) == "passwd":
return subprocess.CompletedProcess("fakerun", 0,
stdout='john:x:1000:1000:John Doe,,,:{}:/bin/bash\n'.format(str(tmpdir)))
else:
return subprocess.run(*popenargs, **kwargs)
monkeypatch.setattr(mockablerun, 'run_mockable', intercept_command_run)
def test_ssh_identity_files_no_ssh(monkeypatch):
def fake_which_no_ssh(*_):
return None
monkeypatch.setattr(edi.lib.helpers, 'which', fake_which_no_ssh)
assert get_user_ssh_pub_keys() == []
def test_ssh_identity_files(monkeypatch, tmpdir):
fake_ssh_environment(monkeypatch, tmpdir)
assert get_user_ssh_pub_keys() == []
create_file(tmpdir, 'blabla')
create_file(tmpdir, 'blabla.pub')
assert get_user_ssh_pub_keys() == []
create_file(tmpdir, 'bongo.pub')
assert get_user_ssh_pub_keys() == []
create_file(tmpdir, 'baz')
assert get_user_ssh_pub_keys() == []
create_file(tmpdir, 'baz.pub')
pub_keys = get_user_ssh_pub_keys()
assert len(pub_keys) == 1
assert str(os.path.join(str(tmpdir), 'baz.pub')) in pub_keys
create_file(tmpdir, 'bongo')
pub_keys = get_user_ssh_pub_keys()
assert len(pub_keys) == 2
assert str(os.path.join(str(tmpdir), 'baz.pub')) in pub_keys
assert str(os.path.join(str(tmpdir), 'bongo.pub')) in pub_keys
|
lueschem/edi
|
tests/lib/test_sshkeyhelpers.py
|
Python
|
lgpl-3.0
| 3,151
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return render(request, 'home/home.html')
|
KokareIITP/vms
|
vms/home/views.py
|
Python
|
gpl-2.0
| 139
|
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage Jenkins plugin module registry.
import copy
import logging
import operator
import pkg_resources
import re
import types
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
__all__ = [
"ModuleRegistry"
]
logger = logging.getLogger(__name__)
class MacroRegistry(object):
_component_to_component_list_mapping = {}
_component_list_to_component_mapping = {}
_macros_by_component_type = {}
_macros_by_component_list_type = {}
def __init__(self):
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.macros'):
Mod = entrypoint.load()
self._component_list_to_component_mapping[
Mod.component_list_type] = Mod.component_type
self._component_to_component_list_mapping[
Mod.component_type] = Mod.component_list_type
self._macros_by_component_type[
Mod.component_type] = {}
self._macros_by_component_list_type[
Mod.component_list_type] = {}
self._mask_warned = {}
@property
def _nonempty_component_list_types(self):
return [clt for clt in self._macros_by_component_list_type
if len(self._macros_by_component_list_type[clt]) != 0]
@property
def component_types(self):
return self._macros_by_component_type.keys()
def _is_macro(self, component_name, component_list_type):
return (component_name in
self._macros_by_component_list_type[component_list_type])
def register(self, component_type, macro):
macro_name = macro["name"]
clt = self._component_to_component_list_mapping[component_type]
self._macros_by_component_type[component_type][macro_name] = macro
self._macros_by_component_list_type[clt][macro_name] = macro
    def expand_macros(self, jobish, template_data=None):
        """Expand macros in place on the given job-like thing.
        :arg dict jobish: A job-like JJB data structure. Could be anything that
        might provide JJB "components" that get expanded to XML configuration.
        This includes "job", "job-template", and "default" DSL items. This
        argument is modified in place.
        :arg dict template_data: If jobish is a job-template, use the same
        template data used to fill in job-template variables to fill in macro
        variables.
        """
for component_list_type in self._nonempty_component_list_types:
self._expand_macros_for_component_list_type(
jobish, component_list_type, template_data)
def _expand_macros_for_component_list_type(self,
jobish,
component_list_type,
template_data=None):
"""In-place expansion of macros on jobish.
        :arg dict jobish: A job-like JJB data structure. Could be anything that
        might provide JJB "components" that get expanded to XML configuration.
        This includes "job", "job-template", and "default" DSL items. This
        argument is modified in place.
:arg str component_list_type: A string value indicating which type of
component we are expanding macros for.
:arg dict template_data: If jobish is a job-template, use the same
template data used to fill in job-template variables to fill in macro
variables.
"""
if (jobish.get("project-type", None) == "pipeline"
and component_list_type == "scm"):
# Pipeline projects have an atypical scm type, eg:
#
# - job:
# name: whatever
# project-type: pipeline
# pipeline-scm:
# script-path: nonstandard-scriptpath.groovy
# scm:
# - macro_name
#
# as opposed to the more typical:
#
# - job:
# name: whatever2
# scm:
# - macro_name
#
# So we treat that case specially here.
component_list = jobish.get("pipeline-scm", {}).get("scm", [])
else:
component_list = jobish.get(component_list_type, [])
component_substitutions = []
for component in component_list:
macro_component_list = self._maybe_expand_macro(
component, component_list_type, template_data)
if macro_component_list is not None:
# Since macros can contain other macros, we need to recurse
# into the newly-expanded macro component list to expand any
# macros that might be hiding in there. In order to do this we
# have to make the macro component list look like a job by
# embedding it in a dictionary like so.
self._expand_macros_for_component_list_type(
{component_list_type: macro_component_list},
component_list_type,
template_data)
component_substitutions.append(
(component, macro_component_list))
for component, macro_component_list in component_substitutions:
component_index = component_list.index(component)
            component_list.remove(component)
            for i, macro_component in enumerate(macro_component_list):
                component_list.insert(component_index + i, macro_component)
def _maybe_expand_macro(self,
component,
component_list_type,
template_data=None):
"""For a given component, if it refers to a macro, return the
components defined for that macro with template variables (if any)
interpolated in.
:arg str component_list_type: A string value indicating which type of
component we are expanding macros for.
:arg dict template_data: If component is a macro and contains template
variables, use the same template data used to fill in job-template
variables to fill in macro variables.
"""
component_copy = copy.deepcopy(component)
if isinstance(component, dict):
# The component is a singleton dictionary of name:
# dict(args)
component_name, component_data = next(iter(component_copy.items()))
else:
# The component is a simple string name, eg "run-tests".
component_name, component_data = component_copy, None
if template_data:
# Address the case where a macro name contains a variable to be
# interpolated by template variables.
component_name = deep_format(component_name, template_data, True)
# Check that the component under consideration actually is a
# macro.
if not self._is_macro(component_name, component_list_type):
return None
# Warn if the macro shadows an actual module type name for this
# component list type.
if ModuleRegistry.is_module_name(component_name, component_list_type):
self._mask_warned[component_name] = True
logger.warning(
"You have a macro ('%s') defined for '%s' "
"component list type that is masking an inbuilt "
"definition" % (component_name, component_list_type))
macro_component_list = self._get_macro_components(component_name,
component_list_type)
# If macro instance contains component_data, interpolate that
# into macro components.
if component_data:
# Also use template_data, but prefer data obtained directly from
# the macro instance.
if template_data:
template_data = copy.deepcopy(template_data)
template_data.update(component_data)
macro_component_list = deep_format(
macro_component_list, template_data, False)
else:
macro_component_list = deep_format(
macro_component_list, component_data, False)
return macro_component_list
def _get_macro_components(self, macro_name, component_list_type):
"""Return the list of components that a macro expands into. For example:
- wrapper:
name: timeout-wrapper
wrappers:
- timeout:
fail: true
elastic-percentage: 150
elastic-default-timeout: 90
type: elastic
Provides a single "wrapper" type (corresponding to the "wrappers" list
type) component named "timeout" with the values shown above.
The macro_name argument in this case would be "timeout-wrapper".
"""
macro_component_list = self._macros_by_component_list_type[
component_list_type][macro_name][component_list_type]
return copy.deepcopy(macro_component_list)
class ModuleRegistry(object):
_entry_points_cache = {}
def __init__(self, jjb_config, plugins_list=None):
self.modules = []
self.modules_by_component_type = {}
self.handlers = {}
self.jjb_config = jjb_config
self.masked_warned = {}
if plugins_list is None:
self.plugins_dict = {}
else:
self.plugins_dict = self._get_plugins_info_dict(plugins_list)
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.modules'):
Mod = entrypoint.load()
mod = Mod(self)
self.modules.append(mod)
self.modules.sort(key=operator.attrgetter('sequence'))
if mod.component_type is not None:
self.modules_by_component_type[mod.component_type] = entrypoint
@staticmethod
def _get_plugins_info_dict(plugins_list):
def mutate_plugin_info(plugin_info):
"""
We perform mutations on a single member of plugin_info here, then
return a dictionary with the longName and shortName of the plugin
mapped to its plugin info dictionary.
"""
version = plugin_info.get('version', '0')
plugin_info['version'] = re.sub(r'(.*)-(?:SNAPSHOT|BETA).*',
r'\g<1>.preview', version)
aliases = []
for key in ['longName', 'shortName']:
value = plugin_info.get(key, None)
if value is not None:
aliases.append(value)
plugin_info_dict = {}
for name in aliases:
plugin_info_dict[name] = plugin_info
return plugin_info_dict
list_of_dicts = [mutate_plugin_info(v) for v in plugins_list]
plugins_info_dict = {}
for d in list_of_dicts:
plugins_info_dict.update(d)
return plugins_info_dict
def get_plugin_info(self, plugin_name):
""" This method is intended to provide information about plugins within
a given module's implementation of Base.gen_xml. The return value is a
dictionary with data obtained directly from a running Jenkins instance.
This allows module authors to differentiate generated XML output based
on information such as specific plugin versions.
:arg string plugin_name: Either the shortName or longName of a plugin
as see in a query that looks like:
``http://<jenkins-hostname>/pluginManager/api/json?pretty&depth=2``
During a 'test' run, it is possible to override JJB's query to a live
Jenkins instance by passing it a path to a file containing a YAML list
of dictionaries that mimics the plugin properties you want your test
output to reflect::
jenkins-jobs test -p /path/to/plugins-info.yaml
Below is example YAML that might be included in
/path/to/plugins-info.yaml.
.. literalinclude:: /../../tests/cmd/fixtures/plugins-info.yaml
"""
return self.plugins_dict.get(plugin_name, {})
def registerHandler(self, category, name, method):
cat_dict = self.handlers.get(category, {})
if not cat_dict:
self.handlers[category] = cat_dict
cat_dict[name] = method
def getHandler(self, category, name):
return self.handlers[category][name]
@property
def parser_data(self):
return self.__parser_data
def set_parser_data(self, parser_data):
self.__parser_data = parser_data
def dispatch(self, component_type, xml_parent, component):
"""This is a method that you can call from your implementation of
Base.gen_xml or component. It allows modules to define a type
of component, and benefit from extensibility via Python
entry points and Jenkins Job Builder :ref:`Macros <macro>`.
:arg string component_type: the name of the component
(e.g., `builder`)
:arg Element xml_parent: the parent XML element
See :py:class:`jenkins_jobs.modules.base.Base` for how to register
components of a module.
See the Publishers module for a simple example of how to use
this method.
"""
if component_type not in self.modules_by_component_type:
raise JenkinsJobsException("Unknown component type: "
"'{0}'.".format(component_type))
entry_point = self.modules_by_component_type[component_type]
component_list_type = entry_point.load().component_list_type
if isinstance(component, dict):
# The component is a singleton dictionary of name: dict(args)
name, component_data = next(iter(component.items()))
else:
# The component is a simple string name, eg "run-tests"
name = component
component_data = {}
# Look for a component function defined in an entry point
eps = self._entry_points_cache.get(component_list_type)
if eps is None:
module_eps = []
# auto build entry points by inferring from base component_types
mod = pkg_resources.EntryPoint(
"__all__", entry_point.module_name, dist=entry_point.dist)
Mod = mod.load()
func_eps = [Mod.__dict__.get(a) for a in dir(Mod)
if isinstance(Mod.__dict__.get(a),
types.FunctionType)]
for func_ep in func_eps:
try:
# extract entry point based on docstring
name_line = func_ep.__doc__.split('\n')
if not name_line[0].startswith('yaml:'):
logger.debug("Ignoring '%s' as an entry point" %
name_line)
continue
ep_name = name_line[0].split(' ')[1]
except (AttributeError, IndexError):
# AttributeError by docstring not being defined as
# a string to have split called on it.
# IndexError raised by name_line not containing anything
# after the 'yaml:' string.
logger.debug("Not including func '%s' as an entry point"
% func_ep.__name__)
continue
module_eps.append(
pkg_resources.EntryPoint(
ep_name, entry_point.module_name,
dist=entry_point.dist, attrs=(func_ep.__name__,)))
logger.debug(
"Adding auto EP '%s=%s:%s'" %
(ep_name, entry_point.module_name, func_ep.__name__))
# load from explicitly defined entry points
module_eps.extend(list(pkg_resources.iter_entry_points(
group='jenkins_jobs.{0}'.format(component_list_type))))
eps = {}
for module_ep in module_eps:
if module_ep.name in eps:
raise JenkinsJobsException(
"Duplicate entry point found for component type: "
"'{0}', '{0}',"
"name: '{1}'".format(component_type, name))
eps[module_ep.name] = module_ep
# cache both sets of entry points
self._entry_points_cache[component_list_type] = eps
logger.debug("Cached entry point group %s = %s",
component_list_type, eps)
if name in eps:
func = eps[name].load()
func(self, xml_parent, component_data)
else:
raise JenkinsJobsException("Unknown entry point or macro '{0}' "
"for component type: '{1}'.".
format(name, component_type))
@classmethod
    def is_module_name(cls, name, component_list_type):
        eps = cls._entry_points_cache.get(component_list_type)
if not eps:
return False
return (name in eps)
|
phinexus/jenkins-job-builder
|
jenkins_jobs/registry.py
|
Python
|
apache-2.0
| 18,370
|
#!/usr/bin/python3
from gi.repository import Gtk as gtk
from armadito import jrpc
import pprint
import sys
conn = jrpc.Connection('/tmp/.armadito-daemon')
conn.connect()
def on_status_received(result):
pprint.pprint(vars(result))
pprint.pprint(vars(result.module_infos[0]))
for b in result.module_infos[0].base_infos:
pprint.pprint(vars(b))
#sys.exit(0)
conn.call('status', callback = on_status_received)
def notify_event(ev):
pprint.pprint(vars(ev))
pprint.pprint(vars(ev.u))
if ev.type == 'EVENT_DETECTION':
pprint.pprint(vars(ev.u.ev_detection))
conn.map('notify_event', notify_event)
class ScanArgument(object):
def __init__(self, path):
self.root_path = path
self.send_progress = 1
conn.call('scan', ScanArgument(sys.argv[1]))
gtk.main()
|
armadito/armadito-systray-ui
|
python/tests/test-jrpc.py
|
Python
|
gpl-3.0
| 821
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
dimension = 2
sampleSize = 1000
# we create an analytical function
analytical = NumericalMathFunction(['x0', 'x1'], ['f'], ['10+3*x0+x1'])
# we create a collection of uniform distributions over [-Pi Pi[
aCollection = DistributionCollection()
for i in range(dimension):
aCollection.add(Distribution(Normal()))
# we create one distribution object
aDistribution = ComposedDistribution(
aCollection, Copula(IndependentCopula(dimension)))
randomVector = RandomVector(aDistribution)
composite = RandomVector(analytical, randomVector)
print("composite=", composite)
# we create two input samples for the function
inputSample = randomVector.getSample(sampleSize)
outputSample = analytical(inputSample)
src = CorrelationAnalysis.SRC(inputSample, outputSample)
print("src=", src)
except:
import sys
print("t_CorrelationAnalysis_sobol.py",
sys.exc_info()[0], sys.exc_info()[1])
|
dubourg/openturns
|
python/test/t_CorrelationAnalysis_std.py
|
Python
|
gpl-3.0
| 1,110
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
# Define the regular expression for parsing a single clause of the input
# (delimited by commas). A legal clause looks like:
# <variable name>[<index>]? = <rhs>
# where <rhs> is either a single token or [] enclosed list of tokens.
# For example: "var[1] = a" or "x = [1,2,3]"
PARAM_RE = re.compile(r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
(\[\s*(?P<index>\d+)\s*\])? # (optional) index: "1" or None
\s*=\s*
((?P<val>[^,\[]*) # single value: "a" or None
|
\[(?P<vals>[^\]]*)\]) # list of values: None or "1,2,3"
($|,\s*)""", re.VERBOSE)
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values))
def _reuse_fail(name, values):
"""Helper function for raising a value error for reuse of name."""
raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name,
values))
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
"""
try:
parsed_value = parse_fn(m_dict['val'])
except ValueError:
_parse_fail(name, var_type, m_dict['val'], values)
# If no index is provided
if not m_dict['index']:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
# The name has already been used as a scalar, then it
# will be in this dictionary and map to a non-dictionary.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict['index'])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail('{}[{}]'.format(name, index), values)
results_dictionary[name][index] = parsed_value
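# Worked sketch: "s=5" (parse_fn=int) yields results_dictionary == {'s': 5};
# "arr[0]=5" yields {'arr': {0: 5}}, a dict keyed by the integer index.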
def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
"""
if m_dict['index'] is not None:
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
    _reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict['vals'], values)
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
"""
fail_msg = (
"Could not cast hparam '%s' of type '%s' from value %r" %
(name, param_type, value))
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if (issubclass(param_type, (six.string_types, six.binary_type)) and
not isinstance(value, (six.string_types, six.binary_type))):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if (issubclass(param_type, numbers.Integral) and
not isinstance(value, numbers.Integral)):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if (issubclass(param_type, numbers.Number) and
not isinstance(value, numbers.Number)):
raise ValueError(fail_msg)
return param_type(value)
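# Illustrative examples of the compatibility rules above (a sketch added for
# clarity, not part of the original module):
#
#   _cast_to_type_if_compatible('lr', float, 3)     # -> 3.0 (int to float is fine)
#   _cast_to_type_if_compatible('steps', int, 3.5)  # raises ValueError
#   _cast_to_type_if_compatible('flag', bool, 1)    # raises ValueError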
def parse_values(values, type_map, ignore_unknown=False):
"""Parses hyperparameter values from a string into a python map.
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multiple times in `values`, a ValueError
is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').
  If a hyperparameter name appears in both an index assignment and a scalar
  assignment, a ValueError is raised (e.g. 'a=[1,2,3],a[0] = 1').
  The hyperparameter name may contain '.' symbols, which will result in an
  attribute name that is only accessible through the getattr and setattr
  functions (and must first be explicitly added through add_hparam).
  WARNING: Use of '.' in your variable names is allowed, but is not well
  supported and not recommended.
  The `value` in `name=value` must follow the syntax according to the
  type of the parameter:
  * Scalar integer: A Python-parsable integer value. E.g.: 1,
    100, -12.
* Scalar float: A Python-parsable floating point value. E.g.: 1.0,
-.54e89.
* Boolean: Either true or false.
* Scalar string: A non-empty sequence of characters, excluding comma,
spaces, and square brackets. E.g.: foo, bar_1.
* List: A comma separated list of scalar values of the parameter type
enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].
When index assignment is used, the corresponding type_map key should be the
list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not
"arr[1]").
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
type_map: A dictionary mapping hyperparameter names to types. Note every
parameter name in values must be a key in type_map. The values must
conform to the types indicated, where a value V is said to conform to a
type T if either V has type T, or V is a list of elements of type T.
Hence, for a multidimensional parameter 'x' taking float values,
'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
    ignore_unknown: Bool. Whether values that are missing a type in type_map
      should be ignored. If set to True, a ValueError will not be raised for
      an unknown hyperparameter type.
Returns:
A python map mapping each name to either:
* A scalar value.
* A list of scalar values.
* A dictionary mapping index numbers to scalar values.
    (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}})
Raises:
ValueError: If there is a problem with input.
* If `values` cannot be parsed.
* If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').
    * If the same name is assigned two different values (e.g. 'a=1,a=2',
      'a[1]=1,a[1]=2', or 'a=1,a=[1]')
"""
results_dictionary = {}
pos = 0
while pos < len(values):
m = PARAM_RE.match(values, pos)
if not m:
raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
# Check that there is a comma between parameters and move past it.
pos = m.end()
# Parse the values.
m_dict = m.groupdict()
name = m_dict['name']
if name not in type_map:
if ignore_unknown:
continue
raise ValueError('Unknown hyperparameter type for %s' % name)
type_ = type_map[name]
# Set up correct parsing function (depending on whether type_ is a bool)
if type_ == bool:
def parse_bool(value):
if value in ['true', 'True']:
return True
elif value in ['false', 'False']:
return False
else:
try:
return bool(int(value))
except ValueError:
_parse_fail(name, type_, value, values)
parse = parse_bool
else:
parse = type_
    # If a single value is provided
if m_dict['val'] is not None:
_process_scalar_value(name, parse, type_, m_dict, values,
results_dictionary)
# If the assigned value is a list:
elif m_dict['vals'] is not None:
_process_list_value(name, parse, type_, m_dict, values,
results_dictionary)
else: # Not assigned a list or value
_parse_fail(name, type_, '', values)
return results_dictionary
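# Example usage of parse_values (an illustrative sketch; the names below are
# hypothetical, not part of the original module):
#
#   type_map = {'lr': float, 'units': int, 'use_bias': bool, 'arr': int}
#   parse_values('lr=0.1,units=[128,64],use_bias=true,arr[2]=7', type_map)
#   # -> {'lr': 0.1, 'units': [128, 64], 'use_bias': True, 'arr': {2: 7}}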
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
  # The hyperparameters are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
  Hyperparameters have a type, which is inferred from the type of the value
  passed at construction time. The currently supported types are: integer,
  float, boolean, string, and list of integer, float, boolean, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
The keyword arguments specify name-values pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams._name_`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = tf.HParams(
learning_rate=0.1, num_hidden_units=100, activation='relu')
hparams.activation ==> 'relu'
```
    Note that a few names are reserved and cannot be used as hyperparameter
    names. If you use one of the reserved names the constructor raises a
    `ValueError`.
Args:
hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
protocol buffer. If provided, this object is initialized by
deserializing hparam_def. Otherwise **kwargs is used.
model_structure: An instance of ModelStructure, defining the feature
crosses to be used in the Trial.
**kwargs: Key-value pairs where the key is the hyperparameter name and
the value is the value for the parameter.
Raises:
ValueError: If both `hparam_def` and initialization values are provided,
or if one of the arguments is invalid.
"""
# Register the hyperparameters and their type in _hparam_types.
# This simplifies the implementation of parse().
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
self._model_structure = model_structure
if hparam_def:
self._init_from_proto(hparam_def)
if kwargs:
raise ValueError('hparam_def and initialization values are '
'mutually exclusive')
else:
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
def _init_from_proto(self, hparam_def):
"""Creates a new HParams from `HParamDef` protocol buffer.
Args:
hparam_def: `HParamDef` protocol buffer.
"""
assert isinstance(hparam_def, hparam_pb2.HParamDef)
for name, value in hparam_def.hparam.items():
kind = value.WhichOneof('kind')
if kind.endswith('_value'):
# Single value.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, int(getattr(value, kind)))
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(name, compat.as_str(getattr(value, kind)))
else:
self.add_hparam(name, getattr(value, kind))
else:
# List of values.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(
name, [compat.as_str(v) for v in getattr(value, kind).value])
else:
self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
    # Keys in kwargs are unique, but 'name' could be the name of a pre-existing
    # attribute of this object. In that case we refuse to use it as a
    # hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError(
'Multi-valued hyperparameters cannot be empty: %s' % name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
"""
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, [
_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
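  # Illustrative sketch of the type checks enforced by set_hparam (hypothetical
  # values, not part of the original module):
  #
  #   hparams = HParams(learning_rate=0.1, layers=[128, 64])
  #   hparams.set_hparam('learning_rate', 0.2)    # ok: scalar float
  #   hparams.set_hparam('learning_rate', [0.2])  # ValueError: list for scalar
  #   hparams.set_hparam('layers', [32, 16])      # ok: list for multi-valued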
def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
"""
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name]
def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
"""
type_map = dict()
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map)
def override_from_dict(self, values_dict):
"""Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
ValueError: If `values_dict` cannot be parsed.
"""
for name, value in values_dict.items():
self.set_hparam(name, value)
return self
@deprecation.deprecated(None, 'Use `override_from_dict`.')
def set_from_map(self, values_map):
"""DEPRECATED. Use override_from_dict."""
return self.override_from_dict(values_dict=values_map)
def set_model_structure(self, model_structure):
self._model_structure = model_structure
def get_model_structure(self):
return self._model_structure
def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
"""
return json.dumps(
self.values(),
indent=indent,
separators=separators,
sort_keys=sort_keys)
def parse_json(self, values_json):
"""Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self.override_from_dict(values_map)
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
def get(self, key, default=None):
"""Returns the value of `key` if it exists, else `default`."""
if key in self._hparam_types:
# Ensure that default is compatible with the parameter type.
if default is not None:
param_type, is_param_list = self._hparam_types[key]
type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
'default=%s' % (key, type_str, default))
is_default_list = isinstance(default, list)
if is_param_list != is_default_list:
raise ValueError(fail_msg)
try:
if is_default_list:
for value in default:
_cast_to_type_if_compatible(key, param_type, value)
else:
_cast_to_type_if_compatible(key, param_type, default)
except ValueError as e:
raise ValueError('%s. %s' % (fail_msg, e))
return getattr(self, key)
return default
def __contains__(self, key):
return key in self._hparam_types
def __str__(self):
return str(sorted(self.values().items()))
def __repr__(self):
return '%s(%s)' % (type(self).__name__, self.__str__())
@staticmethod
def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized.
"""
if issubclass(param_type, bool):
# This check must happen before issubclass(param_type, six.integer_types),
# since Python considers bool to be a subclass of int.
typename = 'bool'
elif issubclass(param_type, six.integer_types):
# Setting 'int' and 'long' types to be 'int64' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix])
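  # For example (illustrative): _get_kind_name(int, True) returns 'int64_list',
  # and _get_kind_name(str, False) returns 'bytes_value'.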
def to_proto(self, export_scope=None): # pylint: disable=unused-argument
"""Converts a `HParams` object to a `HParamDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `HParamDef` protocol buffer.
"""
hparam_proto = hparam_pb2.HParamDef()
for name in self._hparam_types:
# Parse the values.
param_type, is_list = self._hparam_types.get(name, (None, None))
kind = HParams._get_kind_name(param_type, is_list)
if is_list:
if kind.startswith('bytes'):
v_list = [compat.as_bytes(v) for v in getattr(self, name)]
else:
v_list = [v for v in getattr(self, name)]
getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
else:
v = getattr(self, name)
if kind.startswith('bytes'):
v = compat.as_bytes(getattr(self, name))
setattr(hparam_proto.hparam[name], kind, v)
return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None): # pylint: disable=unused-argument
return HParams(hparam_def=hparam_def)
ops.register_proto_function(
'hparams',
proto_type=hparam_pb2.HParamDef,
to_proto=HParams.to_proto,
from_proto=HParams.from_proto)
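# Round-trip serialization sketch using the to_proto/from_proto pair defined
# above (illustrative, not part of the original module):
#
#   hparams = HParams(learning_rate=0.1, activations=['relu', 'tanh'])
#   proto = hparams.to_proto()            # a hparam_pb2.HParamDef message
#   restored = HParams.from_proto(proto)
#   restored.learning_rate                # -> 0.1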
|
jendap/tensorflow
|
tensorflow/contrib/training/python/training/hparam.py
|
Python
|
apache-2.0
| 26,772
|
#!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from six.moves import configparser
from six.moves import input
import getopt
import logging
import sys
from pycsw.core import admin, config
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
CONTEXT = config.StaticContext()
def usage():
"""Provide usage instructions"""
return '''
NAME
pycsw-admin.py - pycsw admin utility
SYNOPSIS
pycsw-admin.py -c <command> -f <cfg> [-h] [-p /path/to/records] [-r]
Available options:
-c Command to be performed:
- setup_db
- load_records
- export_records
- rebuild_db_indexes
- optimize_db
- refresh_harvested_records
- gen_sitemap
- post_xml
- get_sysprof
- validate_xml
- delete_records
-f Filepath to pycsw configuration
-h Usage message
-o path to output file
-p path to input/output directory or file to read/write metadata records
-r load records from directory recursively
-s XML Schema
-t Timeout (in seconds) for HTTP requests (default is 30)
-u URL of CSW
-x XML document
-y force confirmation
EXAMPLES
1.) setup_db: Creates repository tables and indexes
pycsw-admin.py -c setup_db -f default.cfg
2.) load_records: Loads metadata records from directory or file into repository
pycsw-admin.py -c load_records -p /path/to/records -f default.cfg
Load records from directory recursively
pycsw-admin.py -c load_records -p /path/to/records -f default.cfg -r
Load records from directory and force updates
pycsw-admin.py -c load_records -p /path/to/records -f default.cfg -y
Load metadata record from file into repository
pycsw-admin.py -c load_records -p /path/to/file.xml -f default.cfg
3.) export_records: Dump metadata records from repository into directory
pycsw-admin.py -c export_records -p /path/to/records -f default.cfg
4.) rebuild_db_indexes: Rebuild repository database indexes
pycsw-admin.py -c rebuild_db_indexes -f default.cfg
5.) optimize_db: Optimize repository database
pycsw-admin.py -c optimize_db -f default.cfg
6.) refresh_harvested_records: Refresh repository records
which have been harvested
pycsw-admin.py -c refresh_harvested_records -f default.cfg
7.) gen_sitemap: Generate XML Sitemap
pycsw-admin.py -c gen_sitemap -f default.cfg -o /path/to/sitemap.xml
8.) post_xml: Execute a CSW request via HTTP POST
pycsw-admin.py -c post_xml -u http://host/csw -x /path/to/request.xml
9.) get_sysprof: Get versions of dependencies
pycsw-admin.py -c get_sysprof
10.) validate_xml: Validate an XML document against an XML Schema
pycsw-admin.py -c validate_xml -x file.xml -s file.xsd
11.) delete_records: Deletes all records from repository
pycsw-admin.py -c delete_records -f default.cfg
12.) delete_records: Deletes all records from repository without prompting
pycsw-admin.py -c delete_records -f default.cfg -y
'''
COMMAND = None
XML_DIRPATH = None
CFG = None
RECURSIVE = False
OUTPUT_FILE = None
CSW_URL = None
XML = None
XSD = None
TIMEOUT = 30
FORCE_CONFIRM = False
if len(sys.argv) == 1:
print(usage())
sys.exit(1)
try:
OPTS, ARGS = getopt.getopt(sys.argv[1:], 'c:f:ho:p:ru:x:s:t:y')
except getopt.GetoptError as err:
print('\nERROR: %s' % err)
print(usage())
sys.exit(2)
for o, a in OPTS:
if o == '-c':
COMMAND = a
if o == '-f':
CFG = a
if o == '-o':
OUTPUT_FILE = a
if o == '-p':
XML_DIRPATH = a
if o == '-r':
RECURSIVE = True
if o == '-u':
CSW_URL = a
if o == '-x':
XML = a
if o == '-s':
XSD = a
if o == '-t':
TIMEOUT = int(a)
if o == '-h': # dump help and exit
print(usage())
sys.exit(3)
if o == '-y':
FORCE_CONFIRM = True
if COMMAND is None:
print('-c <command> is a required argument')
sys.exit(4)
if COMMAND not in ['setup_db', 'load_records', 'export_records',
'rebuild_db_indexes', 'optimize_db',
'refresh_harvested_records', 'gen_sitemap',
'post_xml', 'get_sysprof',
'validate_xml', 'delete_records']:
print('ERROR: invalid command name: %s' % COMMAND)
sys.exit(5)
if CFG is None and COMMAND not in ['post_xml', 'get_sysprof', 'validate_xml']:
print('ERROR: -f <cfg> is a required argument')
sys.exit(6)
if COMMAND in ['load_records', 'export_records'] and XML_DIRPATH is None:
print('ERROR: -p </path/to/records> is a required argument')
sys.exit(7)
if COMMAND == 'gen_sitemap' and OUTPUT_FILE is None:
print('ERROR: -o </path/to/sitemap.xml> is a required argument')
sys.exit(8)
if COMMAND not in ['post_xml', 'get_sysprof', 'validate_xml']:
SCP = configparser.SafeConfigParser()
with open(CFG) as f:
SCP.readfp(f)
DATABASE = SCP.get('repository', 'database')
URL = SCP.get('server', 'url')
HOME = SCP.get('server', 'home')
METADATA = dict(SCP.items('metadata:main'))
try:
TABLE = SCP.get('repository', 'table')
except configparser.NoOptionError:
TABLE = 'records'
elif COMMAND not in ['get_sysprof', 'validate_xml']:
if CSW_URL is None:
print('ERROR: -u <http://host/csw> is a required argument')
sys.exit(9)
if XML is None:
print('ERROR: -x /path/to/request.xml is a required argument')
sys.exit(10)
elif COMMAND == 'validate_xml':
if XML is None:
print('ERROR: -x /path/to/file.xml is a required argument')
sys.exit(11)
if XSD is None:
print('ERROR: -s /path/to/file.xsd is a required argument')
sys.exit(12)
if COMMAND == 'setup_db':
try:
admin.setup_db(DATABASE, TABLE, HOME)
except Exception as err:
print(err)
print('ERROR: DB creation error. Database tables already exist')
print('Delete tables or database to reinitialize')
elif COMMAND == 'load_records':
admin.load_records(CONTEXT, DATABASE, TABLE, XML_DIRPATH, RECURSIVE, FORCE_CONFIRM)
elif COMMAND == 'export_records':
admin.export_records(CONTEXT, DATABASE, TABLE, XML_DIRPATH)
elif COMMAND == 'rebuild_db_indexes':
admin.rebuild_db_indexes(DATABASE, TABLE)
elif COMMAND == 'optimize_db':
admin.optimize_db(CONTEXT, DATABASE, TABLE)
elif COMMAND == 'refresh_harvested_records':
admin.refresh_harvested_records(CONTEXT, DATABASE, TABLE, URL)
elif COMMAND == 'gen_sitemap':
admin.gen_sitemap(CONTEXT, DATABASE, TABLE, URL, OUTPUT_FILE)
elif COMMAND == 'post_xml':
print(admin.post_xml(CSW_URL, XML, TIMEOUT))
elif COMMAND == 'get_sysprof':
print(admin.get_sysprof())
elif COMMAND == 'validate_xml':
admin.validate_xml(XML, XSD)
elif COMMAND == 'delete_records':
if not FORCE_CONFIRM:
if input('This will delete all records! Continue? [Y/n] ') == 'Y':
FORCE_CONFIRM = True
if FORCE_CONFIRM:
admin.delete_records(CONTEXT, DATABASE, TABLE)
print('Done')
|
ocefpaf/pycsw
|
bin/pycsw-admin.py
|
Python
|
mit
| 8,634
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import Qt, QObject, SIGNAL, QThread
from PyQt4.QtGui import QDialog, QApplication, QDialogButtonBox, QMessageBox, QTableWidgetItem, QHeaderView
from qgis.core import QGis, QgsFeature, QgsDistanceArea, QgsFeatureRequest
from ui_frmVisual import Ui_Dialog
import ftools_utils
import math
class VisualDialog(QDialog, Ui_Dialog):
def __init__(self, iface, function):
QDialog.__init__(self, iface.mainWindow())
self.iface = iface
self.setupUi(self)
self.myFunction = function
## Set object visibility to False if tool is not Check geometry
self.ckBoxShpError.hide()
self.browseShpError.hide()
self.lineEditShpError.hide()
self.label_6.hide()
self.line.hide()
self.addToCanvasCheck.hide()
self.buttonBox_2.setOrientation(Qt.Horizontal)
if self.myFunction == 2 or self.myFunction == 3:
QObject.connect(self.inShape, SIGNAL("currentIndexChanged(QString)"), self.update)
self.manageGui()
self.cancel_close = self.buttonBox_2.button(QDialogButtonBox.Close)
self.buttonOk = self.buttonBox_2.button(QDialogButtonBox.Ok)
self.progressBar.setValue(0)
self.partProgressBar.setValue(0)
self.partProgressBar.setVisible(False)
def keyPressEvent(self, e):
'''
Reimplemented key press event:
'''
if (e.modifiers() == Qt.ControlModifier or e.modifiers() == Qt.MetaModifier) and e.key() == Qt.Key_C:
#selection = self.tblUnique.selectedItems()
items = ""
if self.myFunction in (1, 2):
for rec in range(self.tblUnique.rowCount()):
items += self.tblUnique.item(rec, 0).text() + "\n"
else:
for rec in range(self.tblUnique.rowCount()):
items += self.tblUnique.item(rec, 0).text() + ":" + self.tblUnique.item(rec, 1).text() + "\n"
if items:
clip_board = QApplication.clipboard()
clip_board.setText(items)
else:
QDialog.keyPressEvent(self, e)
def update(self):
self.cmbField.clear()
inputLayer = unicode(self.inShape.currentText())
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName(inputLayer)
changedField = changedLayer.dataProvider().fields()
# for Basic statistics (with or without selection)
if self.myFunction == 3:
if changedLayer.selectedFeatureCount() != 0:
self.useSelected.setCheckState(Qt.Checked)
else:
self.useSelected.setCheckState(Qt.Unchecked)
# add all fields in combobox because now we can work with text fields too
for f in changedField:
self.cmbField.addItem(unicode(f.name()))
def accept(self):
if self.inShape.currentText() == "":
QMessageBox.information(self, self.tr("Error!"), self.tr("Please specify input vector layer"))
elif self.cmbField.isVisible() and self.cmbField.currentText() == "":
QMessageBox.information(self, self.tr("Error!"), self.tr("Please specify input field"))
else:
self.visual(self.inShape.currentText(), self.cmbField.currentText(), self.useSelected.checkState())
def manageGui(self):
if self.myFunction == 2: # List unique values
self.setWindowTitle(self.tr("List unique values"))
self.label_2.setText(self.tr("Unique values"))
self.label_4.setText(self.tr("Total unique values"))
self.useSelected.setVisible(False)
elif self.myFunction == 3: # Basic statistics
self.setWindowTitle(self.tr("Basics statistics"))
self.label_2.setText(self.tr("Statistics output"))
self.label_4.setVisible(False)
self.lstCount.setVisible(False)
self.resize(381, 400)
elif self.myFunction == 4: # Nearest neighbour analysis
self.setWindowTitle(self.tr("Nearest neighbour analysis"))
self.cmbField.setVisible(False)
self.label.setVisible(False)
self.useSelected.setVisible(False)
self.label_2.setText(self.tr("Nearest neighbour statistics"))
self.label_4.setVisible(False)
self.lstCount.setVisible(False)
self.resize(381, 200)
self.inShape.clear()
if self.myFunction == 4:
myList = ftools_utils.getLayerNames([QGis.Point])
else:
myList = ftools_utils.getLayerNames([QGis.Point, QGis.Line, QGis.Polygon])
self.inShape.addItems(myList)
return
#1: Check geometry (disabled)
#2: List unique values
#3: Basic statistics
#4: Nearest neighbour analysis
def visual(self, myLayer, myField, mySelection):
vlayer = ftools_utils.getVectorLayerByName(myLayer)
self.tblUnique.clearContents()
self.tblUnique.setRowCount(0)
self.lstCount.clear()
self.buttonOk.setEnabled(False)
self.testThread = visualThread(self.iface.mainWindow(), self, self.myFunction, vlayer, myField, mySelection)
QObject.connect(self.testThread, SIGNAL("runFinished(PyQt_PyObject)"), self.runFinishedFromThread)
QObject.connect(self.testThread, SIGNAL("runStatus(PyQt_PyObject)"), self.runStatusFromThread)
QObject.connect(self.testThread, SIGNAL("runRange(PyQt_PyObject)"), self.runRangeFromThread)
QObject.connect(self.testThread, SIGNAL("runPartRange(PyQt_PyObject)"), self.runPartRangeFromThread)
QObject.connect(self.testThread, SIGNAL("runPartStatus(PyQt_PyObject)"), self.runPartStatusFromThread)
self.cancel_close.setText(self.tr("Cancel"))
QObject.connect(self.cancel_close, SIGNAL("clicked()"), self.cancelThread)
QApplication.setOverrideCursor(Qt.WaitCursor)
self.testThread.start()
return True
def cancelThread(self):
self.testThread.stop()
QApplication.restoreOverrideCursor()
self.buttonOk.setEnabled(True)
def runFinishedFromThread(self, output):
self.testThread.stop()
QApplication.restoreOverrideCursor()
self.buttonOk.setEnabled(True)
result = output[0]
numRows = len(result)
self.tblUnique.setRowCount(numRows)
if self.myFunction in (1, 2):
self.tblUnique.setColumnCount(1)
for rec in range(numRows):
item = QTableWidgetItem(result[rec])
self.tblUnique.setItem(rec, 0, item)
else:
self.tblUnique.setColumnCount(2)
for rec in range(numRows):
if ":" not in result[rec]:
tmp = result[rec].split(u"\uff1a")
else:
tmp = result[rec].split(":")
item = QTableWidgetItem(tmp[0])
self.tblUnique.setItem(rec, 0, item)
item = QTableWidgetItem(tmp[1])
self.tblUnique.setItem(rec, 1, item)
self.tblUnique.setHorizontalHeaderLabels([self.tr("Parameter"), self.tr("Value")])
self.tblUnique.horizontalHeader().setResizeMode(1, QHeaderView.ResizeToContents)
self.tblUnique.horizontalHeader().show()
self.tblUnique.horizontalHeader().setResizeMode(0, QHeaderView.Stretch)
self.tblUnique.resizeRowsToContents()
self.lstCount.insert(unicode(output[1]))
self.cancel_close.setText("Close")
QObject.disconnect(self.cancel_close, SIGNAL("clicked()"), self.cancelThread)
return True
def runStatusFromThread(self, status):
self.progressBar.setValue(status)
def runRangeFromThread(self, range_vals):
self.progressBar.setRange(range_vals[0], range_vals[1])
def runPartStatusFromThread(self, status):
self.partProgressBar.setValue(status)
if status >= self.part_max:
self.partProgressBar.setVisible(False)
def runPartRangeFromThread(self, range_vals):
self.part_max = range_vals[1]
self.partProgressBar.setVisible(True)
self.partProgressBar.setRange(range_vals[0], range_vals[1])
class visualThread(QThread):
def __init__(self, parentThread, parentObject, function, vlayer, myField, mySelection):
QThread.__init__(self, parentThread)
self.parent = parentObject
self.running = False
self.myFunction = function
self.vlayer = vlayer
self.myField = myField
self.mySelection = mySelection
def run(self):
self.running = True
# note that 1 used to be associated with check_geometry
if self.myFunction == 2: # List unique values
(lst, cnt) = self.list_unique_values(self.vlayer, self.myField)
elif self.myFunction == 3: # Basic statistics
(lst, cnt) = self.basic_statistics(self.vlayer, self.myField)
elif self.myFunction == 4: # Nearest neighbour analysis
(lst, cnt) = self.nearest_neighbour_analysis(self.vlayer)
self.emit(SIGNAL("runFinished(PyQt_PyObject)"), (lst, cnt))
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
def stop(self):
self.running = False
def list_unique_values(self, vlayer, myField):
vprovider = vlayer.dataProvider()
index = vprovider.fieldNameIndex(myField)
unique = ftools_utils.getUniqueValues(vprovider, int(index))
lstUnique = []
nFeat = len(unique)
nElement = 0
if nFeat > 0:
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
self.emit(SIGNAL("runRange(PyQt_PyObject)"), (0, nFeat))
for item in unique:
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), nElement)
lstUnique.append(unicode(item).strip())
lstCount = len(unique)
return (lstUnique, lstCount)
def basic_statistics(self, vlayer, myField):
vprovider = vlayer.dataProvider()
index = vprovider.fieldNameIndex(myField)
feat = QgsFeature()
sumVal = 0.0
meanVal = 0.0
nVal = 0.0
values = []
first = True
nElement = 0
# determine selected field type
if ftools_utils.getFieldType(vlayer, myField) in (
'String', 'varchar', 'char', 'text'
):
fillVal = 0
emptyVal = 0
if self.mySelection: # only selected features
selection = vlayer.selectedFeatures()
nFeat = vlayer.selectedFeatureCount()
if nFeat > 0:
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
self.emit(SIGNAL("runRange(PyQt_PyObject)"), (0, nFeat))
for f in selection:
try:
lenVal = float(len(f[index]))
except TypeError:
lenVal = 0
if first:
minVal = lenVal
maxVal = lenVal
first = False
else:
if lenVal < minVal:
minVal = lenVal
if lenVal > maxVal:
maxVal = lenVal
if lenVal != 0.00:
fillVal += 1
else:
emptyVal += 1
values.append(lenVal)
sumVal = sumVal + lenVal
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), nElement)
else: # there is no selection, process the whole layer
nFeat = vprovider.featureCount()
if nFeat > 0:
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
self.emit(SIGNAL("runRange(PyQt_PyObject)"), (0, nFeat))
fit = vprovider.getFeatures()
while fit.nextFeature(feat):
try:
lenVal = float(len(feat[index]))
except TypeError:
lenVal = 0
if first:
minVal = lenVal
maxVal = lenVal
first = False
else:
if lenVal < minVal:
minVal = lenVal
if lenVal > maxVal:
maxVal = lenVal
if lenVal != 0.00:
fillVal += 1
else:
emptyVal += 1
values.append(lenVal)
sumVal = sumVal + lenVal
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), nElement)
nVal = float(len(values))
if nVal > 0:
meanVal = sumVal / nVal
lstStats = []
lstStats.append(self.tr("Max. len:") + unicode(maxVal))
lstStats.append(self.tr("Min. len:") + unicode(minVal))
lstStats.append(self.tr("Mean. len:") + unicode(meanVal))
lstStats.append(self.tr("Filled:") + unicode(fillVal))
lstStats.append(self.tr("Empty:") + unicode(emptyVal))
lstStats.append(self.tr("N:") + unicode(nVal))
return (lstStats, [])
else:
return (["Error:No features selected!"], [])
else: # numeric field
stdVal = 0.00
cvVal = 0.00
rangeVal = 0.00
medianVal = 0.00
maxVal = 0.00
minVal = 0.00
if self.mySelection: # only selected features
selection = vlayer.selectedFeatures()
nFeat = vlayer.selectedFeatureCount()
uniqueVal = ftools_utils.getUniqueValuesCount(vlayer, index, True)
if nFeat > 0:
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
self.emit(SIGNAL("runRange(PyQt_PyObject)"), (0, nFeat))
for f in selection:
value = float(f[index])
if first:
minVal = value
maxVal = value
first = False
else:
if value < minVal:
minVal = value
if value > maxVal:
maxVal = value
values.append(value)
sumVal = sumVal + value
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), nElement)
else: # there is no selection, process the whole layer
nFeat = vprovider.featureCount()
uniqueVal = ftools_utils.getUniqueValuesCount(vlayer, index, False)
if nFeat > 0:
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
self.emit(SIGNAL("runRange(PyQt_PyObject)"), (0, nFeat))
fit = vprovider.getFeatures()
while fit.nextFeature(feat):
value = float(feat[index])
if first:
minVal = value
maxVal = value
first = False
else:
if value < minVal:
minVal = value
if value > maxVal:
maxVal = value
values.append(value)
sumVal = sumVal + value
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), nElement)
nVal = float(len(values))
if nVal > 0.00:
rangeVal = maxVal - minVal
meanVal = sumVal / nVal
if meanVal != 0.00:
for val in values:
stdVal += ((val - meanVal) * (val - meanVal))
stdVal = math.sqrt(stdVal / nVal)
cvVal = stdVal / meanVal
if nVal > 1:
lstVal = sorted(values)
if (nVal % 2) == 0:
medianVal = 0.5 * (lstVal[int((nVal - 1) / 2)] + lstVal[int((nVal) / 2)])
else:
medianVal = lstVal[int((nVal + 1) / 2 - 1)]
lstStats = []
lstStats.append(self.tr("Mean:") + unicode(meanVal))
lstStats.append(self.tr("StdDev:") + unicode(stdVal))
lstStats.append(self.tr("Sum:") + unicode(sumVal))
lstStats.append(self.tr("Min:") + unicode(minVal))
lstStats.append(self.tr("Max:") + unicode(maxVal))
lstStats.append(self.tr("N:") + unicode(nVal))
lstStats.append(self.tr("CV:") + unicode(cvVal))
lstStats.append(self.tr("Number of unique values:") + unicode(uniqueVal))
lstStats.append(self.tr("Range:") + unicode(rangeVal))
lstStats.append(self.tr("Median:") + unicode(medianVal))
return (lstStats, [])
else:
return (["Error:No features selected!"], [])
def nearest_neighbour_analysis(self, vlayer):
vprovider = vlayer.dataProvider()
sumDist = 0.00
distance = QgsDistanceArea()
A = vlayer.extent()
A = float(A.width() * A.height())
index = ftools_utils.createIndex(vprovider)
nFeat = vprovider.featureCount()
nElement = 0
if nFeat > 0:
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), 0)
self.emit(SIGNAL("runRange(PyQt_PyObject)"), (0, nFeat))
feat = QgsFeature()
neighbour = QgsFeature()
fit = vprovider.getFeatures()
while fit.nextFeature(feat):
neighbourID = index.nearestNeighbor(feat.geometry().asPoint(), 2)[1]
vprovider.getFeatures(QgsFeatureRequest().setFilterFid(neighbourID).setSubsetOfAttributes([])).nextFeature(neighbour)
nearDist = distance.measureLine(neighbour.geometry().asPoint(), feat.geometry().asPoint())
sumDist += nearDist
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)"), nElement)
nVal = vprovider.featureCount()
do = float(sumDist) / nVal
de = float(0.5 / math.sqrt(nVal / A))
d = float(do / de)
SE = float(0.26136 / math.sqrt((nVal * nVal) / A))
zscore = float((do - de) / SE)
lstStats = []
lstStats.append(self.tr("Observed mean distance:") + unicode(do))
lstStats.append(self.tr("Expected mean distance:") + unicode(de))
lstStats.append(self.tr("Nearest neighbour index:") + unicode(d))
lstStats.append(self.tr("N:") + unicode(nVal))
lstStats.append(self.tr("Z-Score:") + unicode(zscore))
return (lstStats, [])
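# The statistics computed above follow the Clark-Evans (1954) nearest
# neighbour test (summary added for clarity, not part of the original module):
#   observed mean distance  do = sum(nearest-neighbour distances) / n
#   expected mean distance  de = 1 / (2 * sqrt(n / A))    (A = extent area)
#   index                   R  = do / de  (R < 1: clustered, R > 1: dispersed)
#   standard error          SE = 0.26136 / sqrt(n**2 / A)
#   z-score                 z  = (do - de) / SE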
|
NINAnor/QGIS
|
python/plugins/fTools/tools/doVisual.py
|
Python
|
gpl-2.0
| 20,290
|
#!/usr/bin/env python
# coding=utf-8
import requests
import gevent.timeout
import logging
__author__ = 'chenfengyuan'
PROXIES = {
'http': 'http://127.0.0.1:9999',
'https': 'http://127.0.0.1:9999',
}
PROXIES = {}
logger = logging.getLogger(__name__)
class Chunk:
def __init__(self, start, chunk_size):
self.chunk_max_size = chunk_size
self.chunk = []
self.chunk_size = 0
self.start = start
def consume(self, data_iter):
for data in data_iter:
if self.chunk_size + len(data) > self.chunk_max_size:
remain = data[self.chunk_max_size - self.chunk_size:]
self.chunk.append(data[:self.chunk_max_size - self.chunk_size])
rv = self.chunk
chunk_size = self.chunk_size
self.chunk = [remain]
self.chunk_size = len(remain)
yield self.start, chunk_size, rv,
self.start += self.chunk_max_size
else:
self.chunk_size += len(data)
self.chunk.append(data)
yield self.start, self.chunk_size, self.chunk
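# Illustrative sketch (not part of the original module): Chunk.consume
# re-groups an iterable of byte strings into fixed-size chunks, yielding
# (start_offset, size, pieces) tuples plus one final partial chunk:
#
#   list(Chunk(start=0, chunk_size=4).consume([b'ab', b'cd', b'ef']))
#   # -> [(0, 4, [b'ab', b'cd', b'']), (4, 2, [b'ef'])]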
class HttpChunkClient:
def __init__(self, url, headers, range_start, range_end, chunk_size, chunk_timeout, filesize):
if range_start > 0:
headers = headers + [['Range', 'bytes=%d-' % range_start]]
headers = dict(headers)
del headers['Host']
self.range_end = range_end
self.range_start = range_start
self.headers = headers
self.url = url
self.r = None
self.chunk_timeout = chunk_timeout
self.chunk_size = chunk_size
self.filesize = filesize
def get_resp_with_redirect_headers(self):
url = self.url
for redirect_times in range(10):
r = requests.get(url, headers=self.headers, stream=True, allow_redirects=False, proxies=PROXIES)
if r.status_code == 302:
url = r.headers['location']
continue
assert r.status_code // 100 == 2, 'get status code: %s' % r.status_code
assert int(r.headers['content-length']) + self.range_start == self.filesize,\
'get file size: %d' % int(r.headers['content-length'])
return r
def iter_chunk(self):
t = None
try:
chunk = Chunk(self.range_start, self.chunk_size)
self.r = self.get_resp_with_redirect_headers()
t = gevent.timeout.Timeout.start_new(self.chunk_timeout)
downloaded_size = 0
for start, size, data in chunk.consume(self.r.iter_content(4096)):
t.cancel()
chunk_ = start, data
downloaded_size += size
assert downloaded_size <= self.range_end - self.range_start
assert downloaded_size % self.chunk_size == 0 or downloaded_size == self.range_end - self.range_start, \
downloaded_size
yield chunk_
if downloaded_size == self.range_end - self.range_start:
self.r = None
yield True,
return
t = gevent.timeout.Timeout.start_new(self.chunk_timeout)
t.cancel()
except gevent.timeout.Timeout as e:
if e is t:
logger.info('timeout')
yield False,
return
else:
                logger.error('unknown timeout %s', e)
yield False,
return
except Exception:
logger.error('failed to download %s', self.url, exc_info=True)
self.r = None
yield False,
return
|
chenfengyuan/mcurl
|
python3/mcurl/utils/http_chunk_client.py
|
Python
|
mit
| 3,712
|
import sys
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QWidget, QToolTip, QPushButton, QApplication, QVBoxLayout, QTabWidget, QLineEdit, QColorDialog,
QSlider, QInputDialog, QFileDialog)
from PyQt5.QtGui import QFont, QColor
from PyQt5.QtCore import QCoreApplication, Qt
from matplotlib import figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import numpy as np
import xml.etree.cElementTree as ET
import threading
import time
import multiprocessing
from multiprocessing import Process, Queue, Pipe
from multiprocessing.sharedctypes import Value, Array
import ctypes as c
from matplotlib.lines import Line2D
import matplotlib.animation as animation
import pyopencl as cl
import pyopencl.cltypes
import verlet as vrl
import random
from operator import attrgetter
class SubplotAnimation(animation.TimedAnimation):
def __init__(self, fig, ax1, tn, xSun, ySun, xEarth, yEarth, xMoon, yMoon):
#fig = plt.figure()
#ax1 = fig.add_subplot(1, 2, 1)
#ax2 = fig.add_subplot(2, 2, 2)
#ax3 = fig.add_subplot(2, 2, 4)
self.t = tn
self.xS = xSun
self.yS = ySun
self.xE = xEarth
self.yE = yEarth
self.xM = xMoon
self.yM = yMoon
self.z = 10 * self.t
#ax1.set_xlabel('x')
#ax1.set_ylabel('y')
self.line1S = Line2D([], [], color='yellow')
self.line1aS = Line2D([], [], color='yellow', linewidth=2)
self.line1eS = Line2D(
[], [], color='yellow', marker='o', markeredgecolor='r')
ax1.add_line(self.line1S)
ax1.add_line(self.line1aS)
ax1.add_line(self.line1eS)
self.line1E = Line2D([], [], color='blue')
self.line1aE = Line2D([], [], color='blue', linewidth=2)
self.line1eE = Line2D(
[], [], color='blue', marker='o', markeredgecolor='r')
ax1.add_line(self.line1E)
ax1.add_line(self.line1aE)
ax1.add_line(self.line1eE)
self.line1M = Line2D([], [], color='grey')
self.line1aM = Line2D([], [], color='grey', linewidth=2)
self.line1eM = Line2D(
[], [], color='grey', marker='o', markeredgecolor='r')
ax1.add_line(self.line1M)
ax1.add_line(self.line1aM)
ax1.add_line(self.line1eM)
#ax1.set_xlim(-1, 1)
#ax1.set_ylim(-2, 2)
ax1.set_aspect('equal', 'datalim')
#ekurin@geo-lab.ru
animation.TimedAnimation.__init__(self, fig, interval = 1, blit=True)
def _draw_frame(self, framedata):
i = framedata
head = i - 1
head_slice = (self.t > self.t[i] - 1.0) & (self.t < self.t[i])
self.line1S.set_data(self.xS[:i], self.yS[:i])
self.line1aS.set_data(self.xS[head_slice], self.yS[head_slice])
self.line1eS.set_data(self.xS[head], self.yS[head])
self.line1E.set_data(self.xE[:i], self.yE[:i])
self.line1aE.set_data(self.xE[head_slice], self.yE[head_slice])
self.line1eE.set_data(self.xE[head], self.yE[head])
self.line1M.set_data(self.xM[:i], self.yM[:i])
self.line1aM.set_data(self.xM[head_slice], self.yM[head_slice])
self.line1eM.set_data(self.xM[head], self.yM[head])
self._drawn_artists = [self.line1S, self.line1aS, self.line1eS,
self.line1E, self.line1aE, self.line1eE,
self.line1M, self.line1aM, self.line1eM]
def new_frame_seq(self):
return iter(range(len(self.t)))
def _init_draw(self):
lines = [self.line1S, self.line1aS, self.line1eS,
self.line1E, self.line1aE, self.line1eE,
self.line1M, self.line1aM, self.line1eM]
for l in lines:
l.set_data([], [])
def dest(r1, r2, r3, t1, t2, t3):
    # Euclidean distance between points (r1, r2, r3) and (t1, t2, t3).
    return ((r1 - t1)**2 + (r2 - t2)**2 + (r3 - t3)**2)**0.5
def vectorfield(w, t, p):
r11, v11, r12, v12, r13, v13, r21, v21, r22, v22, r23, v23, r31, v31, r32, v32, r33, v33 = w
#r11 = r1[0]
#r12 = r1[1]
#r13 = r1[2]
#v11 = v1[0]
#v12 = v1[1]
#v13 = v1[2]
#r21 = r2[0]
#r22 = r2[1]
#r23 = r2[2]
#v21 = v2[0]
#v22 = v2[1]
#v23 = v2[2]
#r31 = r3[0]
#r32 = r3[1]
#r33 = r3[2]
#v31 = v3[0]
#v32 = v3[1]
#v33 = v3[2]
G, m1, m2, m3 = p
# Create f = (x1',y1',x2',y2'):
f = [v11,
G*m2*(r21 - r11)/(dest(r21, r22, r23, r11, r12, r13)**3)+ G*m3*(r31-r11)/(dest(r31, r32, r33, r11, r12, r13)**3),
v12,
G*m2*(r22 - r12)/(dest(r21, r22, r23, r11, r12, r13)**3)+ G*m3*(r32-r12)/(dest(r31, r32, r33, r11, r12, r13)**3),
v13,
G*m2*(r23 - r13)/(dest(r21, r22, r23, r11, r12, r13)**3)+ G*m3*(r33-r13)/(dest(r31, r32, r33, r11, r12, r13)**3),
v21,
G*m1*(r11 - r21)/(dest(r21, r22, r23, r11, r12, r13)**3)+ G*m3*(r31-r21)/(dest(r31, r32, r33, r21, r22, r23)**3),
v22,
G*m1*(r12 - r22)/(dest(r21, r22, r23, r11, r12, r13)**3)+ G*m3*(r32-r22)/(dest(r31, r32, r33, r21, r22, r23)**3),
v23,
G*m1*(r13 - r23)/(dest(r21, r22, r23, r11, r12, r13)**3)+ G*m3*(r33-r23)/(dest(r31, r32, r33, r21, r22, r23)**3),
v31,
G*m1*(r11 - r31)/(dest(r31, r32, r33, r11, r12, r13)**3)+ G*m2*(r21-r31)/(dest(r31, r32, r33, r21, r22, r23)**3),
v32,
G*m1*(r12 - r32)/(dest(r31, r32, r33, r11, r12, r13)**3)+ G*m2*(r22-r32)/(dest(r31, r32, r33, r21, r22, r23)**3),
v33,
G*m1*(r13 - r33)/(dest(r31, r32, r33, r11, r12, r13)**3)+ G*m2*(r23-r33)/(dest(r31, r32, r33, r21, r22, r23)**3)]
return f
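# vectorfield() above encodes the Newtonian three-body equations of motion in
# first-order form for scipy.integrate.odeint:
#
#   dr_i/dt = v_i
#   dv_i/dt = sum over j != i of  G * m_j * (r_j - r_i) / |r_j - r_i|**3
#
# where w packs the 9 position and 9 velocity components of the three bodies
# and p = [G, m1, m2, m3].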
class Circles():
def __init__(self):
self.elements = []
def Add(self, el):
self.elements.append(el)
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.radius = 10
self.color = 'r'
self.circles = Circles()
# Initialize tab screen
self.tabs = QTabWidget()
self.tab1 = QWidget()
self.tab2 = QWidget()
self.tabs.resize(3000,2000)
# Add tabs
self.tabs.addTab(self.tab1,"Edit")
self.tabs.addTab(self.tab2,"Model")
#self.setMouseTracking(True)
self.figure = plt.figure()
self.figureEarth = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.canvasEarth = FigureCanvas(self.figureEarth)
self.canvas.mpl_connect('motion_notify_event', self.mouseMoveEvent)
self.canvas.mpl_connect('button_press_event', self.mousePrintEvent)
self.button_group = QtWidgets.QButtonGroup() # Number group
self.r0 = QtWidgets.QRadioButton("verlet")
self.button_group.addButton(self.r0)
self.r1 = QtWidgets.QRadioButton("scipy")
self.button_group.addButton(self.r1)
self.r2 = QtWidgets.QRadioButton("threading")
self.button_group.addButton(self.r2)
self.r3 = QtWidgets.QRadioButton("multiprocessing")
self.button_group.addButton(self.r3)
self.r4 = QtWidgets.QRadioButton("compare all")
self.button_group.addButton(self.r4)
self.button_group.buttonClicked.connect(self.RadioButtonClicked)
self.ax = self.figure.add_subplot(111) # create an axis
self.ax.set_xlim([-100, 100])
self.ax.set_ylim([-100, 100])
self.axEarth = self.figureEarth.add_subplot(111) # create an axis
self.axEarth.set_xlim([-2.0* (10**11), 2.0* (10**11)])
self.axEarth.set_ylim([-2.0* (10**11), 2.0* (10**11)])
# Just some button connected to method
self.buttonPlus = QPushButton('+')
self.buttonPlus.clicked.connect(self.IncreaseAxes)
self.buttonPlus.resize(self.buttonPlus.sizeHint())
self.buttonPlus.move(50, 50)
self.buttonMinus = QPushButton('-')
self.buttonMinus.clicked.connect(self.DecreaseAxes)
self.buttonMinus.resize(self.buttonMinus.sizeHint())
self.buttonMinus.move(70, 70)
self.buttonSave = QPushButton('Save to xml file', self)
self.buttonSave.move(10,10)
self.buttonSave.clicked.connect(self.saveFileDialog)
self.buttonOpen = QPushButton('Load from xml file', self)
self.buttonOpen.clicked.connect(self.openFileDialog)
self.buttonOpen.resize(self.buttonMinus.sizeHint())
self.buttonOpen.move(70, 70)
self.buttonColor = QPushButton('Open color dialog', self)
self.buttonColor.setToolTip('Opens color dialog')
self.buttonColor.move(10,10)
self.buttonColor.clicked.connect(self.openColorDialog)
self.textboxX = QLineEdit(self)
self.textboxX.move(20, 20)
self.textboxX.resize(120,40)
#self.textboxX.setText('0')
self.textboxY = QLineEdit(self)
self.textboxY.move(220, 20)
self.textboxY.resize(120,40)
#self.textboxY.setText('0')
self.sld = QSlider(Qt.Horizontal, self)
self.sld.setFocusPolicy(Qt.NoFocus)
self.sld.setGeometry(30, 40, 100, 30)
        # QSlider.setRange/setValue expect integer arguments in PyQt5.
        self.sld.setRange(1, 100)
        self.sld.setValue(10)
self.sld.valueChanged.connect(self.changeSliderRadius)
self.textboxSld = QLineEdit(self)
self.textboxSld.move(420, 20)
self.textboxSld.resize(120,40)
self.textboxSld.setText("10")
self.textboxSld.textChanged.connect(self.changeTextRadius)
# set the layout
self.layout = QVBoxLayout(self)
self.tab1.layout = QVBoxLayout(self)
self.tab1.layout.addWidget(self.textboxX)
self.tab1.layout.addWidget(self.textboxY)
self.tab1.layout.addWidget(self.buttonColor)
self.tab1.layout.addWidget(self.sld)
self.tab1.layout.addWidget(self.textboxSld)
self.tab1.layout.addWidget(self.canvas)
self.tab1.layout.addWidget(self.buttonPlus)
self.tab1.layout.addWidget(self.buttonMinus)
self.tab1.layout.addWidget(self.buttonOpen)
self.tab1.layout.addWidget(self.buttonSave)
self.tab1.setLayout(self.tab1.layout)
self.tab2.layout = QVBoxLayout(self)
self.tab2.layout.addWidget(self.r0)
self.tab2.layout.addWidget(self.r1)
self.tab2.layout.addWidget(self.r2)
self.tab2.layout.addWidget(self.r3)
self.tab2.layout.addWidget(self.r4)
self.tab2.layout.addWidget(self.canvasEarth)
self.tab2.setLayout(self.tab2.layout)
self.layout.addWidget(self.tabs)
self.setLayout(self.layout)
#self.setLayout(layout)
self.canvas.draw()
self.canvasEarth.draw()
#btn = QPushButton('Button', self)
#btn.setToolTip('This is a <b>QuitButton</b> widget')
#btn.clicked.connect(self.changeButtonName)
#btn.resize(btn.sizeHint())
#btn.move(50, 50)
self.setGeometry(300, 300, 1000, 1000)
self.setWindowTitle('Circles')
self.show()
def changeSliderRadius(self, value):
self.radius = value
print(value)
self.textboxSld.setText(str(self.radius))
def changeTextRadius(self, value):
if value != '' and self.IsFloat(value):
radius = float(value)
self.radius = radius
print(value)
if radius < self.sld.minimum():
self.textboxSld.setText(str(self.sld.minimum()))
self.radius = self.sld.minimum()
elif radius > self.sld.maximum():
self.textboxSld.setText(str(self.sld.maximum()))
self.radius = self.sld.maximum()
self.sld.setValue(int(radius))
def openColorDialog(self):
color = QColorDialog.getColor()
self.color = color.name()
if color.isValid():
print(color.name())
def mouseMoveEvent(self, e):
print('mouseEvent')
#print(e)
if (e.inaxes):
x = e.xdata
y = e.ydata
self.textboxX.setText("{0}".format(x))
self.textboxY.setText("{0}".format(y))
def changeButtonName(self):
        self.setWindowTitle(Circle.CircleName())
def IncreaseAxes(self):
xlim = self.ax.get_xlim()
self.ax.set_xlim(np.multiply(xlim, 1.5))
ylim = self.ax.get_ylim()
self.ax.set_ylim(np.multiply(ylim, 1.5))
self.canvas.draw()
        self.sld.setMaximum(int(self.sld.maximum() * 1.5))
        self.sld.setMinimum(int(self.sld.minimum() * 1.5))
def DecreaseAxes(self):
xlim = self.ax.get_xlim()
self.ax.set_xlim(np.divide(xlim, 1.5))
ylim = self.ax.get_ylim()
self.ax.set_ylim(np.divide(ylim, 1.5))
self.canvas.draw()
        self.sld.setMaximum(int(self.sld.maximum() / 1.5))
        self.sld.setMinimum(int(self.sld.minimum() / 1.5))
def mousePrintEvent(self, event):
#if event.button() == QtCore.Qt.LeftButton:
# print("Press!")
#super(GraphicsView, self).mousePressEvent(event)
print('circle')
#Circle.Draw(0, 0, self.radius, self.color, self.figure)
circle1 = MyCircle(event.xdata, event.ydata, self.radius, self.color)
self.ax.add_artist(circle1)
self.canvas.draw()
self.circles.Add(circle1)
def CreateXMLRoot(self):
root = ET.Element("root")
doc = ET.SubElement(root, "doc")
fig = ET.SubElement(doc, "figure")
ET.SubElement(fig, "X").text = str(self.ax.get_xlim()[1])
ET.SubElement(fig, "Y").text = str(self.ax.get_ylim()[1])
ET.SubElement(fig, "Color").text = self.color
slider = ET.SubElement(doc, "slider")
ET.SubElement(slider, "Max").text = str(self.sld.maximum())
ET.SubElement(slider, "Min").text = str(self.sld.minimum())
ET.SubElement(slider, "Radius").text = str(self.radius)
circles = ET.SubElement(doc, "circles")
i = 0
for element in self.circles.elements:
el = ET.SubElement(circles, "circle{0}".format(i))
ET.SubElement(el, "X").text = str(element.x)
ET.SubElement(el, "Y").text = str(element.y)
ET.SubElement(el, "radius").text = str(element.radius)
ET.SubElement(el, "color").text = str(element.color)
i += 1
return root
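    # The XML produced by CreateXMLRoot has the following shape (an
    # illustrative outline, not part of the original module):
    #
    #   <root><doc>
    #     <figure><X/><Y/><Color/></figure>
    #     <slider><Max/><Min/><Radius/></slider>
    #     <circles>
    #       <circle0><X/><Y/><radius/><color/></circle0>
    #       ...
    #     </circles>
    #   </doc></root>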
def saveFileDialog(self):
tree = ET.ElementTree(self.CreateXMLRoot())
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getSaveFileName(self,"Save File as", "","All Files (*);;Python Files (*.py)", options=options)
if fileName:
print(fileName)
tree.write(fileName)
def ParseXMLFile(self, filename):
tree = ET.ElementTree(file=filename)
for elem in tree.iter(tag = "figure"):
x = float(elem.find("X").text)
y = float(elem.find("Y").text)
self.color = elem.find("Color").text
self.ax.set_xlim([-x, x])
self.ax.set_ylim([-y, y])
for elem in tree.iter(tag = "slider"):
            sld_max = float(elem.find("Max").text)
            sld_min = float(elem.find("Min").text)
            self.sld.setRange(int(sld_min), int(sld_max))
self.radius = int(float(elem.find("Radius").text))
self.sld.setValue(self.radius)
self.textboxSld.setText(str(self.radius))
        self.ax.clear()
        # clear() resets the axis limits, so re-apply the extents parsed above
        self.ax.set_xlim([-x, x])
        self.ax.set_ylim([-y, y])
for elem in tree.iter(tag = "circles"):
for el in elem:
x1 = float(el.find("X").text)
y1 = float(el.find("Y").text)
r = float(el.find("radius").text)
col = el.find("color").text
print('circle')
circle1 = MyCircle(x1, y1, r, col)
self.ax.add_artist(circle1)
self.canvas.draw()
self.circles.Add(circle1)
def openFileDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"Open File","","All Files (*);;Text Files (*.txt)", options=options)
if fileName:
print(fileName)
self.ParseXMLFile(fileName)
def RadioButtonClicked(self, button):
#self.axEarth.set_xlim([-2.0* (10**11), 2.0* (10**11)])
#self.axEarth.set_ylim([-2.0* (10**11), 2.0* (10**11)])
MEarth = 5.9724 * (10**24)
MMoon = 7.34767309 * (10**22)
MSun = 1988500 * (10**24)
if (self.r0.isChecked()):
N = 40000
dt = 1200
#Tn = np.linspace(0, 40*1200, 40)
Earth = Cosmic(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = Cosmic(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = Cosmic(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
#verlet = Verlet(N, dt, cosmics)
#verlet.VerletMain()
verlet = VerletOpenCL(N, dt, cosmics)
verlet.VerletMain()
print("Verlet calculated")
self.axEarth.clear()
Tn = np.linspace(0, 4, 40001)
            # keep a reference so the animation is not garbage-collected
            self.ani = SubplotAnimation(self.figureEarth, self.axEarth, Tn, Sun.R[0, :], Sun.R[1, :], Earth.R[0, :], Earth.R[1, :], Moon.R[0, :], Moon.R[1, :])
self.canvasEarth.draw()
if (self.r1.isChecked()):
p = [6.67408 * (10**(-11)), 1988500 * (10**24), 5.9724 * (10**24), 7.34767309 * (10**22)]
w0 = [0, 0, 0, 0, 0, 0,
0, 29.783*(10**3), -1.496*(10**11), 0, 0, 0,
0, 29.783*(10**3) + 1022, -1.496*(10**11) - 384.467*(10**6), 0, 0, 0]
t = [1200*float(i) for i in range(40000)]
wsol = odeint(vectorfield, w0, t, args=(p,))
xSun = wsol[:, 0]
ySun = wsol[:, 2]
xEarth = wsol[:, 6]
yEarth = wsol[:, 8]
xMoon = wsol[:, 12]
yMoon = wsol[:, 14]
print(5)
self.axEarth.clear()
Tn = np.linspace(0, 4, 40000)
            # keep a reference so the animation is not garbage-collected
            self.ani = SubplotAnimation(self.figureEarth, self.axEarth, Tn, xSun, ySun, xEarth, yEarth, xMoon, yMoon)
self.canvasEarth.draw()
if (self.r2.isChecked()):
N = 40000
dt = 1200
Earth = Cosmic(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = Cosmic(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = Cosmic(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = VerletThreads(N, dt, cosmics)
verlet.VerletMain()
self.figureEarth.clear()
Tn = np.linspace(0, 4, 40001)
            # keep a reference so the animation is not garbage-collected
            self.ani = SubplotAnimation(self.figureEarth, self.axEarth, Tn, Sun.R[0, :], Sun.R[1, :], Earth.R[0, :], Earth.R[1, :], Moon.R[0, :], Moon.R[1, :])
self.canvasEarth.draw()
if (self.r3.isChecked()):
N = 40000
dt = 1200
Earth = CosmicMulti(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = CosmicMulti(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = CosmicMulti(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = VerletMultiProcessing(N, dt, cosmics)
verlet.VerletMain()
print("Verlet calculated")
self.figureEarth.clear()
Tn = np.linspace(0, 4, 40001)
            # keep a reference so the animation is not garbage-collected
            self.ani = SubplotAnimation(self.figureEarth, self.axEarth, Tn, Sun.bR[0, :], Sun.bR[1, :], Earth.bR[0, :], Earth.bR[1, :], Moon.bR[0, :], Moon.bR[1, :])
self.canvasEarth.draw()
if (self.r4.isChecked()):
#TimesAll()
TimesAllKBodies(50)
#verlet = VerletThreads(400000,120)
#verlet.REarth[0,0] = 0
#verlet.REarth[1,0] = -1.496*(10**11)
#verlet.VEarth[0,0] = 29.783*(10**3)
#verlet.RMoon[1,0] = -1.496*(10**11) - 384.467*(10**6)
#verlet.VMoon[0,0] = 29.783*(10**3) + 1022
#verlet.VerletMain()
#print(5)
#self.axEarth.plot(verlet.REarth[0,:], verlet.REarth[1, :], color = 'blue')
#self.axEarth.plot(verlet.RMoon[0,:], verlet.RMoon[1, :], color = 'gray')
#print(6)
#self.canvasEarth.draw()
    def IsFloat(self, value):
        try:
            float(value)
            return True
        except (TypeError, ValueError):
            return False
class MyCircle(Circle):
def __init__(self, x, y, radius, color):
super().__init__((x, y), radius, color = color)
self.x = x
self.y = y
self.radius = radius
self.color = color
    @staticmethod
    def CircleName():
        return "circle"
    @staticmethod
    def Draw(x, y, radius, color, plt):
        circle1 = plt.Circle((x, y), radius, color=color)
        ax = plt.gca()
        ax.add_artist(circle1)
class Cosmic:
def __init__(self, n, mass, RxInit, RyInit, VxInit, VyInit, AxInit, AyInit, interactions):
self.N = n
self.M = mass
self.R = np.zeros(shape = (3, self.N+1))
self.V = np.zeros(shape = (3, self.N+1))
self.A = np.zeros(shape = (3, self.N+1))
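        # R, V and A are 3 x (N+1) state histories: rows 0..2 hold the
        # x/y/z components and column i holds the state after i steps
        # (column 0, filled in below, is the initial condition).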
self.R[0,0] = RxInit
self.R[1,0] = RyInit
self.V[0,0] = VxInit
self.V[1,0] = VyInit
self.A[0,0] = AxInit
self.A[1,0] = AyInit
self.Interactions = interactions
class CosmicMulti:
def __init__(self, n, mass, RxInit, RyInit, VxInit, VyInit, AxInit, AyInit, interactions):
self.N = n
self.M = Value('d', mass, lock=False)
self.R = Array('d', 3*(self.N+1))
self.V = Array('d', 3*(self.N+1))
self.A = Array('d', 3*(self.N+1))
arrR = np.frombuffer(self.R.get_obj()) # mp_arr and arr share the same memory
self.bR = arrR.reshape((3, self.N+1)) # b and arr share the same memory
self.bR[0,0] = RxInit
self.bR[1,0] = RyInit
arrV = np.frombuffer(self.V.get_obj()) # mp_arr and arr share the same memory
self.bV = arrV.reshape((3, self.N+1)) # b and arr share the same memory
self.bV[0,0] = VxInit
self.bV[1,0] = VyInit
arrA = np.frombuffer(self.A.get_obj()) # mp_arr and arr share the same memory
self.bA = arrA.reshape((3, self.N+1)) # b and arr share the same memory
self.bA[0,0] = AxInit
self.bA[1,0] = AyInit
#self.R = np.zeros(shape = (3, self.N+1))
#self.V = np.zeros(shape = (3, self.N+1))
#self.A = np.zeros(shape = (3, self.N+1))
#self.R[0,0] = RxInit
#self.R[1,0] = RyInit
#self.V[0,0] = VxInit
#self.V[1,0] = VyInit
#self.A[0,0] = AxInit
#self.A[1,0] = AyInit
self.Interactions = interactions
class Verlet:
def __init__(self, Num, Dt, objects):
self.N = Num
self.dt = Dt
self.G = 6.67408 * (10**(-11))
self.Objects = objects
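    # Integration scheme (a simplified Verlet-style update; note the velocity
    # step uses a_i rather than the usual (a_i + a_{i+1})/2 average):
    #   a_i     = sum_j G * m_j * (r_j - r_i) / |r_j - r_i|**3
    #   r_{i+1} = r_i + v_i*dt + 0.5*a_i*dt**2
    #   v_{i+1} = v_i + a_i*dt
    # In VerletStep, ``kwargs`` is the list of (mass, position-array) pairs
    # describing the bodies this object gravitationally interacts with.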
def Acceleration(self, coordFirst, coordSecond, mass):
if (np.linalg.norm(coordSecond - coordFirst) < 0.0000001):
return 0
return self.G*mass*(coordSecond - coordFirst)/(np.linalg.norm(coordSecond - coordFirst)**3)
def VerletStep(self, R, V, A, i, kwargs):
for (mass, r) in kwargs:
A[:, i] += self.Acceleration(R[:, i], r[:, i], mass)
R[:, i+1] = R[:, i] + V[:, i]*self.dt + (A[:, i]*(self.dt**2)*0.5)
V[:, i+1] = V[:, i] + A[:, i]*self.dt
def VerletMain(self):
t = time.time()
for i in range(self.N):
for obj in self.Objects:
interactions = []
#for interactionObj in self.Objects:
# if (not interactionObj is obj):
# interactions.append((interactionObj.M, interactionObj.R))
self.VerletStep(obj.R, obj.V, obj.A, i, obj.Interactions)
        return time.time() - t
class VerletThreads:
def __init__(self, Num, Dt, objects):
self.N = Num
self.dt = Dt
self.G = 6.67408 * (10**(-11))
self.Objects = objects
def Acceleration(self, coordFirst, coordSecond, mass):
if (np.linalg.norm(coordSecond - coordFirst) < 0.0000001):
return 0
return self.G*mass*(coordSecond - coordFirst)/(np.linalg.norm(coordSecond - coordFirst)**3)
def VerletStep(self, R, V, A, eventMy, eventOther, kwargs):
for i in range(self.N):
for (mass, r) in kwargs:
A[:, i] += self.Acceleration(R[:, i], r[:, i], mass)
R[:, i+1] = R[:, i] + V[:, i]*self.dt + (A[:, i]*(self.dt**2)*0.5)
V[:, i+1] = V[:, i] + A[:, i]*self.dt
eventMy.set()
eventOther.wait()
eventOther.clear()
eventMy.set()
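    # Per-step handshake: after computing step i each worker thread signals
    # its "my" event and blocks until the coordinator has seen every worker
    # and released them via the "other" events. The threads therefore advance
    # in lockstep, so no thread reads a neighbour's column i before it is
    # fully written.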
def MainThreadOperation(self, eventsMy, eventsOther):
for i in range(self.N):
#print("Main1")
for evt in eventsMy:
evt.wait()
evt.clear()
#print("Main2")
for evt in eventsOther:
evt.set()
#print("Main3")
def VerletMain(self):
t = time.time()
eventMy = []
eventTheir = []
threads = []
threadMain = threading.Thread(target = self.MainThreadOperation, args = (eventMy, eventTheir))
for obj in self.Objects:
interactions = []
#for interactionObj in self.Objects:
# if (not interactionObj is obj):
# interactions.append((interactionObj.M, interactionObj.R))
e = threading.Event()
e.clear()
eventTheir.append(e)
ev = threading.Event()
ev.clear()
eventMy.append(ev)
threads.append(threading.Thread(target = self.VerletStep, args = (obj.R, obj.V, obj.A, ev, e, obj.Interactions)))
print(-1)
for thrd in threads:
thrd.start()
threadMain.start()
for thrd in threads:
thrd.join()
threadMain.join()
print("AllJoined")
        return time.time() - t
class VerletMultiProcessing:
def __init__(self, Num, Dt, objects):
self.N = Num
self.dt = Dt
self.G = 6.67408 * (10**(-11))
self.Objects = objects
def Acceleration(self, coordFirst, coordSecond, mass):
if (np.linalg.norm(coordSecond - coordFirst) < 0.0000001):
return 0
return self.G*mass*(coordSecond - coordFirst)/(np.linalg.norm(coordSecond - coordFirst)**3)
def VerletStep(self, R, V, A, pipeS, pipeR, kwargs):
arrR = np.frombuffer(R.get_obj())
bR = arrR.reshape((3, self.N+1))
arrV = np.frombuffer(V.get_obj())
bV = arrV.reshape((3, self.N+1))
arrA = np.frombuffer(A.get_obj())
bA = arrA.reshape((3, self.N+1))
kwNp = []
for (mass, r) in kwargs:
arrr = np.frombuffer(r.get_obj())
br = arrr.reshape((3, self.N+1))
kwNp.append((mass, br))
for i in range(self.N):
for (mass, r) in kwNp:
bA[:, i] += self.Acceleration(bR[:, i], r[:, i], mass.value)
bR[:, i+1] = bR[:, i] + bV[:, i]*self.dt + (bA[:, i]*(self.dt**2)*0.5)
bV[:, i+1] = bV[:, i] + bA[:, i]*self.dt
#print('before send child')
pipeS.send(0)
#print('after send child')
pipeR.recv()
#print('after recv child')
#print(i)
pipeS.send(1)
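    # Process-based variant of the same per-step barrier, built on Pipes:
    # each worker sends a token after finishing step i and blocks on recv()
    # until the coordinator has heard from all workers. State lives in
    # multiprocessing.Array shared memory (re-viewed as numpy arrays via
    # np.frombuffer above), so writes are visible across processes without
    # explicit copies.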
def MainThreadOperation(self, pipesR, pipesS):
for i in range(self.N):
#print("Main1")
for pipe in pipesR:
pipe.recv()
#print("Main2")
for pipe in pipesS:
pipe.send(0)
#print("Main3")
def VerletMain(self):
print('multi')
pipesRecv = []
pipesSend = []
processes = []
procMain = Process(target = self.MainThreadOperation, args = (pipesRecv, pipesSend))
for obj in self.Objects:
#interactions = []
#for interactionObj in self.Objects:
# if (not interactionObj is obj):
# interactions.append((interactionObj.M, interactionObj.R))
parentRecv, childSend = Pipe()
pipesRecv.append(parentRecv)
parentSend, childRecv = Pipe()
pipesSend.append(parentSend)
processes.append(Process(target = self.VerletStep, args = (obj.R, obj.V, obj.A, childSend, childRecv, obj.Interactions)))
print(-1)
for proc in processes:
proc.start()
procMain.start()
for proc in processes:
proc.join()
procMain.join()
print("AllJoined")
class VerletOpenCL:
def __init__(self, Num, Dt, objects):
self.N = Num
self.dt = Dt
self.G = 6.67408 * (10**(-11))
self.Objects = objects
def Acceleration(self, coordFirst, coordSecond, mass):
if (np.linalg.norm(coordSecond - coordFirst) < 0.0000001):
return 0
return self.G*mass*(coordSecond - coordFirst)/(np.linalg.norm(coordSecond - coordFirst)**3)
def VerletStep(self, R, V, A, i, kwargs):
for (mass, r) in kwargs:
A[:, i] += self.Acceleration(R[:, i], r[:, i], mass)
R[:, i+1] = R[:, i] + V[:, i]*self.dt + (A[:, i]*(self.dt**2)*0.5)
V[:, i+1] = V[:, i] + A[:, i]*self.dt
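    # Note: Acceleration/VerletStep above are an unused pure-Python fallback;
    # VerletMain below runs the entire integration inside the OpenCL kernel.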
def VerletMain(self):
listR = []
listV = []
listA = []
listM = []
for obj in self.Objects:
listR.append(obj.R.flatten('F'))
listV.append(obj.V.flatten('F'))
listA.append(obj.A.flatten('F'))
listM.append(obj.M)
totalR = np.hstack(listR)
totalV = np.hstack(listV)
totalA = np.hstack(listA)
t = time.time()
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
#dev = get_default_device()
totalRcl = np.array(totalR, dtype = cl.cltypes.double)
print('totalRcl before:')
#print(totalRcl)
totalVcl = np.array(totalV, dtype = cl.cltypes.double)
totalAcl = np.array(totalA, dtype = cl.cltypes.double)
Mscl = np.array(listM, dtype = cl.cltypes.double)
        # the kernel declares these as (32-bit) int, so pass int32 explicitly
        dT = np.array(self.dt, dtype=np.int32)
        N = np.array(self.N, dtype=np.int32)
        M = np.array(len(self.Objects), dtype=np.int32)
#print(dT)
#print(N)
#print(M)
mf = cl.mem_flags
bufR = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=totalRcl)
bufV = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=totalVcl)
bufA = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=totalAcl)
bufMs = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=Mscl)
bufdt = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=dT)
bufM = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=M)
bufN = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=N)
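        # Memory layout expected by the kernel: each object's R/V/A matrix is
        # flattened column-major into 3*(N+1) doubles and the objects are
        # concatenated, so component k of object j at step i sits at index
        # 3*(N+1)*j + 3*i + k.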
prg = cl.Program(ctx,
"""
#pragma OPENCL EXTENSION cl_khr_fp64: enable
double norm(__global double *R, int i, int j)
{
double temp=0;
for (int k=0; k<3; k++)
temp+=(R[i+k]-R[j+k])*(R[i+k]-R[j+k]);
return sqrt(temp);
}
__kernel void verlet_cl(__global double *R, __global double *V, __global double *A, __global double *Ms, __global int *dtP, __global int *MP , __global int *NP)
{
double G = 6.67e-11;
int dT=*dtP;
printf(\"dT : %d\\n\",dT);
int M=*MP;
int N=*NP;
for (int i=0; i<N; i++)
{
for (int j=0; j<M; j++)
{
for (int k=0; k<3; k++)
{
//printf(\"R : %1.4e\\n\",R[3*(N+1)*j+3*i+k]);
for (int l = 0; l < M; l++)
{
if (l != j)
{
if (norm(R,3*(N+1)*l+3*i,3*(N+1)*j+3*i) <= 0.00000001)
{
A[3*(N+1)*j+3*i+k] = 0;
}
else
{
A[3*(N+1)*j+3*i+k] += G*Ms[l]*(R[3*(N+1)*l+3*i+k]-R[3*(N+1)*j+3*i+k])/pow(norm(R,3*(N+1)*l+3*i,3*(N+1)*j+3*i),3);
}
}
}
R[3*(N+1)*j+3*(i+1)+k] = R[3*(N+1)*j+3*i+k] + V[3*(N+1)*j+3*i+k]*dT + (A[3*(N+1)*j+3*i+k]*dT*dT*0.5);
V[3*(N+1)*j+3*(i+1)+k] = V[3*(N+1)*j+3*i+k] + A[3*(N+1)*j+3*i+k]*dT;
}
}
}
}""").build()
#try:
# prg.build()
# print ('build')
# except:
# print("Error:")
# #print(prg.get_build_info(ctx.devices[0], cl.program_build_info.LOG))
# raise
#try:
# prg.build(options=['-Werror'], devices=[dev], cache_dir=None)
## try:
## prg.build()
## prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
#except:
## print("Error:")
## print(prog.get_build_info(context.devices[0], cl.program_build_info.LOG))
# print('Build log:')
# print(prg.get_build_info(dev, cl.program_build_info.LOG))
#raise
t = time.time()
prg.verlet_cl(queue, (1,), None, bufR, bufV, bufA, bufMs, bufdt, bufM, bufN)
        cl.enqueue_copy(queue, totalRcl, bufR).wait()
        cl.enqueue_copy(queue, totalVcl, bufV).wait()
        cl.enqueue_copy(queue, totalAcl, bufA).wait()
print('totalRcl after:')
#print(totalRcl)
listRAfter = np.split(totalRcl, (self.N+1)*len(self.Objects))
#print(listRAfter)
for i in range(len(self.Objects)):
sublistR = listRAfter[i*(self.N+1):(i+1)*(self.N+1)]
self.Objects[i].R = np.array(sublistR).transpose()
        return time.time() - t
def TimesAll():
print('Methods:')
print('1. Verlet')
print('2. SciPy')
print('3. Threading')
print('4. MultiProcessing')
print('5. Cython without TM without OMP')
print('6. Cython with TM without OMP')
print('7. Cython without TM with OMP')
print('8. Cython with TM with OMP')
print('9. OpenCL')
listTimes = []
MEarth = 5.9724 * (10**24)
MMoon = 7.34767309 * (10**22)
MSun = 1988500 * (10**24)
N = 40000
dt = 1200
#Tn = np.linspace(0, 40*1200, 40)
Earth = Cosmic(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = Cosmic(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = Cosmic(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = Verlet(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
p = [6.67408 * (10**(-11)), 1988500 * (10**24), 5.9724 * (10**24), 7.34767309 * (10**22)]
w0 = [0, 0, 0, 0, 0, 0,
0, 29.783*(10**3), -1.496*(10**11), 0, 0, 0,
0, 29.783*(10**3) + 1022, -1.496*(10**11) - 384.467*(10**6), 0, 0, 0]
t = [1200*float(i) for i in range(40000)]
t1 = time.time()
wsol = odeint(vectorfield, w0, t, args=(p,))
listTimes.append(time.time() - t1)
verlet = VerletThreads(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
Earth = CosmicMulti(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = CosmicMulti(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = CosmicMulti(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = VerletMultiProcessing(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
Earth = Cosmic(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = Cosmic(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = Cosmic(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = vrl.Verlet(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = vrl.VerletTM(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = vrl.VerletOpenMP(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = vrl.VerletOpenMPTM(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = VerletOpenCL(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
print("Verlet calculated")
minT = min(listTimes)
minPos = listTimes.index(min(listTimes))
maxT = max(listTimes)
maxPos = listTimes.index(max(listTimes))
    listAcc = []
    for val in listTimes:
        # speedup relative to the slowest method (>= 1.0)
        listAcc.append(maxT / val)
print('Min is ', minPos+1)
print('Max is ', maxPos+1)
print('Times:')
for i in range(len(listTimes)):
print(i+1, ': ', listTimes[i])
print('Accelerations:')
for i in range(len(listAcc)):
print(i+1, ': ', listAcc[i])
def TimesAllKBodies(K):
print('Methods:')
print('1. Verlet')
print('2. Threading')
print('3. Cython without TM without OMP')
print('4. Cython with TM without OMP')
print('5. Cython without TM with OMP')
print('6. Cython with TM with OMP')
print('7. OpenCL')
listTimes = []
MEarth = 5.9724 * (10**24)
MMoon = 7.34767309 * (10**22)
MSun = 1988500 * (10**24)
listMasses = []
for b in range(K-3):
listMasses.append(random.randrange(pow(10, 3),pow(10, 4)))
N = 40000
dt = 1200
#Tn = np.linspace(0, 40*1200, 40)
Earth = Cosmic(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
Moon = Cosmic(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
Sun = Cosmic(N, MSun, 0, 0, 0, 0, 0, 0, [])
cosmics = [Sun, Earth, Moon]
for mass in listMasses:
cosmics.append(Cosmic(N, mass, 0, 0, 0, 0, 0, 0, []))
for obj in cosmics:
for interactionObj in cosmics:
if (not interactionObj is obj):
obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = Verlet(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = VerletThreads(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
#Earth = CosmicMulti(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
#Moon = CosmicMulti(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
#Sun = CosmicMulti(N, MSun, 0, 0, 0, 0, 0, 0, [])
#cosmics = [Sun, Earth, Moon]
#for mass in listMasses:
# cosmics.append(CosmicMulti(N, mass, 0, 0, 0, 0, 0, 0, []))
#for obj in cosmics:
# for interactionObj in cosmics:
# if (not interactionObj is obj):
# obj.Interactions.append((interactionObj.M, interactionObj.R))
#verlet = VerletMultiProcessing(N, dt, cosmics)
#t1 = time.time()
#verlet.VerletMain()
#listTimes.append(time.time() - t1)
#Earth = Cosmic(N, MEarth, 0, -1.496*(10**11), 29.783*(10**3), 0, 0, 0, [])
#Moon = Cosmic(N, MMoon, 0, -1.496*(10**11) - 384.467*(10**6), 29.783*(10**3) + 1022, 0, 0, 0, [])
#Sun = Cosmic(N, MSun, 0, 0, 0, 0, 0, 0, [])
#cosmics = [Sun, Earth, Moon]
#for mass in listMasses:
# cosmics.append(Cosmic(N, mass, 0, 0, 0, 0, 0, 0, []))
#for obj in cosmics:
# for interactionObj in cosmics:
# if (not interactionObj is obj):
# obj.Interactions.append((interactionObj.M, interactionObj.R))
verlet = vrl.Verlet(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = vrl.VerletTM(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = vrl.VerletOpenMP(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = vrl.VerletOpenMPTM(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
verlet = VerletOpenCL(N, dt, cosmics)
t1 = time.time()
verlet.VerletMain()
listTimes.append(time.time() - t1)
print("Verlet calculated")
minT = min(listTimes)
minPos = listTimes.index(min(listTimes))
maxT = max(listTimes)
maxPos = listTimes.index(max(listTimes))
    listAcc = []
    for val in listTimes:
        # speedup relative to the slowest method (>= 1.0)
        listAcc.append(maxT / val)
print('Min is ', minPos+1)
print('Max is ', maxPos+1)
print('Times:')
for i in range(len(listTimes)):
print(i+1, ': ', listTimes[i])
print('Accelerations:')
for i in range(len(listAcc)):
print(i+1, ': ', listAcc[i])
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
ChesnakovKonstantin/Test-Example
|
Python projects/PythonApplication3/PythonApplication3/PythonApplication3.py
|
Python
|
mit
| 44,107
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from netstat import NetstatCollector
################################################################################
class TestNetstatCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NetstatCollector', {
})
self.collector = NetstatCollector(config, None)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
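        # Point the collector at a canned /proc/net/tcp fixture instead of
        # the live kernel table so the test is deterministic.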
NetstatCollector.PROC_TCP = self.getFixturePath('proc_net_tcp')
self.collector.collect()
metrics = {
'LISTEN': 9
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
print publish_mock
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
skbkontur/Diamond
|
src/collectors/netstat/test/testnetstat.py
|
Python
|
mit
| 1,230
|
# (c) 2016 RedHat
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import ansible.constants as C
import time
import random
from ansible.compat.six import text_type
from ansible.compat.six.moves import shlex_quote
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellBase(object):
def __init__(self):
self.env = dict()
if C.DEFAULT_MODULE_SET_LOCALE:
self.env.update(
dict(
LANG = C.DEFAULT_MODULE_LANG,
LC_ALL = C.DEFAULT_MODULE_LANG,
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
)
def env_prefix(self, **kwargs):
env = self.env.copy()
env.update(kwargs)
return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k,v in env.items()])
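    # Illustrative: with the default locale settings, env_prefix(FOO='bar')
    # renders something like "LANG=C LC_ALL=C LC_MESSAGES=C FOO=bar" (order
    # follows dict iteration), ready to prefix a remote command line.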
def join_path(self, *args):
return os.path.join(*args)
# some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
def get_remote_filename(self, pathname):
base_name = os.path.basename(pathname.strip())
return base_name.strip()
def path_has_trailing_slash(self, path):
return path.endswith('/')
def chmod(self, paths, mode):
cmd = ['chmod', mode]
cmd.extend(paths)
cmd = [shlex_quote(c) for c in cmd]
return ' '.join(cmd)
def chown(self, paths, user):
cmd = ['chown', user]
cmd.extend(paths)
cmd = [shlex_quote(c) for c in cmd]
return ' '.join(cmd)
def set_user_facl(self, paths, user, mode):
"""Only sets acls for users as that's really all we need"""
cmd = ['setfacl', '-m', 'u:%s:%s' % (user, mode)]
cmd.extend(paths)
cmd = [shlex_quote(c) for c in cmd]
return ' '.join(cmd)
def remove(self, path, recurse=False):
path = shlex_quote(path)
cmd = 'rm -f '
if recurse:
cmd += '-r '
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
def exists(self, path):
cmd = ['test', '-e', shlex_quote(path)]
return ' '.join(cmd)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
        # When system is specified we have to create this in a directory where
        # other users can read and access the temp directory. This is because
        # we use system to create tmp dirs for unprivileged users who are
        # sudo'ing to a second unprivileged user. The only directories where
        # that is standard are the tmp dirs, /tmp and /var/tmp. So we only
        # allow one of those two locations if system=True. However, users
        # might want to have some say over which of /tmp or /var/tmp is used
        # (because /tmp may be a tmpfs and they want to conserve RAM, or they
        # want to persist the tmp files beyond a reboot). So we check if the
        # user set REMOTE_TMP to somewhere in or below /var/tmp and if so use
        # /var/tmp. For anything else we use /tmp (because /tmp is specified
        # by POSIX and /var/tmp is not).
if system:
if C.DEFAULT_REMOTE_TMP.startswith('/var/tmp'):
basetmpdir = '/var/tmp'
else:
basetmpdir = '/tmp'
elif tmpdir is None:
basetmpdir = C.DEFAULT_REMOTE_TMP
else:
basetmpdir = tmpdir
basetmp = self.join_path(basetmpdir, basefile)
cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
cmd += ' %s echo %s=%s echo %s %s' % (self._SHELL_AND, basefile, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
return cmd
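    # Roughly, for an sh-like shell this builds a command of the form
    #   mkdir -p <SUB>echo /tmp/ansible-tmp-...<SUB> && echo ansible-tmp-...=<SUB>echo /tmp/ansible-tmp-...<SUB>
    # i.e. it creates the temp dir and echoes "<basefile>=<resolved path>" so
    # the caller can parse the remote path back out of the command's output.
    # (Illustrative; the actual substitution delimiters and "&&" come from
    # the concrete shell plugin's _SHELL_* attributes.)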
def expand_user(self, user_home_path):
''' Return a command to expand tildes in a path
It can be either "~" or "~username". We use the POSIX definition of
a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
'''
# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
# shlex_quote will make the shell return the string verbatim
user_home_path = shlex_quote(user_home_path)
return 'echo %s' % user_home_path
def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
# don't quote the cmd if it's an empty string, because this will break pipelining mode
if cmd.strip() != '':
cmd = shlex_quote(cmd)
cmd_parts = []
if shebang:
shebang = shebang.replace("#!", "").strip()
else:
shebang = ""
cmd_parts.extend([env_string.strip(), shebang, cmd])
if arg_path is not None:
cmd_parts.append(arg_path)
new_cmd = " ".join(cmd_parts)
if rm_tmp:
new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
return new_cmd
def append_command(self, cmd, cmd_to_append):
"""Append an additional command if supported by the shell"""
if self._SHELL_AND:
cmd += ' %s %s' % (self._SHELL_AND, cmd_to_append)
return cmd
|
naslanidis/ansible
|
lib/ansible/plugins/shell/__init__.py
|
Python
|
gpl-3.0
| 6,488
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python wrapper around the C extension for the pair counter in
``mocks/DDrppi_mocks/``. This python wrapper is
:py:mod:`Corrfunc.mocks.DDrppi_mocks`
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__author__ = ('Manodeep Sinha')
__all__ = ('DDrppi_mocks', )
def DDrppi_mocks(autocorr, cosmology, nthreads, pimax, binfile,
RA1, DEC1, CZ1, weights1=None,
RA2=None, DEC2=None, CZ2=None, weights2=None,
is_comoving_dist=False,
verbose=False, output_rpavg=False,
fast_divide_and_NR_steps=0,
xbin_refine_factor=2, ybin_refine_factor=2,
zbin_refine_factor=1, max_cells_per_dim=100,
copy_particles=True, enable_min_sep_opt=True,
c_api_timer=False, isa=r'fastest', weight_type=None):
"""
Calculate the 2-D pair-counts corresponding to the projected correlation
function, :math:`\\xi(r_p, \pi)`. Pairs which are separated by less
than the ``rp`` bins (specified in ``binfile``) in the
X-Y plane, and less than ``pimax`` in the Z-dimension are
counted. The input positions are expected to be on-sky co-ordinates.
This module is suitable for calculating correlation functions for mock
catalogs.
If ``weights`` are provided, the resulting pair counts are weighted. The
weighting scheme depends on ``weight_type``.
Returns a numpy structured array containing the pair counts for the
specified bins.
    .. note:: This module only returns pair counts and not the actual
correlation function :math:`\\xi(r_p, \pi)` or :math:`wp(r_p)`. See the
utilities :py:mod:`Corrfunc.utils.convert_3d_counts_to_cf` and
:py:mod:`Corrfunc.utils.convert_rp_pi_counts_to_wp` for computing
:math:`\\xi(r_p, \pi)` and :math:`wp(r_p)` respectively from the
pair counts.
Parameters
-----------
autocorr : boolean, required
Boolean flag for auto/cross-correlation. If autocorr is set to 1,
then the second set of particle positions are not required.
cosmology : integer, required
Integer choice for setting cosmology. Valid values are 1->LasDamas
cosmology and 2->Planck cosmology. If you need arbitrary cosmology,
easiest way is to convert the ``CZ`` values into co-moving distance,
based on your preferred cosmology. Set ``is_comoving_dist=True``, to
indicate that the co-moving distance conversion has already been done.
Choices:
1. LasDamas cosmology. :math:`\\Omega_m=0.25`, :math:`\\Omega_\Lambda=0.75`
2. Planck cosmology. :math:`\\Omega_m=0.302`, :math:`\\Omega_\Lambda=0.698`
To setup a new cosmology, add an entry to the function,
``init_cosmology`` in ``ROOT/utils/cosmology_params.c`` and re-install
the entire package.
nthreads : integer
The number of OpenMP threads to use. Has no effect if OpenMP was not
enabled during library compilation.
pimax : double
A double-precision value for the maximum separation along
the Z-dimension.
Distances along the :math:`\\pi` direction are binned with unit
depth. For instance, if ``pimax=40``, then 40 bins will be created
along the ``pi`` direction. Only pairs with ``0 <= dz < pimax``
are counted (no equality).
binfile: string or an list/array of floats
For string input: filename specifying the ``rp`` bins for
``DDrppi_mocks``. The file should contain white-space separated values
of (rpmin, rpmax) for each ``rp`` wanted. The bins need to be
contiguous and sorted in increasing order (smallest bins come first).
For array-like input: A sequence of ``rp`` values that provides the
bin-edges. For example,
``np.logspace(np.log10(0.1), np.log10(10.0), 15)`` is a valid
input specifying **14** (logarithmic) bins between 0.1 and 10.0. This
array does not need to be sorted.
RA1 : array-like, real (float/double)
The array of Right Ascensions for the first set of points. RA's
are expected to be in [0.0, 360.0], but the code will try to fix cases
where the RA's are in [-180, 180.0]. For peace of mind, always supply
RA's in [0.0, 360.0].
Calculations are done in the precision of the supplied arrays.
DEC1 : array-like, real (float/double)
Array of Declinations for the first set of points. DEC's are expected
to be in the [-90.0, 90.0], but the code will try to fix cases where
the DEC's are in [0.0, 180.0]. Again, for peace of mind, always supply
DEC's in [-90.0, 90.0].
Must be of same precision type as RA1.
CZ1 : array-like, real (float/double)
Array of (Speed Of Light * Redshift) values for the first set of
points. Code will try to detect cases where ``redshifts`` have been
passed and multiply the entire array with the ``speed of light``.
If is_comoving_dist is set, then ``CZ1`` is interpreted as the
co-moving distance, rather than `cz`.
weights1 : array_like, real (float/double), optional
A scalar, or an array of weights of shape (n_weights, n_positions) or (n_positions,).
`weight_type` specifies how these weights are used; results are returned
in the `weightavg` field. If only one of weights1 and weights2 is
specified, the other will be set to uniform weights.
RA2 : array-like, real (float/double)
The array of Right Ascensions for the second set of points. RA's
are expected to be in [0.0, 360.0], but the code will try to fix cases
where the RA's are in [-180, 180.0]. For peace of mind, always supply
RA's in [0.0, 360.0].
Must be of same precision type as RA1/DEC1/CZ1.
DEC2 : array-like, real (float/double)
Array of Declinations for the second set of points. DEC's are expected
to be in the [-90.0, 90.0], but the code will try to fix cases where
the DEC's are in [0.0, 180.0]. Again, for peace of mind, always supply
DEC's in [-90.0, 90.0].
Must be of same precision type as RA1/DEC1/CZ1.
CZ2 : array-like, real (float/double)
Array of (Speed Of Light * Redshift) values for the second set of
points. Code will try to detect cases where ``redshifts`` have been
passed and multiply the entire array with the ``speed of light``.
If is_comoving_dist is set, then ``CZ2`` is interpreted as the
co-moving distance, rather than `cz`.
Must be of same precision type as RA1/DEC1/CZ1.
weights2 : array-like, real (float/double), optional
Same as weights1, but for the second set of positions
is_comoving_dist : boolean (default false)
Boolean flag to indicate that ``cz`` values have already been
converted into co-moving distances. This flag allows arbitrary
cosmologies to be used in ``Corrfunc``.
verbose : boolean (default false)
Boolean flag to control output of informational messages
output_rpavg : boolean (default false)
Boolean flag to output the average ``rp`` for each bin. Code will
run slower if you set this flag.
        If you are calculating in single-precision, ``rpavg`` will suffer
        from numerical loss of precision and cannot be trusted. If
you need accurate ``rpavg`` values, then pass in double precision
arrays for the particle positions.
fast_divide_and_NR_steps: integer (default 0)
Replaces the division in ``AVX`` implementation with an approximate
reciprocal, followed by ``fast_divide_and_NR_steps`` of Newton-Raphson.
Can improve runtime by ~15-20% on older computers. Value of 0 uses
the standard division operation.
(xyz)bin_refine_factor : integer, default is (2,2,1); typically within [1-3]
Controls the refinement on the cell sizes. Can have up to a 20% impact
on runtime.
max_cells_per_dim: integer, default is 100, typical values in [50-300]
Controls the maximum number of cells per dimension. Total number of
cells can be up to (max_cells_per_dim)^3. Only increase if ``rpmax`` is
too small relative to the boxsize (and increasing helps the runtime).
copy_particles: boolean (default True)
Boolean flag to make a copy of the particle positions
If set to False, the particles will be re-ordered in-place
.. versionadded:: 2.3.0
enable_min_sep_opt: boolean (default true)
Boolean flag to allow optimizations based on min. separation between
pairs of cells. Here to allow for comparison studies.
.. versionadded:: 2.3.0
c_api_timer : boolean (default false)
Boolean flag to measure actual time spent in the C libraries. Here
to allow for benchmarking and scaling studies.
isa: string, case-insensitive (default ``fastest``)
Controls the runtime dispatch for the instruction set to use. Possible
options are: [``fastest``, ``avx512f``, ``avx``, ``sse42``, ``fallback``]
Setting isa to ``fastest`` will pick the fastest available instruction
set on the current computer. However, if you set ``isa`` to, say,
``avx`` and ``avx`` is not available on the computer, then the code will
revert to using ``fallback`` (even though ``sse42`` might be available).
Unless you are benchmarking the different instruction sets, you should
always leave ``isa`` to the default value. And if you *are*
benchmarking, then the string supplied here gets translated into an
``enum`` for the instruction set defined in ``utils/defs.h``.
weight_type : string, optional (default None)
The type of weighting to apply. One of ["pair_product", None].
Returns
--------
results : Numpy structured array
A numpy structured array containing [rpmin, rpmax, rpavg, pimax,
npairs, weightavg] for each radial bin specified in the ``binfile``.
        If ``output_rpavg`` is not set, then ``rpavg`` will be set to 0.0 for
all bins; similarly for ``weightavg``. ``npairs`` contains the number
of pairs in that bin and can be used to compute the actual
:math:`\\xi(r_p, \pi)` or :math:`wp(rp)` by combining with
(DR, RR) counts.
api_time : float, optional
Only returned if ``c_api_timer`` is set. ``api_time`` measures only
the time spent within the C library and ignores all python overhead.
Example
--------
>>> from __future__ import print_function
>>> import numpy as np
>>> from os.path import dirname, abspath, join as pjoin
>>> import Corrfunc
>>> from Corrfunc.mocks.DDrppi_mocks import DDrppi_mocks
>>> import math
>>> binfile = pjoin(dirname(abspath(Corrfunc.__file__)),
... "../mocks/tests/", "bins")
>>> N = 100000
>>> boxsize = 420.0
>>> seed = 42
>>> np.random.seed(seed)
>>> X = np.random.uniform(-0.5*boxsize, 0.5*boxsize, N)
>>> Y = np.random.uniform(-0.5*boxsize, 0.5*boxsize, N)
>>> Z = np.random.uniform(-0.5*boxsize, 0.5*boxsize, N)
>>> weights = np.ones_like(X)
>>> CZ = np.sqrt(X*X + Y*Y + Z*Z)
>>> inv_cz = 1.0/CZ
>>> X *= inv_cz
>>> Y *= inv_cz
>>> Z *= inv_cz
>>> DEC = 90.0 - np.arccos(Z)*180.0/math.pi
>>> RA = (np.arctan2(Y, X)*180.0/math.pi) + 180.0
>>> autocorr = 1
>>> cosmology = 1
>>> nthreads = 2
>>> pimax = 40.0
>>> results = DDrppi_mocks(autocorr, cosmology, nthreads,
... pimax, binfile, RA, DEC, CZ,
... weights1=weights, weight_type='pair_product',
... output_rpavg=True, is_comoving_dist=True)
>>> for r in results[519:]: print("{0:10.6f} {1:10.6f} {2:10.6f} {3:10.1f}"
... " {4:10d} {5:10.6f}".format(r['rmin'], r['rmax'],
... r['rpavg'], r['pimax'], r['npairs'], r['weightavg']))
... # doctest: +NORMALIZE_WHITESPACE
11.359969 16.852277 14.285169 40.0 104850 1.000000
16.852277 25.000000 21.181246 1.0 274144 1.000000
16.852277 25.000000 21.190844 2.0 272876 1.000000
16.852277 25.000000 21.183321 3.0 272294 1.000000
16.852277 25.000000 21.188486 4.0 272506 1.000000
16.852277 25.000000 21.170832 5.0 272100 1.000000
16.852277 25.000000 21.165379 6.0 271788 1.000000
16.852277 25.000000 21.175246 7.0 270040 1.000000
16.852277 25.000000 21.187417 8.0 269492 1.000000
16.852277 25.000000 21.172066 9.0 269682 1.000000
16.852277 25.000000 21.182460 10.0 268266 1.000000
16.852277 25.000000 21.170594 11.0 268744 1.000000
16.852277 25.000000 21.178608 12.0 266820 1.000000
16.852277 25.000000 21.187184 13.0 266510 1.000000
16.852277 25.000000 21.184937 14.0 265484 1.000000
16.852277 25.000000 21.180184 15.0 265258 1.000000
16.852277 25.000000 21.191504 16.0 262952 1.000000
16.852277 25.000000 21.187746 17.0 262602 1.000000
16.852277 25.000000 21.189778 18.0 260206 1.000000
16.852277 25.000000 21.188882 19.0 259410 1.000000
16.852277 25.000000 21.185684 20.0 256806 1.000000
16.852277 25.000000 21.194036 21.0 255574 1.000000
16.852277 25.000000 21.184115 22.0 255406 1.000000
16.852277 25.000000 21.178255 23.0 252394 1.000000
16.852277 25.000000 21.184644 24.0 252220 1.000000
16.852277 25.000000 21.187020 25.0 251668 1.000000
16.852277 25.000000 21.183827 26.0 249648 1.000000
16.852277 25.000000 21.183121 27.0 247160 1.000000
16.852277 25.000000 21.180872 28.0 246238 1.000000
16.852277 25.000000 21.185251 29.0 246030 1.000000
16.852277 25.000000 21.183488 30.0 242124 1.000000
16.852277 25.000000 21.194538 31.0 242426 1.000000
16.852277 25.000000 21.190702 32.0 239778 1.000000
16.852277 25.000000 21.188985 33.0 239046 1.000000
16.852277 25.000000 21.187092 34.0 237640 1.000000
16.852277 25.000000 21.185515 35.0 236256 1.000000
16.852277 25.000000 21.190278 36.0 233536 1.000000
16.852277 25.000000 21.183240 37.0 233274 1.000000
16.852277 25.000000 21.183796 38.0 231628 1.000000
16.852277 25.000000 21.200668 39.0 230378 1.000000
16.852277 25.000000 21.181153 40.0 229006 1.000000
"""
try:
from Corrfunc._countpairs_mocks import countpairs_rp_pi_mocks as\
DDrppi_extn
except ImportError:
        msg = "Could not import the C extension for the on-sky "\
              "pair counter."
raise ImportError(msg)
import numpy as np
from Corrfunc.utils import translate_isa_string_to_enum, fix_ra_dec,\
return_file_with_rbins, convert_to_native_endian,\
sys_pipes, process_weights
from future.utils import bytes_to_native_str
if not autocorr:
if RA2 is None or DEC2 is None or CZ2 is None:
msg = "Must pass valid arrays for RA2/DEC2/CZ2 for "\
"computing cross-correlation"
raise ValueError(msg)
else:
RA2 = np.empty(1)
DEC2 = np.empty(1)
CZ2 = np.empty(1)
weights1, weights2 = process_weights(weights1, weights2, RA1, RA2, weight_type, autocorr)
# Ensure all input arrays are native endian
RA1, DEC1, CZ1, weights1, RA2, DEC2, CZ2, weights2 = [
convert_to_native_endian(arr, warn=True) for arr in
[RA1, DEC1, CZ1, weights1, RA2, DEC2, CZ2, weights2]]
fix_ra_dec(RA1, DEC1)
if autocorr == 0:
fix_ra_dec(RA2, DEC2)
# Passing None parameters breaks the parsing code, so avoid this
kwargs = {}
for k in ['weights1', 'weights2', 'weight_type', 'RA2', 'DEC2', 'CZ2']:
v = locals()[k]
if v is not None:
kwargs[k] = v
integer_isa = translate_isa_string_to_enum(isa)
rbinfile, delete_after_use = return_file_with_rbins(binfile)
with sys_pipes():
extn_results = DDrppi_extn(autocorr, cosmology, nthreads,
pimax, rbinfile,
RA1, DEC1, CZ1,
is_comoving_dist=is_comoving_dist,
verbose=verbose,
output_rpavg=output_rpavg,
fast_divide_and_NR_steps=fast_divide_and_NR_steps,
xbin_refine_factor=xbin_refine_factor,
ybin_refine_factor=ybin_refine_factor,
zbin_refine_factor=zbin_refine_factor,
max_cells_per_dim=max_cells_per_dim,
copy_particles=copy_particles,
enable_min_sep_opt=enable_min_sep_opt,
c_api_timer=c_api_timer,
isa=integer_isa, **kwargs)
if extn_results is None:
msg = "RuntimeError occurred"
raise RuntimeError(msg)
else:
extn_results, api_time = extn_results
if delete_after_use:
import os
os.remove(rbinfile)
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'rpavg'), np.float64),
(bytes_to_native_str(b'pimax'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float64)])
results = np.array(extn_results, dtype=results_dtype)
if not c_api_timer:
return results
else:
return results, api_time
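# Illustrative follow-up (an assumption, not exercised by this module): the
# raw DD pair counts returned above are typically combined with DR/RR counts
# from random catalogs via Corrfunc.utils.convert_rp_pi_counts_to_wp to
# obtain wp(rp); see that utility's docstring for the exact call signature.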
if __name__ == '__main__':
import doctest
doctest.testmod()
|
manodeep/Corrfunc
|
Corrfunc/mocks/DDrppi_mocks.py
|
Python
|
mit
| 18,727
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse # noqa
from django import forms
from django import http
from django import shortcuts
from mox import IsA # noqa
from horizon import tables
from horizon.tables import formset as table_formset
from horizon.tables import views as table_views
from horizon.test import helpers as test
class FakeObject(object):
def __init__(self, id, name, value, status, optional=None, excluded=None):
self.id = id
self.name = name
self.value = value
self.status = status
self.optional = optional
self.excluded = excluded
self.extra = "extra"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
TEST_DATA = (
FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
FakeObject('2', 'object_2', '<strong>evil</strong>', 'down', 'optional_2'),
FakeObject('3', 'object_3', 'value_3', 'up'),
)
TEST_DATA_2 = (
FakeObject('1', 'object_1', 'value_1', 'down', 'optional_1', 'excluded_1'),
)
TEST_DATA_3 = (
FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
)
TEST_DATA_4 = (
FakeObject('1', 'object_1', 2, 'up'),
FakeObject('2', 'object_2', 4, 'up'),
)
TEST_DATA_5 = (
FakeObject('1', 'object_1', 'A Value That is longer than 35 characters!',
'down', 'optional_1'),
)
class MyLinkAction(tables.LinkAction):
name = "login"
verbose_name = "Log In"
url = "login"
attrs = {
"class": "ajax-modal",
}
def get_link_url(self, datum=None, *args, **kwargs):
return reverse(self.url)
class MyAction(tables.Action):
name = "delete"
verbose_name = "Delete Me"
verbose_name_plural = "Delete Them"
def allowed(self, request, obj=None):
return getattr(obj, 'status', None) != 'down'
def handle(self, data_table, request, object_ids):
return shortcuts.redirect('http://example.com/?ids=%s'
% ",".join(object_ids))
class MyColumn(tables.Column):
pass
class MyRow(tables.Row):
ajax = True
@classmethod
def get_data(cls, request, obj_id):
return TEST_DATA_2[0]
class MyBatchAction(tables.BatchAction):
name = "batch"
action_present = "Batch"
action_past = "Batched"
data_type_singular = "Item"
data_type_plural = "Items"
def action(self, request, object_ids):
pass
class MyToggleAction(tables.BatchAction):
name = "toggle"
action_present = ("Down", "Up")
action_past = ("Downed", "Upped")
data_type_singular = "Item"
data_type_plural = "Items"
def allowed(self, request, obj=None):
if not obj:
return False
self.down = getattr(obj, 'status', None) == 'down'
if self.down:
self.current_present_action = 1
return self.down or getattr(obj, 'status', None) == 'up'
def action(self, request, object_ids):
if self.down:
#up it
self.current_past_action = 1
class MyFilterAction(tables.FilterAction):
def filter(self, table, objs, filter_string):
q = filter_string.lower()
def comp(obj):
if q in obj.name.lower():
return True
return False
return filter(comp, objs)
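# Note: this suite targets Python 2 (see the ``unicode`` assertions below),
# where the builtin ``filter`` used above returns a list rather than an
# iterator.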
class MyUpdateAction(tables.UpdateAction):
def allowed(self, *args):
return True
def update_cell(self, *args):
pass
class MyUpdateActionNotAllowed(MyUpdateAction):
def allowed(self, *args):
return False
def get_name(obj):
return "custom %s" % obj.name
def get_link(obj):
return reverse('login')
class MyTable(tables.DataTable):
id = tables.Column('id', hidden=True, sortable=False)
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.CharField(required=True),
form_field_attributes={'class': 'test'},
update_action=MyUpdateAction)
value = tables.Column('value',
sortable=True,
link='http://example.com/',
attrs={'class': 'green blue'},
summation="average",
truncate=35,
link_classes=('link-modal',))
status = tables.Column('status', link=get_link)
optional = tables.Column('optional', empty_value='N/A')
excluded = tables.Column('excluded')
class Meta:
name = "my_table"
verbose_name = "My Table"
status_columns = ["status"]
columns = ('id', 'name', 'value', 'optional', 'status')
row_class = MyRow
column_class = MyColumn
table_actions = (MyFilterAction, MyAction, MyBatchAction)
row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction)
class MyTableNotAllowedInlineEdit(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.CharField(required=True),
form_field_attributes={'class': 'test'},
update_action=MyUpdateActionNotAllowed)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
row_class = MyRow
class NoActionsTable(tables.DataTable):
id = tables.Column('id')
class Meta:
name = "no_actions_table"
verbose_name = "No Actions Table"
table_actions = ()
row_actions = ()
class DataTableTests(test.TestCase):
def test_table_instantiation(self):
"""Tests everything that happens when the table is instantiated."""
self.table = MyTable(self.request, TEST_DATA)
# Properties defined on the table
self.assertEqual(self.table.data, TEST_DATA)
self.assertEqual(self.table.name, "my_table")
# Verify calculated options that weren't specified explicitly
self.assertTrue(self.table._meta.actions_column)
self.assertTrue(self.table._meta.multi_select)
# Test for verbose_name
self.assertEqual(unicode(self.table), u"My Table")
# Column ordering and exclusion.
# This should include auto-columns for multi_select and actions,
# but should not contain the excluded column.
# Additionally, auto-generated columns should use the custom
# column class specified on the table.
self.assertQuerysetEqual(self.table.columns.values(),
['<MyColumn: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: optional>',
'<Column: status>',
'<MyColumn: actions>'])
# Actions (these also test ordering)
self.assertQuerysetEqual(self.table.base_actions.values(),
['<MyBatchAction: batch>',
'<MyAction: delete>',
'<MyFilterAction: filter>',
'<MyLinkAction: login>',
'<MyToggleAction: toggle>'])
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>',
'<MyBatchAction: batch>'])
self.assertQuerysetEqual(self.table.get_row_actions(TEST_DATA[0]),
['<MyAction: delete>',
'<MyLinkAction: login>',
'<MyBatchAction: batch>',
'<MyToggleAction: toggle>'])
# Auto-generated columns
multi_select = self.table.columns['multi_select']
self.assertEqual(multi_select.auto, "multi_select")
self.assertEqual(multi_select.get_final_attrs().get('class', ""),
"multi_select_column")
actions = self.table.columns['actions']
self.assertEqual(actions.auto, "actions")
self.assertEqual(actions.get_final_attrs().get('class', ""),
"actions_column")
# In-line edit action on column.
name_column = self.table.columns['name']
self.assertEqual(name_column.update_action, MyUpdateAction)
self.assertEqual(name_column.form_field.__class__, forms.CharField)
self.assertEqual(name_column.form_field_attributes, {'class': 'test'})
def test_table_force_no_multiselect(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
multi_select = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_force_no_actions_column(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
actions_column = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_inline_editing(self):
class TempTable(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
self.table = TempTable(self.request, TEST_DATA_2)
name_column = self.table.columns['name']
self.assertEqual(name_column.update_action, None)
self.assertEqual(name_column.form_field, None)
self.assertEqual(name_column.form_field_attributes, {})
def test_table_natural_no_actions_column(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_multiselect(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_column_inheritance(self):
class TempTable(MyTable):
extra = tables.Column('extra')
class Meta:
name = "temp_table"
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: status>',
'<Column: optional>',
'<Column: excluded>',
'<Column: extra>',
'<Column: actions>'])
def test_table_construction(self):
self.table = MyTable(self.request, TEST_DATA)
# Verify we retrieve the right columns for headers
columns = self.table.get_columns()
self.assertQuerysetEqual(columns, ['<MyColumn: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: optional>',
'<Column: status>',
'<MyColumn: actions>'])
# Verify we retrieve the right rows from our data
rows = self.table.get_rows()
self.assertQuerysetEqual(rows, ['<MyRow: my_table__row__1>',
'<MyRow: my_table__row__2>',
'<MyRow: my_table__row__3>'])
# Verify each row contains the right cells
self.assertQuerysetEqual(rows[0].get_cells(),
['<Cell: multi_select, my_table__row__1>',
'<Cell: id, my_table__row__1>',
'<Cell: name, my_table__row__1>',
'<Cell: value, my_table__row__1>',
'<Cell: optional, my_table__row__1>',
'<Cell: status, my_table__row__1>',
'<Cell: actions, my_table__row__1>'])
def test_table_column(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
row3 = self.table.get_rows()[2]
id_col = self.table.columns['id']
name_col = self.table.columns['name']
value_col = self.table.columns['value']
# transform
self.assertEqual(row.cells['id'].data, '1') # Standard attr access
self.assertEqual(row.cells['name'].data, 'custom object_1') # Callable
# name and verbose_name
self.assertEqual(unicode(id_col), "Id")
self.assertEqual(unicode(name_col), "Verbose Name")
# sortable
self.assertEqual(id_col.sortable, False)
self.assertNotIn("sortable", id_col.get_final_attrs().get('class', ""))
self.assertEqual(name_col.sortable, True)
self.assertIn("sortable", name_col.get_final_attrs().get('class', ""))
# hidden
self.assertEqual(id_col.hidden, True)
self.assertIn("hide", id_col.get_final_attrs().get('class', ""))
self.assertEqual(name_col.hidden, False)
self.assertNotIn("hide", name_col.get_final_attrs().get('class', ""))
# link, link_classes and get_link_url
self.assertIn('href="http://example.com/"', row.cells['value'].value)
self.assertIn('class="link-modal"', row.cells['value'].value)
self.assertIn('href="/auth/login/"', row.cells['status'].value)
# empty_value
self.assertEqual(row3.cells['optional'].value, "N/A")
# classes
self.assertEqual(value_col.get_final_attrs().get('class', ""),
"green blue sortable anchor normal_column")
# status
cell_status = row.cells['status'].status
self.assertEqual(cell_status, True)
self.assertEqual(row.cells['status'].get_status_class(cell_status),
'status_up')
# status_choices
id_col.status = True
id_col.status_choices = (('1', False), ('2', True), ('3', None))
cell_status = row.cells['id'].status
self.assertEqual(cell_status, False)
self.assertEqual(row.cells['id'].get_status_class(cell_status),
'status_down')
cell_status = row3.cells['id'].status
self.assertEqual(cell_status, None)
self.assertEqual(row.cells['id'].get_status_class(cell_status),
'status_unknown')
# Ensure data is not cached on the column across table instances
self.table = MyTable(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
self.assertTrue("down" in row.cells['status'].value)
def test_table_row(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
self.assertEqual(row.table, self.table)
self.assertEqual(row.datum, TEST_DATA[0])
self.assertEqual(row.id, 'my_table__row__1')
# Verify row status works even if status isn't set on the column
self.assertEqual(row.status, True)
self.assertEqual(row.status_class, 'status_up')
# Check the cells as well
cell_status = row.cells['status'].status
self.assertEqual(cell_status, True)
self.assertEqual(row.cells['status'].get_status_class(cell_status),
'status_up')
def test_table_column_truncation(self):
self.table = MyTable(self.request, TEST_DATA_5)
row = self.table.get_rows()[0]
self.assertEqual(len(row.cells['value'].data), 35)
self.assertEqual(row.cells['value'].data,
u'A Value That is longer than 35 c...')
def test_table_rendering(self):
self.table = MyTable(self.request, TEST_DATA)
# Table actions
table_actions = self.table.render_table_actions()
resp = http.HttpResponse(table_actions)
self.assertContains(resp, "table_search", 1)
self.assertContains(resp, "my_table__filter__q", 1)
self.assertContains(resp, "my_table__delete", 1)
self.assertContains(resp, 'id="my_table__action_delete"', 1)
# Row actions
row_actions = self.table.render_row_actions(TEST_DATA[0])
resp = http.HttpResponse(row_actions)
self.assertContains(resp, "<li", 3)
self.assertContains(resp, "my_table__delete__1", 1)
self.assertContains(resp, "my_table__toggle__1", 1)
self.assertContains(resp, "/auth/login/", 1)
self.assertContains(resp, "ajax-modal", 1)
self.assertContains(resp, 'id="my_table__row_1__action_delete"', 1)
# Whole table
resp = http.HttpResponse(self.table.render())
self.assertContains(resp, '<table id="my_table"', 1)
self.assertContains(resp, '<th ', 8)
self.assertContains(resp, 'id="my_table__row__1"', 1)
self.assertContains(resp, 'id="my_table__row__2"', 1)
self.assertContains(resp, 'id="my_table__row__3"', 1)
update_string = "action=row_update&table=my_table&obj_id="
self.assertContains(resp, update_string, 3)
self.assertContains(resp, "data-update-interval", 3)
# Verify our XSS protection
self.assertContains(resp, '<a href="http://example.com/" '
'class="link-modal">'
'<strong>evil</strong></a>', 1)
# Filter = False hides the search box
self.table._meta.filter = False
table_actions = self.table.render_table_actions()
resp = http.HttpResponse(table_actions)
self.assertContains(resp, "table_search", 0)
def test_inline_edit_available_cell_rendering(self):
self.table = MyTable(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
name_cell = row.cells['name']
        # Check that in-line edit is available in the cell,
        # but the cell is not in inline_edit_mod.
self.assertEqual(name_cell.inline_edit_available,
True)
self.assertEqual(name_cell.inline_edit_mod,
False)
        # Check that the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(resp,
'data-update-url="?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'table_cell_data_wrapper', 1)
self.assertContains(resp, 'table_cell_action', 1)
self.assertContains(resp, 'ajax-inline-edit', 1)
def test_inline_edit_available_not_allowed_cell_rendering(self):
self.table = MyTableNotAllowedInlineEdit(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
name_cell = row.cells['name']
        # Check that in-line edit is available in the cell,
        # but the cell is not in inline_edit_mod.
self.assertEqual(name_cell.inline_edit_available,
True)
self.assertEqual(name_cell.inline_edit_mod,
False)
        # Check that the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(resp,
'data-update-url="?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 0)
self.assertContains(resp, 'table_cell_data_wrapper', 0)
self.assertContains(resp, 'table_cell_action', 0)
self.assertContains(resp, 'ajax-inline-edit', 0)
def test_inline_edit_mod_cell_rendering(self):
self.table = MyTable(self.request, TEST_DATA_2)
name_col = self.table.columns['name']
name_col.auto = "form_field"
row = self.table.get_rows()[0]
name_cell = row.cells['name']
name_cell.inline_edit_mod = True
        # Check that in-line edit is available in the cell
        # and that it is in inline_edit_mod; the column's auto
        # attribute must also be set to 'form_field'.
self.assertEqual(name_cell.inline_edit_available,
True)
self.assertEqual(name_cell.inline_edit_mod,
True)
self.assertEqual(name_col.auto,
'form_field')
        # Check that the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp,
'<input class="test" id="name__1" name="name__1"'
' type="text" value="custom object_1" />',
count=1, html=True)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(resp,
'data-update-url="?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'inline-edit-error', 1)
self.assertContains(resp, 'inline-edit-form', 1)
self.assertContains(resp, 'inline-edit-actions', 1)
self.assertContains(resp, 'inline-edit-submit', 1)
self.assertContains(resp, 'inline-edit-cancel', 1)
def test_inline_edit_mod_checkbox_with_label(self):
class TempTable(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.BooleanField(
required=True,
label="Verbose Name"),
form_field_attributes={'class': 'test'},
update_action=MyUpdateAction)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
self.table = TempTable(self.request, TEST_DATA_2)
name_col = self.table.columns['name']
name_col.auto = "form_field"
row = self.table.get_rows()[0]
name_cell = row.cells['name']
name_cell.inline_edit_mod = True
        # Check that the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp,
'<input checked="checked" class="test" '
'id="name__1" name="name__1" type="checkbox" '
'value="custom object_1" />',
count=1, html=True)
self.assertContains(resp,
'<label class="inline-edit-label" for="name__1">'
'Verbose Name</label>',
count=1, html=True)
def test_inline_edit_mod_textarea(self):
class TempTable(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.CharField(
widget=forms.Textarea(),
required=False),
form_field_attributes={'class': 'test'},
update_action=MyUpdateAction)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
self.table = TempTable(self.request, TEST_DATA_2)
name_col = self.table.columns['name']
name_col.auto = "form_field"
row = self.table.get_rows()[0]
name_cell = row.cells['name']
name_cell.inline_edit_mod = True
        # Check that the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp,
'<textarea class="test" cols="40" id="name__1" '
'name="name__1" rows="10">\r\ncustom object_1'
'</textarea>',
count=1, html=True)
def test_table_actions(self):
# Single object action
action_string = "my_table__delete__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', '1'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "http://example.com/?ids=1")
# Batch action (without toggle) conjugation behavior
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_3)
toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[2]
self.assertEqual(unicode(toggle_action.verbose_name), "Batch Item")
# Single object toggle action
# GET page - 'up' to 'down'
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_3)
self.assertEqual(len(self.table.get_row_actions(TEST_DATA_3[0])), 4)
toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[3]
self.assertEqual(unicode(toggle_action.verbose_name), "Down Item")
# Toggle from status 'up' to 'down'
# POST page
action_string = "my_table__toggle__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'toggle', '1'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "/my_url/")
self.assertEqual(list(req._messages)[0].message,
u"Downed Item: object_1")
# Toggle from status 'down' to 'up'
# GET page - 'down' to 'up'
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_2)
self.assertEqual(len(self.table.get_row_actions(TEST_DATA_2[0])), 3)
toggle_action = self.table.get_row_actions(TEST_DATA_2[0])[2]
self.assertEqual(unicode(toggle_action.verbose_name), "Up Item")
# POST page
action_string = "my_table__toggle__2"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'toggle', '2'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "/my_url/")
self.assertEqual(list(req._messages)[0].message,
u"Upped Item: object_2")
# Multiple object action
action_string = "my_table__delete"
req = self.factory.post('/my_url/', {'action': action_string,
'object_ids': [1, 2]})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', None))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "http://example.com/?ids=1,2")
# Action with nothing selected
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', None))
handled = self.table.maybe_handle()
self.assertEqual(handled, None)
self.assertEqual(list(req._messages)[0].message,
"Please select a row before taking that action.")
# Action with specific id and multiple ids favors single id
action_string = "my_table__delete__3"
req = self.factory.post('/my_url/', {'action': action_string,
'object_ids': [1, 2]})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', '3'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"],
"http://example.com/?ids=3")
# At least one object in table
# BatchAction is available
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_2)
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>',
'<MyBatchAction: batch>'])
# Zero objects in table
# BatchAction not available
req = self.factory.get('/my_url/')
self.table = MyTable(req, None)
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>'])
# Filtering
action_string = "my_table__filter__q"
req = self.factory.post('/my_url/', {action_string: '2'})
self.table = MyTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertEqual(handled, None)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_2>'])
        # Ensure filtering respects the request method, e.g. no filter here
req = self.factory.get('/my_url/', {action_string: '2'})
self.table = MyTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertEqual(handled, None)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_1>',
'<FakeObject: object_2>',
'<FakeObject: object_3>'])
# Updating and preemptive actions
params = {"table": "my_table", "action": "row_update", "obj_id": "1"}
req = self.factory.get('/my_url/',
params,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req)
resp = self.table.maybe_preempt()
self.assertEqual(resp.status_code, 200)
# Make sure the data returned differs from the original
self.assertContains(resp, "my_table__row__1")
self.assertContains(resp, "status_down")
# Verify that we don't get a response for a valid action with the
# wrong method.
params = {"table": "my_table", "action": "delete", "obj_id": "1"}
req = self.factory.get('/my_url/', params)
self.table = MyTable(req)
resp = self.table.maybe_preempt()
self.assertEqual(resp, None)
resp = self.table.maybe_handle()
self.assertEqual(resp, None)
# Verbose names
table_actions = self.table.get_table_actions()
self.assertEqual(unicode(table_actions[0].verbose_name), "Filter")
self.assertEqual(unicode(table_actions[1].verbose_name), "Delete Me")
row_actions = self.table.get_row_actions(TEST_DATA[0])
self.assertEqual(unicode(row_actions[0].verbose_name), "Delete Me")
self.assertEqual(unicode(row_actions[1].verbose_name), "Log In")
def test_inline_edit_update_action_get_non_ajax(self):
        # A non-AJAX inline edit request should return None.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {})
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(handled, None)
def test_inline_edit_update_action_get(self):
        # A GET request should return a td element with the cell data.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(handled.status_code, 200)
# Checking the response content.
resp = handled
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(
resp,
'data-update-url="/my_url/?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'table_cell_data_wrapper', 1)
self.assertContains(resp, 'table_cell_action', 1)
self.assertContains(resp, 'ajax-inline-edit', 1)
def test_inline_edit_update_action_get_not_allowed(self):
        # Inline edit is not allowed for this table, so the
        # request should be rejected with a 401.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {})
self.table = MyTableNotAllowedInlineEdit(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(handled.status_code, 401)
def test_inline_edit_update_action_get_inline_edit_mod(self):
        # A GET request in inline_edit_mod should return a td with a form field.
url = ('/my_url/?inline_edit_mod=true&action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(handled.status_code, 200)
# Checking the response content.
resp = handled
self.assertContains(resp,
'<input class="test" id="name__1" name="name__1"'
' type="text" value="custom object_1" />',
count=1, html=True)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(
resp,
'data-update-url="/my_url/?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'inline-edit-error', 1)
self.assertContains(resp, 'inline-edit-form', 1)
self.assertContains(resp, 'inline-edit-actions', 1)
self.assertContains(resp, '<button', 2)
self.assertContains(resp, 'inline-edit-submit', 1)
self.assertContains(resp, 'inline-edit-cancel', 1)
def test_inline_edit_update_action_post(self):
# Post request should invoke the cell update table action.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {'name__1': 'test_name'})
self.table = MyTable(req, TEST_DATA_2)
# checking the response header
handled = self.table.maybe_preempt()
self.assertEqual(handled.status_code, 200)
def test_inline_edit_update_action_post_not_allowed(self):
        # Inline edit is not allowed for this table, so the post
        # request should be rejected with a 401.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {'name__1': 'test_name'})
self.table = MyTableNotAllowedInlineEdit(req, TEST_DATA_2)
# checking the response header
handled = self.table.maybe_preempt()
self.assertEqual(handled.status_code, 401)
def test_inline_edit_update_action_post_validation_error(self):
        # The name column has required validation; sending a blank
        # value returns an error.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {})
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(handled.status_code, 400)
self.assertEqual(handled._headers['content-type'],
('Content-Type', 'application/json'))
# Checking the response content.
resp = handled
self.assertContains(resp,
'"message": "This field is required."',
count=1, status_code=400)
def test_column_uniqueness(self):
table1 = MyTable(self.request)
table2 = MyTable(self.request)
# Regression test for launchpad bug 964345.
self.assertNotEqual(id(table1), id(table2))
self.assertNotEqual(id(table1.columns), id(table2.columns))
t1cols = table1.columns.values()
t2cols = table2.columns.values()
self.assertEqual(t1cols[0].name, t2cols[0].name)
self.assertNotEqual(id(t1cols[0]), id(t2cols[0]))
self.assertNotEqual(id(t1cols[0].table),
id(t2cols[0].table))
self.assertNotEqual(id(t1cols[0].table._data_cache),
id(t2cols[0].table._data_cache))
def test_summation_row(self):
# Test with the "average" method.
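        # (TEST_DATA_4 presumably holds the values 2 and 4, so the average
        # is 3.0 and the sum is 6, matching the assertions below.)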
table = MyTable(self.request, TEST_DATA_4)
res = http.HttpResponse(table.render())
self.assertContains(res, '<tr class="summation"', 1)
self.assertContains(res, '<td>Summary</td>', 1)
self.assertContains(res, '<td>3.0</td>', 1)
# Test again with the "sum" method.
table.columns['value'].summation = "sum"
res = http.HttpResponse(table.render())
self.assertContains(res, '<tr class="summation"', 1)
self.assertContains(res, '<td>Summary</td>', 1)
self.assertContains(res, '<td>6</td>', 1)
# One last test with no summation.
table.columns['value'].summation = None
table.needs_summary_row = False
res = http.HttpResponse(table.render())
self.assertNotContains(res, '<tr class="summation"')
self.assertNotContains(res, '<td>3.0</td>')
self.assertNotContains(res, '<td>6</td>')
def test_table_action_attributes(self):
table = MyTable(self.request, TEST_DATA)
self.assertTrue(table.has_actions)
self.assertTrue(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertContains(res, "<form")
table = MyTable(self.request, TEST_DATA, needs_form_wrapper=False)
self.assertTrue(table.has_actions)
self.assertFalse(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertNotContains(res, "<form")
table = NoActionsTable(self.request, TEST_DATA)
self.assertFalse(table.has_actions)
self.assertFalse(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertNotContains(res, "<form")
def test_table_action_object_display_is_none(self):
action_string = "my_table__toggle__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.mox.StubOutWithMock(self.table, 'get_object_display')
self.table.get_object_display(IsA(FakeObject)).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'toggle', '1'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "/my_url/")
self.assertEqual(list(req._messages)[0].message,
u"Downed Item: N/A")
class SingleTableView(table_views.DataTableView):
table_class = MyTable
name = "Single Table"
slug = "single"
template_name = "horizon/common/_detail_table.html"
def get_data(self):
return TEST_DATA
class TableWithPermissions(tables.DataTable):
id = tables.Column('id')
class Meta:
name = "table_with_permissions"
permissions = ('horizon.test',)
class SingleTableViewWithPermissions(SingleTableView):
table_class = TableWithPermissions
class MultiTableView(tables.MultiTableView):
table_classes = (TableWithPermissions, MyTable)
def get_table_with_permissions_data(self):
return TEST_DATA
def get_my_table_data(self):
return TEST_DATA
class DataTableViewTests(test.TestCase):
def _prepare_view(self, cls, *args, **kwargs):
req = self.factory.get('/my_url/')
req.user = self.user
view = cls()
view.request = req
view.args = args
view.kwargs = kwargs
return view
def test_data_table_view(self):
view = self._prepare_view(SingleTableView)
context = view.get_context_data()
self.assertEqual(context['table'].__class__,
SingleTableView.table_class)
def test_data_table_view_not_authorized(self):
view = self._prepare_view(SingleTableViewWithPermissions)
context = view.get_context_data()
self.assertNotIn('table', context)
def test_data_table_view_authorized(self):
view = self._prepare_view(SingleTableViewWithPermissions)
self.set_permissions(permissions=['test'])
context = view.get_context_data()
self.assertIn('table', context)
self.assertEqual(context['table'].__class__,
SingleTableViewWithPermissions.table_class)
def test_multi_table_view_not_authorized(self):
view = self._prepare_view(MultiTableView)
context = view.get_context_data()
self.assertEqual(context['my_table_table'].__class__, MyTable)
self.assertNotIn('table_with_permissions_table', context)
def test_multi_table_view_authorized(self):
view = self._prepare_view(MultiTableView)
self.set_permissions(permissions=['test'])
context = view.get_context_data()
self.assertEqual(context['my_table_table'].__class__, MyTable)
self.assertEqual(context['table_with_permissions_table'].__class__,
TableWithPermissions)
class FormsetTableTests(test.TestCase):
def test_populate(self):
"""Create a FormsetDataTable and populate it with data."""
class TableForm(forms.Form):
name = forms.CharField()
value = forms.IntegerField()
TableFormset = forms.formsets.formset_factory(TableForm, extra=0)
class Table(table_formset.FormsetDataTable):
formset_class = TableFormset
name = tables.Column('name')
value = tables.Column('value')
class Meta:
name = 'table'
table = Table(self.request)
table.data = TEST_DATA_4
formset = table.get_formset()
self.assertEqual(len(formset), 2)
form = formset[0]
form_data = form.initial
self.assertEqual(form_data['name'], 'object_1')
self.assertEqual(form_data['value'], 2)
|
ikargis/horizon_fod
|
horizon/test/tests/tables.py
|
Python
|
apache-2.0
| 46,698
|
import os
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
from ukbrest.common.postloader import Postloader
class PostloaderTest(DBTest):
def test_postload_codings_table_basic(self):
# prepare
directory = get_repository_path('postloader/codings01')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_codings(directory)
# validate
        ## Check codings table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('codings'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
codings = pd.read_sql("select * from codings order by data_coding, coding", create_engine(POSTGRESQL_ENGINE))
assert codings is not None
expected_columns = ['data_coding', 'coding', 'meaning']
assert len(codings.columns) >= len(expected_columns)
assert all(x in codings.columns for x in expected_columns)
assert not codings.empty
assert codings.shape[0] == 4
cidx = 0
assert codings.loc[cidx, 'data_coding'] == 7
assert codings.loc[cidx, 'coding'] == '0'
assert codings.loc[cidx, 'meaning'] == 'No'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 7
assert codings.loc[cidx, 'coding'] == '1'
assert codings.loc[cidx, 'meaning'] == 'Yes'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 9
assert codings.loc[cidx, 'coding'] == '0'
assert codings.loc[cidx, 'meaning'] == 'Female'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 9
assert codings.loc[cidx, 'coding'] == '1'
assert codings.loc[cidx, 'meaning'] == 'Male'
def test_postload_codings_negative_coding(self):
# prepare
directory = get_repository_path('postloader/codings02_negative')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_codings(directory)
# validate
        ## Check codings table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('codings'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
codings = pd.read_sql("select * from codings order by data_coding, coding", create_engine(POSTGRESQL_ENGINE))
assert codings is not None
expected_columns = ['data_coding', 'coding', 'meaning']
assert len(codings.columns) >= len(expected_columns)
assert all(x in codings.columns for x in expected_columns)
assert not codings.empty
assert codings.shape[0] == 2
cidx = 0
assert codings.loc[cidx, 'data_coding'] == 13
assert codings.loc[cidx, 'coding'] == '-1'
assert codings.loc[cidx, 'meaning'] == 'Date uncertain or unknown'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 13
assert codings.loc[cidx, 'coding'] == '-3'
assert codings.loc[cidx, 'meaning'] == 'Preferred not to answer'
def test_postload_codings_tree_structured(self):
# prepare
directory = get_repository_path('postloader/codings03_tree')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_codings(directory)
# validate
        ## Check codings table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('codings'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
codings = pd.read_sql("select * from codings order by data_coding, coding::int, node_id asc", create_engine(POSTGRESQL_ENGINE))
assert codings is not None
expected_columns = ['data_coding', 'coding', 'meaning', 'node_id', 'parent_id', 'selectable']
assert len(codings.columns) >= len(expected_columns)
assert all(x in codings.columns for x in expected_columns)
assert not codings.empty
assert codings.shape[0] == 474 + 2
assert set(np.unique(codings.loc[:, 'data_coding'])) == {6, 7}
cidx = 0
assert codings.loc[cidx, 'data_coding'] == 6
assert codings.loc[cidx, 'coding'] == '-1'
assert codings.loc[cidx, 'meaning'] == 'cardiovascular'
assert codings.loc[cidx, 'node_id'] == 1071
assert codings.loc[cidx, 'parent_id'] == 0
assert codings.loc[cidx, 'selectable'] == False
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 6
assert codings.loc[cidx, 'coding'] == '-1'
assert codings.loc[cidx, 'meaning'] == 'respiratory/ent'
assert codings.loc[cidx, 'node_id'] == 1072
assert codings.loc[cidx, 'parent_id'] == 0
assert codings.loc[cidx, 'selectable'] == False
cidx = 10
assert codings.loc[cidx, 'data_coding'] == 6
assert codings.loc[cidx, 'coding'] == '-1'
assert codings.loc[cidx, 'meaning'] == 'cerebrovascular disease'
assert codings.loc[cidx, 'node_id'] == 1083
assert codings.loc[cidx, 'parent_id'] == 1071
assert codings.loc[cidx, 'selectable'] == False
cidx = 28
assert codings.loc[cidx, 'data_coding'] == 6
assert codings.loc[cidx, 'coding'] == '1065'
assert codings.loc[cidx, 'meaning'] == 'hypertension'
assert codings.loc[cidx, 'node_id'] == 1081
assert codings.loc[cidx, 'parent_id'] == 1071
assert codings.loc[cidx, 'selectable'] == True
cidx = 277
assert codings.loc[cidx, 'data_coding'] == 6
assert codings.loc[cidx, 'coding'] == '1478'
assert codings.loc[cidx, 'meaning'] == 'cervical spondylosis'
assert codings.loc[cidx, 'node_id'] == 1541
assert codings.loc[cidx, 'parent_id'] == 1608
assert codings.loc[cidx, 'selectable'] == True
cidx = 473
assert codings.loc[cidx, 'data_coding'] == 6
assert codings.loc[cidx, 'coding'] == '99999'
assert codings.loc[cidx, 'meaning'] == 'unclassifiable'
assert codings.loc[cidx, 'node_id'] == 99999
assert codings.loc[cidx, 'parent_id'] == 0
assert codings.loc[cidx, 'selectable'] == False
cidx = 474
assert codings.loc[cidx, 'data_coding'] == 7
assert codings.loc[cidx, 'coding'] == '0'
assert codings.loc[cidx, 'meaning'] == 'No'
assert pd.isnull(codings.loc[cidx, 'node_id'])
assert pd.isnull(codings.loc[cidx, 'parent_id'])
assert pd.isnull(codings.loc[cidx, 'selectable'])
cidx = 475
assert codings.loc[cidx, 'data_coding'] == 7
assert codings.loc[cidx, 'coding'] == '1'
assert codings.loc[cidx, 'meaning'] == 'Yes'
assert pd.isnull(codings.loc[cidx, 'node_id'])
assert pd.isnull(codings.loc[cidx, 'parent_id'])
assert pd.isnull(codings.loc[cidx, 'selectable'])
def test_postload_codings_check_constrains_exist(self):
# prepare
directory = get_repository_path('postloader/codings03_tree')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_codings(directory)
# Validate
        ## Check codings table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('codings'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('codings', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(POSTGRESQL_ENGINE))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 3
assert 'data_coding' in columns
assert 'coding' in columns
assert 'meaning' in columns
        # index covering the coding hierarchy columns
constraint_sql = self._get_table_contrains('codings', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(POSTGRESQL_ENGINE))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 5
assert 'data_coding' in columns
assert 'coding' in columns
assert 'node_id' in columns
assert 'parent_id' in columns
assert 'selectable' in columns
def test_postload_codings_vacuum(self):
# prepare
directory = get_repository_path('postloader/codings03_tree')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_codings(directory)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
        ## Check codings table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('codings'), db_engine)
assert table.iloc[0, 0]
vacuum_data = pd.DataFrame()
query_count = 0
        # FIXME: busy-polls (up to 150 queries) until vacuum/analyze statistics appear
while vacuum_data.empty and query_count < 150:
vacuum_data = pd.read_sql("""
select relname, last_vacuum, last_analyze
from pg_stat_user_tables
where schemaname = 'public' and last_vacuum is not null and last_analyze is not null
""", db_engine)
query_count += 1
assert vacuum_data is not None
assert not vacuum_data.empty
tables = vacuum_data['relname'].tolist()
assert 'codings' in tables
def test_postload_load_samples_data_one_file(self):
# prepare
directory = get_repository_path('postloader/samples_data01')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
        ## Check samplesqc table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['ccolumn_name_0_0', 'canothercolumn_0_0', 'cthird_column_0_0', 'cother_measure_col_umn_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 4
assert samplesqc.loc[10, 'ccolumn_name_0_0'] == 'UKBB'
assert samplesqc.loc[10, 'canothercolumn_0_0'] == 'Batch'
assert samplesqc.loc[10, 'cthird_column_0_0'] == 'SomeValue'
assert samplesqc.loc[10, 'cother_measure_col_umn_0_0'] == 8.33992
assert samplesqc.loc[20, 'ccolumn_name_0_0'] == 'Other'
assert samplesqc.loc[20, 'canothercolumn_0_0'] == 'Some'
assert samplesqc.loc[20, 'cthird_column_0_0'] == 'AnotherValue'
assert samplesqc.loc[20, 'cother_measure_col_umn_0_0'] == -772.1234
assert samplesqc.loc[30, 'ccolumn_name_0_0'] == 'Other12'
assert samplesqc.loc[30, 'canothercolumn_0_0'] == 'Some12'
assert samplesqc.loc[30, 'cthird_column_0_0'] == 'AnotherValue12'
assert samplesqc.loc[30, 'cother_measure_col_umn_0_0'] == -0.000001234
assert samplesqc.loc[2222240, 'ccolumn_name_0_0'] == 'Other13'
assert samplesqc.loc[2222240, 'canothercolumn_0_0'] == 'Some13'
assert samplesqc.loc[2222240, 'cthird_column_0_0'] == 'AnotherValue13'
assert samplesqc.loc[2222240, 'cother_measure_col_umn_0_0'] == 0.051234
def test_postload_load_samples_data_two_files(self):
# prepare
directory = get_repository_path('postloader/samples_data02')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['ccolumn_name_0_0', 'canothercolumn_0_0', 'cpc1_0_0', 'cpc2_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 2
assert samplesqc.loc[10, 'ccolumn_name_0_0'] == 'UKBB'
assert samplesqc.loc[10, 'canothercolumn_0_0'] == 'Batch'
assert samplesqc.loc[10, 'cpc1_0_0'] == -1.76106
assert samplesqc.loc[10, 'cpc2_0_0'] == 0.357072
assert samplesqc.loc[2222240, 'ccolumn_name_0_0'] == 'Other13'
assert samplesqc.loc[2222240, 'canothercolumn_0_0'] == 'Some13'
assert samplesqc.loc[2222240, 'cpc1_0_0'] == 2.47186
assert samplesqc.loc[2222240, 'cpc2_0_0'] == -5.46438
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from relatedness order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['cid2_0_0', 'chethet_0_0', 'cibs0_0_0', 'ckinship_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 3
assert samplesqc.loc[10, 'cid2_0_0'] == 10
assert samplesqc.loc[10, 'chethet_0_0'] == 0.016
assert samplesqc.loc[10, 'cibs0_0_0'] == 0.0148
assert samplesqc.loc[10, 'ckinship_0_0'] == 0.1367
assert samplesqc.loc[20, 'cid2_0_0'] == 20
assert samplesqc.loc[20, 'chethet_0_0'] == 0.02
assert samplesqc.loc[20, 'cibs0_0_0'] == 0.0143
assert samplesqc.loc[20, 'ckinship_0_0'] == 0.0801
assert samplesqc.loc[2222240, 'cid2_0_0'] == 2222240
assert samplesqc.loc[2222240, 'chethet_0_0'] == 0.038
assert samplesqc.loc[2222240, 'cibs0_0_0'] == 0.0227
assert samplesqc.loc[2222240, 'ckinship_0_0'] == 0.0742
def test_postload_load_samples_data_no_eid_column(self):
# prepare
directory = get_repository_path('postloader/samples_data03')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert not table.iloc[0, 0]
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert not table.iloc[0, 0]
def test_postload_load_samples_data_identifier_column_specified(self):
# prepare
directory = get_repository_path('postloader/samples_data03')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory, {
'relatedness.txt': 'ID1',
'samplesqc.txt': 'ID',
})
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['ccolumn_name_0_0', 'canothercolumn_0_0', 'cpc1_0_0', 'cpc2_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 2
assert samplesqc.loc[10, 'ccolumn_name_0_0'] == 'UKBB'
assert samplesqc.loc[10, 'canothercolumn_0_0'] == 'Batch'
assert samplesqc.loc[10, 'cpc1_0_0'] == -1.76106
assert samplesqc.loc[10, 'cpc2_0_0'] == 0.357072
assert samplesqc.loc[2222240, 'ccolumn_name_0_0'] == 'Other13'
assert samplesqc.loc[2222240, 'canothercolumn_0_0'] == 'Some13'
assert samplesqc.loc[2222240, 'cpc1_0_0'] == 2.47186
assert samplesqc.loc[2222240, 'cpc2_0_0'] == -5.46438
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from relatedness order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['cid2_0_0', 'chethet_0_0', 'cibs0_0_0', 'ckinship_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 3
assert samplesqc.loc[10, 'cid2_0_0'] == 10
assert samplesqc.loc[10, 'chethet_0_0'] == 0.016
assert samplesqc.loc[10, 'cibs0_0_0'] == 0.0148
assert samplesqc.loc[10, 'ckinship_0_0'] == 0.1367
assert samplesqc.loc[20, 'cid2_0_0'] == 20
assert samplesqc.loc[20, 'chethet_0_0'] == 0.02
assert samplesqc.loc[20, 'cibs0_0_0'] == 0.0143
assert samplesqc.loc[20, 'ckinship_0_0'] == 0.0801
assert samplesqc.loc[2222240, 'cid2_0_0'] == 2222240
assert samplesqc.loc[2222240, 'chethet_0_0'] == 0.038
assert samplesqc.loc[2222240, 'cibs0_0_0'] == 0.0227
assert samplesqc.loc[2222240, 'ckinship_0_0'] == 0.0742
def test_postload_load_samples_data_skip_column(self):
# prepare
directory = get_repository_path('postloader/samples_data03')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory,
identifier_columns={
'relatedness.txt': 'ID1',
'samplesqc.txt': 'ID',
},
skip_columns={
'relatedness.txt': ['ID2'],
'samplesqc.txt': ['PC1', 'column.name'],
}
)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['canothercolumn_0_0', 'cpc2_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 2
assert samplesqc.loc[10, 'canothercolumn_0_0'] == 'Batch'
assert samplesqc.loc[10, 'cpc2_0_0'] == 0.357072
assert samplesqc.loc[2222240, 'canothercolumn_0_0'] == 'Some13'
assert samplesqc.loc[2222240, 'cpc2_0_0'] == -5.46438
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from relatedness order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['chethet_0_0', 'cibs0_0_0', 'ckinship_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 3
assert samplesqc.loc[10, 'chethet_0_0'] == 0.016
assert samplesqc.loc[10, 'cibs0_0_0'] == 0.0148
assert samplesqc.loc[10, 'ckinship_0_0'] == 0.1367
assert samplesqc.loc[20, 'chethet_0_0'] == 0.02
assert samplesqc.loc[20, 'cibs0_0_0'] == 0.0143
assert samplesqc.loc[20, 'ckinship_0_0'] == 0.0801
assert samplesqc.loc[2222240, 'chethet_0_0'] == 0.038
assert samplesqc.loc[2222240, 'cibs0_0_0'] == 0.0227
assert samplesqc.loc[2222240, 'ckinship_0_0'] == 0.0742
def test_postload_load_samples_data_different_separators(self):
# prepare
directory = get_repository_path('postloader/samples_data04')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory,
identifier_columns={
'relatedness.txt': 'ID1',
'samplesqc.txt': 'ID',
},
separators={
'relatedness.txt': '\t',
'samplesqc.txt': ',',
}
)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['ccolumn_name_0_0', 'canothercolumn_0_0', 'cpc1_0_0', 'cpc2_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 2
assert samplesqc.loc[10, 'ccolumn_name_0_0'] == 'UKBB'
assert samplesqc.loc[10, 'canothercolumn_0_0'] == 'Batch'
assert samplesqc.loc[10, 'cpc1_0_0'] == -1.76106
assert samplesqc.loc[10, 'cpc2_0_0'] == 0.357072
assert samplesqc.loc[2222240, 'ccolumn_name_0_0'] == 'Other13'
assert samplesqc.loc[2222240, 'canothercolumn_0_0'] == 'Some13'
assert samplesqc.loc[2222240, 'cpc1_0_0'] == 2.47186
assert samplesqc.loc[2222240, 'cpc2_0_0'] == -5.46438
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from relatedness order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['cid2_0_0', 'chethet_0_0', 'cibs0_0_0', 'ckinship_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 3
assert samplesqc.loc[10, 'cid2_0_0'] == 10
assert samplesqc.loc[10, 'chethet_0_0'] == 0.016
assert samplesqc.loc[10, 'cibs0_0_0'] == 0.0148
assert samplesqc.loc[10, 'ckinship_0_0'] == 0.1367
assert samplesqc.loc[20, 'cid2_0_0'] == 20
assert samplesqc.loc[20, 'chethet_0_0'] == 0.02
assert samplesqc.loc[20, 'cibs0_0_0'] == 0.0143
assert samplesqc.loc[20, 'ckinship_0_0'] == 0.0801
assert samplesqc.loc[2222240, 'cid2_0_0'] == 2222240
assert samplesqc.loc[2222240, 'chethet_0_0'] == 0.038
assert samplesqc.loc[2222240, 'cibs0_0_0'] == 0.0227
assert samplesqc.loc[2222240, 'ckinship_0_0'] == 0.0742
def test_postload_load_samples_fields_table_filled(self):
# prepare
postloader_directory = get_repository_path('postloader/samples_data04')
pl = Postloader(POSTGRESQL_ENGINE)
pheno_directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(pheno_directory, 'example12_diseases.csv'))
p2sql = Pheno2SQL(csv_file, POSTGRESQL_ENGINE, bgen_sample_file=os.path.join(pheno_directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# run
p2sql.load_data()
pl.load_samples_data(postloader_directory,
identifier_columns={
'relatedness.txt': 'ID1',
'samplesqc.txt': 'ID',
},
separators={
'relatedness.txt': '\t',
'samplesqc.txt': ',',
}
)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['ccolumn_name_0_0', 'canothercolumn_0_0', 'cpc1_0_0', 'cpc2_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 2
# check samplesqc columns are in fields table
tmp = pd.read_sql("select * from fields where table_name = 'samplesqc'", db_engine, index_col='column_name')
assert tmp is not None
assert tmp.shape[0] == len(expected_columns)
assert all(x in tmp.index.tolist() for x in expected_columns)
assert tmp.loc['ccolumn_name_0_0', 'table_name'] == 'samplesqc'
assert tmp.loc['ccolumn_name_0_0', 'field_id'] == 'ccolumn_name_0_0'
assert tmp.loc['ccolumn_name_0_0', 'type'] == 'Text'
assert tmp.loc['canothercolumn_0_0', 'table_name'] == 'samplesqc'
assert tmp.loc['canothercolumn_0_0', 'field_id'] == 'canothercolumn_0_0'
assert tmp.loc['canothercolumn_0_0', 'type'] == 'Text'
assert tmp.loc['cpc1_0_0', 'table_name'] == 'samplesqc'
assert tmp.loc['cpc1_0_0', 'field_id'] == 'cpc1_0_0'
assert tmp.loc['cpc1_0_0', 'type'] == 'Continuous'
assert tmp.loc['cpc2_0_0', 'table_name'] == 'samplesqc'
assert tmp.loc['cpc2_0_0', 'field_id'] == 'cpc2_0_0'
assert tmp.loc['cpc2_0_0', 'type'] == 'Continuous'
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from relatedness order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['cid2_0_0', 'chethet_0_0', 'cibs0_0_0', 'ckinship_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 3
# check relatedness columns are in fields table
tmp = pd.read_sql("select * from fields where table_name = 'relatedness'", db_engine, index_col='column_name')
assert tmp is not None
assert tmp.shape[0] == len(expected_columns)
assert all(x in tmp.index.tolist() for x in expected_columns)
assert tmp.loc['cid2_0_0', 'table_name'] == 'relatedness'
assert tmp.loc['cid2_0_0', 'field_id'] == 'cid2_0_0'
assert tmp.loc['cid2_0_0', 'type'] == 'Integer'
assert tmp.loc['chethet_0_0', 'table_name'] == 'relatedness'
assert tmp.loc['chethet_0_0', 'field_id'] == 'chethet_0_0'
assert tmp.loc['chethet_0_0', 'type'] == 'Continuous'
assert tmp.loc['cibs0_0_0', 'table_name'] == 'relatedness'
assert tmp.loc['cibs0_0_0', 'field_id'] == 'cibs0_0_0'
assert tmp.loc['cibs0_0_0', 'type'] == 'Continuous'
assert tmp.loc['ckinship_0_0', 'table_name'] == 'relatedness'
assert tmp.loc['ckinship_0_0', 'field_id'] == 'ckinship_0_0'
assert tmp.loc['ckinship_0_0', 'type'] == 'Continuous'
def test_postload_samples_data_check_constrains_exist(self):
# prepare
directory = get_repository_path('postloader/samples_data04')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory,
identifier_columns={
'relatedness.txt': 'ID1',
'samplesqc.txt': 'ID',
},
separators={
'relatedness.txt': '\t',
'samplesqc.txt': ',',
}
)
# Validate
## Check samplesqc table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('samplesqc', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(POSTGRESQL_ENGINE))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 1
assert 'eid' in columns
## Check relatedness table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('relatedness', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(POSTGRESQL_ENGINE))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 1
assert 'eid' in columns
def test_postload_codings_table_many_tab_characters_and_na(self):
# prepare
directory = get_repository_path('postloader/codings04_many_tabs')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_codings(directory)
# validate
        ## Check codings table exists
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('codings'),
create_engine(POSTGRESQL_ENGINE))
assert table.iloc[0, 0]
codings = pd.read_sql("select * from codings order by data_coding, coding", create_engine(POSTGRESQL_ENGINE))
assert codings is not None
expected_columns = ['data_coding', 'coding', 'meaning']
assert len(codings.columns) >= len(expected_columns)
assert all(x in codings.columns for x in expected_columns)
assert not codings.empty
assert codings.shape[0] == 5
cidx = 0
assert codings.loc[cidx, 'data_coding'] == 7
assert codings.loc[cidx, 'coding'] == '0'
assert codings.loc[cidx, 'meaning'] == 'No'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 7
assert codings.loc[cidx, 'coding'] == '1'
assert codings.loc[cidx, 'meaning'] == 'Yes'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 9
assert codings.loc[cidx, 'coding'] == '0'
assert codings.loc[cidx, 'meaning'] == 'Female'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 9
assert codings.loc[cidx, 'coding'] == '1'
assert codings.loc[cidx, 'meaning'] == 'Male'
cidx += 1
assert codings.loc[cidx, 'data_coding'] == 9
assert codings.loc[cidx, 'coding'] == '2'
assert codings.loc[cidx, 'meaning'] == 'N/A'
def test_postload_load_samples_data_multiple_identifiers(self):
# prepare
directory = get_repository_path('postloader/samples_data05_multiple_identifiers')
# run
pl = Postloader(POSTGRESQL_ENGINE)
pl.load_samples_data(directory,
identifier_columns={
'relatedness.txt': ['ID1', 'ID2'],
'samplesqc.txt': 'ID',
},
separators={
'relatedness.txt': '\t',
'samplesqc.txt': ',',
},
skip_columns={
'samplesqc.txt': ['PC1', 'column.name'],
}
)
# Validate
db_engine = create_engine(POSTGRESQL_ENGINE)
# samplesqc
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('samplesqc'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from samplesqc order by eid asc",
create_engine(POSTGRESQL_ENGINE), index_col='eid')
assert samplesqc is not None
expected_columns = ['canothercolumn_0_0', 'cpc2_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 2
assert samplesqc.loc[10, 'canothercolumn_0_0'] == 'Batch'
assert samplesqc.loc[10, 'cpc2_0_0'] == 0.357072
assert samplesqc.loc[2222240, 'canothercolumn_0_0'] == 'Some13'
assert samplesqc.loc[2222240, 'cpc2_0_0'] == -5.46438
# relatedness
table = pd.read_sql("""
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = 'public' AND tablename = '{}'
)""".format('relatedness'), db_engine)
assert table.iloc[0, 0]
samplesqc = pd.read_sql("select * from relatedness order by id1 asc, id2 asc",
create_engine(POSTGRESQL_ENGINE), index_col=['id1', 'id2'])
assert samplesqc is not None
expected_columns = ['chethet_0_0', 'cibs0_0_0', 'ckinship_0_0']
assert len(samplesqc.columns) == len(expected_columns)
assert all(x in samplesqc.columns for x in expected_columns)
assert not samplesqc.empty
assert samplesqc.shape[0] == 6
assert samplesqc.loc[10].loc[10, 'chethet_0_0'] == 0.016
assert samplesqc.loc[10].loc[10, 'cibs0_0_0'] == 0.0148
assert samplesqc.loc[10].loc[10, 'ckinship_0_0'] == 0.1367
assert samplesqc.loc[10].loc[20, 'chethet_0_0'] == 0.316
assert samplesqc.loc[10].loc[20, 'cibs0_0_0'] == 0.9148
assert samplesqc.loc[10].loc[20, 'ckinship_0_0'] == 0.0667
assert samplesqc.loc[20].loc[20, 'chethet_0_0'] == 0.02
assert samplesqc.loc[20].loc[20, 'cibs0_0_0'] == 0.0143
assert samplesqc.loc[20].loc[20, 'ckinship_0_0'] == 0.0801
assert samplesqc.loc[2222240].loc[2222240, 'chethet_0_0'] == 0.038
assert samplesqc.loc[2222240].loc[2222240, 'cibs0_0_0'] == 0.0227
assert samplesqc.loc[2222240].loc[2222240, 'ckinship_0_0'] == 0.0742
assert samplesqc.loc[2222240].loc[10, 'chethet_0_0'] == 0.138
assert samplesqc.loc[2222240].loc[10, 'cibs0_0_0'] == 0.1227
assert samplesqc.loc[2222240].loc[10, 'ckinship_0_0'] == 0.1742
assert samplesqc.loc[2222240].loc[20, 'chethet_0_0'] == 0.238
assert samplesqc.loc[2222240].loc[20, 'cibs0_0_0'] == 0.2227
assert samplesqc.loc[2222240].loc[20, 'ckinship_0_0'] == 0.2742
|
miltondp/ukbrest
|
tests/test_postloader.py
|
Python
|
gpl-3.0
| 38,352
|
"""
N-Queens Problem: Given a chess board having N x N cells, we need to place N queens in such a way that no queen is attacked by any other queen. A queen can attack horizontally, vertically and diagonally.
So initially we have N x N unattacked cells where we need to place N queens. Let's place the first queen at some cell (i, j); the number of unattacked cells is now reduced, and the number of queens still to be placed is N - 1. Place the next queen at some unattacked cell. This again reduces the number of unattacked cells, and the number of queens to be placed decreases by one. Continue doing this as long as the following conditions hold:
The number of unattacked cells is not 0.
The number of queens to be placed is not 0.
"""
__author__ = 'Rohan Khale'
try:
import sys
import os
import time
import logging.config
import pprint
except ImportError as error:
print (error)
sys.exit(-1)
if os.path.exists(os.path.basename(__file__)+ ".log") and os.path.isfile(os.path.basename(__file__)+ ".log"):
os.unlink(os.path.basename(__file__)+ ".log")
#logging.config.fileConfig("logging.conf",defaults={'logfilename': os.path.basename(__file__)+ ".log"})
logging.config.fileConfig(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),"resources","logging.conf"),defaults={'logfilename': os.path.basename(__file__)+ ".log"})
logger = logging.getLogger(__name__)
start_time = time.time()
def is_attacked (x,y,BOARD):
try:
        # Check if any value in the xth row is 1  # row
if 1 in BOARD[x]:
return True
        # Check if any value in the yth column is 1  # column
        for i in range(len(BOARD)):
            if BOARD[i][y] == 1:
                return True
# diagonals
# if x+y = i+j & the value of BOARD[i][j] == 1 then its attacked
# if x-y = i-j & the value of BOARD[i][j] == 1 then its attacked
for i in range(len(BOARD)):
for j in range(len(BOARD[i])):
if x+y == i+j and BOARD[i][j] == 1:
return True
if x-y == i-j and BOARD[i][j] == 1:
return True
return False
    except Exception as error:
        print (error)
        raise
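# Illustrative check of is_attacked (assumed values, not from the original):
#   board = [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
#   is_attacked(1, 1, board)  # True: (1, 1) shares the x - y == i - j diagonal
#   is_attacked(1, 2, board)  # False: different row, column and diagonals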
if __name__ == "__main__":
try:
max_num = int(sys.argv[1])
    except IndexError:
        logger.error("Missing argument. Please provide the board size as an integer, e.g. 'python %s 8'.", os.path.basename(__file__))
        sys.exit(1)
    except ValueError:
        logger.error("%s is not an Integer but %s. Please provide the board size as an integer.", sys.argv[1], type(sys.argv[1]))
        sys.exit(1)
logger.info("The size of the board is %sx%s",max_num,max_num)
board = [[0 for x in range(max_num)] for y in range(max_num)]
solutions = []
num_queens , total_sol = 0,0
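    # Each cell (a, b) is tried in turn as a forced first placement; the inner
    # row-major scan then greedily fills every remaining unattacked cell.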
for a in range(len(board)):
for b in range(len(board[a])):
board[a][b] = 1
num_queens = num_queens + 1
for i in range(len(board)):
for j in range(len(board[i])):
if not is_attacked(i, j, board):
board[i][j] = 1
num_queens = num_queens + 1
logger.info("#"*100)
logger.info("Max Number of Queens that can be placed on a %sx%s board are %s",max_num,max_num,num_queens)
logger.info(pprint.pformat(board))
logger.info("#"*100)
if board not in solutions:
solutions.append(board)
total_sol = total_sol + 1
board = [[0 for x in range(max_num)] for y in range(max_num)]
num_queens = 0
logger.info("Max Number of Queens that can be placed on a %sx%s board are %s",max_num,max_num,total_sol)
logger.info("Total Solutions is %s",len(solutions))
logger.info(pprint.pformat(solutions))
logger.info("Time taken to calculate is %s sec",time.time() - start_time)
|
rkhale/python
|
generic questions/attack_queen.py
|
Python
|
unlicense
| 4,008
|
import logging
import cv2
import numpy as np
from chunk import Chunk
import struct
import math
from time import time
import os.path
from common import SushiError, clip
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_EXTENSIBLE = 0xFFFE
class DownmixedWavFile(object):
def __init__(self, path):
super(DownmixedWavFile, self).__init__()
self._file = None
self._file = open(path, 'rb')
try:
riff = Chunk(self._file, bigendian=False)
if riff.getname() != 'RIFF':
raise SushiError('File does not start with RIFF id')
if riff.read(4) != 'WAVE':
raise SushiError('Not a WAVE file')
fmt_chunk_read = False
            data_chunk_read = False
file_size = os.path.getsize(path)
while True:
try:
chunk = Chunk(self._file, bigendian=False)
except EOFError:
break
if chunk.getname() == 'fmt ':
self._read_fmt_chunk(chunk)
fmt_chunk_read = True
elif chunk.getname() == 'data':
if file_size > 0xFFFFFFFF:
# large broken wav
self.frames_count = (file_size - self._file.tell()) // self.frame_size
else:
self.frames_count = chunk.chunksize // self.frame_size
                    data_chunk_read = True
break
chunk.skip()
            if not fmt_chunk_read or not data_chunk_read:
raise SushiError('Invalid WAV file')
except:
if self._file:
self._file.close()
raise
def __del__(self):
self.close()
def close(self):
if self._file:
self._file.close()
self._file = None
def readframes(self, count):
if not count:
return ''
data = self._file.read(count * self.frame_size)
if self.sample_width == 2:
unpacked = np.fromstring(data, dtype=np.int16)
elif self.sample_width == 3:
bytes = np.ndarray(len(data), 'int8', data)
            unpacked = np.zeros(len(data) // 3, np.int16)
unpacked.view(dtype='int8')[0::2] = bytes[1::3]
unpacked.view(dtype='int8')[1::2] = bytes[2::3]
else:
raise SushiError('Unsupported sample width: {0}'.format(self.sample_width))
unpacked = unpacked.astype('float32')
if self.channels_count == 1:
return unpacked
else:
min_length = len(unpacked) // self.channels_count
real_length = len(unpacked) / float(self.channels_count)
if min_length != real_length:
logging.error("Length of audio channels didn't match. This might result in broken output")
channels = (unpacked[i::self.channels_count] for i in xrange(self.channels_count))
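            # e.g. interleaved stereo [L0, R0, L1, R1] splits into the views
            # [L0, L1] and [R0, R1]; the reduce below sums them sample-wise
            # and the division yields the mono track [(L0+R0)/2, (L1+R1)/2]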
data = reduce(lambda a, b: a[:min_length]+b[:min_length], channels)
data /= float(self.channels_count)
return data
def _read_fmt_chunk(self, chunk):
wFormatTag, self.channels_count, self.framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<HHLLH',
chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM or wFormatTag == WAVE_FORMAT_EXTENSIBLE: # ignore the rest
bits_per_sample = struct.unpack('<H', chunk.read(2))[0]
self.sample_width = (bits_per_sample + 7) // 8
else:
raise SushiError('unknown format: {0}'.format(wFormatTag))
self.frame_size = self.channels_count * self.sample_width
class WavStream(object):
READ_CHUNK_SIZE = 1 # one second, seems to be the fastest
PADDING_SECONDS = 10
def __init__(self, path, sample_rate=12000, sample_type='uint8'):
if sample_type not in ('float32', 'uint8'):
raise SushiError('Unknown sample type of WAV stream, must be uint8 or float32')
file = DownmixedWavFile(path)
total_seconds = file.frames_count / float(file.framerate)
downsample_rate = sample_rate / float(file.framerate)
self.sample_count = math.ceil(total_seconds * sample_rate)
self.sample_rate = sample_rate
# pre-allocating the data array and some place for padding
self.data = np.empty((1, self.PADDING_SECONDS * 2 * file.framerate + self.sample_count), np.float32)
        self.padding_size = self.PADDING_SECONDS * file.framerate
before_read = time()
try:
seconds_read = 0
samples_read = self.padding_size
while seconds_read < total_seconds:
data = file.readframes(int(self.READ_CHUNK_SIZE * file.framerate))
new_length = int(round(len(data) * downsample_rate))
dst_view = self.data[0][samples_read:samples_read+new_length]
if downsample_rate != 1:
data = data.reshape((1, len(data)))
data = cv2.resize(data, (new_length, 1), interpolation=cv2.INTER_NEAREST)[0]
np.copyto(dst_view, data, casting='no')
samples_read += new_length
seconds_read += self.READ_CHUNK_SIZE
# padding the audio from both sides
self.data[0][0:self.padding_size].fill(self.data[0][self.padding_size])
self.data[0][-self.padding_size:].fill(self.data[0][-self.padding_size-1])
# normalizing
# also clipping the stream by 3*median value from both sides of zero
max_value = np.median(self.data[self.data >= 0], overwrite_input=True) * 3
min_value = np.median(self.data[self.data <= 0], overwrite_input=True) * 3
np.clip(self.data, min_value, max_value, out=self.data)
self.data -= min_value
self.data /= (max_value - min_value)
if sample_type == 'uint8':
self.data *= 255.0
self.data += 0.5
self.data = self.data.astype('uint8')
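            # e.g. with min_value=-0.3 and max_value=0.3, a raw sample of 0.0
            # normalizes to 0.5 in float32 mode, or ~128 in uint8 mode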
except Exception as e:
raise SushiError('Error while loading {0}: {1}'.format(path, e))
finally:
file.close()
logging.info('Done reading WAV {0} in {1}s'.format(path, time() - before_read))
@property
def duration_seconds(self):
return self.sample_count / self.sample_rate
def get_substream(self, start, end):
start_off = self._get_sample_for_time(start)
end_off = self._get_sample_for_time(end)
return self.data[:, start_off:end_off]
def _get_sample_for_time(self, timestamp):
# this function gets REAL sample for time, taking padding into account
return int(self.sample_rate * timestamp) + self.padding_size
def find_substream(self, pattern, start_time, end_time):
start_time = clip(start_time, -self.PADDING_SECONDS, self.duration_seconds)
end_time = clip(end_time, 0, self.duration_seconds + self.PADDING_SECONDS)
start_sample = self._get_sample_for_time(start_time)
end_sample = self._get_sample_for_time(end_time) + len(pattern[0])
search_source = self.data[:, start_sample:end_sample]
result = cv2.matchTemplate(search_source, pattern, cv2.TM_SQDIFF_NORMED)
min_idx = result.argmin(axis=1)[0]
return result[0][min_idx], start_time + (min_idx / float(self.sample_rate))
|
alicx1/Sushi
|
wav.py
|
Python
|
mit
| 7,544
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def initial_data(apps, schema_editor):
SH = apps.get_model('ship', 'ShipHull')
SH(name='Frigate', space=25, cost=25).save()
SH(name='Destroyer', space=60, cost=85).save()
SH(name='Cruiser', space=120, cost=300).save()
SH(name='Battleship', space=250, cost=725).save()
SH(name='Titan', space=500, cost=1800).save()
SH(name='Doom Star', space=1200, cost=4800).save()
class Migration(migrations.Migration):
dependencies = [
('ship', '0001_initial'),
]
operations = [
migrations.RunPython(initial_data),
]
|
dwagon/pymoo
|
moo/ship/migrations/0002_populate_shiphulls.py
|
Python
|
gpl-2.0
| 665
|
import os
pythondir = os.path.join(installdir, 'Python') # noqa
path = os.environ.get('PATH', '')
os.environ['PATH'] = os.pathsep.join([pythondir, pkgdir, path]) # noqa
|
davvid/git-cola
|
contrib/win32/pynsist-preamble.py
|
Python
|
gpl-2.0
| 172
|
# -*- coding: Latin-1 -*-
"""
@file CompareVelocitys.py
@author Sascha Krieg
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-04-29
Creates a file with a comparison of velocities between Taxi-FCD and vtypeprobe.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import util.Path as path
import util.Reader as reader
from analysis.Taxi import *
#global vars
fcdEdgeDict={}
vtypeEdgeDict={}
WEE=True
def main():
print "start program"
#getVtypeV()
#getFcdV()
getSpeeds()
writeSelLanesOutput()
writeOutput()
print "end"
def getSpeeds():
"""Reads the speeds from the analysis file"""
taxis=reader.readAnalysisInfo(WEE)
#read speeds for every edge
for taxi in taxis:
for step in taxi.getSteps():
if step.source==SOURCE_SIMFCD:
vtypeEdgeDict.setdefault(step.edge,[]).append(float(step.speed))
elif step.source==SOURCE_FCD:
fcdEdgeDict.setdefault(step.edge,[]).append(float(step.speed))
#calc avg speed for each edge
#print fcdEdgeDict["558300689"]
#print vtypeEdgeDict["558300689"]
for edge in fcdEdgeDict:
fcdEdgeDict[edge]=sum(fcdEdgeDict[edge])/len(fcdEdgeDict[edge])
print len(fcdEdgeDict)
for edge in vtypeEdgeDict:
vtypeEdgeDict[edge]=sum(vtypeEdgeDict[edge])/len(vtypeEdgeDict[edge])
print len(vtypeEdgeDict)
#deprecated
def getVtypeV():
"""Reads the vyteprobe-File and creates a dict of edges with list of velocities.
In a second step generates for each edge a average speed in km/h.
"""
vtypeEdgeDictSpeedList={}
inputFile=open(path.vtypeprobe,'r')
for line in inputFile:
words=line.split('"')
if words[0].find("<vehicle id=")!=-1 and words[3][0]!=':':
vtypeEdgeDictSpeedList.setdefault(words[3][:-2],[]).append(float(words[15])*3.6)
inputFile.close()
for edge in vtypeEdgeDictSpeedList:
vtypeEdgeDict[edge]=sum(vtypeEdgeDictSpeedList[edge])/len(vtypeEdgeDictSpeedList[edge])
print len(vtypeEdgeDict)
#deprecated
def getFcdV():
"""Reads the fcd-File and creates a dict of edges with list of velocities.
In a second step generates for each edge a average speed in km/h.
"""
fcdEdgeDictSpeedList={}
inputFile=open(path.vls,'r')
for line in inputFile:
words=line.split('\t')
fcdEdgeDictSpeedList.setdefault(words[1],[]).append(float(words[2]))
inputFile.close()
for edge in fcdEdgeDictSpeedList:
fcdEdgeDict[edge]=sum(fcdEdgeDictSpeedList[edge])/len(fcdEdgeDictSpeedList[edge])
print len(fcdEdgeDict)
def writeSelLanesOutput():
outputFile=open(path.taxiVsFCDSpeedSelLanes,'w')
i=0
for edge in fcdEdgeDict:#each edge
if edge in vtypeEdgeDict:
            #calc average speed
absDeviation=vtypeEdgeDict[edge]-fcdEdgeDict[edge]
relDeviation=absDeviation/fcdEdgeDict[edge]*100
#write output only if Taxi speed for this edge exists
#print relDeviation
if relDeviation<-50 or relDeviation>50:
i+=1
print "relDev ",relDeviation," edge ",edge
outputFile.write("lane:"+edge+"_0\n")
print "total",i
outputFile.close()
def writeOutput():
"""Writes the collected results to a file."""
outputFile=open(path.taxiVsFCDSpeed,'w')
outputFile.write('edge;fcdSpeed;simSpeed;absDeviation;relDeviation\n')
for edge in fcdEdgeDict:#each edge
if edge in vtypeEdgeDict:
            #calc average speed
absDeviation=vtypeEdgeDict[edge]-fcdEdgeDict[edge]
relDeviation=absDeviation/fcdEdgeDict[edge]*100
#write output only if Taxi speed for this edge exists
outputFile.write('%s;%.2f;%.2f;%.2f;%.2f\n' %(edge,fcdEdgeDict[edge],vtypeEdgeDict[edge],absDeviation,relDeviation))
outputFile.close()
#start the program
main()
|
rudhir-upretee/Sumo17_With_Netsim
|
tools/projects/TaxiFCD_Krieg/src/analysis/CompareVelocitys.py
|
Python
|
gpl-3.0
| 4,238
|
#!/usr/bin/env python2.7
"""
@package: pyHerkulex
@name: herkulex.py
@author: Achu Wilson (achuwilson@gmail.com), Akhil Chandran (akhilchandran.t.r@gmail.com)
@version: 0.1
This is a python library for interfacing the Herkulex range of smart
servo motors manufactured by Dongbu Robotics.
The library was created by Achu Wilson (mailto:achu@sastrarobotics.com)
for the internal projects of Sastra Robotics
This free software is distributed under the GNU General Public License.
See http://www.gnu.org/licenses/gpl.html for details.
For usage of this code for commercial purposes contact Sastra Robotics
India Pvt. Ltd. (mailto:contact@sastrarobotics.com)
"""
import time
try:
# PySerial Module
import serial
except:
raise ImportError("couldnt find pySerial")
# Commands
EEP_WRITE_REQ = 0x01
EEP_READ_REQ = 0x02
RAM_WRITE_REQ = 0x03
RAM_READ_REQ = 0x04
I_JOG_REQ = 0x05
S_JOG_REQ = 0x06
STAT_REQ = 0x07
ROLLBACK_REQ = 0x08
REBOOT_REQ = 0x09
EEP_WRITE_ACK = 0x41
EEP_READ_ACK = 0x42
RAM_WRITE_ACK = 0x43
RAM_READ_ACK = 0x44
I_JOG_ACK = 0x45
S_JOG_ACK = 0x46
STAT_ACK = 0x47
ROLLBACK_ACK = 0x48
REBOOT_ACK = 0x49
#Addresses
MODEL_NO1_EEP = 0
MODEL_NO2_EEP = 1
VERSION1_EEP = 2
VERSION2_EEP = 3
BAUD_RATE_EEP = 4
SERVO_ID_EEP = 6
SERVO_ID_RAM = 0
ACK_POLICY_EEP = 7
ACK_POLICY_RAM = 1
ALARM_LED_POLICY_EEP = 8
ALARM_LED_POLICY_RAM = 2
TORQUE_POLICY_EEP = 9
TORQUE_POLICY_RAM = 3
MAX_TEMP_EEP = 11
MAX_TEMP_RAM = 5
MIN_VOLTAGE_EEP = 12
MIN_VOLTAGE_RAM = 6
MAX_VOLTAGE_EEP = 13
MAX_VOLTAGE_RAM = 7
ACCELERATION_RATIO_EEP = 14
ACCELERATION_RATIO_RAM = 8
MAX_ACCELERATION_TIME_EEP = 15
MAX_ACCELERATION_TIME_RAM = 9
DEAD_ZONE_EEP = 16
DEAD_ZONE_RAM = 10
SATURATOR_OFFSET_EEP = 17
SATURATOR_OFFSET_RAM = 11
SATURATOR_SLOPE_EEP = 18
SATURATOR_SLOPE_RAM = 12
PWM_OFFSET_EEP = 20
PWM_OFFSET_RAM = 14
MIN_PWM_EEP = 21
MIN_PWM_RAM = 15
MAX_PWM_EEP = 22
MAX_PWM_RAM = 16
OVERLOAD_PWM_THRESHOLD_EEP = 24
OVERLOAD_PWM_THRESHOLD_RAM = 18
MIN_POSITION_EEP = 26
MIN_POSITION_RAM = 20
MAX_POSITION_EEP = 28
MAX_POSITION_RAM = 22
POSITION_KP_EEP = 30
POSITION_KP_RAM = 24
POSITION_KD_EEP = 32
POSITION_KD_RAM = 26
POSITION_KI_EEP = 34
POSITION_KI_RAM =28
POSITION_FEEDFORWARD_GAIN1_EEP = 36
POSITION_FEEDFORWARD_GAIN1_RAM = 30
POSITION_FEEDFORWARD_GAIN2_EEP = 38
POSITION_FEEDFORWARD_GAIN2_RAM = 32
VELOCITY_KP_EEP = 40
VELOCITY_KP_RAM = 34
VELOCITY_KI_EEP = 42
VELOCITY_KI_RAM = 36
LED_BLINK_PERIOD_EEP = 44
LED_BLINK_PERIOD_RAM = 38
ADC_FAULT_CHECK_PERIOD_EEP = 45
ADC_FAULT_CHECK_PERIOD_RAM = 39
PACKET_GARBAGE_CHECK_PERIOD_EEP = 46
PACKET_GARBAGE_CHECK_PERIOD_RAM = 40
STOP_DETECTION_PERIOD_EEP = 47
STOP_DETECTION_PERIOD_RAM = 41
OVERLOAD_DETECTION_PERIOD_EEP = 48
OVERLOAD_DETECTION_PERIOD_RAM = 42
STOP_THRESHOLD_EEP = 49
STOP_THRESHOLD_RAM = 43
INPOSITION_MARGIN_EEP = 50
INPOSITION_MARGIN_RAM = 44
CALIBRATION_DIFF_LOW_EEP = 52
CALIBRATION_DIFF_LOW_RAM = 46
CALIBRATION_DIFF_UP_EEP = 53
CALIBRATION_DIFF_UP_RAM = 47
STATUS_ERROR_RAM = 48
STATUS_DETAIL_RAM = 49
AUX1_RAM = 50
TORQUE_CONTROL_RAM = 52
LED_CONTROL_RAM = 53
VOLTAGE_RAM = 54
TEMPERATURE_RAM = 55
CURRENT_CONTROL_MODE_RAM = 56
TICK_RAM = 57
CALIBRATED_POSITION_RAM = 58
ABSOLUTE_POSITION_RAM = 60
DIFFERENTIAL_POSITION_RAM = 62
PWM_RAM = 64
ABSOLUTE_SECOND_POSITION_RAM = 66
ABSOLUTE_GOAL_POSITION_RAM = 68
ABSOLUTE_DESIRED_TRAJECTORY_POSITION = 70
DESIRED_VELOCITY_RAM = 72
BYTE1 = 0x01
BYTE2 = 0x02
BROADCAST_ID = 0xFE
SERPORT = None
def connect(portname, baudrate):
""" Connect to the Herkulex bus
    Connect to the serial port to which the Herkulex servos are attached
Args:
portname (str): The serial port name
baudrate (int): The serial port baudrate
Raises:
SerialException: Error occured while opening serial port
"""
global SERPORT
try:
SERPORT = serial.Serial(portname, baudrate, timeout = 0.1)
except:
raise HerkulexError("could not open the serial port")
def close():
""" Close the Serial port
Properly close the serial port before exiting the application
Raises:
SerialException: Error occured while closing serial port
"""
try:
SERPORT.close()
except:
raise HerkulexError("could not close the serial port")
def checksum1(data, stringlength):
""" Calculate Checksum 1
    Calculate the checksum 1 required for the herkulex data packet
Args:
data (list): the data of which checksum is to be calculated
stringlength (int): the length of the data
Returns:
int: The calculated checksum 1
"""
value_buffer = 0
for count in range(0, stringlength):
value_buffer = value_buffer ^ data[count]
return value_buffer&0xFE
def checksum2(data):
""" Calculate Checksum 2
    Calculate the checksum 2 required for the herkulex data packet
Args:
data (int): the data of which checksum is to be calculated
Returns:
int: The calculated checksum 2
"""
return (~data)&0xFE
def send_data(data):
""" Send data to herkulex
Paketize & write the packet to serial port
Args:
data (list): the data to be sent
Raises:
SerialException: Error occured while opening serial port
"""
datalength = len(data)
csm1 = checksum1(data, datalength)
csm2 = checksum2(csm1)
data.insert(0, 0xFF)
data.insert(1, 0xFF)
data.insert(5, csm1)
data.insert(6, csm2)
stringtosend = ""
for i in range(len(data)):
byteformat = '%02X' % data[i]
stringtosend = stringtosend + "\\x" + byteformat
try:
SERPORT.write(stringtosend.decode('string-escape'))
#print stringtosend
except:
raise HerkulexError("could not communicate with motors")
def clear_errors():
""" Clears the errors register of all Herkulex servos
Args:
none
"""
data = []
data.append(0x0B)
data.append(BROADCAST_ID)
data.append(RAM_WRITE_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE2)
data.append(0x00)
data.append(0x00)
send_data(data)
def scale(input_value, input_min, input_max, out_min, out_max):
""" scale a value from one range to another
"""
# Figure out how 'wide' each range is
input_span = input_max - input_min
output_span = out_max - out_min
# Convert the left range into a 0-1 range (float)
valuescaled = float(input_value - input_min) / float(input_span)
# Convert the 0-1 range into a value in the right range.
return out_min + (valuescaled * output_span)
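# e.g. scale(512, 0, 1023, -150, 150) ~= 0.15, mapping a mid-range raw
# position to roughly the centre of the +/-150 degree span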
def scan_servos():
"""Scan for the herkulex servos connected
This function will scan for all the herkulex servos connected
to the bus.
Args:
none
Returns:
list: a list of tuples of the form [(id, model)]
"""
servos = []
for servo_id in range(0x00, 0xFE):
model = get_model(servo_id)
if model:
servos += [(servo_id, model)]
return servos
def get_model(servoid):
""" Get the servo model
    This function gets the model of the herkulex servo, provided its id
Args:
servoid(int): the id of the servo
Returns:
int: an integer corresponding to the model number
0x06 for DRS-602
0x04 for DRS-402
0x02 for DRS-202
"""
data = []
data.append(0x09)
data.append(servoid)
data.append(EEP_READ_REQ)
data.append(MODEL_NO1_EEP)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
class servo:
""" The servo class
This class handles the interface to the herkulex smart servos
"""
def __init__(self, servoid):
""" servo class initialization
Args:
servoid(int): the id of the servo
"""
self.servoid = servoid
self.servomodel = get_model(servoid)
def get_model(self):
""" Get the servo model
        This function gets the model of the herkulex servo, provided its id
Args:
none
Returns:
int: an integer corresponding to the model number
0x06 for DRS-602
0x04 for DRS-402
0x02 for DRS-202
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(EEP_READ_REQ)
data.append(MODEL_NO1_EEP)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
def get_servo_status(self):
""" Get the error status of servo
This function gets the error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
def get_servo_status_detail(self):
""" Get the detailed error status of servo
This function gets the detailed error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_DETAIL_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except HerkulexError:
raise HerkulexError("could not communicate with motors")
def set_led(self, colorcode):
""" Set the LED Color of Herkulex
Args:
colorcode (int): The code for colors
(0x00-OFF
0x02-BLUE
0x03-CYAN
0x04-RED
0x05-ORANGE
0x06-VIOLET
             0x07-WHITE)
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(LED_CONTROL_RAM)
data.append(0x01)
data.append(colorcode)
send_data(data)
def brake_on(self):
""" Set the Brakes of Herkulex
In braked mode, position control and velocity control
will not work, enable torque before that
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x40)
send_data(data)
def torque_off(self):
""" Set the torques of Herkulex to zero
In this mode, position control and velocity control
will not work, enable torque before that. Also the
servo shaft is freely movable
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x00)
send_data(data)
def torque_on(self):
""" Enable the torques of Herkulex
In this mode, position control and velocity control
will work.
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x60)
send_data(data)
def get_torque_state(self):
""" get the torque state of motor
Returns:
bool: True if torque is enabled, else False
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return bool(ord(rxdata[9]))
except HerkulexError:
raise HerkulexError("could not communicate with motors")
def set_servo_position(self, goalposition, goaltime, led):
""" Set the position of Herkulex
Enable torque using torque_on function before calling this
Args:
goalposition (int): The desired position, min-0 & max-1023
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
goalposition_msb = int(goalposition) >> 8
goalposition_lsb = int(goalposition) & 0xff
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalposition_lsb)
data.append(goalposition_msb)
data.append(led)
data.append(self.servoid)
data.append(goaltime)
send_data(data)
def get_servo_position(self):
""" Gets the current position of Herkulex
Args:
none
Returns:
int: position of the servo- 0 to 1023
Raises:
SerialException: Error occured while opening serial port
"""
#global SERPORT
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(CALIBRATED_POSITION_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if (self.servomodel==0x06) or (self.servomodel == 0x04):
return ((ord(rxdata[10])&0xff)<<8) | (ord(rxdata[9])&0xFF)
else:
#print ord(rxdata[9]),ord(rxdata[10])
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
except HerkulexError:
print "Could not read from the servos. Check connection"
def get_servo_temperature(self):
""" Gets the current temperature of Herkulex
Args:
none
Returns:
int: the current temperature register of Herkulex
Raises:
SerialException: Error occured while opening serial port
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(TEMPERATURE_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return ord(rxdata[9])
except HerkulexError:
raise HerkulexError("Could not communicate with motors")
def get_servo_torque(self):
""" Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
SerialException: Error occured while opening serial port
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(PWM_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if ord(rxdata[10])<=127:
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
else:
return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF
except HerkulexError:
raise HerkulexError("could not communicate with motors")
def set_servo_speed(self, goalspeed, led):
""" Set the Herkulex in continuous rotation mode
Args:
goalspeed (int): the speed , range -1023 to 1023
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
if goalspeed>0 :
goalspeed_msb = (int(goalspeed)& 0xFF00) >> 8
goalspeed_lsb = int(goalspeed) & 0xff
elif goalspeed<0 :
goalspeed_msb = 64+(255- ((int(goalspeed)& 0xFF00) >> 8))
goalspeed_lsb = (abs(goalspeed) & 0xff)
#print goalspeed_msb,goalspeed_lsb
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalspeed_lsb)
data.append(goalspeed_msb)
data.append(0x02|led)
data.append(self.servoid)
data.append(0x00)
send_data(data)
def set_position_p(self, pvalue):
""" Set the P gain of the position PID
Args:
pvalue (int): P value
"""
pvalue_msb = int(pvalue) >> 8
pvalue_lsb = int(pvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KP_RAM)
data.append(BYTE2)
data.append( pvalue_lsb)
data.append( pvalue_msb)
send_data(data)
def set_position_i(self, ivalue):
""" Set the I gain of the position PID
Args:
ivalue (int): I value
"""
ivalue_msb = int(ivalue) >> 8
ivalue_lsb = int(ivalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
data.append(ivalue_lsb)
data.append(ivalue_msb)
send_data(data)
def set_position_d(self, dvalue):
""" Set the D gain of the PID
Args:
dvalue (int): D value
"""
dvalue_msb = int(dvalue) >> 8
dvalue_lsb = int(dvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
data.append(dvalue_lsb)
data.append(dvalue_msb)
send_data(data)
def get_position_p(self):
""" Get the P value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KP_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("could not communicate with motors")
def get_position_i(self):
""" Get the I value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("Could not read from motors")
def get_position_d(self):
""" Get the D value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("could not communicate with motors")
def save_pid_eeprom(self):
""" saves the PID values from RAM to EEPROM
"""
pval = self.get_position_p()
ival = self.get_position_i()
dval = self.get_position_d()
#write P value
pvalue_msb = int(pval) >> 8
pvalue_lsb = int(pval) & 0xff
data_p = []
data_p.append(0x0B)
data_p.append(self.servoid)
data_p.append(EEP_WRITE_REQ)
data_p.append(POSITION_KP_EEP)
data_p.append(BYTE2)
data_p.append( pvalue_lsb)
data_p.append( pvalue_msb)
send_data(data_p)
# write I value
ivalue_msb = int(ival) >> 8
ivalue_lsb = int(ival) & 0xff
data_i = []
data_i.append(0x0B)
data_i.append(self.servoid)
data_i.append(EEP_WRITE_REQ)
data_i.append(POSITION_KI_EEP)
data_i.append(BYTE2)
data_i.append( ivalue_lsb)
data_i.append( ivalue_msb)
send_data(data_i)
# write D value
dvalue_msb = int(dval) >> 8
dvalue_lsb = int(dval) & 0xff
data_d = []
data_d.append(0x0B)
data_d.append(self.servoid)
data_d.append(EEP_WRITE_REQ)
data_d.append(POSITION_KD_EEP)
data_d.append(BYTE2)
data_d.append( dvalue_lsb)
data_d.append( dvalue_msb)
send_data(data_d)
def set_servo_angle(self, goalangle, goaltime, led):
""" Sets the servo angle (in degrees)
Enable torque using torque_on function before calling this
Args:
goalangle (int): The desired angle in degrees, range -150 to 150
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
if (self.servomodel==0x06) or (self.servomodel == 0x04):
goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)
else:
goalposition = scale(goalangle, -150, 150, 21, 1002)
self.set_servo_position(goalposition, goaltime, led)
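        # e.g. on the 0-1023 position models, goalangle=0 maps to raw
        # position ~511 and goalangle=150 to 1002 via scale()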
def get_servo_angle(self):
""" Gets the current angle of the servo in degrees
Args:
none
Returns:
int : the current servo angle
"""
servoposition = self.get_servo_position()
if (self.servomodel==0x06) or (self.servomodel == 0x04):
return scale(servoposition, 10627, 22129, -159.9, 159.6)
else:
return scale(servoposition, 21, 1002, -150, 150)
class HerkulexError(Exception):
""" Class to handle sservo errors
"""
def __init__(self, message):
super(HerkulexError, self).__init__(message)
self.message = message
|
seiji56/rmaze-2016
|
logic_code/last_ver/sim/herkulex.py
|
Python
|
gpl-3.0
| 23,289
|
# -*- coding: utf-8 -*-
"""
jinja.nodes
~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: 2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import copy
def get_nodes(nodetype, tree, exclude_root=True):
"""
Get all nodes from nodetype in the tree excluding the
node passed if `exclude_root` is `True` (default).
"""
if exclude_root:
todo = tree.get_child_nodes()
else:
todo = [tree]
while todo:
node = todo.pop()
if node.__class__ is nodetype:
yield node
todo.extend(node.get_child_nodes())
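# e.g. list(get_nodes(NameExpression, tree)) collects every NameExpression
# node below `tree`, excluding `tree` itself unless exclude_root is False.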
class NotPossible(NotImplementedError):
"""
If a given node cannot do something.
"""
class Node(object):
"""
Jinja node.
"""
def __init__(self, lineno=None, filename=None):
self.lineno = lineno
self.filename = filename
def get_items(self):
return []
def get_child_nodes(self):
return [x for x in self.get_items() if isinstance(x, Node)]
def allows_assignments(self):
return False
def __repr__(self):
return 'Node()'
class Text(Node):
"""
Node that represents normal text.
"""
def __init__(self, text, variables, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.text = text
self.variables = variables
def get_items(self):
return [self.text] + list(self.variables)
def __repr__(self):
return 'Text(%r, %r)' % (
self.text,
self.variables
)
class NodeList(list, Node):
"""
A node that stores multiple childnodes.
"""
def __init__(self, data, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
list.__init__(self, data)
def get_items(self):
return list(self)
def __repr__(self):
return 'NodeList(%s)' % list.__repr__(self)
class Template(Node):
"""
Node that represents a template.
"""
def __init__(self, extends, body, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.extends = extends
self.body = body
def get_items(self):
return [self.extends, self.body]
def __repr__(self):
return 'Template(%r, %r)' % (
self.extends,
self.body
)
class ForLoop(Node):
"""
A node that represents a for loop
"""
def __init__(self, item, seq, body, else_, recursive, lineno=None,
filename=None):
Node.__init__(self, lineno, filename)
self.item = item
self.seq = seq
self.body = body
self.else_ = else_
self.recursive = recursive
def get_items(self):
return [self.item, self.seq, self.body, self.else_, self.recursive]
def __repr__(self):
return 'ForLoop(%r, %r, %r, %r, %r)' % (
self.item,
self.seq,
self.body,
self.else_,
self.recursive
)
class IfCondition(Node):
"""
A node that represents an if condition.
"""
def __init__(self, tests, else_, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.tests = tests
self.else_ = else_
def get_items(self):
result = []
for test in self.tests:
result.extend(test)
result.append(self.else_)
return result
def __repr__(self):
return 'IfCondition(%r, %r)' % (
self.tests,
self.else_
)
class Cycle(Node):
"""
A node that represents the cycle statement.
"""
def __init__(self, seq, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.seq = seq
def get_items(self):
return [self.seq]
def __repr__(self):
return 'Cycle(%r)' % (self.seq,)
class Print(Node):
"""
A node that represents variable tags and print calls.
"""
def __init__(self, expr, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.expr = expr
def get_items(self):
return [self.expr]
def __repr__(self):
return 'Print(%r)' % (self.expr,)
class Macro(Node):
"""
A node that represents a macro.
"""
def __init__(self, name, arguments, body, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.name = name
self.arguments = arguments
self.body = body
def get_items(self):
return [self.name] + list(chain(*self.arguments)) + [self.body]
def __repr__(self):
return 'Macro(%r, %r, %r)' % (
self.name,
self.arguments,
self.body
)
class Call(Node):
"""
    A node that represents an extended macro call.
"""
def __init__(self, expr, body, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.expr = expr
self.body = body
def get_items(self):
return [self.expr, self.body]
def __repr__(self):
return 'Call(%r, %r)' % (
self.expr,
self.body
)
class Set(Node):
"""
Allows defining own variables.
"""
def __init__(self, name, expr, scope_local, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.name = name
self.expr = expr
self.scope_local = scope_local
def get_items(self):
return [self.name, self.expr, self.scope_local]
def __repr__(self):
return 'Set(%r, %r, %r)' % (
self.name,
self.expr,
self.scope_local
)
class Filter(Node):
"""
Node for filter sections.
"""
def __init__(self, body, filters, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.body = body
self.filters = filters
def get_items(self):
return [self.body] + list(self.filters)
def __repr__(self):
return 'Filter(%r, %r)' % (
self.body,
self.filters
)
class Block(Node):
"""
A node that represents a block.
"""
def __init__(self, name, body, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.name = name
self.body = body
def replace(self, node):
"""
Replace the current data with the copied data of another block
node.
"""
assert node.__class__ is Block
self.lineno = node.lineno
self.filename = node.filename
self.name = node.name
self.body = copy(node.body)
def clone(self):
"""
Create an independent clone of this node.
"""
return copy(self)
def get_items(self):
return [self.name, self.body]
def __repr__(self):
return 'Block(%r, %r)' % (
self.name,
self.body
)
class Include(Node):
"""
A node that represents the include tag.
"""
def __init__(self, template, lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.template = template
def get_items(self):
return [self.template]
def __repr__(self):
return 'Include(%r)' % (
self.template
)
class Trans(Node):
"""
A node for translatable sections.
"""
def __init__(self, singular, plural, indicator, replacements,
lineno=None, filename=None):
Node.__init__(self, lineno, filename)
self.singular = singular
self.plural = plural
self.indicator = indicator
self.replacements = replacements
def get_items(self):
rv = [self.singular, self.plural, self.indicator]
if self.replacements:
rv.extend(self.replacements.values())
rv.extend(self.replacements.keys())
return rv
def __repr__(self):
return 'Trans(%r, %r, %r, %r)' % (
self.singular,
self.plural,
self.indicator,
self.replacements
)
class Expression(Node):
"""
Baseclass for all expressions.
"""
class BinaryExpression(Expression):
"""
Baseclass for all binary expressions.
"""
def __init__(self, left, right, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.left = left
self.right = right
def get_items(self):
return [self.left, self.right]
def __repr__(self):
return '%s(%r, %r)' % (
self.__class__.__name__,
self.left,
self.right
)
class UnaryExpression(Expression):
"""
Baseclass for all unary expressions.
"""
def __init__(self, node, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.node = node
def get_items(self):
return [self.node]
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self.node
)
class ConstantExpression(Expression):
"""
    any constant such as {{ "foo" }}
"""
def __init__(self, value, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.value = value
def get_items(self):
return [self.value]
def __repr__(self):
return 'ConstantExpression(%r)' % (self.value,)
class UndefinedExpression(Expression):
"""
represents the special 'undefined' value.
"""
def __repr__(self):
return 'UndefinedExpression()'
class RegexExpression(Expression):
"""
represents the regular expression literal.
"""
def __init__(self, value, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.value = value
def get_items(self):
return [self.value]
def __repr__(self):
return 'RegexExpression(%r)' % (self.value,)
class NameExpression(Expression):
"""
any name such as {{ foo }}
"""
def __init__(self, name, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.name = name
def get_items(self):
return [self.name]
def allows_assignments(self):
return self.name != '_'
def __repr__(self):
return 'NameExpression(%r)' % self.name
class ListExpression(Expression):
"""
any list literal such as {{ [1, 2, 3] }}
"""
def __init__(self, items, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.items = items
def get_items(self):
return list(self.items)
def __repr__(self):
return 'ListExpression(%r)' % (self.items,)
class DictExpression(Expression):
"""
any dict literal such as {{ {1: 2, 3: 4} }}
"""
def __init__(self, items, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.items = items
def get_items(self):
return list(chain(*self.items))
def __repr__(self):
return 'DictExpression(%r)' % (self.items,)
class SetExpression(Expression):
"""
any set literal such as {{ @(1, 2, 3) }}
"""
def __init__(self, items, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.items = items
def get_items(self):
return self.items[:]
def __repr__(self):
return 'SetExpression(%r)' % (self.items,)
class ConditionalExpression(Expression):
"""
{{ foo if bar else baz }}
"""
def __init__(self, test, expr1, expr2, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.test = test
self.expr1 = expr1
self.expr2 = expr2
def get_items(self):
return [self.test, self.expr1, self.expr2]
def __repr__(self):
        return 'ConditionalExpression(%r, %r, %r)' % (
self.test,
self.expr1,
self.expr2
)
class FilterExpression(Expression):
"""
{{ foo|bar|baz }}
"""
def __init__(self, node, filters, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.node = node
self.filters = filters
def get_items(self):
result = [self.node]
for filter, args in self.filters:
result.append(filter)
result.extend(args)
return result
def __repr__(self):
return 'FilterExpression(%r, %r)' % (
self.node,
self.filters
)
class TestExpression(Expression):
"""
{{ foo is lower }}
"""
def __init__(self, node, name, args, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.node = node
self.name = name
self.args = args
def get_items(self):
return [self.node, self.name] + list(self.args)
def __repr__(self):
return 'TestExpression(%r, %r, %r)' % (
self.node,
self.name,
self.args
)
class CallExpression(Expression):
"""
{{ foo(bar) }}
"""
def __init__(self, node, args, kwargs, dyn_args, dyn_kwargs,
lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.node = node
self.args = args
self.kwargs = kwargs
self.dyn_args = dyn_args
self.dyn_kwargs = dyn_kwargs
def get_items(self):
return [self.node, self.args, self.kwargs, self.dyn_args,
self.dyn_kwargs]
def __repr__(self):
return 'CallExpression(%r, %r, %r, %r, %r)' % (
self.node,
self.args,
self.kwargs,
self.dyn_args,
self.dyn_kwargs
)
class SubscriptExpression(Expression):
"""
{{ foo.bar }} and {{ foo['bar'] }} etc.
"""
def __init__(self, node, arg, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.node = node
self.arg = arg
def get_items(self):
return [self.node, self.arg]
def __repr__(self):
return 'SubscriptExpression(%r, %r)' % (
self.node,
self.arg
)
class SliceExpression(Expression):
"""
1:2:3 etc.
"""
def __init__(self, start, stop, step, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.start = start
self.stop = stop
self.step = step
def get_items(self):
return [self.start, self.stop, self.step]
def __repr__(self):
return 'SliceExpression(%r, %r, %r)' % (
self.start,
self.stop,
self.step
)
class TupleExpression(Expression):
"""
For loop unpacking and some other things like multiple arguments
for subscripts.
"""
def __init__(self, items, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.items = items
def get_items(self):
return list(self.items)
def allows_assignments(self):
for item in self.items:
if not item.allows_assignments():
return False
return True
def __repr__(self):
return 'TupleExpression(%r)' % (self.items,)
class ConcatExpression(Expression):
"""
    For {{ foo ~ bar }}. Concatenation gets a dedicated node (rather than
    reusing AddExpression) mainly because unicode conversion of the left and
    right expressions takes place here and is better optimized that way.
"""
def __init__(self, args, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.args = args
def get_items(self):
return list(self.args)
def __repr__(self):
return 'ConcatExpression(%r)' % (self.args,)
class CompareExpression(Expression):
"""
{{ foo == bar }}, {{ foo >= bar }} etc.
"""
def __init__(self, expr, ops, lineno=None, filename=None):
Expression.__init__(self, lineno, filename)
self.expr = expr
self.ops = ops
def get_items(self):
return [self.expr] + list(chain(*self.ops))
def __repr__(self):
return 'CompareExpression(%r, %r)' % (
self.expr,
self.ops
)
class MulExpression(BinaryExpression):
"""
{{ foo * bar }}
"""
class DivExpression(BinaryExpression):
"""
{{ foo / bar }}
"""
class FloorDivExpression(BinaryExpression):
"""
{{ foo // bar }}
"""
class AddExpression(BinaryExpression):
"""
{{ foo + bar }}
"""
class SubExpression(BinaryExpression):
"""
{{ foo - bar }}
"""
class ModExpression(BinaryExpression):
"""
{{ foo % bar }}
"""
class PowExpression(BinaryExpression):
"""
{{ foo ** bar }}
"""
class AndExpression(BinaryExpression):
"""
{{ foo and bar }}
"""
class OrExpression(BinaryExpression):
"""
{{ foo or bar }}
"""
class NotExpression(UnaryExpression):
"""
{{ not foo }}
"""
class NegExpression(UnaryExpression):
"""
{{ -foo }}
"""
class PosExpression(UnaryExpression):
"""
{{ +foo }}
"""
|
dcramer/jinja1-djangosupport
|
jinja/nodes.py
|
Python
|
bsd-3-clause
| 17,573
|
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.sparse as sps
from ._numdiff import approx_derivative, group_columns
from ._hessian_update_strategy import HessianUpdateStrategy
from scipy.sparse.linalg import LinearOperator
FD_METHODS = ('2-point', '3-point', 'cs')
class ScalarFunction(object):
"""Scalar function and its derivatives.
This class defines a scalar function F: R^n->R and methods for
computing or approximating its first and second derivatives.
Notes
-----
This class implements a memoization logic. There are methods `fun`,
    `grad`, `hess` and corresponding attributes `f`, `g` and `H`. The following
things should be considered:
1. Use only public methods `fun`, `grad` and `hess`.
2. After one of the methods is called, the corresponding attribute
will be set. However, a subsequent call with a different argument
of *any* of the methods may overwrite the attribute.
"""
def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
finite_diff_bounds):
if not callable(grad) and grad not in FD_METHODS:
raise ValueError("`grad` must be either callable or one of {}."
.format(FD_METHODS))
if not (callable(hess) or hess in FD_METHODS
or isinstance(hess, HessianUpdateStrategy)):
            raise ValueError("`hess` must be either callable, "
"HessianUpdateStrategy or one of {}."
.format(FD_METHODS))
if grad in FD_METHODS and hess in FD_METHODS:
raise ValueError("Whenever the gradient is estimated via "
"finite-differences, we require the Hessian "
"to be estimated using one of the "
"quasi-Newton strategies.")
self.x = np.atleast_1d(x0).astype(float)
self.n = self.x.size
self.nfev = 0
self.ngev = 0
self.nhev = 0
self.f_updated = False
self.g_updated = False
self.H_updated = False
finite_diff_options = {}
if grad in FD_METHODS:
finite_diff_options["method"] = grad
finite_diff_options["rel_step"] = finite_diff_rel_step
finite_diff_options["bounds"] = finite_diff_bounds
if hess in FD_METHODS:
finite_diff_options["method"] = hess
finite_diff_options["rel_step"] = finite_diff_rel_step
finite_diff_options["as_linear_operator"] = True
# Function evaluation
def fun_wrapped(x):
self.nfev += 1
return fun(x, *args)
def update_fun():
self.f = fun_wrapped(self.x)
self._update_fun_impl = update_fun
self._update_fun()
# Gradient evaluation
if callable(grad):
def grad_wrapped(x):
self.ngev += 1
return np.atleast_1d(grad(x, *args))
def update_grad():
self.g = grad_wrapped(self.x)
elif grad in FD_METHODS:
def update_grad():
self._update_fun()
self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
**finite_diff_options)
self._update_grad_impl = update_grad
self._update_grad()
# Hessian Evaluation
if callable(hess):
self.H = hess(x0, *args)
self.H_updated = True
self.nhev += 1
if sps.issparse(self.H):
def hess_wrapped(x):
self.nhev += 1
return sps.csr_matrix(hess(x, *args))
self.H = sps.csr_matrix(self.H)
elif isinstance(self.H, LinearOperator):
def hess_wrapped(x):
self.nhev += 1
return hess(x, *args)
else:
def hess_wrapped(x):
self.nhev += 1
return np.atleast_2d(np.asarray(hess(x, *args)))
self.H = np.atleast_2d(np.asarray(self.H))
def update_hess():
self.H = hess_wrapped(self.x)
elif hess in FD_METHODS:
def update_hess():
self._update_grad()
self.H = approx_derivative(grad_wrapped, self.x, f0=self.g,
**finite_diff_options)
return self.H
update_hess()
self.H_updated = True
elif isinstance(hess, HessianUpdateStrategy):
self.H = hess
self.H.initialize(self.n, 'hess')
self.H_updated = True
self.x_prev = None
self.g_prev = None
def update_hess():
self._update_grad()
self.H.update(self.x - self.x_prev, self.g - self.g_prev)
self._update_hess_impl = update_hess
if isinstance(hess, HessianUpdateStrategy):
def update_x(x):
self._update_grad()
self.x_prev = self.x
self.g_prev = self.g
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
self.g_updated = False
self.H_updated = False
self._update_hess()
else:
def update_x(x):
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
self.g_updated = False
self.H_updated = False
self._update_x_impl = update_x
def _update_fun(self):
if not self.f_updated:
self._update_fun_impl()
self.f_updated = True
def _update_grad(self):
if not self.g_updated:
self._update_grad_impl()
self.g_updated = True
def _update_hess(self):
if not self.H_updated:
self._update_hess_impl()
self.H_updated = True
def fun(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_fun()
return self.f
def grad(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_grad()
return self.g
def hess(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_hess()
return self.H
def fun_and_grad(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
self._update_fun()
self._update_grad()
return self.f, self.g
class VectorFunction(object):
"""Vector function and its derivatives.
This class defines a vector function F: R^n->R^m and methods for
computing or approximating its first and second derivatives.
Notes
-----
This class implements a memoization logic. There are methods `fun`,
    `jac`, `hess` and corresponding attributes `f`, `J` and `H`. The following
things should be considered:
1. Use only public methods `fun`, `jac` and `hess`.
2. After one of the methods is called, the corresponding attribute
will be set. However, a subsequent call with a different argument
of *any* of the methods may overwrite the attribute.
"""
def __init__(self, fun, x0, jac, hess,
finite_diff_rel_step, finite_diff_jac_sparsity,
finite_diff_bounds, sparse_jacobian):
if not callable(jac) and jac not in FD_METHODS:
raise ValueError("`jac` must be either callable or one of {}."
.format(FD_METHODS))
if not (callable(hess) or hess in FD_METHODS
or isinstance(hess, HessianUpdateStrategy)):
            raise ValueError("`hess` must be either callable, "
"HessianUpdateStrategy or one of {}."
.format(FD_METHODS))
if jac in FD_METHODS and hess in FD_METHODS:
raise ValueError("Whenever the Jacobian is estimated via "
"finite-differences, we require the Hessian to "
"be estimated using one of the quasi-Newton "
"strategies.")
self.x = np.atleast_1d(x0).astype(float)
self.n = self.x.size
self.nfev = 0
self.njev = 0
self.nhev = 0
self.f_updated = False
self.J_updated = False
self.H_updated = False
finite_diff_options = {}
if jac in FD_METHODS:
finite_diff_options["method"] = jac
finite_diff_options["rel_step"] = finite_diff_rel_step
if finite_diff_jac_sparsity is not None:
sparsity_groups = group_columns(finite_diff_jac_sparsity)
finite_diff_options["sparsity"] = (finite_diff_jac_sparsity,
sparsity_groups)
finite_diff_options["bounds"] = finite_diff_bounds
self.x_diff = np.copy(self.x)
if hess in FD_METHODS:
finite_diff_options["method"] = hess
finite_diff_options["rel_step"] = finite_diff_rel_step
finite_diff_options["as_linear_operator"] = True
self.x_diff = np.copy(self.x)
# Function evaluation
def fun_wrapped(x):
self.nfev += 1
return np.atleast_1d(fun(x))
def update_fun():
self.f = fun_wrapped(self.x)
self._update_fun_impl = update_fun
update_fun()
self.v = np.zeros_like(self.f)
self.m = self.v.size
# Jacobian Evaluation
if callable(jac):
self.J = jac(self.x)
self.J_updated = True
self.njev += 1
if (sparse_jacobian or
sparse_jacobian is None and sps.issparse(self.J)):
def jac_wrapped(x):
self.njev += 1
return sps.csr_matrix(jac(x))
self.J = sps.csr_matrix(self.J)
self.sparse_jacobian = True
elif sps.issparse(self.J):
def jac_wrapped(x):
self.njev += 1
return jac(x).toarray()
self.J = self.J.toarray()
self.sparse_jacobian = False
else:
def jac_wrapped(x):
self.njev += 1
return np.atleast_2d(jac(x))
self.J = np.atleast_2d(self.J)
self.sparse_jacobian = False
def update_jac():
self.J = jac_wrapped(self.x)
elif jac in FD_METHODS:
self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
**finite_diff_options)
self.J_updated = True
if (sparse_jacobian or
sparse_jacobian is None and sps.issparse(self.J)):
def update_jac():
self._update_fun()
self.J = sps.csr_matrix(
approx_derivative(fun_wrapped, self.x, f0=self.f,
**finite_diff_options))
self.J = sps.csr_matrix(self.J)
self.sparse_jacobian = True
elif sps.issparse(self.J):
def update_jac():
self._update_fun()
self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
**finite_diff_options).toarray()
self.J = self.J.toarray()
self.sparse_jacobian = False
else:
def update_jac():
self._update_fun()
self.J = np.atleast_2d(
approx_derivative(fun_wrapped, self.x, f0=self.f,
**finite_diff_options))
self.J = np.atleast_2d(self.J)
self.sparse_jacobian = False
self._update_jac_impl = update_jac
# Define Hessian
if callable(hess):
self.H = hess(self.x, self.v)
self.H_updated = True
self.nhev += 1
if sps.issparse(self.H):
def hess_wrapped(x, v):
self.nhev += 1
return sps.csr_matrix(hess(x, v))
self.H = sps.csr_matrix(self.H)
elif isinstance(self.H, LinearOperator):
def hess_wrapped(x, v):
self.nhev += 1
return hess(x, v)
else:
def hess_wrapped(x, v):
self.nhev += 1
return np.atleast_2d(np.asarray(hess(x, v)))
self.H = np.atleast_2d(np.asarray(self.H))
def update_hess():
self.H = hess_wrapped(self.x, self.v)
elif hess in FD_METHODS:
def jac_dot_v(x, v):
return jac_wrapped(x).T.dot(v)
def update_hess():
self._update_jac()
self.H = approx_derivative(jac_dot_v, self.x,
f0=self.J.T.dot(self.v),
args=(self.v,),
**finite_diff_options)
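            # i.e. H approximates d/dx [J(x)^T v], the Hessian of v^T F(x),
            # obtained by finite-differencing the product jac_dot_v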
update_hess()
self.H_updated = True
elif isinstance(hess, HessianUpdateStrategy):
self.H = hess
self.H.initialize(self.n, 'hess')
self.H_updated = True
self.x_prev = None
self.J_prev = None
def update_hess():
self._update_jac()
# When v is updated before x was updated, then x_prev and
# J_prev are None and we need this check.
if self.x_prev is not None and self.J_prev is not None:
delta_x = self.x - self.x_prev
delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v)
self.H.update(delta_x, delta_g)
self._update_hess_impl = update_hess
if isinstance(hess, HessianUpdateStrategy):
def update_x(x):
self._update_jac()
self.x_prev = self.x
self.J_prev = self.J
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
self.J_updated = False
self.H_updated = False
self._update_hess()
else:
def update_x(x):
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
self.J_updated = False
self.H_updated = False
self._update_x_impl = update_x
def _update_v(self, v):
if not np.array_equal(v, self.v):
self.v = v
self.H_updated = False
def _update_x(self, x):
if not np.array_equal(x, self.x):
self._update_x_impl(x)
def _update_fun(self):
if not self.f_updated:
self._update_fun_impl()
self.f_updated = True
def _update_jac(self):
if not self.J_updated:
self._update_jac_impl()
self.J_updated = True
def _update_hess(self):
if not self.H_updated:
self._update_hess_impl()
self.H_updated = True
def fun(self, x):
self._update_x(x)
self._update_fun()
return self.f
def jac(self, x):
self._update_x(x)
self._update_jac()
return self.J
def hess(self, x, v):
# v should be updated before x.
self._update_v(v)
self._update_x(x)
self._update_hess()
return self.H
class LinearVectorFunction(object):
"""Linear vector function and its derivatives.
Defines a linear function F = A x, where x is n-dimensional vector and
A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian
is identically zero and it is returned as a csr matrix.
"""
def __init__(self, A, x0, sparse_jacobian):
if sparse_jacobian or sparse_jacobian is None and sps.issparse(A):
self.J = sps.csr_matrix(A)
self.sparse_jacobian = True
elif sps.issparse(A):
self.J = A.toarray()
self.sparse_jacobian = False
else:
self.J = np.atleast_2d(A)
self.sparse_jacobian = False
self.m, self.n = self.J.shape
self.x = np.atleast_1d(x0).astype(float)
self.f = self.J.dot(self.x)
self.f_updated = True
self.v = np.zeros(self.m, dtype=float)
self.H = sps.csr_matrix((self.n, self.n))
def _update_x(self, x):
if not np.array_equal(x, self.x):
self.x = np.atleast_1d(x).astype(float)
self.f_updated = False
def fun(self, x):
self._update_x(x)
if not self.f_updated:
self.f = self.J.dot(x)
self.f_updated = True
return self.f
def jac(self, x):
self._update_x(x)
return self.J
def hess(self, x, v):
self._update_x(x)
self.v = v
return self.H
class IdentityVectorFunction(LinearVectorFunction):
"""Identity vector function and its derivatives.
The Jacobian is the identity matrix, returned as a dense array when
`sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is
identically zero and it is returned as a csr matrix.
"""
def __init__(self, x0, sparse_jacobian):
n = len(x0)
if sparse_jacobian or sparse_jacobian is None:
A = sps.eye(n, format='csr')
sparse_jacobian = True
else:
A = np.eye(n)
sparse_jacobian = False
super(IdentityVectorFunction, self).__init__(A, x0, sparse_jacobian)
|
jor-/scipy
|
scipy/optimize/_differentiable_functions.py
|
Python
|
bsd-3-clause
| 18,349
|
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from datetime import datetime, timedelta
from Lclassifier import testClassifier
import pickle
import os
classifier_path = 'langSer/manager/classifier.pickle'
@csrf_exempt
def detect (request):
if request.method == "POST":
return detectLang (request)
else:
return HttpResponse (
json.dumps({'success': False}),
content_type="application/json",
)
def detectLang (request):
text = request.POST.get ('text', '')
lang = classifyLang ([text])
first_val = lang[0]
return HttpResponse (json.dumps (first_val), content_type="application/json")
def setUp():
    f = open(classifier_path, 'rb')
classifier = pickle.load(f)
f.close()
return classifier
global classifier
classifier = setUp()
def classifyLang (text):
return testClassifier (classifier[0], text, classifier[1], classifier[2])
|
rahulgoel/Language-Detection
|
langSer/langSer/manager/LanguageManager.py
|
Python
|
gpl-2.0
| 980
|
"""
Describe the purpose of the test class here.
"""
from __future__ import print_function
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class RenameThisSampleTestTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
    # If your test case doesn't stress debug info, then
# set this to true. That way it won't be run once for
# each debug info format.
NO_DEBUG_INFO_TESTCASE = True
def test_sample_rename_this(self):
"""There can be many tests in a test case - describe this test here."""
self.build()
self.main_source_file = lldb.SBFileSpec("main.c")
self.sample_test()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def sample_test(self):
"""You might use the test implementation in several ways, say so here."""
# This function starts a process, "a.out" by default, sets a source
# breakpoint, runs to it, and returns the thread, process & target.
# It optionally takes an SBLaunchOption argument if you want to pass
# arguments or environment variables.
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
"Set a breakpoint here", self.main_source_file)
frame = thread.GetFrameAtIndex(0)
test_var = frame.FindVariable("test_var")
self.assertTrue(test_var.GetError().Success(), "Failed to fetch test_var")
test_value = test_var.GetValueAsUnsigned()
self.assertEqual(test_value, 10, "Got the right value for test_var")
|
apple/swift-lldb
|
packages/Python/lldbsuite/test/sample_test/TestSampleTest.py
|
Python
|
apache-2.0
| 1,625
|
#!/usr/bin/env python
import TransferErrors as TE
import cPickle as pickle
with open('stuck.pkl','rb') as pklfile:
stuck = pickle.load(pklfile)
TE.makeBasicTable(stuck,TE.workdir+'html/table.html',TE.webdir+'table.html')
TE.makeCSV(stuck,TE.webdir+'data.csv')
for basis in [-6,-5,-4,-3,-1,1,2]:
TE.makeJson(stuck,TE.webdir+('stuck_%i'%basis).replace('-','m')+'.json',basis)
|
sidnarayanan/TransferErrors
|
bin/write.py
|
Python
|
mit
| 382
|
# coding: utf-8
#
# Copyright © 2017 weirdgiraffe <giraffe@cyberzoo.xyz>
#
# Distributed under terms of the MIT license.
#
import sys
try: # real kodi
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
except ImportError: # mocked kodi
from mock_kodi import xbmc
from mock_kodi import xbmcaddon
from mock_kodi import xbmcgui
from mock_kodi import xbmcplugin
try: # python2
from urllib import urlencode
from urlparse import urlparse, parse_qs
except ImportError: # python3
from urllib.parse import urlparse, parse_qs, urlencode
class logger:
@staticmethod
def debug(s):
xbmc.log(s, xbmc.LOGDEBUG)
@staticmethod
def info(s):
xbmc.log(s, xbmc.LOGNOTICE)
@staticmethod
def error(s):
s += '\n\taddon arguments:\n\t{0}'.format('\n\t'.join(sys.argv[1:]))
xbmc.log(s, xbmc.LOGERROR)
def list_item(name, thumb):
li = xbmcgui.ListItem(name)
    if thumb is not None:
        li.setArt({'thumb': thumb})  # setArt expects a dict of art-type -> path
    # it is said that both of these methods are deprecated
    # see: http://kodi.wiki/view/Jarvis_API_changes
    # but only these methods actually work with Jarvis
    li.setIconImage(thumb)
    li.setThumbnailImage(thumb)
return li
class Plugin:
def __init__(self, *args):
self._addon = xbmcaddon.Addon()
self._url = args[0]
self._handler = int(args[1], base=10)
# addon url has format:
# plugin://plugin.hello.blah?arg1=xxx&arg2=xxx
# where args are urlencoded
o = urlparse(args[2])
self._args = dict()
for k, v in parse_qs(o.query).items():
if len(v) == 1:
self._args[k] = v[0]
else:
self._args[k] = v
@property
def icon(self):
return self._addon.getAddonInfo('icon')
@property
def args(self):
return self._args
def read_input(self, header):
        keyboard = xbmc.Keyboard('', 'Что искать?', False)  # heading text: "What to search?"
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
def play(self, url):
li = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(self._handler, True, li)
def add_screen_item(self, name, url, **kwargs):
thumb = kwargs.get('thumb')
li = list_item(name, thumb)
li.setProperty('IsPlayable', 'true')
ret = xbmcplugin.addDirectoryItem(self._handler, url, li, False)
if not ret:
logger.error('failed to add {0} playable item'.format(name))
def add_screen_directory(self, name, url, **kwargs):
thumb = kwargs.get('thumb')
li = list_item(name, thumb)
args = [self._handler, url, li, True]
items_count = kwargs.get('items_count')
if items_count:
args += [items_count]
ret = xbmcplugin.addDirectoryItem(*args)
if not ret:
logger.error('failed to add {0} directory item'.format(name))
def publish_screen(self, ok, refresh=False):
xbmcplugin.endOfDirectory(self._handler, ok, refresh)
def make_url(self, argv):
return '{0}?{1}'.format(self._url, urlencode(argv))
def settings_value(self, setting_id):
return self._addon.getSetting(setting_id)
def show_notification(self, title, message):
timeout = len(message) / 10 * 2000
title = title.replace('"', '\\"')
message = message.replace('"', '\\"')
xbmc.executebuiltin('Notification("{0}","{1}","{2}","{3}")'.format(
title.encode('ascii', 'ignore'),
message.encode('ascii', 'ignore'),
timeout,
self.icon))
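# Usage sketch (Kodi passes sys.argv as [base_url, handle, query_string];
# the query values below are made up):
#
#     >>> plugin = Plugin('plugin://plugin.video.giraffe.seasonvar/', '1', '?mode=search')
#     >>> plugin.args
#     {'mode': 'search'}
#     >>> plugin.add_screen_directory('Search', plugin.make_url({'mode': 'search'}))
#     >>> plugin.publish_screen(True)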
|
weirdgiraffe/plugin.video.giraffe.seasonvar
|
resources/site-packages/kodi/__init__.py
|
Python
|
mit
| 3,703
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Various utilities used by XenServer plugins."""
import cPickle as pickle
import errno
import logging
import os
import shutil
import signal
import subprocess
import tempfile
import XenAPIPlugin
LOG = logging.getLogger(__name__)
CHUNK_SIZE = 8192
class CommandNotFound(Exception):
pass
def delete_if_exists(path):
try:
os.unlink(path)
except OSError, e: # noqa
if e.errno == errno.ENOENT:
LOG.warning("'%s' was already deleted, skipping delete" % path)
else:
raise
def _link(src, dst):
LOG.info("Hard-linking file '%s' -> '%s'" % (src, dst))
os.link(src, dst)
def _rename(src, dst):
LOG.info("Renaming file '%s' -> '%s'" % (src, dst))
try:
os.rename(src, dst)
except OSError, e: # noqa
if e.errno == errno.EXDEV:
LOG.error("Invalid cross-device link. Perhaps %s and %s should "
"be symlinked on the same filesystem?" % (src, dst))
raise
def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,
universal_newlines=False, close_fds=True, env=None):
"""Make a subprocess according to the given command-line string
"""
LOG.info("Running cmd '%s'" % " ".join(cmdline))
kwargs = {}
kwargs['stdout'] = stdout and subprocess.PIPE or None
kwargs['stderr'] = stderr and subprocess.PIPE or None
kwargs['stdin'] = stdin and subprocess.PIPE or None
kwargs['universal_newlines'] = universal_newlines
kwargs['close_fds'] = close_fds
kwargs['env'] = env
try:
proc = subprocess.Popen(cmdline, **kwargs)
except OSError, e: # noqa
if e.errno == errno.ENOENT:
raise CommandNotFound
else:
raise
return proc
class SubprocessException(Exception):
def __init__(self, cmdline, ret, out, err):
Exception.__init__(self, "'%s' returned non-zero exit code: "
"retcode=%i, out='%s', stderr='%s'"
% (cmdline, ret, out, err))
self.cmdline = cmdline
self.ret = ret
self.out = out
self.err = err
def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
"""Ensure that the process returned a zero exit code indicating success
"""
if ok_exit_codes is None:
ok_exit_codes = [0]
out, err = proc.communicate(cmd_input)
ret = proc.returncode
if ret not in ok_exit_codes:
raise SubprocessException(' '.join(cmdline), ret, out, err)
return out
def run_command(cmd, cmd_input=None, ok_exit_codes=None):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, an exception is raised with that information.
Otherwise, the output from stdout is returned.
cmd_input is passed to the process on standard input.
"""
proc = make_subprocess(cmd, stdout=True, stderr=True, stdin=True,
close_fds=True)
return finish_subprocess(proc, cmd, cmd_input=cmd_input,
ok_exit_codes=ok_exit_codes)
def try_kill_process(proc):
"""Sends the given process the SIGKILL signal."""
pid = proc.pid
LOG.info("Killing process %s" % pid)
try:
os.kill(pid, signal.SIGKILL)
except Exception:
LOG.exception("Failed to kill %s" % pid)
def make_staging_area(sr_path):
"""The staging area is a place where we can temporarily store and
manipulate VHDs. The use of the staging area is different for upload and
download:
Download
========
When we download the tarball, the VHDs contained within will have names
like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before
moving them into the SR. However, since 'image.vhd' may be a base_copy, we
need to link it to 'snap.vhd' (using vhd-util modify) before moving both
into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted).
The staging area gives us a place to perform these operations before they
are moved to the SR, scanned, and then registered with XenServer.
Upload
======
On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd'
in the case of the snapshot VHD, and 'image.vhd' in the case of the
base_copy. The staging area provides a directory in which we can create
hard-links to rename the VHDs without affecting what's in the SR.
NOTE
====
The staging area is created as a subdirectory within the SR in order to
guarantee that it resides within the same filesystem and therefore permit
hard-linking and cheap file moves.
"""
staging_path = tempfile.mkdtemp(dir=sr_path)
return staging_path
def cleanup_staging_area(staging_path):
"""Remove staging area directory
On upload, the staging area contains hard-links to the VHDs in the SR;
it's safe to remove the staging-area because the SR will keep the link
count > 0 (so the VHDs in the SR will not be deleted).
"""
if os.path.exists(staging_path):
shutil.rmtree(staging_path)
def _handle_old_style_images(staging_path):
"""Rename files to conform to new image format, if needed.
Old-Style:
snap.vhd -> image.vhd -> base.vhd
New-Style:
0.vhd -> 1.vhd -> ... (n-1).vhd
The New-Style format has the benefit of being able to support a VDI chain
of arbitrary length.
"""
file_num = 0
for filename in ('snap.vhd', 'image.vhd', 'base.vhd'):
path = os.path.join(staging_path, filename)
if os.path.exists(path):
_rename(path, os.path.join(staging_path, "%d.vhd" % file_num))
file_num += 1
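# Worked example (hypothetical staging contents): for a staging area holding
# 'snap.vhd' and 'image.vhd', the loop above renames in chain order, so
# snap.vhd (the leaf) becomes 0.vhd and image.vhd (its parent) becomes 1.vhd,
# giving downstream code a gap-free 0..(n-1) sequence.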
def _assert_vhd_not_hidden(path):
"""Sanity check to ensure that only appropriate VHDs are marked as hidden.
If this flag is incorrectly set, then when we move the VHD into the SR, it
will be deleted out from under us.
"""
query_cmd = ["vhd-util", "query", "-n", path, "-f"]
out = run_command(query_cmd)
for line in out.splitlines():
if line.lower().startswith('hidden'):
value = line.split(':')[1].strip()
if value == "1":
raise Exception(
"VHD %s is marked as hidden without child" % path)
def _validate_vhd(vdi_path):
"""This checks for several errors in the VHD structure.
Most notably, it checks that the timestamp in the footer is correct, but
may pick up other errors also.
This check ensures that the timestamps listed in the VHD footer aren't in
    the future. This can occur during a migration if the clocks on the two
    Dom0's are out of sync. This would corrupt the SR if it were imported, so
generate an exception to bail.
"""
check_cmd = ["vhd-util", "check", "-n", vdi_path, "-p"]
out = run_command(check_cmd, ok_exit_codes=[0, 22])
first_line = out.splitlines()[0].strip()
if 'invalid' in first_line:
if 'footer' in first_line:
part = 'footer'
elif 'header' in first_line:
part = 'header'
else:
part = 'setting'
details = first_line.split(':', 1)
if len(details) == 2:
details = details[1]
else:
details = first_line
extra = ''
if 'timestamp' in first_line:
extra = (" ensure source and destination host machines have "
"time set correctly")
LOG.info("VDI Error details: %s" % out)
raise Exception(
"VDI '%(vdi_path)s' has an invalid %(part)s: '%(details)s'"
"%(extra)s" % {'vdi_path': vdi_path, 'part': part,
'details': details, 'extra': extra})
def _validate_vdi_chain(vdi_path):
"""This check ensures that the parent pointers on the VHDs are valid
before we move the VDI chain to the SR. This is *very* important
because a bad parent pointer will corrupt the SR causing a cascade of
failures.
"""
def get_parent_path(path):
query_cmd = ["vhd-util", "query", "-n", path, "-p"]
out = run_command(query_cmd, ok_exit_codes=[0, 22])
first_line = out.splitlines()[0].strip()
if first_line.endswith(".vhd"):
return first_line
elif 'has no parent' in first_line:
return None
elif 'query failed' in first_line:
raise Exception("VDI '%s' not present which breaks"
" the VDI chain, bailing out" % path)
else:
raise Exception("Unexpected output '%s' from vhd-util" % out)
cur_path = vdi_path
while cur_path:
_validate_vhd(cur_path)
cur_path = get_parent_path(cur_path)
def _validate_sequenced_vhds(staging_path):
"""This check ensures that the VHDs in the staging area are sequenced
properly from 0 to n-1 with no gaps.
"""
seq_num = 0
filenames = os.listdir(staging_path)
for filename in filenames:
if not filename.endswith('.vhd'):
continue
# Ignore legacy swap embedded in the image, generated on-the-fly now
if filename == "swap.vhd":
continue
vhd_path = os.path.join(staging_path, "%d.vhd" % seq_num)
if not os.path.exists(vhd_path):
raise Exception("Corrupt image. Expected seq number: %d. Files: %s"
% (seq_num, filenames))
seq_num += 1
def import_vhds(sr_path, staging_path, uuid_stack):
"""Move VHDs from staging area into the SR.
The staging area is necessary because we need to perform some fixups
(assigning UUIDs, relinking the VHD chain) before moving into the SR,
otherwise the SR manager process could potentially delete the VHDs out from
under us.
Returns: A dict of imported VHDs:
{'root': {'uuid': 'ffff-aaaa'}}
"""
_handle_old_style_images(staging_path)
_validate_sequenced_vhds(staging_path)
files_to_move = []
# Collect sequenced VHDs and assign UUIDs to them
seq_num = 0
while True:
orig_vhd_path = os.path.join(staging_path, "%d.vhd" % seq_num)
if not os.path.exists(orig_vhd_path):
break
# Rename (0, 1 .. N).vhd -> aaaa-bbbb-cccc-dddd.vhd
vhd_uuid = uuid_stack.pop()
vhd_path = os.path.join(staging_path, "%s.vhd" % vhd_uuid)
_rename(orig_vhd_path, vhd_path)
if seq_num == 0:
leaf_vhd_path = vhd_path
leaf_vhd_uuid = vhd_uuid
files_to_move.append(vhd_path)
seq_num += 1
# Re-link VHDs, in reverse order, from base-copy -> leaf
parent_path = None
for vhd_path in reversed(files_to_move):
if parent_path:
# Link to parent
modify_cmd = ["vhd-util", "modify", "-n", vhd_path,
"-p", parent_path]
run_command(modify_cmd)
parent_path = vhd_path
# Sanity check the leaf VHD
_assert_vhd_not_hidden(leaf_vhd_path)
_validate_vdi_chain(leaf_vhd_path)
# Move files into SR
for orig_path in files_to_move:
new_path = os.path.join(sr_path, os.path.basename(orig_path))
_rename(orig_path, new_path)
imported_vhds = dict(root=dict(uuid=leaf_vhd_uuid))
return imported_vhds
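# Worked example (UUIDs are made up): with '0.vhd' and '1.vhd' staged, the
# leaf 0.vhd takes the first popped UUID and is reported back as the root:
#
#     >>> import_vhds(sr_path, staging_path, ['aaaa-1111', 'bbbb-2222'])
#     {'root': {'uuid': 'bbbb-2222'}}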
def prepare_staging_area(sr_path, staging_path, vdi_uuids, seq_num=0):
"""Hard-link VHDs into staging area."""
for vdi_uuid in vdi_uuids:
source = os.path.join(sr_path, "%s.vhd" % vdi_uuid)
link_name = os.path.join(staging_path, "%d.vhd" % seq_num)
_link(source, link_name)
seq_num += 1
def create_tarball(fileobj, path, callback=None, compression_level=None):
"""Create a tarball from a given path.
:param fileobj: a file-like object holding the tarball byte-stream.
If None, then only the callback will be used.
:param path: path to create tarball from
:param callback: optional callback to call on each chunk written
:param compression_level: compression level, e.g., 9 for gzip -9.
"""
tar_cmd = ["tar", "-zc", "--directory=%s" % path, "."]
env = os.environ.copy()
if compression_level and 1 <= compression_level <= 9:
env["GZIP"] = "-%d" % compression_level
tar_proc = make_subprocess(tar_cmd, stdout=True, stderr=True, env=env)
try:
while True:
chunk = tar_proc.stdout.read(CHUNK_SIZE)
if chunk == '':
break
if callback:
callback(chunk)
if fileobj:
fileobj.write(chunk)
except Exception:
try_kill_process(tar_proc)
raise
finish_subprocess(tar_proc, tar_cmd)
def extract_tarball(fileobj, path, callback=None):
"""Extract a tarball to a given path.
:param fileobj: a file-like object holding the tarball byte-stream
:param path: path to extract tarball into
:param callback: optional callback to call on each chunk read
"""
tar_cmd = ["tar", "-zx", "--directory=%s" % path]
tar_proc = make_subprocess(tar_cmd, stderr=True, stdin=True)
try:
while True:
chunk = fileobj.read(CHUNK_SIZE)
if chunk == '':
break
if callback:
callback(chunk)
tar_proc.stdin.write(chunk)
except Exception:
try_kill_process(tar_proc)
raise
finish_subprocess(tar_proc, tar_cmd)
def _handle_serialization(func):
def wrapped(session, params):
params = pickle.loads(params['params'])
rv = func(session, *params['args'], **params['kwargs'])
return pickle.dumps(rv)
return wrapped
def register_plugin_calls(*funcs):
"""Wrapper around XenAPIPlugin.dispatch which handles pickle
serialization.
"""
wrapped_dict = {}
for func in funcs:
wrapped_dict[func.__name__] = _handle_serialization(func)
XenAPIPlugin.dispatch(wrapped_dict)
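# Wire-format sketch (the lambda is illustrative): callers pickle a dict of
# {'args', 'kwargs'} into params['params'] and unpickle the returned value:
#
#     >>> blob = pickle.dumps({'args': ('vm-uuid',), 'kwargs': {}})
#     >>> wrapped = _handle_serialization(lambda session, arg: arg.upper())
#     >>> pickle.loads(wrapped(None, {'params': blob}))
#     'VM-UUID'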
|
eharney/nova
|
plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py
|
Python
|
apache-2.0
| 14,618
|
"""
Tinderbox target
"""
# NOTE: That^^ docstring influences catalyst-spec(5) man page generation.
import os
from catalyst.support import cmd, list_bashify, CatalystError
from catalyst.base.stagebase import StageBase
class tinderbox(StageBase):
"""
Builder class for the tinderbox target
"""
def __init__(self,spec,addlargs):
self.required_values=["tinderbox/packages"]
self.valid_values=self.required_values[:]
self.valid_values.extend(["tinderbox/use"])
StageBase.__init__(self,spec,addlargs)
def run_local(self):
# tinderbox
# example call: "grp.sh run xmms vim sys-apps/gleep"
try:
if os.path.exists(self.settings["controller_file"]):
cmd(self.settings["controller_file"]+" run "+\
list_bashify(self.settings["tinderbox/packages"]),"run script failed.",env=self.env)
except CatalystError:
self.unbind()
raise CatalystError("Tinderbox aborting due to error.",
print_traceback=True)
def set_cleanables(self):
self.settings['cleanables'] = [
'/etc/resolv.conf',
'/var/tmp/*',
'/root/*',
self.settings['portdir'],
]
def set_action_sequence(self):
#Default action sequence for run method
self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","run_local","preclean","unbind","clean",\
"clear_autoresume"]
|
elitak/catalyst
|
catalyst/targets/tinderbox.py
|
Python
|
gpl-2.0
| 1,379
|
from django.db import models
from django.urls import reverse
from djrichtextfield.models import RichTextField
class Post(models.Model):
title = models.CharField(max_length=50)
lead = RichTextField(field_settings='mini')
content = RichTextField()
def get_absolute_url(self):
return reverse('post_detail', kwargs={'pk': self.pk})
def get_add_comment_url(self):
return reverse('post_add_comment', kwargs={'pk': self.pk})
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
content = models.TextField()
def get_absolute_url(self):
return '{}#c{}'.format(reverse('post_detail', kwargs={'pk': self.post.pk}), self.pk)
def __str__(self):
return 'Comment on "%s"' % self.post.title
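# Usage sketch (URL prefixes depend on the project's URLconf):
#
#     >>> post = Post.objects.create(title='Hi', lead='<p>Lead</p>', content='<p>Body</p>')
#     >>> post.get_absolute_url()        # reverse('post_detail', pk=post.pk)
#     >>> Comment.objects.create(post=post, content='First!').get_absolute_url()
#     # -> '<post_detail url>#c<comment pk>'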
|
jaap3/django-richtextfield
|
testproject/testapp/models.py
|
Python
|
mit
| 835
|
## roster.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: roster.py,v 1.20 2005/07/13 13:22:52 snakeru Exp $
"""
Simple roster implementation. It can nevertheless be used for different
tasks, like mass-renaming of contacts.
"""
from protocol import *
from client import PlugIn
class Roster(PlugIn):
""" Defines a plenty of methods that will allow you to manage roster.
Also automatically track presences from remote JIDs taking into
account that every JID can have multiple resources connected. Does not
currently support 'error' presences.
You can also use mapping interface for access to the internal representation of
contacts in roster.
"""
def __init__(self):
""" Init internal variables. """
PlugIn.__init__(self)
self.DBG_LINE='roster'
self._data = {}
self.set=None
self._exported_methods=[self.getRoster]
def plugin(self,owner,request=1):
""" Register presence and subscription trackers in the owner's dispatcher.
Also request roster from server if the 'request' argument is set.
Used internally."""
self._owner.RegisterHandler('iq',self.RosterIqHandler,'result',NS_ROSTER)
self._owner.RegisterHandler('iq',self.RosterIqHandler,'set',NS_ROSTER)
self._owner.RegisterHandler('presence',self.PresenceHandler)
if request: self.Request()
def Request(self,force=0):
""" Request roster from server if it were not yet requested
(or if the 'force' argument is set). """
if self.set is None: self.set=0
elif not force: return
self._owner.send(Iq('get',NS_ROSTER))
self.DEBUG('Roster requested from server','start')
def getRoster(self):
""" Requests roster from server if neccessary and returns self."""
if not self.set: self.Request()
while not self.set: self._owner.Process(10)
return self
def RosterIqHandler(self,dis,stanza):
""" Subscription tracker. Used internally for setting items state in
internal roster representation. """
for item in stanza.getTag('query').getTags('item'):
jid=item.getAttr('jid')
if item.getAttr('subscription')=='remove':
if self._data.has_key(jid): del self._data[jid]
raise NodeProcessed # a MUST
self.DEBUG('Setting roster item %s...'%jid,'ok')
if not self._data.has_key(jid): self._data[jid]={}
self._data[jid]['name']=item.getAttr('name')
self._data[jid]['ask']=item.getAttr('ask')
self._data[jid]['subscription']=item.getAttr('subscription')
if item.getAttr('subscription') == 'both':
self._data[jid]['subs_from_status'] = 'subscribed'
self._data[jid]['subs_to_status'] = 'subscribed'
elif item.getAttr('subscription') == 'from':
self._data[jid]['subs_from_status'] = 'subscribed'
if item.getAttr('subscription') == 'to':
self._data[jid]['subs_to_status'] = 'subscribed'
self._data[jid]['groups']=[]
if not self._data[jid].has_key('resources'): self._data[jid]['resources']={}
for group in item.getTags('group'): self._data[jid]['groups'].append(group.getData())
self._data[self._owner.User+'@'+self._owner.Server]={'resources':{},'name':None,'ask':None,'subscription':None,'subs_from_status':None,'subs_to_status':None,'groups':None,}
self.set=1
raise NodeProcessed # a MUST. Otherwise you'll get back an <iq type='error'/>
def PresenceHandler(self,dis,pres):
""" Presence tracker. Used internally for setting items' resources state in
internal roster representation. """
jid=JID(pres.getFrom())
if not self._data.has_key(jid.getStripped()): self._data[jid.getStripped()]={'name':None,'ask':None,'subscription':'none','subs_from_status':None,'subs_to_status':None,'groups':['Not in roster'],'resources':{}}
item=self._data[jid.getStripped()]
typ=pres.getType()
if not typ:
self.DEBUG('Setting roster item %s for resource %s...'%(jid.getStripped(),jid.getResource()),'ok')
item['resources'][jid.getResource()]=res={'show':None,'status':None,'priority':'0','timestamp':None}
res['show'] = pres.getShow() if pres.getTag('show') else 'available'
if pres.getTag('status'): res['status']=pres.getStatus()
if pres.getTag('priority'): res['priority']=pres.getPriority()
if not pres.getTimestamp(): pres.setTimestamp()
res['timestamp']=pres.getTimestamp()
item['subs_to_status'] = 'subscribed'
elif typ=='unavailable' and item['resources'].has_key(jid.getResource()): del item['resources'][jid.getResource()]
elif typ=='subscribe': item['subs_from_status'] = 'pending'
elif typ=='error':
# Need to handle type='error' better
item['resources'][jid.getResource()]=res={'show':None,'status':None,'priority':'0','timestamp':None}
if not pres.getTimestamp(): pres.setTimestamp()
res['timestamp']=pres.getTimestamp()
res['show'] = 'unavailable'
res['status'] = 'error'
def _getItemData(self,jid,dataname):
""" Return specific jid's representation in internal format. Used internally. """
jid=jid[:(jid+'/').find('/')]
return self._data[jid][dataname]
def _getResourceData(self,jid,dataname):
""" Return specific jid's resource representation in internal format. Used internally. """
if jid.find('/')+1:
jid,resource=jid.split('/',1)
if self._data[jid]['resources'].has_key(resource): return self._data[jid]['resources'][resource][dataname]
elif self._data[jid]['resources'].keys():
lastpri=-129
for r in self._data[jid]['resources'].keys():
if int(self._data[jid]['resources'][r]['priority'])>lastpri: resource,lastpri=r,int(self._data[jid]['resources'][r]['priority'])
return self._data[jid]['resources'][resource][dataname]
def delItem(self,jid):
""" Delete contact 'jid' from roster."""
self._owner.send(Iq('set',NS_ROSTER,payload=[Node('item',{'jid':jid,'subscription':'remove'})]))
def getAsk(self,jid):
""" Returns 'ask' value of contact 'jid'."""
return self._getItemData(jid,'ask')
def getGroups(self,jid):
""" Returns groups list that contact 'jid' belongs to."""
return self._getItemData(jid,'groups')
def getName(self,jid):
""" Returns name of contact 'jid'."""
return self._getItemData(jid,'name')
def getPriority(self,jid):
""" Returns priority of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'priority')
def getRawRoster(self):
""" Returns roster representation in internal format. """
return self._data
def getRawItem(self,jid):
""" Returns roster item 'jid' representation in internal format. """
return self._data[jid[:(jid+'/').find('/')]]
def getSubscriptionFromStatus(self,jid):
""" Returns the status of the subscription request of contact 'jid' to us. Can be None, 'pending' or 'subscribed'"""
return self._getItemData(jid,'subs_from_status')
def getSubscriptionToStatus(self,jid):
""" Returns the status of the subscription request to contact 'jid'. Can be None, 'pending' or 'subscribed'"""
return self._getItemData(jid,'subs_to_status')
def getShow(self, jid):
""" Returns 'show' value of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'show')
def getStatus(self, jid):
""" Returns 'status' value of contact 'jid'. 'jid' should be a full (not bare) JID."""
return self._getResourceData(jid,'status')
def getSubscription(self,jid):
""" Returns 'subscription' value of contact 'jid'."""
return self._getItemData(jid,'subscription')
def getResources(self,jid):
""" Returns list of connected resources of contact 'jid'."""
return self._data[jid[:(jid+'/').find('/')]]['resources'].keys()
def setItem(self,jid,name=None,groups=[]):
""" Creates/renames contact 'jid' and sets the groups list that it now belongs to."""
iq=Iq('set',NS_ROSTER)
query=iq.getTag('query')
attrs={'jid':jid}
if name: attrs['name']=name
item=query.setTag('item',attrs)
for group in groups: item.addChild(node=Node('group',payload=[group]))
self._owner.send(iq)
def getItems(self):
""" Return list of all [bare] JIDs that the roster is currently tracks."""
return self._data.keys()
def keys(self):
""" Same as getItems. Provided for the sake of dictionary interface."""
return self._data.keys()
def __getitem__(self,item):
""" Get the contact in the internal format. Raises KeyError if JID 'item' is not in roster."""
return self._data[item]
def getItem(self,item):
""" Get the contact in the internal format (or None if JID 'item' is not in roster)."""
if self._data.has_key(item): return self._data[item]
def Subscribe(self,jid):
""" Send subscription request to JID 'jid'."""
self._data[jid]['subs_to_status'] = 'pending'
self._owner.send(Presence(jid,'subscribe'))
def Unsubscribe(self,jid):
""" Ask for removing our subscription for JID 'jid'."""
self._data[jid]['subs_to_status'] = None
self._owner.send(Presence(jid,'unsubscribe'))
def Authorize(self,jid):
""" Authorise JID 'jid'. Works only if these JID requested auth previously. """
self._data[jid]['subs_from_status'] = 'subscribed'
self._owner.send(Presence(jid,'subscribed'))
def Unauthorize(self,jid):
""" Unauthorise JID 'jid'. Use for declining authorisation request
or for removing existing authorization. """
self._data[jid]['subs_from_status'] = None
self._owner.send(Presence(jid,'unsubscribed'))
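# Usage sketch (assumes an already-connected xmpp client `cl`; getRoster is
# exported onto the owner via _exported_methods, and the flow below presumes
# the server has pushed the new item back before the status query):
#
#     >>> roster = cl.getRoster()                  # requests and waits for the roster
#     >>> roster.setItem('friend@example.com', name='Friend', groups=['Buddies'])
#     >>> roster.Subscribe('friend@example.com')
#     >>> roster.getSubscriptionToStatus('friend@example.com')
#     'pending'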
|
jjculber/xmpp-server-scanner
|
pybot/include/xmpp/roster.py
|
Python
|
gpl-2.0
| 10,808
|
'''
AnimeBot.py
Acts as the "main" file and ties all the other functionality together.
'''
import asyncio
import re
import traceback
import discord
import roboragi_old.CommentBuilder as CommentBuilder
import roboragi_old.Config as Config
import roboragi_old.DatabaseHandler as DatabaseHandler
import roboragi_old.Discord as Discord
import roboragi_old.DiscordoragiSearch as DiscordoragiSearch
# the servers where expanded requests are disabled
disableexpanded = ['']
async_queue = asyncio.Queue(maxsize=32)
ownerID = '164546159140929538'
@Discord.client.event
async def on_ready():
print('Logged in as')
print(Discord.client.user.name)
print(Discord.client.user.id)
print('------')
@Discord.client.event
async def on_server_join(server):
DatabaseHandler.addServerToDatabase(server.id)
print("Added server {} to database".format(server.id))
async def process_message(message, is_edit=False):
# Anime/Manga requests that are found go into separate arrays
animeArray = []
mangaArray = []
lnArray = []
# Checks if bot has permissions to embed
if message.channel.type != discord.ChannelType.private:
canEmbed = message.channel.server.default_role.permissions.embed_links
else:
canEmbed = True
if not canEmbed:
botMember = Discord.getMemberFromID(Config.clientid, message.server)
defaultroleperm = botMember.top_role.permissions
canEmbed = defaultroleperm.embed_links
isAdmin = message.author.top_role.permissions.administrator
isServerMod = message.author.top_role.permissions.manage_server
isOwner = message.author.id == ownerID
if message.author.bot:
return
# ignores all "code" markup (i.e. anything between backticks)
preCleanMessage = re.sub(r"\`(.*?)\`", "", message.clean_content)
cleanMessage = re.sub(r'<:.+?:([0-9]{15,21})>', "", preCleanMessage)
messageReply = ''
if re.search('({!help.*?}|{{!help.*?}}|<!help.*?>|<<!help.*?>>)',
cleanMessage, re.S) is not None:
try:
localEm = CommentBuilder.buildHelpEmbed()
await Discord.client.send_message(message.channel, embed=localEm)
return
except:
return
if re.search(
'({!command.*?}|{{!command.*?}}|<!command.*?>|<<!command.*?>>)',
cleanMessage, re.S) is not None:
if 'toggleexpanded' in cleanMessage.lower() and (
isAdmin or isServerMod):
try:
allowedStatus = DatabaseHandler.toggleAllowExpanded(
message.server.id)
print("Toggled allowExpanded for server {}".format(
message.server.id))
if allowedStatus.lower() == 'true':
await Discord.client.send_message(message.channel,
"Expanded requests are now allowed.")
else:
await Discord.client.send_message(message.channel,
"Expanded requests are now disallowed.")
return
except Exception as e:
print(e)
return
        if 'addserver' in cleanMessage.lower() and isOwner:
try:
DatabaseHandler.addServerToDatabase(message.server.id)
await Discord.client.send_message(message.channel,
"Server has been added.")
return
except Exception as e:
print(e)
return
else:
print("command failed, user probably has insufficient rights")
return
sender = re.search('[@]([A-Za-z0-9 _-]+?)(>|}|$)', cleanMessage, re.S)
mentionArray = message.raw_mentions
if re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
cleanMessage, re.S) is not None and sender is not None:
for mention in mentionArray:
if not canEmbed:
messageReply = CommentBuilder.buildStatsComment(
server=message.server, username=mention)
else:
localEm = CommentBuilder.buildStatsEmbed(server=message.server,
username=mention)
await Discord.client.send_message(message.channel,
embed=localEm)
return None
if re.search('({!sstats}|{{!sstats}}|<!sstats>|<<!sstats>>)', cleanMessage,
re.S) is not None:
if not canEmbed:
messageReply = CommentBuilder.buildStatsComment(
server=message.server)
else:
localEm = CommentBuilder.buildStatsEmbed(server=message.server)
await Discord.client.send_message(message.channel, embed=localEm)
return None
elif re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
cleanMessage, re.S) is not None:
if not canEmbed:
messageReply = CommentBuilder.buildStatsComment()
else:
localEm = CommentBuilder.buildStatsEmbed()
await Discord.client.send_message(message.channel, embed=localEm)
return None
else:
# The basic algorithm here is:
# If it's an expanded request, build a reply using the data in the braces, clear the arrays, add the reply to the relevant array and ignore everything else.
# If it's a normal request, build a reply using the data in the braces, add the reply to the relevant array.
# Counts the number of expanded results vs total results. If it's not just a single expanded result, they all get turned into normal requests.
numOfRequest = 0
numOfExpandedRequest = 0
forceNormal = False
expandedAllowed = DatabaseHandler.checkServerConfig('allowexpanded',
message.server.id)
if expandedAllowed == False:
forceNormal = True
for match in re.finditer("\{{2}([^}]*)\}{2}|\<{2}([^>]*)\>{2}",
cleanMessage, re.S):
numOfRequest += 1
numOfExpandedRequest += 1
print("Request found: {}".format(match.group(0)))
for match in re.finditer(
"(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))|(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))",
cleanMessage, re.S):
numOfRequest += 1
print("Request found: {}".format(match.group(0)))
if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
forceNormal = True
# if numOfRequest != 0:
# await Discord.client.send_typing(message.channel)
# Expanded Anime
for match in re.finditer("\{{2}([^}]*)\}{2}", cleanMessage, re.S):
reply = ''
if match.group(1) != '':
if (forceNormal) or (
str(message.channel).lower() in disableexpanded):
reply = await DiscordoragiSearch.buildAnimeReply(
match.group(1), message, False, canEmbed)
else:
reply = await DiscordoragiSearch.buildAnimeReply(
match.group(1), message, True, canEmbed)
if (reply is not None):
animeArray.append(reply)
else:
print("Empty request, ignoring")
# Normal Anime
for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))",
cleanMessage, re.S):
if match.group(1) != '':
reply = await DiscordoragiSearch.buildAnimeReply(
match.group(1), message, False, canEmbed)
if (reply is not None):
animeArray.append(reply)
else:
print('Could not find anime')
else:
print("Empty request, ignoring")
# Expanded Manga
# NORMAL EXPANDED
for match in re.finditer("\<{2}([^>]*)\>{2}(?!(:|\>))", cleanMessage,
re.S):
if match.group(1) != '':
reply = ''
if (forceNormal) or (
str(message.channel).lower() in disableexpanded):
reply = await DiscordoragiSearch.buildMangaReply(
match.group(1), message, False, canEmbed)
else:
reply = await DiscordoragiSearch.buildMangaReply(
match.group(1), message, True, canEmbed)
if (reply is not None):
mangaArray.append(reply)
else:
print("Empty request, ignoring")
# AUTHOR SEARCH EXPANDED
for match in re.finditer("\<{2}([^>]*)\>{2}:\(([^)]+)\)", cleanMessage,
re.S):
if match.group(1) != '':
reply = ''
if (forceNormal) or (
str(message.server).lower() in disableexpanded):
reply = await DiscordoragiSearch.buildMangaReplyWithAuthor(
match.group(1), match.group(2), message, False,
canEmbed)
else:
reply = await DiscordoragiSearch.buildMangaReplyWithAuthor(
match.group(1), match.group(2), message, True,
canEmbed)
if (reply is not None):
mangaArray.append(reply)
else:
print("Empty request, ignoring")
# Normal Manga
# NORMAL
for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))",
cleanMessage, re.S):
if match.group(1) != '':
reply = await DiscordoragiSearch.buildMangaReply(
match.group(1), message, False, canEmbed)
if (reply is not None):
mangaArray.append(reply)
else:
print("Empty request, ignoring")
# AUTHOR SEARCH
for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]*)\>:\(([^)]+)\)",
cleanMessage, re.S):
reply = await DiscordoragiSearch.buildMangaReplyWithAuthor(
match.group(1), match.group(2), message, False, canEmbed)
if (reply is not None):
mangaArray.append(reply)
# Expanded LN
for match in re.finditer("\]{2}([^]]*)\[{2}", cleanMessage, re.S):
if match.group(1) != '':
reply = ''
if (forceNormal) or (
str(message.server).lower() in disableexpanded):
reply = await DiscordoragiSearch.buildLightNovelReply(
match.group(1), False, message, canEmbed)
else:
reply = await DiscordoragiSearch.buildLightNovelReply(
match.group(1), True, message, canEmbed)
if (reply is not None):
lnArray.append(reply)
else:
print("Empty request, ignoring")
# Normal LN
for match in re.finditer("(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))",
cleanMessage, re.S):
if match.group(1) != '':
reply = await DiscordoragiSearch.buildLightNovelReply(
match.group(1), False, message, canEmbed)
if (reply is not None):
lnArray.append(reply)
else:
print("Empty request, ignoring")
# Here is where we create the final reply to be posted
# The final message reply. We add stuff to this progressively.
        messageReply = ''
        # Basically just to keep track of people posting the same title multiple times (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []
        # Adding all the anime to the final message
for i, animeReply in enumerate(animeArray):
            if i != 0:
messageReply += '\n\n'
if not (animeReply['title'] in postedAnimeTitles):
postedAnimeTitles.append(animeReply['title'])
if not canEmbed:
messageReply += animeReply['comment']
else:
messageReply = 'n/a'
if mangaArray:
messageReply += '\n\n'
# Adding all the manga to the final message
for i, mangaReply in enumerate(mangaArray):
            if i != 0:
messageReply += '\n\n'
if not (mangaReply['title'] in postedMangaTitles):
postedMangaTitles.append(mangaReply['title'])
if not canEmbed:
messageReply += mangaReply['comment']
else:
messageReply = 'n/a'
if lnArray:
messageReply += '\n\n'
        # Adding all the light novels to the final message
        for i, lnReply in enumerate(lnArray):
            if i != 0:
                messageReply += '\n\n'
if not (lnReply['title'] in postedLNTitles):
postedLNTitles.append(lnReply['title'])
if not canEmbed:
messageReply += lnReply['comment']
else:
messageReply = 'N/A'
# If there are more than 10 requests, shorten them all
        if messageReply != '' and (
len(animeArray) + len(mangaArray) >= 10):
messageReply = re.sub(r"\^\((.*?)\)", "", messageReply, flags=re.M)
        # If there was actually something found, post the reply to Discord. Then, add the message to the "already seen" database.
        if messageReply != '':
if is_edit:
if not canEmbed:
await Discord.client.send_message(message.channel,
messageReply)
else:
for i, animeReply in enumerate(animeArray):
await Discord.client.send_message(message.channel,
embed=animeReply[
'embed'])
for i, mangaReply in enumerate(mangaArray):
await Discord.client.send_message(message.channel,
embed=mangaReply[
'embed'])
for i, lnReply in enumerate(lnArray):
await Discord.client.send_message(message.channel,
embed=lnReply['embed'])
else:
try:
print("Message created.\n")
if not canEmbed:
await Discord.client.send_message(message.channel,
messageReply)
else:
for i, animeReply in enumerate(animeArray):
await Discord.client.send_message(message.channel,
embed=animeReply[
'embed'])
for i, mangaReply in enumerate(mangaArray):
await Discord.client.send_message(message.channel,
embed=mangaReply[
'embed'])
for i, lnReply in enumerate(lnArray):
await Discord.client.send_message(message.channel,
embed=lnReply[
'embed'])
except discord.errors.Forbidden:
print('Request from banned channel: ' + str(
message.channel) + '\n')
except Exception as e:
print(e)
traceback.print_exc()
except:
traceback.print_exc()
else:
try:
if is_edit:
return None
else:
DatabaseHandler.addMessage(message.id, message.author.id,
message.server.id, False)
except:
traceback.print_exc()
# Overwrite on_message so we can run our stuff
@Discord.client.event
async def on_message(message):
from DiscordoragiSearch import \
isValidMessage # local import here to fix attribute not found error
    print('Message received')
# Is the message valid (i.e. it's not made by Discordoragi and I haven't seen it already). If no, try to add it to the "already seen pile" and skip to the next message. If yes, keep going.
if not (isValidMessage(message)):
try:
if not (DatabaseHandler.messageExists(message.id)):
DatabaseHandler.addMessage(message.id, message.author.id,
message.server.id, False)
except Exception:
traceback.print_exc()
pass
else:
await process_message(message)
# ------------------------------------#
# Here's the stuff that actually gets run
# Initialise Discord.
print('Starting Bot')
Discord.run()
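# Request syntax recognised by the regexes above (titles are illustrative):
#
#     {Nisekoi}          -> normal anime lookup
#     {{Nisekoi}}        -> expanded anime lookup
#     <Nisekoi>          -> normal manga lookup
#     <<Nisekoi>>        -> expanded manga lookup
#     <Nisekoi>:(Komi)   -> manga lookup restricted to an author
#     <<Nisekoi>>:(Komi) -> expanded author-restricted manga lookup
#     ]Overlord[         -> normal light novel lookup
#     ]]Overlord[[       -> expanded light novel lookup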
|
MaT1g3R/Roboragi
|
roboragi_old/AnimeBot.py
|
Python
|
mit
| 17,947
|
import os
def rename_abf(fname):
    # Resolve the directory before stripping it from the filename, so files
    # given with a path are renamed in place rather than relative to the cwd.
    path = os.path.dirname(os.path.abspath(fname))
    fname = os.path.basename(fname)
    path_file = os.path.join(path, fname)
fnoext = fname.split(os.extsep)[0]
## pull out the year, month and day from the fname
year = fnoext[0:2]
month = fnoext[2]
day = fnoext[3:5]
fnum = fnoext[5:8]
## make yyyy_mm_dd_NNNN
newyear = '20' + year
    ## because the month is 1 : 9 for jan - sep, then o = october,
    ## n = november, d = december, we have to have some cases here
try:
int(month)
newmonth = '0' + month
except ValueError:
if month=='o':
newmonth = '10'
elif month=='n':
newmonth = '11'
elif month == 'd':
newmonth = '12'
## day stays same
## fnum needs a prepended 0
newfnum = '0' + fnum
new_name = newyear+'_'+newmonth+'_'+day+'_'+newfnum+os.extsep+'abf'
new_path_file = os.path.join(path, new_name)
os.rename(path_file, new_path_file)
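# Worked example (filename is hypothetical): '07o15003.abf' splits into
# year '07', month 'o' (October), day '15' and file number '003', so
#
#     >>> rename_abf('07o15003.abf')
#     # renames the file on disk to '2007_10_15_0003.abf'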
|
matthewperkins/abf_reader
|
abf_rename.py
|
Python
|
mit
| 1,161
|
"""Reusable testing components"""
from django.test import TestCase, Client
from django.urls import reverse
from rest_framework import status
from server.models import User
class AdminTestCase(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {}
def app(self):
"""Return the app name, which is the base of the module."""
return self.__module__.split('.')[0]
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
url = reverse('admin:{}_{}_changelist'.format(self.app(), path))
response = self.client.get(url)
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response.url, '{}?next={}'.format(reverse('admin:login'), url))
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
`is_staff = True`.
"""
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = reverse('admin:{}_{}_changelist'.format(self.app(), path))
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '{}?next={}'.format(reverse('admin:login'), url),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
user_profile = self.user.userprofile
user_profile.level = 'GA'
user_profile.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = reverse('admin:{}_{}_changelist'.format(self.app(), path))
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
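# Usage sketch (model names are hypothetical): an app reuses the base class by
# listing the endpoints registered with its admin site:
#
#     class ServerAdminTest(AdminTestCase):
#         admin_endpoints = {'machine', 'machinegroup'}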
|
salopensource/sal
|
sal/test_utils.py
|
Python
|
apache-2.0
| 2,243
|
import hashlib
import json
import logging
import os
import re
import sys
import glob
from collections import defaultdict
from cStringIO import StringIO
from fs.osfs import OSFS
from importlib import import_module
from lxml import etree
from path import path
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import make_error_tracker, exc_info_to_str
from xmodule.course_module import CourseDescriptor
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.x_module import XModuleDescriptor, XMLParsingSystem
from xmodule.html_module import HtmlDescriptor
from . import ModuleStoreBase, Location, XML_MODULESTORE_TYPE
from .exceptions import ItemNotFoundError
from .inheritance import compute_inherited_metadata
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
etree.set_default_parser(edx_xml_parser)
log = logging.getLogger(__name__)
# VS[compat]
# TODO (cpennington): Remove this once all fall 2012 courses have been imported
# into the cms from xml
def clean_out_mako_templating(xml_string):
xml_string = xml_string.replace('%include', 'include')
xml_string = re.sub(r"(?m)^\s*%.*$", '', xml_string)
return xml_string
class ImportSystem(XMLParsingSystem, MakoDescriptorSystem):
def __init__(self, xmlstore, course_id, course_dir,
policy, error_tracker, parent_tracker,
load_error_modules=True, **kwargs):
"""
A class that handles loading from xml. Does some munging to ensure that
all elements have unique slugs.
xmlstore: the XMLModuleStore to store the loaded modules in
"""
self.unnamed = defaultdict(int) # category -> num of new url_names for that category
self.used_names = defaultdict(set) # category -> set of used url_names
self.org, self.course, self.url_name = course_id.split('/')
        # cdodge: adding the course_id as passed in for later reference rather than having to recombine the org/course/url_name
self.course_id = course_id
self.course_dir = course_dir
self.load_error_modules = load_error_modules
def process_xml(xml):
"""Takes an xml string, and returns a XModuleDescriptor created from
that xml.
"""
def make_name_unique(xml_data):
"""
Make sure that the url_name of xml_data is unique. If a previously loaded
unnamed descriptor stole this element's url_name, create a new one.
Removes 'slug' attribute if present, and adds or overwrites the 'url_name' attribute.
"""
# VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check)
# tags that really need unique names--they store (or should store) state.
need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',
'videosequence', 'poll_question', 'timelimit')
attr = xml_data.attrib
tag = xml_data.tag
id = lambda x: x
# Things to try to get a name, in order (key, cleaning function, remove key after reading?)
lookups = [('url_name', id, False),
('slug', id, True),
('name', Location.clean, False),
('display_name', Location.clean, False)]
url_name = None
for key, clean, remove in lookups:
if key in attr:
url_name = clean(attr[key])
if remove:
del attr[key]
break
def looks_like_fallback(url_name):
"""Does this look like something that came from fallback_name()?"""
return (url_name is not None
and url_name.startswith(tag)
and re.search('[0-9a-fA-F]{12}$', url_name))
def fallback_name(orig_name=None):
"""Return the fallback name for this module. This is a function instead of a variable
because we want it to be lazy."""
if looks_like_fallback(orig_name):
# We're about to re-hash, in case something changed, so get rid of the tag_ and hash
orig_name = orig_name[len(tag) + 1:-12]
# append the hash of the content--the first 12 bytes should be plenty.
orig_name = "_" + orig_name if orig_name not in (None, "") else ""
xml_bytes = xml.encode('utf8')
return tag + orig_name + "_" + hashlib.sha1(xml_bytes).hexdigest()[:12]
# Fallback if there was nothing we could use:
if url_name is None or url_name == "":
url_name = fallback_name()
# Don't log a warning--we don't need this in the log. Do
# put it in the error tracker--content folks need to see it.
if tag in need_uniq_names:
error_tracker("PROBLEM: no name of any kind specified for {tag}. Student "
"state will not be properly tracked for this module. Problem xml:"
" '{xml}...'".format(tag=tag, xml=xml[:100]))
else:
# TODO (vshnayder): We may want to enable this once course repos are cleaned up.
# (or we may want to give up on the requirement for non-state-relevant issues...)
# error_tracker("WARNING: no name specified for module. xml='{0}...'".format(xml[:100]))
pass
# Make sure everything is unique
if url_name in self.used_names[tag]:
# Always complain about modules that store state. If it
# doesn't store state, don't complain about things that are
# hashed.
if tag in need_uniq_names:
msg = ("Non-unique url_name in xml. This may break state tracking for content."
" url_name={0}. Content={1}".format(url_name, xml[:100]))
error_tracker("PROBLEM: " + msg)
log.warning(msg)
# Just set name to fallback_name--if there are multiple things with the same fallback name,
# they are actually identical, so it's fragile, but not immediately broken.
# TODO (vshnayder): if the tag is a pointer tag, this will
# break the content because we won't have the right link.
# That's also a legitimate attempt to reuse the same content
# from multiple places. Once we actually allow that, we'll
# need to update this to complain about non-unique names for
# definitions, but allow multiple uses.
url_name = fallback_name(url_name)
self.used_names[tag].add(url_name)
xml_data.set('url_name', url_name)
try:
# VS[compat]
# TODO (cpennington): Remove this once all fall 2012 courses
# have been imported into the cms from xml
xml = clean_out_mako_templating(xml)
xml_data = etree.fromstring(xml)
make_name_unique(xml_data)
descriptor = XModuleDescriptor.load_from_xml(
etree.tostring(xml_data, encoding='unicode'), self, self.org,
self.course, xmlstore.default_class)
except Exception as err:
if not self.load_error_modules:
raise
# Didn't load properly. Fall back on loading as an error
# descriptor. This should never error due to formatting.
msg = "Error loading from xml. " + str(err)[:200]
log.warning(msg)
# Normally, we don't want lots of exception traces in our logs from common
# content problems. But if you're debugging the xml loading code itself,
# uncomment the next line.
                # log.exception(msg)
self.error_tracker(msg)
err_msg = msg + "\n" + exc_info_to_str(sys.exc_info())
descriptor = ErrorDescriptor.from_xml(
xml,
self,
self.org,
self.course,
err_msg
)
setattr(descriptor, 'data_dir', course_dir)
xmlstore.modules[course_id][descriptor.location] = descriptor
if hasattr(descriptor, 'children'):
for child in descriptor.get_children():
parent_tracker.add_parent(child.location, descriptor.location)
# After setting up the descriptor, save any changes that we have
# made to attributes on the descriptor to the underlying KeyValueStore.
descriptor.save()
return descriptor
render_template = lambda: ''
# TODO (vshnayder): we are somewhat architecturally confused in the loading code:
# load_item should actually be get_instance, because it expects the course-specific
# policy to be loaded. For now, just add the course_id here...
load_item = lambda location: xmlstore.get_instance(course_id, location)
resources_fs = OSFS(xmlstore.data_dir / course_dir)
MakoDescriptorSystem.__init__(self, load_item, resources_fs,
error_tracker, render_template, **kwargs)
XMLParsingSystem.__init__(self, load_item, resources_fs,
error_tracker, process_xml, policy, **kwargs)
class ParentTracker(object):
"""A simple class to factor out the logic for tracking location parent pointers."""
def __init__(self):
"""
Init
"""
# location -> set(parents). Not using defaultdict because we care about the empty case.
self._parents = dict()
def add_parent(self, child, parent):
"""
Add a parent of child location to the set of parents. Duplicate calls have no effect.
child and parent must be something that can be passed to Location.
"""
child = Location(child)
parent = Location(parent)
s = self._parents.setdefault(child, set())
s.add(parent)
def is_known(self, child):
"""
returns True iff child has some parents.
"""
child = Location(child)
return child in self._parents
def make_known(self, location):
"""Tell the parent tracker about an object, without registering any
parents for it. Used for the top level course descriptor locations."""
self._parents.setdefault(location, set())
def parents(self, child):
"""
Return a list of the parents of this child. If not is_known(child), will throw a KeyError
"""
child = Location(child)
return list(self._parents[child])
class XMLModuleStore(ModuleStoreBase):
"""
An XML backed ModuleStore
"""
def __init__(self, data_dir, default_class=None, course_dirs=None, load_error_modules=True):
"""
Initialize an XMLModuleStore from data_dir
data_dir: path to data directory containing the course directories
default_class: dot-separated string defining the default descriptor
class to use if none is specified in entry_points
course_dirs: If specified, the list of course_dirs to load. Otherwise,
load all course dirs
"""
super(XMLModuleStore, self).__init__()
self.data_dir = path(data_dir)
self.modules = defaultdict(dict) # course_id -> dict(location -> XModuleDescriptor)
self.courses = {} # course_dir -> XModuleDescriptor for the course
self.errored_courses = {} # course_dir -> errorlog, for dirs that failed to load
self.load_error_modules = load_error_modules
if default_class is None:
self.default_class = None
else:
module_path, _, class_name = default_class.rpartition('.')
class_ = getattr(import_module(module_path), class_name)
self.default_class = class_
self.parent_trackers = defaultdict(ParentTracker)
# If we are specifically asked for missing courses, that should
# be an error. If we are asked for "all" courses, find the ones
# that have a course.xml. We sort the dirs in alpha order so we always
# read things in the same order (OS differences in load order have
# bitten us in the past.)
if course_dirs is None:
course_dirs = sorted([d for d in os.listdir(self.data_dir) if
os.path.exists(self.data_dir / d / "course.xml")])
for course_dir in course_dirs:
self.try_load_course(course_dir)
def try_load_course(self, course_dir):
'''
Load a course, keeping track of errors as we go along.
'''
# Special-case code here, since we don't have a location for the
# course before it loads.
# So, make a tracker to track load-time errors, then put in the right
# place after the course loads and we have its location
errorlog = make_error_tracker()
course_descriptor = None
try:
course_descriptor = self.load_course(course_dir, errorlog.tracker)
except Exception as e:
msg = "ERROR: Failed to load course '{0}': {1}".format(course_dir, str(e))
log.exception(msg)
errorlog.tracker(msg)
if course_descriptor is not None and not isinstance(course_descriptor, ErrorDescriptor):
self.courses[course_dir] = course_descriptor
self._location_errors[course_descriptor.location] = errorlog
self.parent_trackers[course_descriptor.id].make_known(course_descriptor.location)
else:
# Didn't load course. Instead, save the errors elsewhere.
self.errored_courses[course_dir] = errorlog
def __unicode__(self):
'''
String representation - for debugging
'''
return '<XMLModuleStore data_dir=%r, %d courses, %d modules>' % (
self.data_dir, len(self.courses), len(self.modules))
def load_policy(self, policy_path, tracker):
"""
Attempt to read a course policy from policy_path. If the file
exists, but is invalid, log an error and return {}.
If the policy loads correctly, returns the deserialized version.
"""
if not os.path.exists(policy_path):
return {}
try:
with open(policy_path) as f:
return json.load(f)
except (IOError, ValueError) as err:
msg = "ERROR: loading course policy from {0}".format(policy_path)
tracker(msg)
log.warning(msg + " " + str(err))
return {}
def load_course(self, course_dir, tracker):
"""
Load a course into this module store
course_path: Course directory name
returns a CourseDescriptor for the course
"""
log.debug('========> Starting course import from {0}'.format(course_dir))
with open(self.data_dir / course_dir / "course.xml") as course_file:
# VS[compat]
# TODO (cpennington): Remove this once all fall 2012 courses have
# been imported into the cms from xml
course_file = StringIO(clean_out_mako_templating(course_file.read()))
course_data = etree.parse(course_file, parser=edx_xml_parser).getroot()
org = course_data.get('org')
if org is None:
msg = ("No 'org' attribute set for course in {dir}. "
"Using default 'edx'".format(dir=course_dir))
log.warning(msg)
tracker(msg)
org = 'edx'
course = course_data.get('course')
if course is None:
msg = ("No 'course' attribute set for course in {dir}."
" Using default '{default}'".format(dir=course_dir,
default=course_dir
)
)
log.warning(msg)
tracker(msg)
course = course_dir
url_name = course_data.get('url_name', course_data.get('slug'))
policy_dir = None
if url_name:
policy_dir = self.data_dir / course_dir / 'policies' / url_name
policy_path = policy_dir / 'policy.json'
policy = self.load_policy(policy_path, tracker)
# VS[compat]: remove once courses use the policy dirs.
if policy == {}:
old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(url_name)
policy = self.load_policy(old_policy_path, tracker)
else:
policy = {}
# VS[compat] : 'name' is deprecated, but support it for now...
if course_data.get('name'):
url_name = Location.clean(course_data.get('name'))
tracker("'name' is deprecated for module xml. Please use "
"display_name and url_name.")
        elif not url_name:
raise ValueError("Can't load a course without a 'url_name' "
"(or 'name') set. Set url_name.")
course_id = CourseDescriptor.make_id(org, course, url_name)
system = ImportSystem(
self,
course_id,
course_dir,
policy,
tracker,
self.parent_trackers[course_id],
self.load_error_modules,
)
course_descriptor = system.process_xml(etree.tostring(course_data, encoding='unicode'))
# If we fail to load the course, then skip the rest of the loading steps
if isinstance(course_descriptor, ErrorDescriptor):
return course_descriptor
# NOTE: The descriptors end up loading somewhat bottom up, which
# breaks metadata inheritance via get_children(). Instead
# (actually, in addition to, for now), we do a final inheritance pass
# after we have the course descriptor.
compute_inherited_metadata(course_descriptor)
# now import all pieces of course_info which is expected to be stored
# in <content_dir>/info or <content_dir>/info/<url_name>
self.load_extra_content(system, course_descriptor, 'course_info', self.data_dir / course_dir / 'info', course_dir, url_name)
# now import all static tabs which are expected to be stored in
# in <content_dir>/tabs or <content_dir>/tabs/<url_name>
self.load_extra_content(system, course_descriptor, 'static_tab', self.data_dir / course_dir / 'tabs', course_dir, url_name)
self.load_extra_content(system, course_descriptor, 'custom_tag_template', self.data_dir / course_dir / 'custom_tags', course_dir, url_name)
self.load_extra_content(system, course_descriptor, 'about', self.data_dir / course_dir / 'about', course_dir, url_name)
log.debug('========> Done with course import from {0}'.format(course_dir))
return course_descriptor
def load_extra_content(self, system, course_descriptor, category, base_dir, course_dir, url_name):
self._load_extra_content(system, course_descriptor, category, base_dir, course_dir)
# then look in a override folder based on the course run
if os.path.isdir(base_dir / url_name):
self._load_extra_content(system, course_descriptor, category, base_dir / url_name, course_dir)
def _load_extra_content(self, system, course_descriptor, category, path, course_dir):
for filepath in glob.glob(path / '*'):
if not os.path.isfile(filepath):
continue
with open(filepath) as f:
try:
html = f.read().decode('utf-8')
# tabs are referenced in policy.json through a 'slug' which is just the filename without the .html suffix
slug = os.path.splitext(os.path.basename(filepath))[0]
loc = Location('i4x', course_descriptor.location.org, course_descriptor.location.course, category, slug)
module = HtmlDescriptor(
system,
{'data': html, 'location': loc, 'category': category}
)
# VS[compat]:
# Hack because we need to pull in the 'display_name' for static tabs (because we need to edit them)
# from the course policy
if category == "static_tab":
for tab in course_descriptor.tabs or []:
if tab.get('url_slug') == slug:
module.display_name = tab['name']
module.data_dir = course_dir
module.save()
self.modules[course_descriptor.id][module.location] = module
                except Exception as e:
logging.exception("Failed to load {0}. Skipping... Exception: {1}".format(filepath, str(e)))
system.error_tracker("ERROR: " + str(e))
def get_instance(self, course_id, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at
location, with the policy for course_id. (In case two xml
dirs have different content at the same location, return the
one for this course_id.)
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
"""
location = Location(location)
try:
return self.modules[course_id][location]
except KeyError:
raise ItemNotFoundError(location)
def has_item(self, course_id, location):
"""
Returns True if location exists in this ModuleStore.
"""
location = Location(location)
return location in self.modules[course_id]
def get_item(self, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at location.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
"""
raise NotImplementedError("XMLModuleStores can't guarantee that definitions"
" are unique. Use get_instance.")
def get_items(self, location, course_id=None, depth=0):
items = []
def _add_get_items(self, location, modules):
for mod_loc, module in modules.iteritems():
# Locations match if each value in `location` is None or if the value from `location`
# matches the value from `mod_loc`
if all(goal is None or goal == value for goal, value in zip(location, mod_loc)):
items.append(module)
if course_id is None:
for _, modules in self.modules.iteritems():
_add_get_items(self, location, modules)
else:
_add_get_items(self, location, self.modules[course_id])
return items
def get_courses(self, depth=0):
"""
Returns a list of course descriptors. If there were errors on loading,
some of these may be ErrorDescriptors instead.
"""
return self.courses.values()
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
return dict((k, self.errored_courses[k].errors) for k in self.errored_courses)
def update_item(self, location, data):
"""
Set the data in the item specified by the location to
data
location: Something that can be passed to Location
data: A nested dictionary of problem data
"""
raise NotImplementedError("XMLModuleStores are read-only")
def update_children(self, location, children):
"""
Set the children for the item specified by the location to
data
location: Something that can be passed to Location
children: A list of child item identifiers
"""
raise NotImplementedError("XMLModuleStores are read-only")
def update_metadata(self, location, metadata):
"""
Set the metadata for the item specified by the location to
metadata
location: Something that can be passed to Location
metadata: A nested dictionary of module metadata
"""
raise NotImplementedError("XMLModuleStores are read-only")
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location in this
course. Needed for path_to_location().
returns an iterable of things that can be passed to Location. This may
be empty if there are no parents.
'''
location = Location.ensure_fully_specified(location)
if not self.parent_trackers[course_id].is_known(location):
raise ItemNotFoundError("{0} not in {1}".format(location, course_id))
return self.parent_trackers[course_id].parents(location)
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given
course_id. The return can be either "xml" (for XML based courses) or "mongo" for MongoDB backed courses
"""
return XML_MODULESTORE_TYPE
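# A minimal usage sketch (hypothetical data_dir; assumes the constructor
# signature XMLModuleStore(data_dir, course_dirs=None, ...) used above):
#     store = XMLModuleStore('/edx/data', course_dirs=['6.002x'])
#     for course in store.get_courses():
#         print store.get_instance(course.id, course.location)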
|
wwj718/edx-video
|
common/lib/xmodule/xmodule/modulestore/xml.py
|
Python
|
agpl-3.0
| 26,947
|
#!/usr/bin/env python
import os, sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "july.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
julython/julython.org
|
manage.py
|
Python
|
mit
| 245
|
#!/usr/bin/python
# File created on Nov 27 Jan 2012
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
from os import makedirs, sys, remove, rename
from sys import path
import re, traceback
from optparse import OptionParser, OptionGroup
from glob import glob
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, printf, eprintf
from libs.python_modules.utils.metapathways_utils import strip_taxonomy, ShortenORFId, ShortenContigId
from libs.python_modules.utils.sysutil import getstatusoutput
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
sys.exit(3)
usage= sys.argv[0] + """ -d dbname1 -b parsed_blastout_for_database1 -w weight_for_database1 [-d dbname2 -b parsed_blastout_for_database2 -w weight_for_database2 ] [ --rRNA_16S 16SrRNA-stats-table ] [ --tRNA tRNA-stats-table ] [ --compact_output ]"""
parser = None
def createParser():
global parser
epilog = """Based on the parsed homology search results against reference protein databases,
tRNA scan results and matches against the LSU and SSU rRNA sequence databases
annotations for individual ORFs are created.
The functional annotation is done by synthesizing the annotations from the
reference databases, after removing redundant texts and also making sure
the annotations from different reference databases are in agreement with each other.
The taxonomic annotation is done by applying the LCA (lowest common ancestor rule)
on the hits from the RefSeq NR protein database."""
epilog = re.sub(r'\s+',' ', epilog)
parser = OptionParser(usage = usage, epilog = epilog)
parser.add_option("-b", "--blastoutput", dest="input_blastout", action='append', default=[],
help='blastout files in TSV format [at least 1 REQUIRED]')
parser.add_option("-a", "--algorithm", dest="algorithm", default="BLAST", help="algorithm BLAST or LAST" )
parser.add_option("-m", "--contig-map", dest="contig_map_file", default=None, help="contig map file" )
parser.add_option("-d", "--dbasename", dest="database_name", action='append', default=[],
help='the database names [at least 1 REQUIRED]')
parser.add_option("-D", "--blastdir", dest="blastdir", default=None,
help='the blast dir where all the BLAST outputs are located')
parser.add_option("-s", "--samplename", dest="sample_name", default=None,
help='the sample name')
parser.add_option("-w", "--weight_for_database", dest="weight_db", action='append', default=[], type='float',
help='the map file for the database [at least 1 REQUIRED]')
parser.add_option( "--rRNA_16S", dest="rRNA_16S", action="append", default=[],
help='the 16s rRNA stats file [OPTIONAL]')
parser.add_option( "--tRNA", dest="tRNA", action="append", default=[],
help='the tRNA stats file [OPTIONAL]')
    cutoffs_group = OptionGroup(parser, 'Cutoff Related Options')
cutoffs_group.add_option("--min_score", dest="min_score", type='float', default=20,
help='the minimum bit score cutoff [default = 20 ] ')
cutoffs_group.add_option("--max_evalue", dest="max_evalue", type='float', default=1e-6,
help='the maximum E-value cutoff [ default = 1e-6 ] ')
cutoffs_group.add_option("--min_length", dest="min_length", type='float', default=30,
help='the minimum length of query cutoff [default = 30 ] ')
cutoffs_group.add_option("--max_length", dest="max_length", type='float', default=10000,
help='the maximum length of query cutoff [default = 10000 ] ')
cutoffs_group.add_option("--min_identity", dest="min_identity", type='float', default=20,
                     help='the minimum identity of query cutoff [default = 20 ] ')
cutoffs_group.add_option("--max_identity", dest="max_identity", type='float', default=100,
help='the maximum identity of query cutoff [default = 100 ] ')
cutoffs_group.add_option("--limit", dest="limit", type='float', default=5,
help='max number of hits per query cutoff [default = 5 ] ')
cutoffs_group.add_option("--min_bsr", dest="min_bsr", type='float', default=0.00,
help='minimum BIT SCORE RATIO [default = 0.00 ] ')
parser.add_option_group(cutoffs_group)
output_options_group = OptionGroup(parser, 'Output Options')
output_options_group.add_option("--tax", dest="taxonomy", action='store_true', default=False,
help='add the taxonomy info [useful for refseq] ')
parser.add_option_group(output_options_group)
parser.add_option('-o' , "--output_gff", dest="output_gff",
help='the output gff file [REQUIRED]')
parser.add_option('--output-comparative-annotation', dest="output_comparative_annotation",
help='the comparative output table [REQUIRED]')
parser.add_option( "--compact_output", dest="compact_output", action='store_true', default=False,
help='compact output [OPTIONAL]')
parser.add_option('--input_gff', dest='input_gff',
metavar='INPUT', help='Unannotated gff file [REQUIRED]')
def check_arguments(opts, args):
if len(opts.input_blastout) == 0:
eprintf("There should be at least one blastoutput file\n")
return False
if len(opts.database_name) == 0:
eprintf("There should be at least one database name\n")
return False
if len(opts.weight_db) == 0:
eprint("There should be at least one weight\n")
return False
if len(opts.input_blastout) != len(opts.database_name) or\
len(opts.input_blastout) != len(opts.weight_db) :
eprint("The num of database names, blastoutputs and database map file should be equal\n")
return False
if opts.output_gff == None:
eprintf("Must specify the output gff file\n")
return False
if opts.output_comparative_annotation == None:
eprintf("Must specify the output tables for comparative annotation\n")
return False
if opts.input_gff == None:
eprintf("Must specify the input gff file\n")
return False
return True
def insert_attribute(attributes, attribStr):
rawfields = re.split('=', attribStr)
if len(rawfields) == 2:
if rawfields[0].strip().lower()=='id':
orfid = ShortenORFId(rawfields[1].strip())
attributes[rawfields[0].strip().lower()] = orfid
else:
attributes[rawfields[0].strip().lower()] = rawfields[1].strip()
def split_attributes(str, attributes):
rawattributes = re.split(';', str)
for attribStr in rawattributes:
insert_attribute(attributes, attribStr)
return attributes
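# e.g. split_attributes('ID=O_1;partial=00', {}) returns
# {'id': ShortenORFId('O_1'), 'partial': '00'} -- insert_attribute shortens
# the 'id' value before storing it.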
def insert_orf_into_dict(line, contig_dict):
rawfields = re.split('\t', line)
fields = []
for field in rawfields:
fields.append(field.strip());
if( len(fields) != 9):
return
attributes = {}
seqname = fields[0]
try:
seqname = ShortenContigId(fields[0])
except:
seqname = fields[0]
attributes['seqname'] = seqname # this is a bit of a duplication
attributes['source'] = fields[1]
attributes['feature'] = fields[2]
attributes['start'] = int(fields[3])
attributes['end'] = int(fields[4])
try:
attributes['score'] = float(fields[5])
except:
attributes['score'] = fields[5]
attributes['strand'] = fields[6]
attributes['frame'] = fields[7]
split_attributes(fields[8], attributes)
if not seqname in contig_dict :
contig_dict[seqname] = []
contig_dict[seqname].append(attributes)
class GffFileParser(object):
def __init__(self, gff_filename):
self.Size = 10000
self.i=0
self.orf_dictionary = {}
self.gff_beg_pattern = re.compile("^#")
self.lines= []
self.size=0
try:
self.gff_file = open( gff_filename,'r')
        except IOError:
            eprintf("Cannot read the gff file %s\n", gff_filename)
exit_process()
def __iter__(self):
return self
def refillBuffer(self):
self.orf_dictionary = {}
i = 0
while i < self.Size:
line=self.gff_file.readline()
if not line:
break
if self.gff_beg_pattern.search(line):
continue
insert_orf_into_dict(line, self.orf_dictionary)
#print self.orf_dictionary
i += 1
self.orfs = self.orf_dictionary.keys()
self.size = len(self.orfs)
self.i = 0
def next(self):
if self.i == self.size:
self.refillBuffer()
if self.size==0:
self.gff_file.close()
raise StopIteration()
#print self.i
if self.i < self.size:
self.i = self.i + 1
return self.orfs[self.i-1]
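# A minimal usage sketch (hypothetical file name); iteration yields contig
# names in buffered batches of self.Size lines:
#     reader = GffFileParser('sample.gff')
#     for contig in reader:
#         orfs = reader.orf_dictionary[contig]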
def process_gff_file(gff_file_name, orf_dictionary):
try:
gfffile = open(gff_file_name, 'r')
except IOError:
eprintf("Cannot read file %s!\n", gff_file_name)
gff_lines = gfffile.readlines()
gff_beg_pattern = re.compile("^#")
gfffile.close()
count = 0
for line in gff_lines:
line = line.strip()
if gff_beg_pattern.search(line):
continue
insert_orf_into_dict(line, orf_dictionary)
count += 1
#if count %10000 == 0:
# print count
def create_dictionary(databasemapfile, annot_map):
seq_beg_pattern = re.compile(">")
dbmapfile = open( databasemapfile,'r')
lines=dbmapfile.readlines()
dbmapfile.close()
for line in lines:
if seq_beg_pattern.search(line):
words = line.rstrip().split()
name = words[0].replace('>','',1)
words.pop(0)
annotation = ' '.join(words)
annot_map[name]= annotation
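# e.g. a map line '>gi|123| putative kinase' yields
# annot_map['gi|123|'] = 'putative kinase'.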
def write_annotation_for_orf(outputgff_file, candidatedbname, dbname_weight, results_dictionary, orf_dictionary, contig, candidate_orf_pos, orfid, compact_output):
try:
fields = [ 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame' ]
output_line= orf_dictionary[contig][candidate_orf_pos]['seqname']
#if compact_output:
output_line = ShortenContigId(output_line)
for field in fields:
# printf("\t%s", orf_dictionary[contig][candidate_orf_pos][field])
output_line += "\t"+ str(orf_dictionary[contig][candidate_orf_pos][field])
#if compact_output:
try:
attributes = "ID="+ShortenORFId(orf_dictionary[contig][candidate_orf_pos]['id'])
attributes += ";" + "locus_tag="+ShortenORFId(orf_dictionary[contig][candidate_orf_pos]['locus_tag'])
except:
attributes = "ID="+orf_dictionary[contig][candidate_orf_pos]['id']
attributes += ";" + "locus_tag="+orf_dictionary[contig][candidate_orf_pos]['locus_tag']
attributes += ";" + "contig_length="+orf_dictionary[contig][candidate_orf_pos]['contig_length']
attributes += ";" + "orf_length="+orf_dictionary[contig][candidate_orf_pos]['orf_length']
attributes += ";" + "partial="+orf_dictionary[contig][candidate_orf_pos]['partial']
attributes += ";" + "sourcedb="+candidatedbname
if candidatedbname in results_dictionary:
attributes += ";" + "annotvalue="+str(results_dictionary[candidatedbname][orfid]['value'])
attributes += ";" + "ec="+str(results_dictionary[candidatedbname][orfid]['ec'])
attributes += ";" + "product="+results_dictionary[candidatedbname][orfid]['product']
else:
attributes += ";" + "annotvalue="+str('0')
attributes += ";" + "ec="+str('')
attributes += ";" + "product="+'hypothetical protein'
output_line += '\t' + attributes
if candidatedbname in results_dictionary:
fprintf(outputgff_file, "%s\n", output_line);
except:
eprintf("ERROR : Failure to annotate in contig %s\n", contig)
#print orf_dictionary[contig]
        traceback.print_exc(10)
exit_process()
def write_16S_tRNA_gene_info(rRNA_dictionary, outputgff_file, tag):
fields = [ 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame' ]
for rRNA in rRNA_dictionary:
output_line= rRNA_dictionary[rRNA]['seqname']
for field in fields:
output_line += "\t"+ str(rRNA_dictionary[rRNA][field])
attributes = "ID="+rRNA_dictionary[rRNA]['seqname'] + tag
attributes += ";" + "locus_tag="+rRNA_dictionary[rRNA]['seqname'] + tag
attributes += ";" + "orf_length=" + str(rRNA_dictionary[rRNA]['orf_length'])
attributes += ";" + "contig_length=" + str(rRNA_dictionary[rRNA]['contig_length'])
attributes += ";" + "ec="
attributes += ";" + "product="+rRNA_dictionary[rRNA]['product']
output_line += '\t' + attributes
fprintf(outputgff_file, "%s\n", output_line);
def process_rRNA_16S_stats(rRNA_16S_file, rRNA_16S_dictionary):
try:
taxonomy_file = open(rRNA_16S_file, 'r')
except IOError:
eprintf("Cannot read file %s!\n", rRNA_16S_file)
exit_process()
tax_lines = taxonomy_file.readlines()
similarity_pattern = re.compile("similarity")
evalue_pattern = re.compile("evalue")
bitscore_pattern = re.compile("bitscore")
taxonomy_pattern = re.compile("taxonomy")
headerScanned = False
for line in tax_lines:
if headerScanned == False:
if similarity_pattern.search(line) and evalue_pattern.search(line) and bitscore_pattern.search(line) and taxonomy_pattern.search(line):
headerScanned = True
continue
fields = [ x.strip() for x in line.split('\t') ]
if len(fields) >=6:
if fields[1]!='-':
rRNA_16S_dictionary[fields[0]] = [ fields[1], fields[2], fields[5] ]
else:
if len(fields) >=12:
if fields[7]!='-':
rRNA_16S_dictionary[fields[0]] = [ fields[7], fields[8], fields[11] ]
taxonomy_file.close()
def process_tRNA_stats(tRNA_stats_file, tRNA_dictionary):
try:
tRNA_file = open(tRNA_stats_file, 'r')
except IOError:
eprintf("Cannot read file %s!\n", tRNA_stats_file)
exit_process()
tRNA_lines = tRNA_file.readlines()
sequence_name_pattern = re.compile("sequence name", re.I)
number_pattern = re.compile("number", re.I)
headerScanned = False
for line in tRNA_lines:
if number_pattern.search(line):
continue
if headerScanned == False:
if sequence_name_pattern.search(line):
headerScanned = True
continue
fields = [ x.strip() for x in line.split('\t') ]
if len(fields) >=6:
tRNA_dictionary[fields[0]] = [ fields[3], fields[4], fields[5], fields[1] ]
# this adds the features and attributes to be added to the gff file format for the tRNA genes
def add_tRNA_genes(tRNA_dictionary, tRNA_gff_dictionary, contig_lengths) :
for tRNA in tRNA_dictionary:
try:
orf_length = abs(int( tRNA_dictionary[tRNA][1] )-int( tRNA_dictionary[tRNA][0] )) + 1
except:
orf_length = 0
if tRNA in contig_lengths:
contig_length = contig_lengths[tRNA]
else:
contig_length = 0
dict = { 'id':tRNA, 'seqname': tRNA, 'start':str(tRNA_dictionary[tRNA][0]), 'end':str(tRNA_dictionary[tRNA][1]),\
'strand':tRNA_dictionary[tRNA][2], 'score':" ", 'orf_length':str(orf_length),\
'contig_length':str(contig_length),\
'feature':'tRNA', 'source':'tranScan-1.4', 'frame':0, 'product':'tRNA-' + tRNA_dictionary[tRNA][3], 'ec':'' }
tRNA_gff_dictionary[tRNA] = dict.copy()
# this adds the features and attributes to be added to the gff file format for the 16S rRNA genes
def add_16S_genes(rRNA_16S_dictionary, rRNA_dictionary, contig_lengths) :
for rRNA in rRNA_16S_dictionary:
try:
            orf_length = abs(int( rRNA_16S_dictionary[rRNA][1] )-int( rRNA_16S_dictionary[rRNA][0] )) + 1
except:
orf_length = 0
if rRNA in contig_lengths:
contig_length = contig_lengths[rRNA]
else:
contig_length = 0
dict = { 'id':rRNA, 'seqname': rRNA, 'start':str(rRNA_16S_dictionary[rRNA][0]), 'end':str(rRNA_16S_dictionary[rRNA][1]),\
'strand':'+', 'score':str(rRNA_16S_dictionary[rRNA][2]), 'orf_length':str(orf_length),\
'contig_length':str(contig_length),\
'feature':'CDS', 'source':'BLAST Search', 'frame':0, 'product':'16S rRNA', 'ec':'' }
rRNA_dictionary[rRNA] = dict.copy()
def create_annotation(dbname_weight, results_dictionary, input_gff, rRNA_16S_stats_files, tRNA_stats_files, output_gff, output_comparative_annotation, contig_lengths, compact_output = False):
orf_dictionary={}
# process_gff_file(input_gff, orf_dictionary)
gffreader = GffFileParser(input_gff)
output_gff_tmp = output_gff + ".tmp"
outputgff_file = open( output_gff_tmp, 'w')
#output_comp_annot_file1 = open( output_comparative_annotation + '.1.txt', 'w')
# #output_comp_annot_file2 = open( output_comparative_annotation + '.2.txt', 'w')
output_comp_annot_file1_Str = 'orf_id\tref dbname\tEC\tproduct\tvalue'
#fprintf(output_comp_annot_file1,'%s\n', output_comp_annot_file1_Str)
output_comp_annot_file2_Str = 'orf_id'
dbnames = dbname_weight.keys()
for dbname in dbnames:
weight = dbname_weight[dbname]
output_comp_annot_file2_Str += '\t{0}(EC) \t{0}(product)\t{0}(value)'.format(dbname)
#fprintf(output_comp_annot_file2,'%s\n', output_comp_annot_file2_Str)
# gffreader = GffReader(input_gff)
# for dbname in dbnames:
# print dbname, len(results_dictionary[dbname].keys())
# print results_dictionary[dbname].keys()
i = 0
for contig in gffreader:
count = 0
for orf in gffreader.orf_dictionary[contig]:
value = 0.0001
success =False
output_comp_annot_file1_Str = ''
output_comp_annot_file2_Str = ''
for dbname in dbnames:
weight = dbname_weight[dbname]
value = 0
orf_id = orf['id']
if orf_id in results_dictionary[dbname]:
if value < results_dictionary[dbname][orf_id]['value']:
value = results_dictionary[dbname][orf_id]['value']
candidatedbname=dbname
success =True
candidate_orf_pos = count
if output_comp_annot_file1_Str:
output_comp_annot_file1_Str += '{0}\t{1}\t{2}\t{3}\t{4}\n'.format('', dbname,\
results_dictionary[dbname][orf['id']]['ec'],\
results_dictionary[dbname][orf['id']]['product'],\
str(results_dictionary[dbname][orf['id']]['value']*float(weight)))
else:
output_comp_annot_file1_Str += '{0}\t{1}\t{2}\t{3}\t{4}\n'.format(orf_id, dbname,\
results_dictionary[dbname][orf['id']]['ec'],\
results_dictionary[dbname][orf['id']]['product'],\
str(results_dictionary[dbname][orf['id']]['value']*float(weight)))
if output_comp_annot_file2_Str:
output_comp_annot_file2_Str += '\t{0}\t{1}\t{2}'.format(\
results_dictionary[dbname][orf['id']]['ec'],\
results_dictionary[dbname][orf['id']]['product'],\
str(results_dictionary[dbname][orf['id']]['value']*float(weight)))
else:
output_comp_annot_file2_Str += '{0}\t{1}\t{2}\t{3}'.format(orf_id,
results_dictionary[dbname][orf['id']]['ec'],\
results_dictionary[dbname][orf['id']]['product'],\
str(results_dictionary[dbname][orf['id']]['value']*float(weight)))
else:
if not output_comp_annot_file1_Str:
output_comp_annot_file1_Str += '{0}\t{1}\t{2}\t{3}\t{4}\n'.format(orf_id, '','','','')
if output_comp_annot_file2_Str:
output_comp_annot_file2_Str += '\t{0}\t{1}\t{2}'.format('', '','')
else:
                        output_comp_annot_file2_Str += '{0}\t{1}\t{2}\t{3}'.format(orf_id, '','','')
if success: # there was a database hit
#fprintf(output_comp_annot_file1,'%s\n', output_comp_annot_file1_Str)
#fprintf(output_comp_annot_file2,'%s\n', output_comp_annot_file2_Str)
write_annotation_for_orf(outputgff_file, candidatedbname, dbname_weight, results_dictionary, gffreader.orf_dictionary, contig, candidate_orf_pos, orf_id, compact_output=compact_output)
else: # if it was not a hit then it is a hypothetical protein
#print gffreader.orf_dictionary
write_annotation_for_orf(outputgff_file, 'None', '0', results_dictionary, gffreader.orf_dictionary, contig, count, orf_id, compact_output = compact_output)
count +=1 #move to the next orf
#del orf_dictionary[contig]
#output_comp_annot_file1.close()
#output_comp_annot_file2.close()
# now deal with the rRNA sequences if there is rRNA stats file
if len(rRNA_16S_stats_files) > 0 and contig_lengths :
rRNA_16S_dictionary={}
for rRNA_16S_stats_file in rRNA_16S_stats_files:
process_rRNA_16S_stats(rRNA_16S_stats_file, rRNA_16S_dictionary)
rRNA_dictionary = {}
add_16S_genes(rRNA_16S_dictionary, rRNA_dictionary, contig_lengths)
write_16S_tRNA_gene_info(rRNA_dictionary, outputgff_file, '_rRNA')
# now deal with the tRNA sequences if there is tRNA stats file
if len(tRNA_stats_files) > 0 and contig_lengths:
tRNA_dictionary={}
for tRNA_stats_file in tRNA_stats_files:
process_tRNA_stats(tRNA_stats_file, tRNA_dictionary)
tRNA_gff_dictionary = {}
add_tRNA_genes(tRNA_dictionary, tRNA_gff_dictionary, contig_lengths)
write_16S_tRNA_gene_info(tRNA_gff_dictionary, outputgff_file, '_tRNA')
#print tRNA_dictionary
outputgff_file.close()
rename(output_gff_tmp, output_gff)
def process_product(product, database, similarity_threshold=0.9):
    """Trim a single *product* annotation string from *database*.
    Database-specific decorations (accession codes, organism tags, bracketed
    qualifiers and gene symbols) are stripped according to the source
    database; stop words and repeated words are then removed and the cleaned
    product string is returned. *similarity_threshold* is currently unused.
    """
processed_product = ''
# print 'dbase', database
# COG
if database == 'cog':
results = re.search(r'Function: (.+?) #', product)
if results:
processed_product=results.group(1)
# KEGG: split and process
elif database == 'kegg':
kegg_products = re.split(r'\s*;\s+', product)
for kegg_product in kegg_products:
# Toss out organism:ID pairs, gene names, and KO IDs
kegg_product = re.sub(r'^lcl[|]', '', kegg_product)
kegg_product = re.sub(r'[a-z]{3}:\S+', '', kegg_product)
kegg_product = kegg_product.strip()
kegg_product = re.sub(r'(, \b[a-z]{3}[A-Z]?\b)+', '', kegg_product)
kegg_product = re.sub(r'^\b[a-z]{3}[A-Z]?\b', '', kegg_product)
# get KO number
kegg_product = re.sub(r'\bK\d{5}\b', '', kegg_product)
# Also toss out anything between square brackets
kegg_product = re.sub(r'\[.*\]', '', kegg_product)
if kegg_product.strip():
processed_product=kegg_product.strip()
# RefSeq: split and process
elif database == 'refseq':
for subproduct in product.split('; '):
subproduct = re.sub(r'[a-z]{2,}\|(.+?)\|\S*', '', subproduct)
subproduct = re.sub(r'\[.+?\]', '', subproduct)
if subproduct.strip():
processed_product=subproduct.strip()
# MetaCyc: split and process
elif database == 'metacyc':
# Pull out first name after the accession code:
product_name = product.split('#')[0].strip()
product_name = re.sub(r'^[^ ]* ', '', product_name)
product_name = re.sub(r' OS=.*', '', product_name)
if product_name:
processed_product=product_name
# Seed: split and process
elif database == 'seed':
for subproduct in product.split('; '):
#subproduct = re.sub(r'[a-z]{2,}\|(.+?)\|\S*', '', subproduct)
subproduct = re.sub(r'\[.+?\]', '', subproduct)
subproduct = re.sub(r'\(.+?\)', '', subproduct)
if subproduct.strip():
processed_product=subproduct.strip()
elif database == 'cazy':
for subproduct in product.split('; '):
#subproduct = re.sub(r'[a-z]{2,}\|(.+?)\|\S*', '', subproduct)
subproduct = re.sub(r'\[.+?\]', '', subproduct)
subproduct = re.sub(r'\(.+?\)', '', subproduct)
if subproduct.strip():
processed_product=subproduct.strip()
        # print processed_product
# MetaCyc: split and process
# Generic
else:
processed_product=strip_taxonomy(product)
words = [ x.strip() for x in processed_product.split() ]
filtered_words =[]
underscore_pattern = re.compile("_")
arrow_pattern = re.compile(">")
for word in words:
if not underscore_pattern.search(word) and not arrow_pattern.search(word):
filtered_words.append(word)
#processed_product = ' '.join(filtered_words)
# Chop out hypotheticals
processed_product = remove_repeats(filtered_words)
processed_product = re.sub(';','',processed_product)
# can actually be a proper annotation
# processed_product = re.sub(r'hypothetical protein','', processed_product)
return processed_product
def remove_repeats(filtered_words):
word_dict = {}
newlist = []
for word in filtered_words:
if not word in word_dict:
if not word in ['', 'is', 'have', 'has', 'will', 'can', 'should', 'in', 'at', 'upon', 'the', 'a', 'an', 'on', 'for', 'of', 'by', 'with' ,'and', '>' ]:
word_dict[word]=1
newlist.append(word)
return ' '.join(newlist)
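# e.g. remove_repeats(['putative', 'kinase', 'kinase', 'of']) returns
# 'putative kinase': duplicates and stop words are dropped.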
class BlastOutputTsvParser(object):
def __init__(self, dbname, blastoutput):
self.dbname = dbname
self.blastoutput = blastoutput
self.i=1
self.data = {}
self.fieldmap={}
self.seq_beg_pattern = re.compile("#")
try:
self.blastoutputfile = open( blastoutput,'r')
self.lines=self.blastoutputfile.readlines()
self.blastoutputfile.close()
self.size = len(self.lines)
if not self.seq_beg_pattern.search(self.lines[0]) :
exit_process("First line must have field header names and begin with \"#\"")
header = self.lines[0].replace('#','',1)
fields = [ x.strip() for x in header.rstrip().split('\t')]
k = 0
for x in fields:
self.fieldmap[x] = k
k+=1
eprintf("\nProcessing database : %s\n", dbname)
except AttributeError:
eprintf("Cannot read the map file for database :%s\n", dbname)
exit_process()
def __iter__(self):
return self
count = 0
def next(self):
if self.i < self.size:
try:
fields = [ x.strip() for x in self.lines[self.i].split('\t')]
#print self.fieldmap['ec'], fields, self.i, self.blastoutput
self.data['query'] = ShortenORFId(fields[self.fieldmap['query']])
self.data['q_length'] = int(fields[self.fieldmap['q_length']])
self.data['bitscore'] = float(fields[self.fieldmap['bitscore']])
self.data['bsr'] = float(fields[self.fieldmap['bsr']])
self.data['expect'] = float(fields[self.fieldmap['expect']])
self.data['identity'] = float(fields[self.fieldmap['identity']])
self.data['ec'] = fields[self.fieldmap['ec']]
self.data['product'] = re.sub(r'=',' ',fields[self.fieldmap['product']])
self.i = self.i + 1
return self.data
except:
print self.lines[self.i]
                print self.data
sys.exit(0)
return None
else:
raise StopIteration()
def isWithinCutoffs(data, cutoffs):
if data['q_length'] < cutoffs.min_length:
return False
if data['bitscore'] < cutoffs.min_score:
return False
if data['expect'] > cutoffs.max_evalue:
return False
if data['identity'] < cutoffs.min_identity:
return False
if data['bsr'] < cutoffs.min_bsr:
return False
return True
def word_information(string_of_words):
words = [ x.strip() for x in string_of_words.split() ]
information = 0
wordlist = {}
underscore_pattern = re.compile("_")
for word in words:
if not word in ['', 'is', 'have', 'has', 'will', 'can', 'should', 'in', 'at', 'upon', 'the', 'a', 'an', 'on', 'for', 'of', 'by', 'with' ,'and', '>', 'predicted', 'protein', 'conserved' ]:
if not underscore_pattern.search(word):
wordlist[word]=1
#print string_of_words
#print wordlist
#print len(wordlist)
return len(wordlist)
def compute_annotation_value(data):
score = 0;
if len(data['ec'] ) > 0:
score += 10
if not re.search(r'hypothetical protein', data['product']):
score += word_information(data['product'])
return score
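# e.g. data = {'ec': '2.7.1.2', 'product': 'glucokinase'} scores 10 + 1 = 11,
# while {'ec': '', 'product': 'hypothetical protein'} scores 0.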
# compute the refscores
def process_parsed_blastoutput(dbname, weight, blastoutput, cutoffs, annotation_results):
blastparser = BlastOutputTsvParser(dbname, blastoutput)
fields = ['q_length', 'bitscore', 'bsr', 'expect', 'aln_length', 'identity', 'ec' ]
if cutoffs.taxonomy:
fields.append('taxonomy')
fields.append('product')
annotation = {}
for data in blastparser:
#if count%10000==0:
# print count
if isWithinCutoffs(data, cutoffs) :
#print data['query'] + '\t' + str(data['q_length']) +'\t' + str(data['bitscore']) +'\t' + str(data['expect']) +'\t' + str(data['identity']) + '\t' + str(data['bsr']) + '\t' + data['ec'] + '\t' + data['product']
# if data['query'] =='NapDC_illum_asm_188606_0':
# print dbname
annotation['bsr'] = data['bsr']
annotation['ec'] = data['ec']
annotation['product'] = strip_taxonomy(process_product(data['product'], dbname) )
annotation['value'] = compute_annotation_value(annotation)*weight
# print annotation
if not data['query'] in annotation_results:
annotation_results[data['query']] = {'value':0}
if annotation_results[data['query']]['value'] <= annotation['value'] :
annotation_results[data['query']] = annotation.copy()
# add_refscore_to_file(blastoutput,refscore_file, allNames)
count = len(annotation_results.keys())
return count
def read_contig_lengths(contig_map_file, contig_lengths):
try:
mapfile = open(contig_map_file, 'r')
except IOError:
print "Cannot read file " + contig_map_file + " !"
return
mapfile_lines = mapfile.readlines()
mapfile.close()
for line in mapfile_lines:
line = line.strip()
fields = [ x.strip() for x in line.split('\t') ]
if len(fields) != 3:
contig_lengths = {}
return
contig_lengths[fields[0] ] = int(fields[2])
def getBlastFileNames(opts) :
database_names = []
parsed_blastouts = []
weight_dbs = []
dbnamePATT = re.compile(r'' + opts.blastdir + '*' + opts.sample_name + '*[.](.*)[.]' + opts.algorithm.upper() + 'out.parsed.txt')
blastOutNames = glob(opts.blastdir + '*' + opts.algorithm.upper() + 'out.parsed.txt')
for blastoutname in blastOutNames :
result = dbnamePATT.search(blastoutname)
if result:
dbname = result.group(1)
database_names.append(dbname)
parsed_blastouts.append(blastoutname)
weight_dbs.append(1)
return database_names, parsed_blastouts, weight_dbs
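# e.g. (hypothetical names) with blastdir 'out/', sample_name 's1' and
# algorithm 'blast', the file 'out/s1.refseq.BLASTout.parsed.txt' is matched
# and contributes dbname 'refseq' with weight 1.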
# the main function
def main(argv, errorlogger =None, runstatslogger = None):
global parser
(opts, args) = parser.parse_args(argv)
if not check_arguments(opts, args):
print usage
sys.exit(0)
results_dictionary={}
dbname_weight={}
contig_lengths = {}
read_contig_lengths(opts.contig_map_file, contig_lengths)
if opts.blastdir !=None and opts.sample_name != None:
try:
database_names, input_blastouts, weight_dbs = getBlastFileNames(opts)
except:
            traceback.print_exc(10)
pass
else:
database_names = opts.database_name
input_blastouts = opts.input_blastout
weight_dbs = opts.weight_db
priority = 6000
count_annotations = {}
for dbname, blastoutput, weight in zip(database_names, input_blastouts, weight_dbs):
results_dictionary[dbname]={}
dbname_weight[dbname] = weight
count = process_parsed_blastoutput( dbname, weight, blastoutput, opts, results_dictionary[dbname])
if runstatslogger!=None:
runstatslogger.write("%s\tProtein Annotations from %s\t%s\n" %( str(priority), dbname, str(count)))
priority += 1
for dbname in results_dictionary:
print dbname, len(results_dictionary[dbname].keys())
for seqname in results_dictionary[dbname]:
count_annotations[seqname] = True
count = len(count_annotations)
if runstatslogger!=None:
runstatslogger.write("%s\tTotal Protein Annotations\t%s\n" %( str(priority), str(count)))
    # create the annotations from the results
create_annotation(dbname_weight, results_dictionary, opts.input_gff, opts.rRNA_16S, opts.tRNA, opts.output_gff, opts.output_comparative_annotation, contig_lengths, compact_output = opts.compact_output)
def MetaPathways_annotate_fast(argv, errorlogger = None, runstatslogger = None):
createParser()
errorlogger.write("#STEP\tANNOTATE_ORFS\n")
main(argv, errorlogger = errorlogger, runstatslogger = runstatslogger)
return (0,'')
# the main function of metapaths
if __name__ == "__main__":
createParser()
main(sys.argv[1:])
|
wholebiome/MetaPathways_Python_Koonkie.3.0
|
libs/python_scripts/MetaPathways_annotate_fast_threaded.py
|
Python
|
mit
| 36,220
|
# -*- coding: utf-8 -*-
import sys
from OpenSSL import crypto
def generate_pem_file(string, filename):
with open(filename, "w") as fl:
fl.write(string)
def extract_pem_certificate():
path_file = sys.argv[1]
password = sys.argv[2]
try:
        certificate = open(path_file, "rb").read()
except IOError:
print u"Não foi possível encontrar o arquivo."
return False
try:
cert = crypto.load_pkcs12(certificate, password)
except crypto.Error:
print u"Senha incorreta para o certificado."
return False
private_key = cert.get_privatekey()
x509 = cert.get_certificate()
cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, x509)
key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, private_key)
generate_pem_file(cert_pem, "certificate.pem")
generate_pem_file(key_pem, "private_key.pem")
print "Certificado e chave privada extraídos com Sucesso."
return True
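# Usage sketch: python extract.py <certificate.p12> <password>
# writes certificate.pem and private_key.pem to the working directory.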
if __name__ == "__main__":
extract_pem_certificate()
|
adrianomargarin/extract-pems
|
extract.py
|
Python
|
gpl-3.0
| 1,032
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import KeywordPlanServiceTransport
from .grpc import KeywordPlanServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[KeywordPlanServiceTransport]]
_transport_registry["grpc"] = KeywordPlanServiceGrpcTransport
__all__ = (
"KeywordPlanServiceTransport",
"KeywordPlanServiceGrpcTransport",
)
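# A hedged usage note: callers can resolve a transport class by name from the
# registry, e.g. _transport_registry["grpc"] is KeywordPlanServiceGrpcTransport.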
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/services/keyword_plan_service/transports/__init__.py
|
Python
|
apache-2.0
| 1,049
|
import urllib
import re
import bggBazaSQLite as baza
def WstawDoBazy(numer):
vNAZWA=""
stronaParti=urllib.urlopen('http://boardgamegeek.com/play/details/'+str(numer)).read()
plikParti=open('bggwwwParti','w')
plikParti.write(stronaParti)
plikParti.close()
plikParti=open('bggwwwParti')
imie=0
new=0
position=0
team=0
score=0
win=0
ranting=0
for linia in plikParti:
if ranting==2:
vRANTING=str(linia.replace(" ","").rstrip())
print vNAZWA,vIMIE,vUSER,vNEW,vPOSITION,vTEAM,vSCORE,vWIN,vRANTING
baza.WstawDoBazySQLiteJednaPartie(vNAZWA,numer,vIMIE,vUSER,vNEW,vPOSITION,vTEAM,vSCORE,vWIN,vRANTING)
vPOSITION=""
vTEAM=""
vSCORE=""
vWIN=""
vRANTING=""
vUSER=""
ranting=0
if ranting==1:
q = re.search('<td align=\'center\'>',linia)
if q:
ranting=2
if win==2:
if re.search('<img',linia):
vWIN =1
else:
vWIN=0
win=0
ranting=1
if win==1:
s = re.search('<td align=\'center\'>',linia)
if s:
win=2
if score==2:
vSCORE=int(linia.replace(" ","").rstrip())
score=0
win=1
if score==1:
r = re.search('<td align=\'center\'>',linia)
if r:
score=2
if team==2:
vTEAM=str(linia.replace(" ","").rstrip())
team=0
score=1
if team==1:
p = re.search('<td align=\'center\'>',linia)
if p:
team=2
if position==2:
vPOSITION=linia.replace(" ","").rstrip()
position=0
team=1
if position==1:
o = re.search('<td align=\'center\'>',linia)
if o:
position=2
if new==2:
if re.search('<img',linia):
vNEW = str(1)
else:
vNEW=str(0)
new=0
position=1
if imie==2:
            a = re.search('>[a-zA-Z0-9 ]*<',linia)
if a:
vUSER = a.group()[1:-1]
else:
vUSER = 'N/A'
new=1
imie=0
        # extract the name
if imie==1:
vIMIE= str(linia.replace(" ","").rstrip())
imie=2
if new==1:
n = re.search('<td align=\'center\'>',linia)
if n:
new=2
        # name cell marker
m = re.search('<td align=\'left\'>',linia)
x= re.search('<title>[^|]*',linia)
if m:
imie=1
if x:
vNAZWA=str(x.group(0)[7:])
plikParti.close()
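# e.g. WstawDoBazy(1234) scrapes http://boardgamegeek.com/play/details/1234
# and inserts one row per player via bggBazaSQLite.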
|
przemekwa/BoardGameUserGames
|
bggZapisPartiDoBazy.py
|
Python
|
mit
| 2,286
|
"""The main world definition"""
import common
import zone
import serialize
import geometry
import events
import actor
import profiler
class UnknownActor(Exception): """Could not find the actor"""
class DuplicateActor(Exception): """The actor was already in the world"""
class DuplicateZone(Exception): """The zone was already in the world"""
class World(common.Loggable, serialize.Serializable, common.EventAware):
"""The main world object
The :doc:`engine` will control main worlds. Each world has a number
of :doc:`zone` which contain :doc:`actor`.
"""
my_properties = (
serialize.S('name', '', 'the name of this world'),
serialize.L('zones', set(), 'the zones in this world'),
serialize.L('unzoned_actors', set(), 'the actors not in any zone in this world'),
)
def __init__(self, name):
"""Initialise the World"""
self.addLogger()
self.initEvents()
self.name = name
self.engine = None
self.zones = set()
        self.unzoned_actors = set()  # Actors get put here if they end up in no zone
self.event_handlers = {}
self.init()
### Serializing ###
def init(self):
"""Initialise from serialized state"""
self.addLogger()
self.initEvents()
self.log.info('Initializing world %s' % self.name)
super(World, self).__init__()
self.engine = None
#
# This list is used to order the processing of actors in rendering. The
# flag is used to tell us when we need to resort them
self._sorted_actors = []
self._actors_need_resorting = False
self._scheduled_deletions = set()
#
# Now process actors
for zone in self.zones:
zone.init()
for actor in self.unzoned_actors:
actor.init()
### Zones ###
def addZone(self, zone):
"""Add a zone to the world"""
if zone in self.zones:
raise DuplicateZone('The zone %s is already in the world' % zone)
else:
self.zones.add(zone)
self._actors_need_resorting = True
def clearZones(self):
"""Remove all the zones"""
self.zones = set()
### Main ###
def updateWorld(self, interval):
"""Update the objects in the world"""
for zone in self.zones:
if zone.active:
zone.updateZone(interval, self)
#
# Process any scheduled actor deletions
while self._scheduled_deletions:
try:
self.removeActor(self._scheduled_deletions.pop())
except UnknownActor:
# Ok, the actor must have been removed directly
pass
def setEngine(self, engine):
"""Set the engine that we are owned by"""
self.engine = engine
def getEngine(self):
"""Return the engine that we are owned by"""
return self.engine
def findActorsByTag(self, tag):
"""Return all the actors in all zones based on the tag"""
results = actor.ActorCollection()
for z in self.zones:
results.extend(z.findActorsByTag(tag))
return results
def findActorByName(self, name):
"""Return the actor with the give name in all zones"""
for z in self.zones:
try:
return z.findActorByName(name)
except zone.ActorNotFound:
pass
else:
raise zone.ActorNotFound('Unable to find actor named "%s" in any zone' % name)
def findActorsAt(self, x, y):
"""Return the actors at a certain location"""
actors = actor.ActorCollection()
test = geometry.Point(x, y)
for the_actor in self.getActors():
if test.isInside(the_actor):
actors.append(the_actor)
return actors
def getActors(self):
"""Return all the actors"""
actors = actor.ActorCollection(self.unzoned_actors)
for z in self.zones:
actors.extend(z.getActors())
return actors
def rezoneActors(self):
"""Move actors to the right zone based on their spatial location"""
#
# Start with a list of actors to find homes for based on any that
# were not in any zones at all
moved = self.unzoned_actors
self.unzoned_actors = set()
#
# Find all the actors that are no longer in the right zone
# and remove them from their current zone
for z in self.zones:
for actor in z.actors.copy():
if not actor.isOverlapping(z):
z.removeActor(actor)
moved.add(actor)
#
# Now find the place for the moved actors
for actor in moved:
self.addActor(actor)
def clearActors(self):
"""Clear all the actors"""
self.clearActorsExceptTags([])
def clearActorsExceptTags(self, tags):
"""Clear all actors except the ones with a tag in the list of tags"""
for actor in self.getActors():
if actor.tag not in tags:
try:
self.removeActor(actor)
except UnknownActor:
# Can be called if a composite actor removes their own children
pass
for actor in list(self.unzoned_actors):
if actor.tag not in tags:
self.unzoned_actors.remove(actor)
def clearActorsWithTags(self, tags):
"""Clear all actors with a tag in the list of tags"""
for actor in self.getActors():
if actor.tag in tags:
try:
self.removeActor(actor)
except UnknownActor:
# Can be called if a composite actor removes their own children
pass
for actor in list(self.unzoned_actors):
            if actor.tag in tags:
self.unzoned_actors.remove(actor)
def addActor(self, actor):
"""Add an actor to the world"""
#
self.log.debug('Adding %s to world %s' % (actor.getNiceName(), self.name))
#
# Make sure the actor isn't already here
if self.hasActor(actor):
raise DuplicateActor('The actor %s is already in the world' % actor.getNiceName())
#
# Try to put the actor in the right zone
for z in self.zones:
if z.wouldContain(actor):
z.addActor(actor)
break
else:
# The actor is not in any zones, store for later
self.unzoned_actors.add(actor)
#
# Tell the actor about it
actor.addedToWorld(self)
#
self._actors_need_resorting = True
def removeActor(self, actor):
"""Remove the actor from the world"""
self.log.debug('Removing "%s" actor (%s)' % (actor.tag, actor.getNiceName()))
#
self._actors_need_resorting = True
#
# Try to remove from zones
for z in self.zones:
if z.hasActor(actor):
z.removeActor(actor)
break
else:
#
# We didn't find it in the zone - maybe in the unzoned
if actor in self.unzoned_actors:
self.unzoned_actors.remove(actor)
else:
raise UnknownActor('The actor %s was not found in the world' % actor)
#
# Tell the actor about it
actor.removedFromWorld(self)
def scheduleActorRemoval(self, actor):
"""Remove an actor at the end of the next update for the world
This method can be used to safely remove an actor from the world
during the execution of the world update. It can sometimes be
useful to do this when inside logic that is iterating over actors
or inside the updateWorld event loop.
"""
self._scheduled_deletions.add(actor)
def hasActor(self, actor):
"""Return True if this actor is in the world"""
#
# Try to remove from zones
for z in self.zones:
if z.hasActor(actor):
return True
#
# We didn't find it in the zone - maybe in the unzoned
return actor in self.unzoned_actors
def renderTo(self, renderer, interval):
"""Render all of our actors in active zones"""
#
# Watch out in case we need to reorder our actors
if self._actors_need_resorting:
self.log.debug('Sorting actors now')
self._sorted_actors = renderer.orderActors(self.getActors())
self._actors_need_resorting = False
#
camera = renderer.getCamera()
self.processEvent((events.E_BEFORE_RENDER, self))
#
# Render all of the actors
for actor in self._sorted_actors:
if actor.active and actor.visible:
profiler.PROFILER.start(actor, 'renderActor')
try:
actor.renderTo(renderer, interval)
                except Exception as err:
self.log.error('Failed rendering "%s" actor "%s": %s' % (actor.tag, actor, err))
raise
profiler.PROFILER.end()
#
self.processEvent((events.E_AFTER_RENDER, self))
def setZoom(self, zoom, x, y):
"""Set the visual zoom on this world to zoom centered on x, y"""
for actor in self.getActors():
actor.setZoom(zoom)
### Events ###
def processEvents(self, events):
"""Handle the events"""
inhibited = set()
for (event, obj), actor in events:
if actor.active and not event in inhibited:
# Process the event
new_inhibits = actor.processEvent((event, obj))
# Record if we need to inhibit further events of a certain type
if new_inhibits:
inhibited.update(new_inhibits)
def activateWorld(self):
"""Called when the world is set as the current world"""
self.processEvent((events.E_ACTIVATE_WORLD, self))
def deactivateWorld(self):
"""Called when the world is deactivated"""
self.processEvent((events.E_DEACTIVATE_WORLD, self))
### Physics ###
def setPhysicsStepsize(self, interval):
"""Set the maximum step size for physics calculations"""
for z in self.zones:
z.setPhysicsStepsize(interval)
def setGlobalForce(self, force):
"""Set the global force for physics"""
for z in self.zones:
z.setGlobalForce(force)
def sleepPhysicsForActors(self, actors):
"""Tell the actors to go to sleep from a physics perspective
The actors will still be visible and will still be updated but they
will not update their physics. Useful for optimising when an actor
does not need to interact with the physics simulation for a while.
If an actor is unzoned then this will have no impact on them
"""
for actor in actors:
for z in self.zones:
if z.hasActor(actor):
z.sleepActor(actor)
def wakePhysicsForActors(self, actors):
"""Tell the actors to go to wake up from a physics perspective
Actors that were put to sleep (via sleepPhysicsForActors) will be woken
up and take part in the physics simulation again.
"""
for actor in actors:
for z in self.zones:
if z.hasActor(actor):
z.wakeActor(actor)
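# A minimal usage sketch (hypothetical zone/actor objects; see the zone and
# actor modules for their constructors):
#     w = World('main')
#     w.addZone(my_zone)
#     w.addActor(my_actor)
#     w.updateWorld(0.04)
#     w.renderTo(renderer, 0.04)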
|
smmosquera/serge
|
world.py
|
Python
|
lgpl-3.0
| 11,910
|
"""
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
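# A hedged usage note: the seed can be pinned from the environment before the
# test run, e.g. by exporting SKLEARN_SEED=42 on the invoking command line.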
|
jereze/scikit-learn
|
sklearn/__init__.py
|
Python
|
bsd-3-clause
| 3,038
|
#!/usr/bin/env python3
import pygame
import sys
import time
import os
from pygame.locals import *
from .button import Button
from .pygame_textinput import TextInput
from .globals import (screen_width, screen_height, FPS, fps_clock,
fontPath, assetsPath)
from .tool import (swap_c_is_chosing_color, swap_t_is_chosing_color,
game_quit, finish_balise, done_balise, undo_balise, add_balise,
add_more_balise, blit_on, undo_more_balise)
class MenuInit:
"""Menu for the init part representation.
Contains the information related to menu for the initialization of the robot.
Attributes:
surface: Surface on which the menu is blitted.
title: Title of the Menu.
text_input: Text input object in which the user can write coordinates.
color_input: Text input object in which the user can write colors.
buttons: List of buttons object.
"""
# surface should be a surface of the size of 350x950
def __init__(self, surface_e, screen):
"""Initialiaze the menu with a surface and the main screen pygame object"""
self.surface = surface_e
self.surface.fill((255, 255, 255))
self.title = "Initialization"
self.text_input = TextInput((1001, 140), 7)
self.color_input = TextInput((1161, 140), 7)
self.buttons = []
self.init_buttons()
self.init_menu(screen)
self.time_passed = 0
self.is_color_chosing = False
def draw_on(self, screen):
"""Draw the menu on the screen"""
# Render background
screen.blit(self.surface, (950, 0))
# Render buttons
for b in self.buttons:
b.draw(screen)
screen.blit(self.text_input.get_surface(), self.text_input.init_pos)
screen.blit(self.color_input.get_surface(), self.color_input.init_pos)
def init_menu(self, screen):
"""Set menu graphics at initialization."""
# Render texture background
brickTexture = os.path.join(assetsPath, 'textures/brick.jpg')
blit_on(self.surface, brickTexture)
# Render title
myFontPath = os.path.join(fontPath, 'Capture_it.ttf')
myfont = pygame.font.Font(myFontPath, 20)
title_surface = myfont.render(self.title, False, (255, 255, 255))
self.surface.blit(title_surface, (8, 0))
# Render instructions
myfont = pygame.font.SysFont("comicsansms", 15)
msg_surface = myfont.render("Enter coordinate of the balise in the form 0;899",
False, (0, 255, 55))
self.surface.blit(msg_surface, (8, 35))
msg_surface = myfont.render("min: 0 | max: 899 | unit: cm/px",
False, (0, 255, 55))
self.surface.blit(msg_surface, (8, 48))
msg_surface = myfont.render("Color: red, blue, green, yellow, purple",
False, (255, 155, 255))
self.surface.blit(msg_surface, (8, 65))
msg_surface = myfont.render(" orange, white",
False, (255, 155, 255))
self.surface.blit(msg_surface, (8, 79))
msg_surface = myfont.render("coordinate:",
False, (255, 255, 255))
self.surface.blit(msg_surface, (55, 110))
msg_surface = myfont.render("color:",
False, (255, 255, 255))
self.surface.blit(msg_surface, (210, 110))
# Render buttons
for b in self.buttons:
b.draw(screen)
def update(self, screen, game_engine, game_map, events):
"""Update core of the menu.
Args:
self: The current menu object.
game_engine: game Object that contains everything related to the core application.
game_map: Map object that represents the map part of the application.
events: List of externals events
"""
# update text
if self.is_color_chosing:
self.color_input.update(events)
else:
self.text_input.update(events)
self.time_passed += fps_clock.tick(FPS)
# update buttons
if self.time_passed >= 150:
for b in self.buttons:
b.update(screen, self, game_engine, game_map)
fps_clock.tick(30)
def init_buttons(self):
"""Initialize the list of the buttons."""
text_input_button = Button(1000, 140, 95, 30,
None,
None,
swap_t_is_chosing_color)
color_input_button = Button(1160, 140, 95, 30,
None,
None,
swap_c_is_chosing_color)
buttonQuitA = os.path.join(assetsPath, 'buttons/button_quit_a.png')
buttonQuit = os.path.join(assetsPath, 'buttons/button_quit.png')
button_quit = Button(1000, 800, 232, 93, buttonQuitA, buttonQuit,
game_quit)
buttonDoneA = os.path.join(assetsPath, 'buttons/button_done_a.png')
buttonDone = os.path.join(assetsPath, 'buttons/button_done.png')
button_done = Button(1000, 700, 232, 93,
buttonDoneA, buttonDone, done_balise)
buttonAddA = os.path.join(assetsPath, 'buttons/button_add_balise_a.png')
buttonAdd = os.path.join(assetsPath, 'buttons/button_add_balise.png')
button_add_balise = Button(1000, 200, 232, 93,
buttonAddA, buttonAdd, add_balise)
buttonUndoA = os.path.join(assetsPath,
'buttons/button_undo_balise_a.png')
buttonUndo = os.path.join(assetsPath, 'buttons/button_undo_balise.png')
button_undo = Button(1000, 300, 232, 93,
buttonUndoA, buttonUndo, undo_balise)
self.buttons.append(text_input_button)
self.buttons.append(color_input_button)
self.buttons.append(button_quit)
self.buttons.append(button_undo)
self.buttons.append(button_add_balise)
self.buttons.append(button_done)
def button_afterdone(self):
"""Change button configuration when the user has clicked on done button."""
self.buttons.pop()
self.buttons.pop()
self.buttons.pop()
buttonUndoA = os.path.join(assetsPath,
'buttons/button_undo_balise_a.png')
buttonUndo = os.path.join(assetsPath, 'buttons/button_undo_balise.png')
button_undo = Button(1000, 300, 232, 93,
buttonUndoA, buttonUndo, undo_more_balise)
buttonFinishA = os.path.join(assetsPath, 'buttons/button_finish_a.png')
buttonFinish = os.path.join(assetsPath, 'buttons/button_finish.png')
button_finish = Button(1000, 700, 232, 93,
buttonFinishA, buttonFinish, finish_balise)
buttonAddA = os.path.join(assetsPath, 'buttons/button_add_more_a.png')
buttonAdd = os.path.join(assetsPath, 'buttons/button_add_more.png')
button_add_more_balise = Button(1000, 200, 232, 93,
buttonAddA, buttonAdd, add_more_balise)
self.buttons.append(button_undo)
self.buttons.append(button_finish)
self.buttons.append(button_add_more_balise)
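# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It shows how this
# menu is likely driven from a main loop; `game_engine` and `game_map` stand
# in for the real application objects and are assumptions here. Only the
# pygame calls below are known APIs.
#
#   pygame.init()
#   screen = pygame.display.set_mode((screen_width, screen_height))
#   menu = MenuInit(pygame.Surface((350, 950)), screen)
#   while True:
#       events = pygame.event.get()
#       menu.update(screen, game_engine, game_map, events)
#       menu.draw_on(screen)
#       pygame.display.flip()
# ---------------------------------------------------------------------------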
|
pleeplee-robot/interface
|
pleepleeapp/menu_init.py
|
Python
|
mit
| 7,422
|
'''test pysftp.Connection.chdir - uses py.test'''
# pylint: disable=W0142
# pylint: disable=E1101
from common import *
def test_chdir_bad_dir(psftp):
'''try to chdir() to a non-existing remote dir'''
with pytest.raises(IOError):
psftp.chdir('i-dont-exist')
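# Hedged companion sketch (assumes the shared `psftp` fixture from common
# yields a connected pysftp.Connection and that '/tmp' exists remotely):
# def test_chdir_ok(psftp):
#     '''chdir() to an existing remote dir should update the remote cwd'''
#     psftp.chdir('/tmp')
#     assert psftp.getcwd() == '/tmp'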
|
Clean-Cole/pysftp
|
tests/test_chdir.py
|
Python
|
bsd-3-clause
| 278
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import job_search_histogram_search
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
def test_search_jobs_histogram(tenant):
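    # The histogram expression below counts jobs bucketed by their
    # base_compensation into the single range [12, 20); the units and scale
    # of the bucket bounds follow the Talent API histogram query language
    # (assumed here, see the histogramQueries reference for details).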
query = "count(base_compensation, [bucket(12, 20)])"
jobs = job_search_histogram_search.search_jobs(PROJECT_ID, tenant, query)
for job in jobs:
assert "projects/" in job
|
googleapis/python-talent
|
samples/snippets/job_search_histogram_search_test.py
|
Python
|
apache-2.0
| 903
|
# Copyright 2010-2012 RethinkDB, all rights reserved.
import query
import query_language_pb2 as p
import types
###################
# PRETTY PRINTING #
###################
# A note about pretty printing: The result of a pretty-print shouldn't contain
# newlines or tab characters. It may contain spaces.
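# For example (illustrative, inferred from ReprPrettyPrinter below): a bare
# literal pretty-prints "unwrapped" as `1` and is wrapped as `expr(1)` when a
# query object is required, while a term such as a table reference is already
# "wrapped" and prints as `db('test').table('t')`.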
PRETTY_PRINT_EXPR_WRAPPED = "wrapped"
PRETTY_PRINT_EXPR_UNWRAPPED = "unwrapped"
class PrettyPrinter(object):
def expr_wrapped(self, expr, backtrace_steps):
raise NotImplementedError()
def expr_unwrapped(self, expr, backtrace_steps):
raise NotImplementedError()
def write_query(self, query, backtrace_steps):
raise NotImplementedError()
def simple_string(self, string, backtrace_steps):
raise NotImplementedError()
class ReprPrettyPrinter(PrettyPrinter):
# This implementation has a lot of assertions so that it validates the
# implementations of `pretty_print()` on the various objects.
def expr_wrapped(self, expr, backtrace_steps):
assert isinstance(expr, query.ReadQuery)
assert isinstance(backtrace_steps, list)
string, wrapped = expr._inner.pretty_print(self)
assert "\n" not in string
if wrapped == PRETTY_PRINT_EXPR_UNWRAPPED:
string = "expr(%s)" % string
return string
def expr_unwrapped(self, expr, backtrace_steps):
assert isinstance(expr, query.ReadQuery)
assert isinstance(backtrace_steps, list)
string = expr._inner.pretty_print(self)[0]
assert "\n" not in string
return string
def write_query(self, wq, backtrace_steps):
assert isinstance(wq, query.WriteQuery)
assert isinstance(backtrace_steps, list)
string = wq._inner.pretty_print(self)
assert "\n" not in string
return string
def meta_query(self, mq, backtrace_steps):
assert isinstance(mq, query.MetaQuery)
assert isinstance(backtrace_steps, list)
string = mq._inner.pretty_print(self)
assert "\n" not in string
return string
def simple_string(self, string, backtrace_steps):
assert "\n" not in string
return string
#####################################
# DATABASE AND TABLE ADMINISTRATION #
#####################################
class MetaQueryInner(object):
def _write_meta_query(self, parent, opts):
raise NotImplementedError()
def pretty_print(self, printer):
raise NotImplementedError()
class DBCreate(MetaQueryInner):
def __init__(self, db_name):
assert isinstance(db_name, types.StringTypes)
self.db_name = db_name
def _write_meta_query(self, parent, opts):
parent.type = p.MetaQuery.CREATE_DB
parent.db_name = self.db_name
def pretty_print(self, printer):
return "db_create(%r)" % self.db_name
class DBDrop(MetaQueryInner):
def __init__(self, db_name):
assert isinstance(db_name, types.StringTypes)
self.db_name = db_name
def _write_meta_query(self, parent, opts):
parent.type = p.MetaQuery.DROP_DB
parent.db_name = self.db_name
def pretty_print(self, printer):
return "db_drop(%r)" % self.db_name
class DBList(MetaQueryInner):
def _write_meta_query(self, parent, opts):
parent.type = p.MetaQuery.LIST_DBS
def pretty_print(self, printer):
return "db_list()"
class TableCreate(MetaQueryInner):
def __init__(self, table_name, db_expr, primary_key, primary_datacenter, cache_size):
assert isinstance(table_name, types.StringTypes)
assert isinstance(db_expr, query.Database)
assert (not primary_key) or isinstance(primary_key, types.StringTypes)
assert (not primary_datacenter) or isinstance(primary_datacenter, types.StringTypes)
assert (not cache_size) or isinstance(cache_size, int)
self.table_name = table_name
self.db_expr = db_expr
self.primary_key = primary_key
self.primary_datacenter = primary_datacenter
self.cache_size = cache_size
def _write_meta_query(self, parent, opts):
parent.type = p.MetaQuery.CREATE_TABLE
parent.create_table.table_ref.db_name = self.db_expr.db_name
parent.create_table.table_ref.table_name = self.table_name
if self.primary_key:
parent.create_table.primary_key = self.primary_key
if self.primary_datacenter:
parent.create_table.datacenter = self.primary_datacenter
if self.cache_size:
parent.create_table.cache_size = self.cache_size
def pretty_print(self, printer):
return "db(%s).table_create(%r, primary_key=%r, primary_datacenter=%s)" % (
printer.simple_string(repr(self.db_expr.db_name), ["table_ref", "db_name"]),
self.table_name,
self.primary_key,
printer.simple_string(repr(self.primary_datacenter), ["datacenter"]))
class TableDrop(MetaQueryInner):
def __init__(self, table_name, db_expr):
assert isinstance(table_name, types.StringTypes)
assert isinstance(db_expr, query.Database)
self.table_name = table_name
self.db_expr = db_expr
def _write_meta_query(self, parent, opts):
parent.type = p.MetaQuery.DROP_TABLE
parent.drop_table.db_name = self.db_expr.db_name
parent.drop_table.table_name = self.table_name
def pretty_print(self, printer):
return "db(%s).table_drop(%s)" % (
printer.simple_string(repr(self.db_expr.db_name), ["db_name"]),
printer.simple_string(repr(self.table_name), ["table_name"])
)
class TableList(MetaQueryInner):
def __init__(self, db_expr):
assert isinstance(db_expr, query.Database)
self.db_expr = db_expr
def _write_meta_query(self, parent, opts):
parent.type = p.MetaQuery.LIST_TABLES
parent.db_name = self.db_expr.db_name
def pretty_print(self, printer):
return "db(%s).table_list()" % (
printer.simple_string(repr(self.db_expr.db_name), ["db_name"])
)
#################
# WRITE QUERIES #
#################
class WriteQueryInner(object):
def _write_write_query(self, parent, opts):
raise NotImplementedError()
def pretty_print(self, printer):
raise NotImplementedError()
class Insert(WriteQueryInner):
def __init__(self, table, entries, upsert):
self.table = table
self.upsert = upsert
if isinstance(entries, query.StreamExpression):
self.entries = [entries]
else:
self.entries = [query.expr(e) for e in entries]
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.INSERT
parent.insert.overwrite = self.upsert
self.table._write_ref_ast(parent.insert.table_ref, opts)
for entry in self.entries:
entry._inner._write_ast(parent.insert.terms.add(), opts)
def pretty_print(self, printer):
return "%s.insert([%s])" % (
printer.expr_wrapped(self.table, ["table_ref"]),
", ".join(printer.expr_unwrapped(e, ["term:%d" % i]) for i, e in enumerate(self.entries)))
class Delete(WriteQueryInner):
def __init__(self, parent_view):
self.parent_view = parent_view
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.DELETE
self.parent_view._inner._write_ast(parent.delete.view, opts)
def pretty_print(self, printer):
return "%s.delete()" % printer.expr_wrapped(self.parent_view, ["view"])
class Update(WriteQueryInner):
def __init__(self, parent_view, mapping, allow_nonatomic):
self.parent_view = parent_view
self.mapping = mapping
self.allow_nonatomic = allow_nonatomic
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.UPDATE
parent.atomic = not self.allow_nonatomic
self.parent_view._inner._write_ast(parent.update.view, opts)
self.mapping.write_mapping(parent.update.mapping, opts)
def pretty_print(self, printer):
return "%s.update(%s)" % (
printer.expr_wrapped(self.parent_view, ["view"]),
self.mapping._pretty_print(printer, ["modify_map"]))
class Mutate(WriteQueryInner):
def __init__(self, parent_view, mapping, allow_nonatomic):
self.parent_view = parent_view
self.mapping = mapping
self.allow_nonatomic = allow_nonatomic
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.MUTATE
parent.atomic = not self.allow_nonatomic
self.parent_view._inner._write_ast(parent.mutate.view, opts)
self.mapping.write_mapping(parent.mutate.mapping, opts)
def pretty_print(self, printer):
return "%s.replace(%s)" % (
printer.expr_wrapped(self.parent_view, ["view"]),
self.mapping._pretty_print(printer, ["modify_map"]))
class PointDelete(WriteQueryInner):
def __init__(self, parent_view):
self.parent_view = parent_view
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.POINTDELETE
self.parent_view._inner._write_point_ast(parent.point_delete, opts)
def pretty_print(self, printer):
return "%s.get(%s, attr_name='%s').delete()" % (
printer.expr_wrapped(self.parent_view._inner.table, ["view"]),
printer.simple_string(self.parent_view._inner.attr_name, ["keyname"]),
printer.expr_unwrapped(self.parent_view._inner.key, ["key"]))
class PointUpdate(WriteQueryInner):
def __init__(self, parent_view, mapping, allow_nonatomic):
self.parent_view = parent_view
self.mapping = mapping
self.allow_nonatomic = allow_nonatomic
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.POINTUPDATE
parent.atomic = not self.allow_nonatomic
self.mapping.write_mapping(parent.point_update.mapping, opts)
self.parent_view._inner._write_point_ast(parent.point_update, opts)
def pretty_print(self, printer):
return "%s.get(%s, attr_name='%s').update(%s)" % (
printer.expr_wrapped(self.parent_view._inner.table, ["view"]),
printer.expr_unwrapped(self.parent_view._inner.key, ["key"]),
printer.simple_string(self.parent_view._inner.attr_name, ["keyname"]),
self.mapping._pretty_print(printer, ["point_map"]))
class PointMutate(WriteQueryInner):
def __init__(self, parent_view, mapping, allow_nonatomic):
self.parent_view = parent_view
self.mapping = mapping
self.allow_nonatomic = allow_nonatomic
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.POINTMUTATE
parent.atomic = not self.allow_nonatomic
self.mapping.write_mapping(parent.point_mutate.mapping, opts)
self.parent_view._inner._write_point_ast(parent.point_mutate, opts)
def pretty_print(self, printer):
return "%s.get(%s, attr_name='%s').replace(%s)" % (
printer.expr_wrapped(self.parent_view._inner.table, ["view"]),
printer.expr_unwrapped(self.parent_view._inner.key, ["key"]),
printer.simple_string(self.parent_view._inner.attr_name, ["keyname"]),
self.mapping._pretty_print(printer, ["point_map"]))
################
# READ QUERIES #
################
class ExpressionInner(object):
def _write_ast(self, parent, opts):
raise NotImplementedError()
def _write_call(self, parent, builtin, opts, *args):
parent.type = p.Term.CALL
parent.call.builtin.type = builtin
for arg in args:
arg._inner._write_ast(parent.call.args.add(), opts)
return parent.call.builtin
def pretty_print(self, pp):
raise NotImplementedError()
class LiteralNull(ExpressionInner):
def _write_ast(self, parent, opts):
parent.type = p.Term.JSON_NULL
def pretty_print(self, printer):
return ("None", PRETTY_PRINT_EXPR_UNWRAPPED)
class LiteralBool(ExpressionInner):
def __init__(self, value):
self.value = value
def _write_ast(self, parent, opts):
parent.type = p.Term.BOOL
parent.valuebool = self.value
def pretty_print(self, printer):
return (repr(self.value), PRETTY_PRINT_EXPR_UNWRAPPED)
class LiteralNumber(ExpressionInner):
def __init__(self, value):
self.value = value
def _write_ast(self, parent, opts):
parent.type = p.Term.NUMBER
parent.number = self.value
def pretty_print(self, printer):
return (repr(self.value), PRETTY_PRINT_EXPR_UNWRAPPED)
class LiteralString(ExpressionInner):
def __init__(self, value):
self.value = value
def _write_ast(self, parent, opts):
parent.type = p.Term.STRING
parent.valuestring = self.value
def pretty_print(self, printer):
return (repr(self.value), PRETTY_PRINT_EXPR_UNWRAPPED)
class LiteralArray(ExpressionInner):
def __init__(self, value):
self.value = [query.expr(e) for e in value]
def _write_ast(self, parent, opts):
parent.type = p.Term.ARRAY
for e in self.value:
e._inner._write_ast(parent.array.add(), opts)
def pretty_print(self, printer):
return ("[" + ", ".join(printer.expr_unwrapped(e, ["elem:%d" % i]) for i, e in enumerate(self.value)) + "]", PRETTY_PRINT_EXPR_UNWRAPPED)
class LiteralObject(ExpressionInner):
def __init__(self, value):
for k, v in value.iteritems():
assert isinstance(k, types.StringTypes)
self.value = dict((k, query.expr(v)) for k, v in value.iteritems())
def _write_ast(self, parent, opts):
parent.type = p.Term.OBJECT
for k, v in self.value.iteritems():
pair = parent.object.add()
pair.var = k
v._inner._write_ast(pair.term, opts)
def pretty_print(self, printer):
return ("{" + ", ".join(repr(k) + ": " + printer.expr_unwrapped(v, ["key:%s" % k]) for k, v in self.value.iteritems()) + "}", PRETTY_PRINT_EXPR_UNWRAPPED)
class RdbError(ExpressionInner):
def __init__(self, msg):
        self.msg = msg
def _write_ast(self, parent, opts):
parent.type = p.Term.ERROR
        parent.error = self.msg
def pretty_print(self, printer):
return ("error('"+self.msg+"')", PRETTY_PRINT_EXPR_WRAPPED)
class Javascript(ExpressionInner):
def __init__(self, body):
self.body = body
def _write_ast(self, parent, opts):
parent.type = p.Term.JAVASCRIPT
parent.javascript = self.body
def pretty_print(self, printer):
return ("js(body=%r)" % self.body, PRETTY_PRINT_EXPR_WRAPPED)
class ToArray(ExpressionInner):
def __init__(self, stream):
self.stream = stream
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.STREAMTOARRAY, opts, self.stream)
def pretty_print(self, printer):
return ("%s.stream_to_array()" % printer.expr_wrapped(self.stream, ["arg:0"]), PRETTY_PRINT_EXPR_WRAPPED)
class Builtin(ExpressionInner):
# The subclass of `Builtin` is obligated to set the following attributes:
# `builtin` - the protocol buffer enumeration value for the builtin
# `format_string` - string with one `%s` per argument, for pretty-printing
# `arg_wrapped_flags` - array of `PRETTY_PRINT_EXPR_WRAPPED` or
# `PRETTY_PRINT_EXPR_UNWRAPPED`, one per argument. Determines whether
# arguments will be pretty printed with `expr_wrapped()` or
# `expr_unwrapped()`.
# `wrapped_flag` - either `PRETTY_PRINT_EXPR_WRAPPED` or
# `PRETTY_PRINT_EXPR_UNWRAPPED`, indicating whether the expression as
# a whole is wrapped or not.
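    # `Add` below is the minimal concrete example of this contract.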
def __init__(self, *args):
self.args = [query.expr(arg) for arg in args]
assert len(self.args) == len(self.arg_wrapped_flags)
def _write_ast(self, parent, opts):
self._write_call(parent, self.builtin, opts, *self.args)
def pretty_print(self, printer):
printed_args = []
assert len(self.args) == len(self.arg_wrapped_flags), "bad format for %r" % type(self)
for i, (arg, wrapped) in enumerate(zip(self.args, self.arg_wrapped_flags)):
if wrapped == PRETTY_PRINT_EXPR_WRAPPED:
printed_args.append(printer.expr_wrapped(arg, ["arg:%d" % i]))
elif wrapped == PRETTY_PRINT_EXPR_UNWRAPPED:
printed_args.append(printer.expr_unwrapped(arg, ["arg:%d" % i]))
else:
raise ValueError("bad format for `arg_wrapped_flags`")
return (self.format_string % tuple(printed_args), self.wrapped_flag)
class Add(Builtin):
builtin = p.Builtin.ADD
format_string = "(%s + %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Sub(Builtin):
builtin = p.Builtin.SUBTRACT
format_string = "(%s - %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Negate(Builtin):
builtin = p.Builtin.SUBTRACT
format_string = "-%s"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Mul(Builtin):
builtin = p.Builtin.MULTIPLY
format_string = "(%s * %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Div(Builtin):
builtin = p.Builtin.DIVIDE
format_string = "(%s / %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Mod(Builtin):
builtin = p.Builtin.MODULO
format_string = "(%s %% %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Any(Builtin):
builtin = p.Builtin.ANY
format_string = "(%s | %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Not(Builtin):
builtin = p.Builtin.NOT
format_string = "(~%s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Extend(Builtin):
builtin = p.Builtin.MAPMERGE
format_string = "%s.extend(%s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_UNWRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Append(Builtin):
builtin = p.Builtin.ARRAYAPPEND
format_string = "%s.append(%s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_UNWRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class Comparison(Builtin):
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.COMPARE, opts, *self.args)
builtin.comparison = self.comparison
class CompareLT(Comparison):
comparison = p.Builtin.LT
format_string = "(%s < %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class CompareLE(Comparison):
comparison = p.Builtin.LE
format_string = "(%s <= %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class CompareEQ(Comparison):
comparison = p.Builtin.EQ
format_string = "(%s == %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class CompareNE(Comparison):
comparison = p.Builtin.NE
format_string = "(%s != %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class CompareGT(Comparison):
comparison = p.Builtin.GT
format_string = "(%s > %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
class CompareGE(Comparison):
comparison = p.Builtin.GE
format_string = "(%s >= %s)"
arg_wrapped_flags = [PRETTY_PRINT_EXPR_WRAPPED, PRETTY_PRINT_EXPR_WRAPPED]
wrapped_flag = PRETTY_PRINT_EXPR_WRAPPED
# `All` is not a subclass of `Builtin` because it needs to work with an
# arbitrary number of arguments to support the syntactic sugar for `e.filter()`.
class All(ExpressionInner):
def __init__(self, *args):
self.args = [query.expr(a) for a in args]
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.ALL, opts, *self.args)
def pretty_print(self, printer):
return ("(" + " & ".join(printer.expr_wrapped(a, ["arg:%d" % i]) for i, a in enumerate(self.args)) + ")",
PRETTY_PRINT_EXPR_WRAPPED)
class Has(ExpressionInner):
def __init__(self, parent, key):
self.parent = query.expr(parent)
self.key = key
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.HASATTR, opts, self.parent)
parent.call.builtin.attr = self.key
def pretty_print(self, printer):
return ("%s.contains(%r)" % (printer.expr_wrapped(self.parent, ["arg:0"]), self.key), PRETTY_PRINT_EXPR_WRAPPED)
class Length(ExpressionInner):
def __init__(self, seq):
self.seq = seq
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.LENGTH, opts, self.seq)
def pretty_print(self, printer):
return ("%s.length()" % printer.expr_wrapped(self.seq, ["arg:0"]), PRETTY_PRINT_EXPR_WRAPPED)
class Attr(ExpressionInner):
def __init__(self, parent, key):
self.parent = query.expr(parent)
self.key = key
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.GETATTR, opts, self.parent)
parent.call.builtin.attr = self.key
def pretty_print(self, printer):
return ("%s[%s]" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.simple_string(repr(self.key), ["attr"])),
PRETTY_PRINT_EXPR_WRAPPED)
class GetAttrs(ExpressionInner):
def __init__(self, parent, attrs):
self.parent = parent
self.attrs = attrs
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.PICKATTRS, opts, self.parent)
parent.call.builtin.attrs.extend(self.attrs)
def pretty_print(self, printer):
return ("%s.pick(%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.simple_string(', '.join(self.attrs), ["attrs"])),
PRETTY_PRINT_EXPR_WRAPPED)
class UnGetAttrs(ExpressionInner):
def __init__(self, parent, attrs):
self.parent = parent
self.attrs = attrs
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.WITHOUT, opts, self.parent)
parent.call.builtin.attrs.extend(self.attrs)
def pretty_print(self, printer):
return ("%s.unpick(%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.simple_string(', '.join(self.attrs), ["attrs"])),
PRETTY_PRINT_EXPR_WRAPPED)
class ImplicitAttr(ExpressionInner):
def __init__(self, attr):
self.attr = attr
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.IMPLICIT_GETATTR, opts)
parent.call.builtin.attr = self.attr
def pretty_print(self, printer):
return ("r[%s]" % printer.simple_string(repr(self.attr), ["attr"]), PRETTY_PRINT_EXPR_WRAPPED)
class ImplicitVar(ExpressionInner):
def _write_ast(self, parent, opts):
parent.type = p.Term.IMPLICIT_VAR
def pretty_print(self, printer):
return ("r['@']", PRETTY_PRINT_EXPR_WRAPPED)
class ToStream(ExpressionInner):
def __init__(self, array):
self.array = query.expr(array)
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.ARRAYTOSTREAM, opts, self.array)
def pretty_print(self, printer):
return ("%s.array_to_stream()" % printer.expr_wrapped(self.array, ["arg:0"]), PRETTY_PRINT_EXPR_WRAPPED)
class Nth(ExpressionInner):
def __init__(self, stream, index):
self.stream = stream
self.index = query.expr(index)
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.NTH, opts, self.stream, self.index)
def pretty_print(self, printer):
return ("%s[%s]" % (printer.expr_wrapped(self.stream, ["arg:0"]), printer.expr_unwrapped(self.index, ["arg:1"])), PRETTY_PRINT_EXPR_WRAPPED)
class Slice(ExpressionInner):
def __init__(self, parent, start, stop):
self.parent = parent
self.start = query.expr(start)
self.stop = query.expr(stop)
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.SLICE, opts, self.parent, self.start, self.stop)
def pretty_print(self, printer):
return ("%s[%s:%s]" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.expr_unwrapped(self.start, ["arg:1"]),
printer.expr_unwrapped(self.stop, ["arg:2"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Skip(ExpressionInner):
def __init__(self, parent, offset):
self.parent = parent
self.offset = query.expr(offset)
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.SKIP, opts, self.parent, self.offset)
def pretty_print(self, printer):
return ("%s[%s:]" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.expr_unwrapped(self.offset, ["arg:1"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Filter(ExpressionInner):
def __init__(self, parent, selector):
self.parent = parent
self.selector = selector
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.FILTER, opts, self.parent)
self.selector.write_mapping(builtin.filter.predicate, opts)
def pretty_print(self, printer):
return ("%s.filter(%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
self.selector._pretty_print(printer, ["predicate"])),
PRETTY_PRINT_EXPR_WRAPPED)
class OrderBy(ExpressionInner):
def __init__(self, parent, ordering):
self.parent = parent
self.ordering = ordering
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.ORDERBY, opts, self.parent)
for key, val in self.ordering:
elem = parent.call.builtin.order_by.add()
elem.attr = key
elem.ascending = bool(val)
def pretty_print(self, printer):
return ("%s.orderby(%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.simple_string(", ".join(repr(attr) for attr in self.ordering), ["order_by"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Range(ExpressionInner):
def __init__(self, parent, lowerbound, upperbound, attrname):
self.parent = parent
self.lowerbound = query.expr(lowerbound)
self.upperbound = query.expr(upperbound)
self.attrname = attrname
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.RANGE, opts, self.parent)
builtin.range.attrname = self.attrname
self.lowerbound._inner._write_ast(builtin.range.lowerbound, opts)
self.upperbound._inner._write_ast(builtin.range.upperbound, opts)
def pretty_print(self, printer):
return ("%s.range(%s, %s%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.expr_unwrapped(self.lowerbound, ["lowerbound"]),
printer.expr_unwrapped(self.upperbound, ["upperbound"]),
"" if self.attrname == "id" else ", attr_name = %r" % self.attrname),
PRETTY_PRINT_EXPR_WRAPPED)
class Get(ExpressionInner):
def __init__(self, table, key, attr_name):
self.table = table
self.key = query.expr(key)
self.attr_name = attr_name
def _write_ast(self, parent, opts):
parent.type = p.Term.GETBYKEY
self._write_point_ast(parent.get_by_key, opts)
def _write_point_ast(self, parent, opts):
self.table._write_ref_ast(parent.table_ref, opts)
parent.attrname = self.attr_name
self.key._inner._write_ast(parent.key, opts)
def pretty_print(self, printer):
return ("%s.get(%s, attr_name = %r)" % (
printer.expr_wrapped(self.table, ["table_ref"]),
printer.expr_unwrapped(self.key, ["key"]),
self.attr_name),
PRETTY_PRINT_EXPR_WRAPPED)
class If(ExpressionInner):
def __init__(self, test, true_branch, false_branch):
# TODO: Actually support things other than `JSONExpression`
self.test = query.expr(test)
self.true_branch = query.expr(true_branch)
self.false_branch = query.expr(false_branch)
def _write_ast(self, parent, opts):
parent.type = p.Term.IF
self.test._inner._write_ast(parent.if_.test, opts)
self.true_branch._inner._write_ast(parent.if_.true_branch, opts)
self.false_branch._inner._write_ast(parent.if_.false_branch, opts)
def pretty_print(self, printer):
return ("branch(%s, %s, %s)" % (
printer.expr_unwrapped(self.test, ["test"]),
printer.expr_unwrapped(self.true_branch, ["true"]),
printer.expr_unwrapped(self.false_branch, ["false"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Map(ExpressionInner):
def __init__(self, parent, mapping):
self.parent = parent
self.mapping = mapping
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.MAP, opts, self.parent)
self.mapping.write_mapping(builtin.map.mapping, opts)
def pretty_print(self, printer):
return ("%s.map(%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
self.mapping._pretty_print(printer, ["mapping"])),
PRETTY_PRINT_EXPR_WRAPPED)
class ConcatMap(ExpressionInner):
def __init__(self, parent, mapping):
self.parent = parent
self.mapping = mapping
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.CONCATMAP, opts, self.parent)
self.mapping.write_mapping(builtin.concat_map.mapping, opts)
def pretty_print(self, printer):
return ("%s.concat_map(%s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
self.mapping._pretty_print(printer, ["mapping"])),
PRETTY_PRINT_EXPR_WRAPPED)
class GroupedMapReduce(ExpressionInner):
def __init__(self, input, group_mapping, value_mapping, reduction_base, reduction_func):
self.input = input
self.group_mapping = group_mapping
self.value_mapping = value_mapping
self.reduction_base = query.expr(reduction_base)
self.reduction_func = reduction_func
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.GROUPEDMAPREDUCE, opts, self.input)
self.group_mapping.write_mapping(builtin.grouped_map_reduce.group_mapping, opts)
self.value_mapping.write_mapping(builtin.grouped_map_reduce.value_mapping, opts)
self.reduction_func.write_reduction(builtin.grouped_map_reduce.reduction, self.reduction_base, opts)
def pretty_print(self, printer):
return ("%s.grouped_map_reduce(%s, %s, %s, %s)" % (
printer.expr_wrapped(self.input, ["arg:0"]),
self.group_mapping._pretty_print(printer, ["group_mapping"]),
self.value_mapping._pretty_print(printer, ["value_mapping"]),
printer.expr_unwrapped(self.reduction_base, ["reduction", "base"]),
self.reduction_func._pretty_print(printer, ["reduction", "body"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Distinct(ExpressionInner):
def __init__(self, parent):
self.parent = parent
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.DISTINCT, opts, self.parent)
def pretty_print(self, printer):
return ("%s.distinct()" % printer.expr_wrapped(self.parent, ["arg:0"]), PRETTY_PRINT_EXPR_WRAPPED)
class Reduce(ExpressionInner):
def __init__(self, parent, base, reduction):
self.parent = parent
self.base = query.expr(base)
self.reduction = reduction
def _write_ast(self, parent, opts):
builtin = self._write_call(parent, p.Builtin.REDUCE, opts, self.parent)
self.reduction.write_reduction(builtin.reduce, self.base, opts)
def pretty_print(self, printer):
return ("%s.reduce(%s, %s)" % (
printer.expr_wrapped(self.parent, ["arg:0"]),
printer.expr_unwrapped(self.base, ["reduce", "base"]),
self.reduction._pretty_print(printer, ["reduce", "body"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Let(ExpressionInner):
def __init__(self, expr, bindings):
self.expr = expr
self.bindings = []
for key in bindings:
self.bindings.append((key, query.expr(bindings[key])))
def _write_ast(self, parent, opts):
parent.type = p.Term.LET
for var, value in self.bindings:
binding = parent.let.binds.add()
binding.var = var
value._inner._write_ast(binding.term, opts)
self.expr._inner._write_ast(parent.let.expr, opts)
def pretty_print(self, printer):
return ("let(%s, %s)" % (
", ".join("(%r, %s)" % (var, printer.expr_unwrapped(val, ["bind:%s" % var]))
for var, val in self.bindings),
printer.expr_unwrapped(self.expr, ["expr"])),
PRETTY_PRINT_EXPR_WRAPPED)
class Var(ExpressionInner):
def __init__(self, name):
self.name = name
def _write_ast(self, parent, opts):
parent.type = p.Term.VAR
parent.var = self.name
def pretty_print(self, printer):
return ("%s" % self.name, PRETTY_PRINT_EXPR_WRAPPED)
class Table(ExpressionInner):
def __init__(self, table):
assert isinstance(table, query.Table)
self.table = table
def _write_ast(self, parent, opts):
parent.type = p.Term.TABLE
self.table._write_ref_ast(parent.table.table_ref, opts)
def pretty_print(self, printer):
res = ""
if self.table.db_expr:
res += "db(%r)." % self.table.db_expr.db_name
res += "table(%r)" % self.table.table_name
return (printer.simple_string(res, ['table_ref']), PRETTY_PRINT_EXPR_WRAPPED)
class ForEach(WriteQueryInner):
def __init__(self, expr, fun):
        self.expr = expr
        self.fun = fun
def _write_write_query(self, parent, opts):
parent.type = p.WriteQuery.FOREACH
self.expr._inner._write_ast(parent.for_each.stream, opts)
self.fun.write_foreach(parent.for_each, opts)
def pretty_print(self, printer):
return "%s.for_each(%s)" % (printer.expr_wrapped(self.expr, ['arg:0']),
self.fun._pretty_print_foreach_queries(printer, ['mapping']))
class Union(Builtin):
def __init__(self, *args):
self.args = args
def _write_ast(self, parent, opts):
self._write_call(parent, p.Builtin.UNION, opts, *self.args)
    def pretty_print(self, printer):
        printed_args = []
        for i, arg in enumerate(self.args):
            printed_args.append(printer.expr_unwrapped(arg, ["arg:%d" % i]))
        # Fixed: the original format string was missing its closing
        # parenthesis and repeated the raw first argument object.
        return ("%s.union(%s)" % (printed_args[0], ", ".join(printed_args[1:])),
                PRETTY_PRINT_EXPR_WRAPPED)
|
jfriedly/rethinkdb
|
drivers/python/rethinkdb/internal.py
|
Python
|
agpl-3.0
| 35,796
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import StringIO
from datetime import datetime
from flask import url_for
from udata.frontend import csv
from udata.models import Badge, Site, PUBLIC_SERVICE
from udata.core.dataset.factories import DatasetFactory, ResourceFactory
from udata.core.organization.factories import OrganizationFactory
from udata.core.site.views import current_site
from udata.core.reuse.factories import ReuseFactory
from udata.tests.frontend import FrontTestCase
class SiteViewsTest(FrontTestCase):
def test_site_global(self):
'''It should create and/or load the current site'''
with self.app.test_request_context(''):
self.app.preprocess_request()
self.assertIsInstance(current_site._get_current_object(), Site)
self.assertEqual(current_site.id, self.app.config['SITE_ID'])
def test_render_robotstxt(self):
'''It should render the robots.txt with all pages allowed.'''
response = self.get('/robots.txt')
self.assertEqual(response.data.split('\n'), [
'User-agent: *',
'Disallow: /fr/users/',
'Disallow: /en/users/',
'Disallow: /es/users/',
''
])
def test_render_home(self):
'''It should render the home page'''
for i in range(3):
org = OrganizationFactory()
DatasetFactory(organization=org)
ReuseFactory(organization=org)
current_site.settings.home_datasets = [
DatasetFactory() for _ in range(3)]
current_site.settings.home_reuses = [
ReuseFactory() for _ in range(3)]
response = self.get(url_for('site.home'))
self.assert200(response)
def test_render_home_no_data(self):
'''It should render the home page without data'''
response = self.get(url_for('site.home'))
self.assert200(response)
def test_render_dashboard(self):
'''It should render the search page'''
for i in range(3):
org = OrganizationFactory()
DatasetFactory(organization=org)
ReuseFactory(organization=org)
response = self.get(url_for('site.dashboard'))
self.assert200(response)
def test_render_dashboard_no_data(self):
'''It should render the search page without data'''
response = self.get(url_for('site.dashboard'))
self.assert200(response)
def test_datasets_csv(self):
with self.autoindex():
datasets = [DatasetFactory(resources=[ResourceFactory()])
for _ in range(5)]
hidden_dataset = DatasetFactory()
response = self.get(url_for('site.datasets_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.reuses', header)
rows = list(reader)
ids = [row[0] for row in rows]
self.assertEqual(len(rows), len(datasets))
for dataset in datasets:
self.assertIn(str(dataset.id), ids)
self.assertNotIn(str(hidden_dataset.id), ids)
def test_datasets_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
with self.autoindex():
filtered_datasets = [
DatasetFactory(resources=[ResourceFactory()],
tags=['selected'])
for _ in range(6)]
datasets = [DatasetFactory(resources=[ResourceFactory()])
for _ in range(3)]
hidden_dataset = DatasetFactory()
response = self.get(
url_for(
'site.datasets_csv', tag='selected', page_size=3, facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.reuses', header)
rows = list(reader)
ids = [row[0] for row in rows]
# Should ignore paging
self.assertEqual(len(rows), len(filtered_datasets))
        # Should pass filter
for dataset in filtered_datasets:
self.assertIn(str(dataset.id), ids)
for dataset in datasets:
self.assertNotIn(str(dataset.id), ids)
self.assertNotIn(str(hidden_dataset.id), ids)
def test_resources_csv(self):
with self.autoindex():
datasets = [
DatasetFactory(resources=[ResourceFactory(),
ResourceFactory()])
for _ in range(3)]
DatasetFactory()
response = self.get(url_for('site.resources_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'dataset.id')
self.assertIn('dataset.title', header)
self.assertIn('dataset.url', header)
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('filetype', header)
self.assertIn('url', header)
self.assertIn('created_at', header)
self.assertIn('modified', header)
self.assertIn('downloads', header)
resource_id_index = header.index('id')
rows = list(reader)
ids = [(row[0], row[resource_id_index]) for row in rows]
self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
for dataset in datasets:
for resource in dataset.resources:
self.assertIn((str(dataset.id), str(resource.id)), ids)
def test_resources_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
with self.autoindex():
filtered_datasets = [DatasetFactory(resources=[ResourceFactory(),
ResourceFactory()],
tags=['selected'])
for _ in range(6)]
[DatasetFactory(resources=[ResourceFactory()]) for _ in range(3)]
DatasetFactory()
response = self.get(
url_for('site.resources_csv', tag='selected', page_size=3,
facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'dataset.id')
self.assertIn('dataset.title', header)
self.assertIn('dataset.url', header)
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('filetype', header)
self.assertIn('url', header)
self.assertIn('created_at', header)
self.assertIn('modified', header)
self.assertIn('downloads', header)
resource_id_index = header.index('id')
rows = list(reader)
ids = [(row[0], row[resource_id_index]) for row in rows]
self.assertEqual(len(rows),
sum(len(d.resources) for d in filtered_datasets))
for dataset in filtered_datasets:
for resource in dataset.resources:
self.assertIn((str(dataset.id), str(resource.id)), ids)
def test_organizations_csv(self):
with self.autoindex():
orgs = [OrganizationFactory() for _ in range(5)]
hidden_org = OrganizationFactory(deleted=datetime.now())
response = self.get(url_for('site.organizations_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'id')
self.assertIn('name', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
self.assertEqual(len(rows), len(orgs))
for org in orgs:
self.assertIn(str(org.id), ids)
self.assertNotIn(str(hidden_org.id), ids)
def test_organizations_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
user = self.login()
with self.autoindex():
public_service_badge = Badge(
kind=PUBLIC_SERVICE,
created_by=user
)
filtered_orgs = [
OrganizationFactory(badges=[public_service_badge])
for _ in range(6)]
orgs = [OrganizationFactory() for _ in range(3)]
hidden_org = OrganizationFactory(deleted=datetime.now())
response = self.get(
url_for('site.organizations_csv', badge=PUBLIC_SERVICE,
page_size=3, facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'id')
self.assertIn('name', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
# Should ignore paging
self.assertEqual(len(rows), len(filtered_orgs))
        # Should pass filter
for org in filtered_orgs:
self.assertIn(str(org.id), ids)
for org in orgs:
self.assertNotIn(str(org.id), ids)
self.assertNotIn(str(hidden_org.id), ids)
def test_reuses_csv(self):
with self.autoindex():
reuses = [ReuseFactory(datasets=[DatasetFactory()])
for _ in range(5)]
hidden_reuse = ReuseFactory()
response = self.get(url_for('site.reuses_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
self.assertEqual(len(rows), len(reuses))
for reuse in reuses:
self.assertIn(str(reuse.id), ids)
self.assertNotIn(str(hidden_reuse.id), ids)
def test_reuses_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
with self.autoindex():
filtered_reuses = [
ReuseFactory(datasets=[DatasetFactory()], tags=['selected'])
for _ in range(6)]
reuses = [ReuseFactory(datasets=[DatasetFactory()])
for _ in range(3)]
hidden_reuse = ReuseFactory()
response = self.get(
url_for('site.reuses_csv', tag='selected', page_size=3,
facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO.StringIO(response.data)
reader = csv.get_reader(csvfile)
header = reader.next()
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
# Should ignore paging
self.assertEqual(len(rows), len(filtered_reuses))
        # Should pass filter
for reuse in filtered_reuses:
self.assertIn(str(reuse.id), ids)
for reuse in reuses:
self.assertNotIn(str(reuse.id), ids)
self.assertNotIn(str(hidden_reuse.id), ids)
def test_map_view(self):
response = self.get(url_for('site.map'))
self.assert200(response)
def test_terms_view(self):
response = self.client.get(url_for('site.terms'))
self.assert200(response)
|
jphnoel/udata
|
udata/tests/site/test_site_views.py
|
Python
|
agpl-3.0
| 13,868
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nssimpleacl6(base_resource) :
""" Configuration for simple ACL6 resource. """
def __init__(self) :
self._aclname = ""
self._td = 0
self._aclaction = ""
self._srcipv6 = ""
self._destport = 0
self._protocol = ""
self._ttl = 0
self._estsessions = False
self._hits = 0
self.___count = 0
@property
def aclname(self) :
"""Name for the simple ACL6 rule. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the simple ACL6 rule is created.<br/>Minimum length = 1.
"""
try :
return self._aclname
except Exception as e:
raise e
@aclname.setter
def aclname(self, aclname) :
"""Name for the simple ACL6 rule. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the simple ACL6 rule is created.<br/>Minimum length = 1
"""
try :
self._aclname = aclname
except Exception as e:
raise e
@property
def td(self) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
"""
try :
self._td = td
except Exception as e:
raise e
@property
def aclaction(self) :
"""Drop incoming IPv6 packets that match the simple ACL6 rule.<br/>Possible values = DENY.
"""
try :
return self._aclaction
except Exception as e:
raise e
@aclaction.setter
def aclaction(self, aclaction) :
"""Drop incoming IPv6 packets that match the simple ACL6 rule.<br/>Possible values = DENY
"""
try :
self._aclaction = aclaction
except Exception as e:
raise e
@property
def srcipv6(self) :
"""IP address to match against the source IP address of an incoming IPv6 packet.
"""
try :
return self._srcipv6
except Exception as e:
raise e
@srcipv6.setter
def srcipv6(self, srcipv6) :
"""IP address to match against the source IP address of an incoming IPv6 packet.
"""
try :
self._srcipv6 = srcipv6
except Exception as e:
raise e
@property
def destport(self) :
"""Port number to match against the destination port number of an incoming IPv6 packet.
Omitting the port number creates an all-ports simple ACL6 rule, which matches any port. In that case, you cannot create another simple ACL6 rule specifying a specific port and the same source IPv6 address.
"""
try :
return self._destport
except Exception as e:
raise e
@destport.setter
def destport(self, destport) :
"""Port number to match against the destination port number of an incoming IPv6 packet.
Omitting the port number creates an all-ports simple ACL6 rule, which matches any port. In that case, you cannot create another simple ACL6 rule specifying a specific port and the same source IPv6 address.
"""
try :
self._destport = destport
except Exception as e:
raise e
@property
def protocol(self) :
"""Protocol to match against the protocol of an incoming IPv6 packet. You must set this parameter if you set the Destination Port parameter.<br/>Possible values = TCP, UDP.
"""
try :
return self._protocol
except Exception as e:
raise e
@protocol.setter
def protocol(self, protocol) :
"""Protocol to match against the protocol of an incoming IPv6 packet. You must set this parameter if you set the Destination Port parameter.<br/>Possible values = TCP, UDP
"""
try :
self._protocol = protocol
except Exception as e:
raise e
@property
def ttl(self) :
"""Number of seconds, in multiples of four, after which the simple ACL6 rule expires. If you do not want the simple ACL6 rule to expire, do not specify a TTL value.<br/>Minimum length = 4<br/>Maximum length = 0x7FFFFFFF.
"""
try :
return self._ttl
except Exception as e:
raise e
@ttl.setter
def ttl(self, ttl) :
"""Number of seconds, in multiples of four, after which the simple ACL6 rule expires. If you do not want the simple ACL6 rule to expire, do not specify a TTL value.<br/>Minimum length = 4<br/>Maximum length = 0x7FFFFFFF
"""
try :
self._ttl = ttl
except Exception as e:
raise e
@property
def estsessions(self) :
try :
return self._estsessions
except Exception as e:
raise e
@estsessions.setter
def estsessions(self, estsessions) :
try :
self._estsessions = estsessions
except Exception as e:
raise e
@property
def hits(self) :
"""Number of hits for this SACL6 rule.
"""
try :
return self._hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nssimpleacl6_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nssimpleacl6
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.aclname) :
return str(self.aclname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add nssimpleacl6.
"""
try :
if type(resource) is not list :
addresource = nssimpleacl6()
addresource.aclname = resource.aclname
addresource.td = resource.td
addresource.aclaction = resource.aclaction
addresource.srcipv6 = resource.srcipv6
addresource.destport = resource.destport
addresource.protocol = resource.protocol
addresource.ttl = resource.ttl
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nssimpleacl6() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].aclname = resource[i].aclname
addresources[i].td = resource[i].td
addresources[i].aclaction = resource[i].aclaction
addresources[i].srcipv6 = resource[i].srcipv6
addresources[i].destport = resource[i].destport
addresources[i].protocol = resource[i].protocol
addresources[i].ttl = resource[i].ttl
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def clear(cls, client, resource="") :
""" Use this API to clear nssimpleacl6.
"""
try :
if type(resource) is not list :
clearresource = nssimpleacl6()
return clearresource.perform_operation(client,"clear")
else :
if (resource and len(resource) > 0) :
clearresources = [ nssimpleacl6() for _ in range(len(resource))]
result = cls.perform_operation_bulk_request(client, clearresources,"clear")
return result
except Exception as e :
raise e
@classmethod
def flush(cls, client, resource) :
""" Use this API to flush nssimpleacl6.
"""
try :
if type(resource) is not list :
flushresource = nssimpleacl6()
flushresource.estsessions = resource.estsessions
return flushresource.perform_operation(client,"flush")
else :
if (resource and len(resource) > 0) :
flushresources = [ nssimpleacl6() for _ in range(len(resource))]
for i in range(len(resource)) :
flushresources[i].estsessions = resource[i].estsessions
result = cls.perform_operation_bulk_request(client, flushresources,"flush")
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete nssimpleacl6.
"""
try :
if type(resource) is not list :
deleteresource = nssimpleacl6()
if type(resource) != type(deleteresource):
deleteresource.aclname = resource
else :
deleteresource.aclname = resource.aclname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nssimpleacl6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].aclname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nssimpleacl6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].aclname = resource[i].aclname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the nssimpleacl6 resources that are configured on netscaler.
"""
try :
if not name :
obj = nssimpleacl6()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = nssimpleacl6()
obj.aclname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nssimpleacl6() for _ in range(len(name))]
obj = [nssimpleacl6() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nssimpleacl6()
obj[i].aclname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of nssimpleacl6 resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nssimpleacl6()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
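	# Hedged usage sketch (connection details and credentials are
	# assumptions; nitro_service is this SDK's client class, typically
	# imported from nssrc.com.citrix.netscaler.nitro.service.nitro_service):
	#   client = nitro_service("192.0.2.10", "http")
	#   client.set_credential("nsroot", "nsroot")
	#   client.login()
	#   tcp_rules = nssimpleacl6.get_filtered(client, "protocol:TCP")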
@classmethod
def count(cls, client) :
""" Use this API to count the nssimpleacl6 resources configured on NetScaler.
"""
try :
obj = nssimpleacl6()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of nssimpleacl6 resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nssimpleacl6()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Protocol:
TCP = "TCP"
UDP = "UDP"
class Aclaction:
DENY = "DENY"
class nssimpleacl6_response(base_response) :
def __init__(self, length=1) :
self.nssimpleacl6 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nssimpleacl6 = [nssimpleacl6() for _ in range(length)]
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/ns/nssimpleacl6.py
|
Python
|
apache-2.0
| 12,674
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-10-16 00:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('anagrafica', '0041_auto_20160513_0954'),
('formazione', '0014_partecipazionecorsobase_automatica'),
]
operations = [
migrations.CreateModel(
name='InvitoCorsoBase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creazione', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('ultima_modifica', models.DateTimeField(auto_now=True, db_index=True)),
('corso', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='inviti', to='formazione.CorsoBase')),
('persona', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inviti_corsi', to='anagrafica.Persona')),
],
options={
'verbose_name': 'Invito di partecipazione a corso base',
'permissions': (('view_invitocorsobase', 'Can view invito partecipazione corso base'),),
'verbose_name_plural': 'Inviti di partecipazione a corso base',
'ordering': ('persona__nome', 'persona__cognome', 'persona__codice_fiscale'),
},
),
]
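# For reference, a hedged reconstruction of the model this migration creates,
# derived purely from the operations above (the real class lives in
# formazione.models and may carry extra behaviour):
#
#     class InvitoCorsoBase(models.Model):
#         creazione = models.DateTimeField(db_index=True, default=django.utils.timezone.now)
#         ultima_modifica = models.DateTimeField(auto_now=True, db_index=True)
#         corso = models.ForeignKey('formazione.CorsoBase', on_delete=models.PROTECT,
#                                   related_name='inviti')
#         persona = models.ForeignKey('anagrafica.Persona', on_delete=models.CASCADE,
#                                     related_name='inviti_corsi')
#
#         class Meta:
#             verbose_name = 'Invito di partecipazione a corso base'
#             verbose_name_plural = 'Inviti di partecipazione a corso base'
#             ordering = ('persona__nome', 'persona__cognome', 'persona__codice_fiscale')
#             permissions = (('view_invitocorsobase', 'Can view invito partecipazione corso base'),)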
|
CroceRossaItaliana/jorvik
|
formazione/migrations/0015_invitocorsobase.py
|
Python
|
gpl-3.0
| 1,528
|
import copy
from django.db import models
from django.db.models.fields import NOT_PROVIDED
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.translation import get_language
from django.utils.functional import lazy
LANGUAGE_CODE = 0
LANGUAGE_NAME = 1
def get_languages():
return getattr(settings, 'TRANSMETA_LANGUAGES', settings.LANGUAGES)
def get_real_fieldname(field, lang=None):
if lang is None:
lang = get_language().split('-')[0] # both 'en-US' and 'en' -> 'en'
return str('%s_%s' % (field, lang))
def get_field_language(real_field):
""" return language for a field. i.e. returns "en" for "name_en" """
return real_field.split('_')[1]
def get_fallback_fieldname(field, lang=None):
return get_real_fieldname(field, lang=fallback_language())
def get_real_fieldname_in_each_language(field):
return [get_real_fieldname(field, lang[LANGUAGE_CODE])
for lang in get_languages()]
def canonical_fieldname(db_field):
""" all "description_en", "description_fr", etc. field names will return "description" """
return getattr(db_field, 'original_fieldname', db_field.name) # original_fieldname is set by transmeta
def fallback_language():
""" returns fallback language """
return getattr(settings, 'TRANSMETA_DEFAULT_LANGUAGE', \
settings.LANGUAGE_CODE)
def get_all_translatable_fields(model, model_trans_fields=None, column_in_current_table=False):
""" returns all translatable fields in a model (including superclasses ones) """
if model_trans_fields is None:
model_trans_fields = set()
model_trans_fields.update(set(getattr(model._meta, 'translatable_fields', [])))
for parent in model.__bases__:
if getattr(parent, '_meta', None) and (not column_in_current_table or parent._meta.abstract):
get_all_translatable_fields(parent, model_trans_fields, column_in_current_table)
return tuple(model_trans_fields)
def default_value(field):
'''
When accessing the field by its original name, the value in the
current language is returned; if that is not set, the value in the
default (fallback) language is returned instead.
'''
def default_value_func(self):
attname = lambda x: get_real_fieldname(field, x)
if getattr(self, attname(get_language()), None):
result = getattr(self, attname(get_language()))
elif getattr(self, attname(get_language()[:2]), None):
result = getattr(self, attname(get_language()[:2]))
else:
default_language = fallback_language()
result = getattr(self, attname(default_language), None)
return result
return default_value_func
class TransMeta(models.base.ModelBase):
'''
Metaclass that allows a Django field to store a value for
every language. The syntax to use it is as follows:
class MyClass(models.Model):
__metaclass__ = transmeta.TransMeta
my_field = models.CharField(max_length=20)
my_i18n_field = models.CharField(max_length=30)
class Meta:
translate = ('my_i18n_field',)
Then we'll be able to access a specific language by
<field_name>_<language_code>. If just <field_name> is
accessed, we'll get the value of the current language,
or if null, the value in the default language.
'''
def __new__(cls, name, bases, attrs):
attrs = SortedDict(attrs)
if 'Meta' in attrs and hasattr(attrs['Meta'], 'translate'):
fields = attrs['Meta'].translate
delattr(attrs['Meta'], 'translate')
else:
new_class = super(TransMeta, cls).__new__(cls, name, bases, attrs)
# we inherit any translatable_fields defined in superclasses
abstract_model_bases = [base for base in bases if hasattr(base, '_meta') \
and base._meta.abstract]
translatable_fields = []
for base in abstract_model_bases:
if hasattr(base._meta, 'translatable_fields'):
translatable_fields.extend(list(base._meta.translatable_fields))
new_class._meta.translatable_fields = tuple(translatable_fields)
return new_class
if not isinstance(fields, tuple):
raise ImproperlyConfigured("Meta's translate attribute must be a tuple")
default_language = fallback_language()
for field in fields:
if not field in attrs or \
not isinstance(attrs[field], models.fields.Field):
raise ImproperlyConfigured(
"There is no field %(field)s in model %(name)s, "\
"as specified in Meta's translate attribute" % \
dict(field=field, name=name))
original_attr = attrs[field]
for lang in get_languages():
lang_code = lang[LANGUAGE_CODE]
lang_attr_name = get_real_fieldname(field, lang_code)
if not attrs.get(lang_attr_name):
# only add the attr if there is no localized field defined yet in the original class
lang_attr = copy.copy(original_attr)
lang_attr.original_fieldname = field
if lang_code != default_language:
# only the default language's field stays required
if not lang_attr.null and lang_attr.default is NOT_PROVIDED:
lang_attr.null = True
if not lang_attr.blank:
lang_attr.blank = True
if hasattr(lang_attr, 'verbose_name'):
lang_attr.verbose_name = LazyString(lang_attr.verbose_name, lang_code)
attrs[lang_attr_name] = lang_attr
del attrs[field]
attrs[field] = property(default_value(field))
new_class = super(TransMeta, cls).__new__(cls, name, bases, attrs)
if hasattr(new_class, '_meta'):
new_class._meta.translatable_fields = fields
return new_class
class LazyString(object):
def __init__(self, proxy, lang):
self.proxy = proxy
self.lang = lang
def __unicode__(self):
return u'%s %s' % (self.proxy, self.lang)
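# Hedged usage sketch (the Book model is illustrative, not part of this
# package). With e.g. TRANSMETA_LANGUAGES = (('en', 'English'), ('es', 'Spanish'))
# the metaclass expands one declared field into a column per language plus a
# fallback property under the original name:
#
#     class Book(models.Model):
#         __metaclass__ = TransMeta
#         title = models.CharField(max_length=100)
#
#         class Meta:
#             translate = ('title',)
#
#     book = Book(title_en='The Trial', title_es='El proceso')
#     book.title_es   # 'El proceso' (explicit per-language access)
#     book.title      # value for the active language, else the fallback language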
|
mcallistersean/django-transmeta
|
transmeta/__init__.py
|
Python
|
lgpl-3.0
| 6,437
|
# encoding: utf-8
import copy
from yast import import_module
import_module('UI')
from yast import *
class Table8MultiSelClient:
def main(self):
new_items = [
Item(Id(1), "Mercedes", 60000),
Item(Id(2), "Audi", 50000),
Item(Id(3), "VW", 40000),
Item(Id(4), "BMW", 60000),
Item(Id(5), "Porsche", 80000)
]
orig_items = [
Item(Id(1), "Chili", 6),
Item(Id(2), "Salami Baguette", None),
Item(Id(3), "Spaghetti", 8),
Item(Id(4), "Steak Sandwich", 12)
]
UI.OpenDialog(
VBox(
Heading("Today's menu"),
MinSize(
30,
10,
Table(
Id("menu"),
Opt("notify", "multiSelection"),
Header("Name", "Price"),
[
Item(Id(1), "Chili", 6),
Item(Id(2), "Salami Baguette", None),
Item(Id(3), "Spaghetti", 8),
Item(Id(4), "Steak Sandwich", 12)
]
)
),
Label("Get notified on 'Return' or double click"),
HBox(Label("Selected: "), TextEntry(Id("info"), "")),
HBox(
PushButton(Id("next"), "Change &Table Contents"),
PushButton(Id("cancel"), "&OK")
)
)
)
UI.ChangeWidget("menu", "SelectedItems", [1, 2])
sel = UI.QueryWidget("menu", "SelectedItems")
selItems = ""
for val in ycpbuiltins.foreach(sel):
selItems = selItems + " " + str(val)
UI.ChangeWidget("info", "Value", selItems)
event = {}
num = 0
while True:
selItems2 = ""
event = UI.WaitForEvent()
if event["ID"] == "menu":
sel = UI.QueryWidget("menu", "SelectedItems")
for val in ycpbuiltins.foreach(sel):
selItems2 = selItems2 + " " + str(val)
UI.ChangeWidget("info", "Value", selItems2)
elif event["ID"] == "next":
num = num + 1
items = []
if num % 2 == 1:
#items = copy.deepcopy(new_items)
items = new_items
else:
#items = copy.deepcopy(orig_items)
items = orig_items
# Change table contents
UI.ChangeWidget("menu", "Items", items)
sel = UI.QueryWidget("menu", "SelectedItems")
for val in ycpbuiltins.foreach(sel):
selItems2 = selItems2 + " " + val
UI.ChangeWidget("info", "Value", selItems2)
# Double check: Retrieve contents and dump to log
ycpbuiltins.y2milestone(
"New table content:\n%1",
UI.QueryWidget("menu", "Items")
)
if event["ID"] == "cancel":
break
sel = UI.QueryWidget("menu", "SelectedItems")
ycpbuiltins.y2milestone("Selected: %1", sel)
UI.CloseDialog()
Table8MultiSelClient().main()
|
yast/yast-python-bindings
|
examples/Table8-multiSel.py
|
Python
|
gpl-2.0
| 2,914
|
import urllib2
import json
import os
from fantasydota.lib.herodict import herodict
from fantasydota.lib.calibration import calibrate_all_hero_values, squeeze_values_together
from fantasydota.lib.constants import API_URL, DEFAULT_LEAGUE
def create_league(name, tournament_id, url):
FE_APIKEY = os.environ.get("FE_APIKEY")
if not FE_APIKEY:
print "Set your fantasy esport APIKEY environment variable"
data = {
'name': name,
'apiKey': FE_APIKEY,
'tournamentId': tournament_id,
'gameId': 1,
'pickeeDescription': 'Hero',
'periodDescription': 'Day',
'startingMoney': 50.0,
'transferInfo': {
'transferWildcard': True,
"transferBlockedDuringPeriod": False,
"transferDelayMinutes": 60,
"noWildcardForLateRegister": True,
'transferLimit': 5
},
"extraStats": ["wins", "picks", "bans"],
"periods": [
{"start": "2019-03-14 10:00", "end": "2019-03-14 21:00", "multiplier": 1},
{"start": "2019-03-15 10:00", "end": "2019-03-15 21:00", "multiplier": 1},
{"start": "2019-03-16 10:00", "end": "2019-03-16 21:00", "multiplier": 2},
{"start": "2019-03-17 10:00", "end": "2019-03-17 21:00", "multiplier": 2},
{"start": "2019-03-18 10:00", "end": "2019-03-18 21:00", "multiplier": 2},
{"start": "2019-03-19 10:00", "end": "2019-03-19 21:00", "multiplier": 2},
{"start": "2019-03-20 10:00", "end": "2019-03-20 21:00", "multiplier": 2},
{"start": "2019-03-22 10:00", "end": "2019-03-22 21:00", "multiplier": 2},
{"start": "2019-03-23 10:00", "end": "2019-03-23 21:00", "multiplier": 2},
{"start": "2019-03-24 10:00", "end": "2019-03-24 21:00", "multiplier": 3},
],
"url": url,
"applyPointsAtStartTime": False
}
# 60 group games, 31 mainstage
pickees = []
calib_tournaments = [10560, 10575, 10733, 10681, 10532, 10646, 10153]
hero_values = squeeze_values_together(calibrate_all_hero_values(calib_tournaments, 1549241783))
for id, name in herodict.items():
#pickees.append({"id": id, "name": name, "value": 9.0})#hero_values[id]})
pickees.append({"id": id, "name": name, "value": hero_values[id]})
data['pickees'] = pickees
try:
req = urllib2.Request(
API_URL + "leagues/", data=json.dumps(data), headers={
"Content-Type": "application/json"
}
)
response = urllib2.urlopen(req)
print(response.read())
except urllib2.HTTPError as e:
print(e.read())
try:
req = urllib2.Request(
API_URL + "leagues/" + str(DEFAULT_LEAGUE), data=json.dumps({'transferOpen': True, 'transferDelayMinutes': 60}), headers={
"Content-Type": "application/json",
"apiKey": FE_APIKEY
}
)
response = urllib2.urlopen(req)
print(response.read())
except urllib2.HTTPError as e:
print(e.read())
# req = urllib2.Request(
# API_URL + "leagues/1/startPeriod", data=json.dumps(data), headers={
# 'User-Agent': 'ubuntu:fantasydotaheroes:v1.0.0 (by /u/LePianoDentist)',
# "Content-Type": "application/json"
# }
# )
# response = urllib2.urlopen(req)
# print(response.read())
if __name__ == "__main__":
create_league("Dreamleague 11", 10681, "https://liquipedia.net/dota2/DreamLeague/Season_11")
|
ThePianoDentist/fantasy-dota-heroes
|
fantasydota/scripts/create_league.py
|
Python
|
apache-2.0
| 3,546
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon import NervanaObject
from neon.transforms import CrossEntropyBinary, Logistic
from neon.util.persist import load_obj
from neon.layers import Merge, Activation
import numpy as np
class Model(NervanaObject):
"""
Basic model class which stores a list of layers describing the model. Can train the layer
weights on a dataset, evaluate on a test set and serialize the model.
Additional functionality can be added to fit through callback functions.
Arguments:
layers (list): List of layers that compose a model.
name (str): Model name. Defaults to "model"
optimizer (Optimizer): Optimizer object which defines the learning rule
for updating model parameters (e.g. DescentMomentum, AdaDelta)
"""
def __init__(self, layers=[], name="model", optimizer=None):
super(Model, self).__init__(name)
self.optimizer = optimizer
self.params = None
self.states = None
self.epoch_index = 0
self.finished = False
self.initialized = False
self.layers = []
self.layers_to_optimize = []
for layer in layers:
if isinstance(layer, list):
self.layers.extend(layer)
else:
self.layers.append(layer)
for layer in self.layers:
if layer.has_params:
self.layers_to_optimize.append(layer)
elif isinstance(layer, Merge):
self.layers_to_optimize += layer.layers_to_optimize
def set_shortcut(self):
# infer whether bprop shortcut can be used on final activation
# self.cost should be set to run this otherwise do nothing
lastlayer = self.layers[-1]
try:
if self.cost.costfunc.__class__ is CrossEntropyBinary:
if (lastlayer.__class__ is Activation and
lastlayer.transform.__class__ is Logistic):
lastlayer.transform.set_shortcut(True)
except:
# if any attributes are not set or any other exception
# is thrown leave transform.shortcut as is (do nothing)
pass
def load_weights(self, weight_path):
"""
Loads the layer weights saved in weight_path from serialize().
Arguments:
weight_path (str): File containing serialized python dict with layer
weights and states.
"""
pdict = load_obj(weight_path)
self.epoch_index = pdict['epoch_index']
param_layers = [l for l in self.layers_to_optimize]
param_dict_list = pdict['layer_params_states']
for l, ps in zip(param_layers, param_dict_list):
l.set_params(ps['params'])
if 'states' in ps:
l.set_states(ps['states'])
def initialize(self, dataset, cost=None):
if self.initialized:
return
# Propagate shapes through the layers to configure
prev_input = dataset
for l in self.layers:
prev_input = l.configure(prev_input)
if cost is not None:
cost.initialize(prev_input)
# Now allocate space
for l in self.layers:
l.allocate()
self.initialized = True
def print_layers(self):
"""
Print network layers
"""
config_string = "Network Layers:"
for layer in self.layers:
config_string = config_string + "\n\t" + str(layer)
config_string = config_string + "\n"
print config_string
def fit(self, dataset, cost, optimizer, num_epochs, callbacks):
"""
Trains the model parameters on a dataset by minimizing the cost function through
gradient descent and updates the layer weights according to a learning rule
defined in optimizer.
Arguments:
dataset (iterator): An iterable of minibatches where each
element is a (x, y) tuple where x is the input data and y are the labels.
x is of dimension (feature_size, batch_size)
y is of dimension (label_size, batch_size)
Length of the iterator is num_batches which is num_data / batch_size
cost (Cost): Defines the function which the model is minimizing based
on the output of the last layer and the input labels
optimizer (Optimizer): Defines the learning rule for updating the model parameters
num_epochs: Number of times to iterate over the dataset.
"""
self.cost = cost
self.initialize(dataset, cost)
self.print_layers()
self.set_shortcut() # infer if bprop shortcut can be used
self.optimizer = optimizer
self.total_cost = self.be.empty((1, 1))
callbacks.on_train_begin(num_epochs)
while self.epoch_index < num_epochs and not self.finished:
callbacks.on_epoch_begin(self.epoch_index)
self._epoch_fit(dataset, callbacks)
callbacks.on_epoch_end(self.epoch_index)
self.epoch_index += 1
callbacks.on_train_end()
def _epoch_fit(self, dataset, callbacks):
"""
Helper function for fit which performs training on a dataset for one epoch.
Arguments:
dataset (iterable): Dataset iterator to perform fit on
"""
epoch = self.epoch_index
self.total_cost[:] = 0
# iterate through minibatches of the dataset
for mb_idx, (x, t) in enumerate(dataset):
callbacks.on_minibatch_begin(epoch, mb_idx)
x = self.fprop(x)
self.total_cost[:] = self.total_cost + self.cost.get_cost(x, t)
# deltas back propagate through layers
# for every layer in reverse except the 0th one
delta = self.cost.get_errors(x, t)
self.bprop(delta)
self.optimizer.optimize(self.layers_to_optimize, epoch=epoch)
callbacks.on_minibatch_end(epoch, mb_idx)
# total_cost accumulated the per-minibatch average costs, so dividing by
# the number of batches yields the mean cost per minibatch for the epoch
# (it was never a true grand total)
self.total_cost[:] = self.total_cost / dataset.nbatches
def fprop(self, x, inference=False):
"""
Forward propagates a minibatch x through the model.
Arguments:
x (Tensor): Input minibatch data
inference (bool): Flag for performing training or inference
Only affects batch norm and dropout layers.
Returns:
Tensor: the output of the final layer in the model
"""
for l in self.layers:
x = l.fprop(x, inference)
return x
def bprop(self, delta, do_acts=True):
"""
Back propagates the error of a minibatch through the model.
Arguments:
delta (Tensor): Derivative of cost with respect to the last layer's output
do_acts (bool): Whether to compute the output deltas of layer. The first layer
does not need to compute output deltas and so do_acts is set to False.
"""
for l in reversed(self.layers[1:]):
delta = l.bprop(delta)
return self.layers[0].bprop(delta, do_acts=False)
def eval(self, dataset, metric):
"""
Evaluates a model on a dataset according to an input metric.
Arguments:
datasets (iterable): dataset to evaluate on.
metric (Cost): what function to evaluate dataset on.
"""
self.initialize(dataset)
running_error = np.zeros((len(metric.metric_names)), dtype=np.float32)
nprocessed = 0
dataset.reset()
for x, t in dataset:
x = self.fprop(x, inference=True)
# This logic is for handling partial batch sizes at the end of the dataset
bsz = min(dataset.ndata - nprocessed, self.be.bsz)
metric(x, t)
running_error += metric.outputs.get()[:, :bsz].sum(axis=1)
nprocessed += bsz
running_error /= nprocessed
return running_error
def get_outputs(self, dataset):
"""
Get the activation outputs of the final model layer for the dataset
Arguments:
dataset (iterable): Dataset iterator to perform fit on
Returns:
Host numpy array: the output of the final layer for the entire Dataset
"""
self.initialize(dataset)
dataset.reset() # Move "pointer" back to beginning of dataset
n = dataset.nbatches
x = self.layers[-1].outputs
(dim0, dim1) = x.shape
Ypred = np.empty((n * dim1, dim0), dtype=x.dtype)
nsteps = dim1 / self.be.bsz
for idx, (x, t) in enumerate(dataset):
x = self.fprop(x, inference=True)
cur_batch = slice(idx * dim1, (idx + 1) * dim1)
Ypred[cur_batch] = x.get().T
# Handle the recurrent case
if nsteps != 1:
b, s = (self.be.bsz, nsteps)
Ypred = Ypred.reshape((n, b, s, -1)).transpose(1, 0, 2, 3).copy().reshape(n*b, s, -1)
return Ypred[:dataset.ndata]
def get_description(self):
"""
Gets a description of the model required to reconstruct the model with
no weights like from a yaml file.
Returns:
dict: Description of each component of the model.
"""
pdict = dict()
pdict['backend'] = 'gpu'
pdict['cost'] = self.cost.costfunc.__class__.__name__
pdict['layers'] = [l.get_description() for l in self.layers]
if self.optimizer:
pdict['optimizer'] = self.optimizer.get_description()
return pdict
# serialize tells how to write out the parameters we've learned so
# far and associate them with layers. it can ignore layers with no
# learned parameters. the model stores states to pass to the
# optimizers. if we're saving the model out for inference, we
# don't need to remember states.
def serialize(self, keep_states=True):
"""
Creates a dictionary storing the layer parameters and epochs complete.
Arguments:
keep_states (bool): Whether to save optimizer states.
Returns:
dict: Model data including layer parameters and epochs complete.
"""
pdict = dict()
params_states = [l.get_params_serialize(keep_states) for l in self.layers_to_optimize]
pdict['layer_params_states'] = params_states
# start training again on the next epoch
pdict['epoch_index'] = self.epoch_index + 1
return pdict
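# Hedged usage sketch: layer, cost and optimizer names follow neon's public
# API of this era, but train_set/valid_set are placeholders you must supply
# and exact constructor arguments may differ between releases:
#
#     from neon.backends import gen_backend
#     from neon.initializers import Gaussian
#     from neon.layers import Affine, GeneralizedCost
#     from neon.transforms import Rectlin, Softmax, CrossEntropyMulti
#     from neon.optimizers import GradientDescentMomentum
#     from neon.callbacks.callbacks import Callbacks
#
#     be = gen_backend(backend='cpu', batch_size=128)
#     mlp = Model(layers=[Affine(nout=100, init=Gaussian(scale=0.01), activation=Rectlin()),
#                         Affine(nout=10, init=Gaussian(scale=0.01), activation=Softmax())])
#     cost = GeneralizedCost(costfunc=CrossEntropyMulti())
#     opt = GradientDescentMomentum(0.1, momentum_coef=0.9)
#     mlp.fit(train_set, cost=cost, optimizer=opt, num_epochs=10,
#             callbacks=Callbacks(mlp, train_set))
#     preds = mlp.get_outputs(valid_set)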
|
jfsantos/neon
|
neon/models/model.py
|
Python
|
apache-2.0
| 11,462
|
import contextlib
import os
import sys
import tracemalloc
import unittest
from unittest.mock import patch
from test.support.script_helper import (assert_python_ok, assert_python_failure,
interpreter_requires_environment)
from test import support
try:
import _testcapi
except ImportError:
_testcapi = None
EMPTY_STRING_SIZE = sys.getsizeof(b'')
def get_frames(nframe, lineno_delta):
frames = []
frame = sys._getframe(1)
for index in range(nframe):
code = frame.f_code
lineno = frame.f_lineno + lineno_delta
frames.append((code.co_filename, lineno))
lineno_delta = 0
frame = frame.f_back
if frame is None:
break
return tuple(frames)
def allocate_bytes(size):
nframe = tracemalloc.get_traceback_limit()
bytes_len = (size - EMPTY_STRING_SIZE)
frames = get_frames(nframe, 1)
data = b'x' * bytes_len
return data, tracemalloc.Traceback(frames)
def create_snapshots():
traceback_limit = 2
# _tracemalloc._get_traces() returns a list of (domain, size,
# traceback_frames) tuples. traceback_frames is a tuple of (filename,
# line_number) tuples.
raw_traces = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
]
snapshot = tracemalloc.Snapshot(raw_traces, traceback_limit)
raw_traces2 = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 2, (('a.py', 5), ('b.py', 4))),
(2, 5000, (('a.py', 5), ('b.py', 4))),
(4, 400, (('c.py', 578),)),
]
snapshot2 = tracemalloc.Snapshot(raw_traces2, traceback_limit)
return (snapshot, snapshot2)
def frame(filename, lineno):
return tracemalloc._Frame((filename, lineno))
def traceback(*frames):
return tracemalloc.Traceback(frames)
def traceback_lineno(filename, lineno):
return traceback((filename, lineno))
def traceback_filename(filename):
return traceback_lineno(filename, 0)
class TestTracemallocEnabled(unittest.TestCase):
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
tracemalloc.start(1)
def tearDown(self):
tracemalloc.stop()
def test_get_tracemalloc_memory(self):
data = [allocate_bytes(123) for count in range(1000)]
size = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size, 0)
tracemalloc.clear_traces()
size2 = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size2, 0)
self.assertLessEqual(size2, size)
def test_get_object_traceback(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(traceback, obj_traceback)
def test_set_traceback_limit(self):
obj_size = 10
tracemalloc.stop()
self.assertRaises(ValueError, tracemalloc.start, -1)
tracemalloc.stop()
tracemalloc.start(10)
obj2, obj2_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj2)
self.assertEqual(len(traceback), 10)
self.assertEqual(traceback, obj2_traceback)
tracemalloc.stop()
tracemalloc.start(1)
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(len(traceback), 1)
self.assertEqual(traceback, obj_traceback)
def find_trace(self, traces, traceback):
for trace in traces:
if trace[2] == traceback._frames:
return trace
self.fail("trace not found")
def test_get_traces(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traces = tracemalloc._get_traces()
trace = self.find_trace(traces, obj_traceback)
self.assertIsInstance(trace, tuple)
domain, size, traceback = trace
self.assertEqual(size, obj_size)
self.assertEqual(traceback, obj_traceback._frames)
tracemalloc.stop()
self.assertEqual(tracemalloc._get_traces(), [])
def test_get_traces_intern_traceback(self):
# dummy wrappers to get more useful and identical frames in the traceback
def allocate_bytes2(size):
return allocate_bytes(size)
def allocate_bytes3(size):
return allocate_bytes2(size)
def allocate_bytes4(size):
return allocate_bytes3(size)
# Ensure that two identical tracebacks are not duplicated
tracemalloc.stop()
tracemalloc.start(4)
obj_size = 123
obj1, obj1_traceback = allocate_bytes4(obj_size)
obj2, obj2_traceback = allocate_bytes4(obj_size)
traces = tracemalloc._get_traces()
obj1_traceback._frames = tuple(reversed(obj1_traceback._frames))
obj2_traceback._frames = tuple(reversed(obj2_traceback._frames))
trace1 = self.find_trace(traces, obj1_traceback)
trace2 = self.find_trace(traces, obj2_traceback)
domain1, size1, traceback1 = trace1
domain2, size2, traceback2 = trace2
self.assertIs(traceback2, traceback1)
def test_get_traced_memory(self):
# Python allocates some internals objects, so the test must tolerate
# a small difference between the expected size and the real usage
max_error = 2048
# allocate one object
obj_size = 1024 * 1024
tracemalloc.clear_traces()
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
self.assertGreaterEqual(peak_size, size)
self.assertLessEqual(size - obj_size, max_error)
self.assertLessEqual(peak_size - size, max_error)
# destroy the object
obj = None
size2, peak_size2 = tracemalloc.get_traced_memory()
self.assertLess(size2, size)
self.assertGreaterEqual(size - size2, obj_size - max_error)
self.assertGreaterEqual(peak_size2, peak_size)
# clear_traces() must reset traced memory counters
tracemalloc.clear_traces()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
# allocate another object
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
# stop() also resets traced memory counters
tracemalloc.stop()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
def test_clear_traces(self):
obj, obj_traceback = allocate_bytes(123)
traceback = tracemalloc.get_object_traceback(obj)
self.assertIsNotNone(traceback)
tracemalloc.clear_traces()
traceback2 = tracemalloc.get_object_traceback(obj)
self.assertIsNone(traceback2)
def test_is_tracing(self):
tracemalloc.stop()
self.assertFalse(tracemalloc.is_tracing())
tracemalloc.start()
self.assertTrue(tracemalloc.is_tracing())
def test_snapshot(self):
obj, source = allocate_bytes(123)
# take a snapshot
snapshot = tracemalloc.take_snapshot()
# write on disk
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load from disk
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.traces, snapshot.traces)
# tracemalloc must be tracing memory allocations to take a snapshot
tracemalloc.stop()
with self.assertRaises(RuntimeError) as cm:
tracemalloc.take_snapshot()
self.assertEqual(str(cm.exception),
"the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
def test_snapshot_save_attr(self):
# take a snapshot with a new attribute
snapshot = tracemalloc.take_snapshot()
snapshot.test_attr = "new"
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load() should recreate the attribute
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.test_attr, "new")
def fork_child(self):
if not tracemalloc.is_tracing():
return 2
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
if traceback is None:
return 3
# everything is fine
return 0
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
def test_fork(self):
# check that tracemalloc is still working after fork
pid = os.fork()
if not pid:
# child
exitcode = 1
try:
exitcode = self.fork_child()
finally:
os._exit(exitcode)
else:
pid2, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
exitcode = os.WEXITSTATUS(status)
self.assertEqual(exitcode, 0)
class TestSnapshot(unittest.TestCase):
maxDiff = 4000
def test_create_snapshot(self):
raw_traces = [(0, 5, (('a.py', 2),))]
with contextlib.ExitStack() as stack:
stack.enter_context(patch.object(tracemalloc, 'is_tracing',
return_value=True))
stack.enter_context(patch.object(tracemalloc, 'get_traceback_limit',
return_value=5))
stack.enter_context(patch.object(tracemalloc, '_get_traces',
return_value=raw_traces))
snapshot = tracemalloc.take_snapshot()
self.assertEqual(snapshot.traceback_limit, 5)
self.assertEqual(len(snapshot.traces), 1)
trace = snapshot.traces[0]
self.assertEqual(trace.size, 5)
self.assertEqual(len(trace.traceback), 1)
self.assertEqual(trace.traceback[0].filename, 'a.py')
self.assertEqual(trace.traceback[0].lineno, 2)
def test_filter_traces(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "b.py")
filter2 = tracemalloc.Filter(True, "a.py", 2)
filter3 = tracemalloc.Filter(True, "a.py", 5)
original_traces = list(snapshot.traces._traces)
# exclude b.py
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(3, 7, (('<unknown>', 0),)),
])
# filter_traces() must not touch the original snapshot
self.assertEqual(snapshot.traces._traces, original_traces)
# only include two lines of a.py
snapshot4 = snapshot3.filter_traces((filter2, filter3))
self.assertEqual(snapshot4.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
])
# No filter: just duplicate the snapshot
snapshot5 = snapshot.filter_traces(())
self.assertIsNot(snapshot5, snapshot)
self.assertIsNot(snapshot5.traces, snapshot.traces)
self.assertEqual(snapshot5.traces, snapshot.traces)
self.assertRaises(TypeError, snapshot.filter_traces, filter1)
def test_filter_traces_domain(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "a.py", domain=1)
filter2 = tracemalloc.Filter(True, "a.py", domain=1)
original_traces = list(snapshot.traces._traces)
# exclude a.py of domain 1
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
# include only a.py of domain 1
snapshot3 = snapshot.filter_traces((filter2,))
self.assertEqual(snapshot3.traces._traces, [
(1, 2, (('a.py', 5), ('b.py', 4))),
])
def test_filter_traces_domain_filter(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.DomainFilter(False, domain=3)
filter2 = tracemalloc.DomainFilter(True, domain=3)
# exclude domain 2
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
])
# include domain 2
snapshot3 = snapshot.filter_traces((filter2,))
self.assertEqual(snapshot3.traces._traces, [
(3, 7, (('<unknown>', 0),)),
])
def test_snapshot_group_by_line(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_lineno('<unknown>', 0)
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_c_578 = traceback_lineno('c.py', 578)
# stats per file and line
stats1 = snapshot.statistics('lineno')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
# stats per file and line (2)
stats2 = snapshot2.statistics('lineno')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a_5, 5002, 2),
tracemalloc.Statistic(tb_c_578, 400, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
])
# stats diff per file and line
statistics = snapshot2.compare_to(snapshot, 'lineno')
self.assertEqual(statistics, [
tracemalloc.StatisticDiff(tb_a_5, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb_c_578, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b_1, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb_a_2, 30, 0, 3, 0),
])
def test_snapshot_group_by_file(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_c = traceback_filename('c.py')
# stats per file
stats1 = snapshot.statistics('filename')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b, 66, 1),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# stats per file (2)
stats2 = snapshot2.statistics('filename')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a, 5032, 5),
tracemalloc.Statistic(tb_c, 400, 1),
])
# stats diff per file
diff = snapshot2.compare_to(snapshot, 'filename')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb_a, 5032, 5000, 5, 1),
tracemalloc.StatisticDiff(tb_c, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
])
def test_snapshot_group_by_traceback(self):
snapshot, snapshot2 = create_snapshots()
# stats per traceback
tb1 = traceback(('a.py', 2), ('b.py', 4))
tb2 = traceback(('a.py', 5), ('b.py', 4))
tb3 = traceback(('b.py', 1))
tb4 = traceback(('<unknown>', 0))
stats1 = snapshot.statistics('traceback')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb3, 66, 1),
tracemalloc.Statistic(tb1, 30, 3),
tracemalloc.Statistic(tb4, 7, 1),
tracemalloc.Statistic(tb2, 2, 1),
])
# stats per traceback (2)
tb5 = traceback(('c.py', 578))
stats2 = snapshot2.statistics('traceback')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb2, 5002, 2),
tracemalloc.Statistic(tb5, 400, 1),
tracemalloc.Statistic(tb1, 30, 3),
])
# stats diff per traceback
diff = snapshot2.compare_to(snapshot, 'traceback')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb2, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb5, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb3, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb4, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb1, 30, 0, 3, 0),
])
self.assertRaises(ValueError,
snapshot.statistics, 'traceback', cumulative=True)
def test_snapshot_group_by_cumulative(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_b_4 = traceback_lineno('b.py', 4)
# per file
stats = snapshot.statistics('filename', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b, 98, 5),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# per line
stats = snapshot.statistics('lineno', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_b_4, 32, 4),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
def test_trace_format(self):
snapshot, snapshot2 = create_snapshots()
trace = snapshot.traces[0]
self.assertEqual(str(trace), 'b.py:4: 10 B')
traceback = trace.traceback
self.assertEqual(str(traceback), 'b.py:4')
frame = traceback[0]
self.assertEqual(str(frame), 'b.py:4')
def test_statistic_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot.statistics('lineno')
stat = stats[0]
self.assertEqual(str(stat),
'b.py:1: size=66 B, count=1, average=66 B')
def test_statistic_diff_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot2.compare_to(snapshot, 'lineno')
stat = stats[0]
self.assertEqual(str(stat),
'a.py:5: size=5002 B (+5000 B), count=2 (+1), average=2501 B')
def test_slices(self):
snapshot, snapshot2 = create_snapshots()
self.assertEqual(snapshot.traces[:2],
(snapshot.traces[0], snapshot.traces[1]))
traceback = snapshot.traces[0].traceback
self.assertEqual(traceback[:2],
(traceback[0], traceback[1]))
def test_format_traceback(self):
snapshot, snapshot2 = create_snapshots()
def getline(filename, lineno):
return ' <%s, %s>' % (filename, lineno)
with unittest.mock.patch('tracemalloc.linecache.getline',
side_effect=getline):
tb = snapshot.traces[0].traceback
self.assertEqual(tb.format(),
[' File "b.py", line 4',
' <b.py, 4>',
' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=1),
[' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=-1),
[' File "b.py", line 4',
' <b.py, 4>'])
self.assertEqual(tb.format(most_recent_first=True),
[' File "a.py", line 2',
' <a.py, 2>',
' File "b.py", line 4',
' <b.py, 4>'])
self.assertEqual(tb.format(limit=1, most_recent_first=True),
[' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=-1, most_recent_first=True),
[' File "b.py", line 4',
' <b.py, 4>'])
class TestFilters(unittest.TestCase):
maxDiff = 2048
def test_filter_attributes(self):
# test default values
f = tracemalloc.Filter(True, "abc")
self.assertEqual(f.inclusive, True)
self.assertEqual(f.filename_pattern, "abc")
self.assertIsNone(f.lineno)
self.assertEqual(f.all_frames, False)
# test custom values
f = tracemalloc.Filter(False, "test.py", 123, True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# parameters passed by keyword
f = tracemalloc.Filter(inclusive=False, filename_pattern="test.py", lineno=123, all_frames=True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# read-only attribute
self.assertRaises(AttributeError, setattr, f, "filename_pattern", "abc")
def test_filter_match(self):
# filter without line number
f = tracemalloc.Filter(True, "abc")
self.assertTrue(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc")
self.assertFalse(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number > 0
f = tracemalloc.Filter(True, "abc", 5)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 5)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number 0
f = tracemalloc.Filter(True, "abc", 0)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 0)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
def test_filter_match_filename(self):
def fnmatch(inclusive, filename, pattern):
f = tracemalloc.Filter(inclusive, pattern)
return f._match_frame(filename, 0)
self.assertTrue(fnmatch(True, "abc", "abc"))
self.assertFalse(fnmatch(True, "12356", "abc"))
self.assertFalse(fnmatch(True, "<unknown>", "abc"))
self.assertFalse(fnmatch(False, "abc", "abc"))
self.assertTrue(fnmatch(False, "12356", "abc"))
self.assertTrue(fnmatch(False, "<unknown>", "abc"))
def test_filter_match_filename_joker(self):
def fnmatch(filename, pattern):
filter = tracemalloc.Filter(True, pattern)
return filter._match_frame(filename, 0)
# empty string
self.assertFalse(fnmatch('abc', ''))
self.assertFalse(fnmatch('', 'abc'))
self.assertTrue(fnmatch('', ''))
self.assertTrue(fnmatch('', '*'))
# no *
self.assertTrue(fnmatch('abc', 'abc'))
self.assertFalse(fnmatch('abc', 'abcd'))
self.assertFalse(fnmatch('abc', 'def'))
# a*
self.assertTrue(fnmatch('abc', 'a*'))
self.assertTrue(fnmatch('abc', 'abc*'))
self.assertFalse(fnmatch('abc', 'b*'))
self.assertFalse(fnmatch('abc', 'abcd*'))
# a*b
self.assertTrue(fnmatch('abc', 'a*c'))
self.assertTrue(fnmatch('abcdcx', 'a*cx'))
self.assertFalse(fnmatch('abb', 'a*c'))
self.assertFalse(fnmatch('abcdce', 'a*cx'))
# a*b*c
self.assertTrue(fnmatch('abcde', 'a*c*e'))
self.assertTrue(fnmatch('abcbdefeg', 'a*bd*eg'))
self.assertFalse(fnmatch('abcdd', 'a*c*e'))
self.assertFalse(fnmatch('abcbdefef', 'a*bd*eg'))
# replace .pyc suffix with .py
self.assertTrue(fnmatch('a.pyc', 'a.py'))
self.assertTrue(fnmatch('a.py', 'a.pyc'))
if os.name == 'nt':
# case insensitive
self.assertTrue(fnmatch('aBC', 'ABc'))
self.assertTrue(fnmatch('aBcDe', 'Ab*dE'))
self.assertTrue(fnmatch('a.pyc', 'a.PY'))
self.assertTrue(fnmatch('a.py', 'a.PYC'))
else:
# case sensitive
self.assertFalse(fnmatch('aBC', 'ABc'))
self.assertFalse(fnmatch('aBcDe', 'Ab*dE'))
self.assertFalse(fnmatch('a.pyc', 'a.PY'))
self.assertFalse(fnmatch('a.py', 'a.PYC'))
if os.name == 'nt':
# normalize alternate separator "/" to the standard separator "\"
self.assertTrue(fnmatch(r'a/b', r'a\b'))
self.assertTrue(fnmatch(r'a\b', r'a/b'))
self.assertTrue(fnmatch(r'a/b\c', r'a\b/c'))
self.assertTrue(fnmatch(r'a/b/c', r'a\b\c'))
else:
# there is no alternate separator
self.assertFalse(fnmatch(r'a/b', r'a\b'))
self.assertFalse(fnmatch(r'a\b', r'a/b'))
self.assertFalse(fnmatch(r'a/b\c', r'a\b/c'))
self.assertFalse(fnmatch(r'a/b/c', r'a\b\c'))
# as of 3.5, .pyo is no longer munged to .py
self.assertFalse(fnmatch('a.pyo', 'a.py'))
def test_filter_match_trace(self):
t1 = (("a.py", 2), ("b.py", 3))
t2 = (("b.py", 4), ("b.py", 5))
t3 = (("c.py", 5), ('<unknown>', 0))
unknown = (('<unknown>', 0),)
f = tracemalloc.Filter(True, "b.py", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "b.py", all_frames=False)
self.assertFalse(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "<unknown>", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
class TestCommandLine(unittest.TestCase):
def test_env_var_disabled_by_default(self):
# not tracing by default
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
@unittest.skipIf(interpreter_requires_environment(),
'Cannot run -E tests when PYTHON env vars are required.')
def test_env_var_ignored_with_E(self):
"""PYTHON* environment variables must be ignored when -E is present."""
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
def test_env_var_enabled_at_startup(self):
# tracing at startup
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'True')
def test_env_limit(self):
# start and set the number of frames
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='10')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'10')
def check_env_var_invalid(self, nframe):
with support.SuppressCrashReport():
ok, stdout, stderr = assert_python_failure(
'-c', 'pass',
PYTHONTRACEMALLOC=str(nframe))
if b'ValueError: the number of frames must be in range' in stderr:
return
if b'PYTHONTRACEMALLOC: invalid number of frames' in stderr:
return
self.fail(f"unexpeced output: {stderr!a}")
def test_env_var_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
self.check_env_var_invalid(nframe)
def test_sys_xoptions(self):
for xoptions, nframe in (
('tracemalloc', 1),
('tracemalloc=1', 1),
('tracemalloc=15', 15),
):
with self.subTest(xoptions=xoptions, nframe=nframe):
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-X', xoptions, '-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, str(nframe).encode('ascii'))
def check_sys_xoptions_invalid(self, nframe):
args = ('-X', 'tracemalloc=%s' % nframe, '-c', 'pass')
with support.SuppressCrashReport():
ok, stdout, stderr = assert_python_failure(*args)
if b'ValueError: the number of frames must be in range' in stderr:
return
if b'-X tracemalloc=NFRAME: invalid number of frames' in stderr:
return
self.fail(f"unexpeced output: {stderr!a}")
def test_sys_xoptions_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
self.check_sys_xoptions_invalid(nframe)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_pymem_alloc0(self):
# Issue #21639: Check that PyMem_Malloc(0) with tracemalloc enabled
# does not crash.
code = 'import _testcapi; _testcapi.test_pymem_alloc0(); 1'
assert_python_ok('-X', 'tracemalloc', '-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
class TestCAPI(unittest.TestCase):
maxDiff = 80 * 20
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
self.domain = 5
self.size = 123
self.obj = allocate_bytes(self.size)[0]
# for the type "object", id(obj) is the address of its memory block.
# This type is not tracked by the garbage collector
self.ptr = id(self.obj)
def tearDown(self):
tracemalloc.stop()
def get_traceback(self):
frames = _testcapi.tracemalloc_get_traceback(self.domain, self.ptr)
if frames is not None:
return tracemalloc.Traceback(frames)
else:
return None
def track(self, release_gil=False, nframe=1):
frames = get_frames(nframe, 2)
_testcapi.tracemalloc_track(self.domain, self.ptr, self.size,
release_gil)
return frames
def untrack(self):
_testcapi.tracemalloc_untrack(self.domain, self.ptr)
def get_traced_memory(self):
# Get the traced size in the domain
snapshot = tracemalloc.take_snapshot()
domain_filter = tracemalloc.DomainFilter(True, self.domain)
snapshot = snapshot.filter_traces([domain_filter])
return sum(trace.size for trace in snapshot.traces)
def check_track(self, release_gil):
nframe = 5
tracemalloc.start(nframe)
size = tracemalloc.get_traced_memory()[0]
frames = self.track(release_gil, nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
self.assertEqual(self.get_traced_memory(), self.size)
def test_track(self):
self.check_track(False)
def test_track_without_gil(self):
# check that calling _PyTraceMalloc_Track() without holding the GIL
# works too
self.check_track(True)
def test_track_already_tracked(self):
nframe = 5
tracemalloc.start(nframe)
# track a first time
self.track()
# calling _PyTraceMalloc_Track() must remove the old trace and add
# a new trace with the new traceback
frames = self.track(nframe=nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
def test_untrack(self):
tracemalloc.start()
self.track()
self.assertIsNotNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), self.size)
# untrack must remove the trace
self.untrack()
self.assertIsNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), 0)
# calling _PyTraceMalloc_Untrack() multiple times must not crash
self.untrack()
self.untrack()
def test_stop_track(self):
tracemalloc.start()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.track()
self.assertIsNone(self.get_traceback())
def test_stop_untrack(self):
tracemalloc.start()
self.track()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.untrack()
def test_main():
support.run_unittest(
TestTracemallocEnabled,
TestSnapshot,
TestFilters,
TestCommandLine,
TestCAPI,
)
if __name__ == "__main__":
test_main()
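# Hedged sketch of the tracemalloc API exercised by the tests above (standard
# library; output naturally varies by interpreter and platform):
#
#     import tracemalloc
#     tracemalloc.start(10)                      # keep up to 10 frames per allocation
#     data = [bytes(1000) for _ in range(100)]
#     snapshot = tracemalloc.take_snapshot()
#     for stat in snapshot.statistics('lineno')[:3]:
#         print(stat)                            # e.g. "demo.py:5: size=100 KiB, count=100, ..."
#     tracemalloc.stop()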
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/test/test_tracemalloc.py
|
Python
|
gpl-2.0
| 37,313
|
import logging; logger = logging.getLogger("morse." + __name__)
import socket
import select
import json
import morse.core.middleware
from functools import partial
from morse.core import services
class MorseSocketServ:
def __init__(self, port, component_name):
# List of socket clients
self._client_sockets = []
self._message_size = 1024
self._component_name = component_name
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((str(socket.INADDR_ANY), port))
self._server.listen(1)
logger.info("Socket Mw Server now listening on port " + str(port) + \
" for component " + str(component_name) + ".")
def __del__(self):
""" Terminate the ports used to accept requests """
if self._client_sockets:
logger.info("Closing client sockets...")
for s in self._client_sockets:
s.close()
if self._server:
logger.info("Shutting down connections to server...")
self._server.shutdown(socket.SHUT_RDWR)
logger.info("Closing socket server...")
self._server.close()
del self._server
def main_export(self, encode, component_instance):
sockets = self._client_sockets + [self._server]
try:
inputready, outputready, exceptready = select.select(sockets, sockets, [], 0)
except (select.error, socket.error):
# inputready/outputready would be unbound below if select itself fails,
# so skip this polling cycle entirely
return
if self._server in inputready:
sock, addr = self._server.accept()
self._client_sockets.append(sock)
if outputready != []:
message = encode(component_instance)
for o in outputready:
try:
o.send(message)
except socket.error:
self.close_socket(o)
def main_read(self, decode, component_instance):
sockets = self._client_sockets + [self._server]
try:
inputready, outputready, exceptready = select.select(sockets, [], [], 0)
except (select.error, socket.error):
# skip this polling cycle rather than touch an unbound inputready below
return
for i in inputready:
if i == self._server:
sock, addr = self._server.accept()
if self._client_sockets != []:
logger.warning("More than one clients for an actuator!!")
self._client_sockets.append(sock)
else:
try:
msg = i.recv(self._message_size)
logger.debug("received msg %s" % msg)
if msg == b'':
self.close_socket(i)
else:
component_instance.local_data = decode(msg)
except socket.error as detail:
self.close_socket(i)
def close_socket(self, sock):
self._client_sockets.remove(sock)
try:
sock.close()
except socket.error as error_info:
logger.warning("Socket error catched while closing: " + str(error_info))
class MorseSocketClass(morse.core.middleware.MorseMiddlewareClass):
""" External communication using sockets. """
def __init__(self):
""" Initialize the socket connections """
# Call the constructor of the parent class
super(self.__class__,self).__init__()
# port -> MorseSocketServ
self._server_dict = {}
# component name (string) -> Port (int)
self._component_nameservice = {}
self._base_port = 60000
# Register two special services in the socket service manager:
# TODO To use a new special component instead of 'simulation',
# uncomment the line :-)
# bge.logic.morse_services.register_request_manager_mapping("streams", "SocketRequestManager")
services.do_service_registration(self.list_streams, 'simulation')
services.do_service_registration(self.get_stream_port, 'simulation')
services.do_service_registration(self.get_all_stream_ports, 'simulation')
def list_streams(self):
""" List all publish streams.
"""
return list(self._component_nameservice.keys())
def get_stream_port(self, name):
""" Get stream port for stream name.
"""
port = -1
try:
port = self._component_nameservice[name]
except KeyError:
pass
return port
def get_all_stream_ports(self):
""" Get stream ports for all streams.
"""
return self._component_nameservice
def register_component(self, component_name, component_instance, mw_data):
""" Open the port used to communicate by the specified component.
"""
# Create a socket server for this component
serv = MorseSocketServ(self._base_port, component_name)
self._server_dict[self._base_port] = serv
self._component_nameservice[component_name] = self._base_port
self._base_port = self._base_port + 1
# Extract the information for this middleware
# This will be tailored for each middleware according to its needs
function_name = mw_data[1]
fun = self._check_function_exists(function_name)
if fun != None:
# Choose what to do, depending on the function being used
# Data read functions
if function_name == "read_message":
component_instance.input_functions.append(partial(MorseSocketServ.main_read, serv, fun))
# Data write functions
elif function_name == "post_message":
component_instance.output_functions.append(partial(MorseSocketServ.main_export, serv, fun))
# If the function is external and has already been loaded before
else:
# Pass by mw_data the generated server
mw_data.append(serv)
self._add_method(mw_data, component_instance)
else:
# Pass by mw_data the generated server
mw_data.append(serv)
self._add_method(mw_data, component_instance)
def post_message(self, component_instance):
return (json.dumps(component_instance.local_data) + '\n').encode()
def read_message(self, msg):
return json.loads(msg.decode('utf-8'))
def print_open_sockets(self):
""" Display a list of all currently opened sockets."""
logger.info("Socket Mid: Currently opened sockets:")
for port, serv in self._server_dict.items():
logger.info(" - Port '{0}' = server '{1}'".format(port, serv))
|
Arkapravo/morse-0.6
|
src/morse/middleware/socket_mw.py
|
Python
|
bsd-3-clause
| 6,797
|
import datetime
import re
from fractions import Fraction
BUILD_VERSION = '1.0.0'
SQLALCHEMY_DATABASE_PASSWORD = ''
SQLALCHEMY_DATABASE_URI = ''
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_POOL_RECYCLE = 540
SMTP_SERVER = ''
SMTP_USERNAME = ''
SMTP_PASSWORD = ''
SMTP_PORT = ''
SMTP_ADDRESS = ''
ES_HOSTS = ['es1']
ES_AUTH = {}
BROKER_URL = 'redis://redis:6379/0'
REDIS_HOST = 'redis'
REDIS_PORT = '6379'
REDIS_DB = '1'
REDIS_ENABLED = 'false'
USER_TOKEN_LIFETIME = datetime.timedelta(days=60)
DB_SECRET_KEY = 'demosecretkey'
GCLOUD_REGISTRATION_TOKEN_LIFETIME = datetime.timedelta(hours=1)
TOKEN_SECRET = 'JWT Token Secret String'
GOOGLE_SECRET = ''
GOOGLE_CLIENT_ID = ''
GOOGLE_RECAPTCHA_SECRET = ''
GOOGLE_RECAPTCHA_SITE_KEY = ''
HOST_DOMAIN = 'api.trackit.io'
WEB_UI_HOST = 'trackit.io'
OAUTH_URIS = {
'auth_google_initiate': '/auth/google/initiate',
'auth_google_callback': '/auth/google/callback',
'key_registration_google_callback': '/gcloud/identity/callback'
}
GOOGLE_OAUTH = {
'client_id': '',
'client_secret': ''
}
AZURE_SUBSCRIPTION_ID = ''
AZURE_AD_USERNAME = ''
AZURE_AD_PASSWORD = ''
CONTACT_USER_EMAIL = ''
NEW_USER_EMAIL = ''
BUG_NOTIFICATION_EMAIL = ''
DEVELOPMENT = 'false'
EMAIL_TEMPLATE_DIR = '/usr/trackit/templates'
CLIENT_BILLING_BUCKET = ''
IMPORT_BILLING_AWS_KEY = ''
IMPORT_BILLING_AWS_SECRET = ''
LOCAL_BILLS_DIR = '/root/api/.csv/'
ACCOUNT_DEFAULT_ENABLED = 'true'
ACCOUNT_DEFAULT_CREDENTIALS = {
'email': u'admin',
'password': u'admin',
'firstname': u'Admin',
'lastname': u'Admin',
'admin': True,
}
BILLING_FILE_REGEX = re.compile(
r'(?:^|/)(?P<basename>(?P<account_id>\d+)'
r'-aws-billing-detailed-line-items-with-resources-and-tags-'
r'(?P<date>\d{4}-\d{2})\.csv\.zip)$'
)
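# Illustrative example (not in the original file): a key this regex is meant
# to accept, with the named groups it captures:
#
#   m = BILLING_FILE_REGEX.search(
#       'bucket-prefix/123456789012-aws-billing-detailed-line-items'
#       '-with-resources-and-tags-2017-03.csv.zip')
#   # m.group('account_id') == '123456789012'
#   # m.group('date')       == '2017-03'
#   # m.group('basename')   == the full file name after the '/'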
import imp
import sys
import os
import json
#TODO: Handle the more complex cases.
options = [
"BUILD_VERSION",
"SQLALCHEMY_DATABASE_PASSWORD",
"SQLALCHEMY_DATABASE_URI",
"SQLALCHEMY_POOL_RECYCLE",
"TOKEN_SECRET",
"DB_SECRET_KEY",
"SMTP_SERVER",
"SMTP_USERNAME",
"SMTP_PASSWORD",
"SMTP_PORT",
"SMTP_ADDRESS",
"AZURE_SUBSCRIPTION_ID",
"AZURE_AD_USERNAME",
"AZURE_AD_PASSWORD",
"CONTACT_USER_EMAIL",
"NEW_USER_EMAIL",
"BUG_NOTIFICATION_EMAIL",
"ACCOUNT_DEFAULT_ENABLED",
"ES_HOSTS",
"ES_AUTH",
"BROKER_URL",
"TOKEN_SECRET",
"GOOGLE_SECRET",
"GOOGLE_CLIENT_ID",
"GOOGLE_OAUTH",
"GOOGLE_RECAPTCHA_SECRET",
"GOOGLE_RECAPTCHA_SITE_KEY",
"HOST_DOMAIN",
"WEB_UI_HOST",
"OAUTH_URIS",
"GOOGLE_OAUTH",
"DEVELOPMENT",
"REDIS_HOST",
"REDIS_PORT",
"REDIS_DB",
"REDIS_ENABLED",
"CLIENT_BILLING_BUCKET",
"IMPORT_BILLING_AWS_KEY",
"IMPORT_BILLING_AWS_SECRET",
]
config_module = sys.modules[__name__]
for option in options:
env_option = ('TRACKIT_%s' % option)
if env_option in os.environ:
if isinstance(getattr(config_module, option), str):
setattr(config_module, option, os.environ[env_option])
else:
try:
setattr(sys.modules[__name__], option, json.loads(os.environ[env_option]))
except ValueError:
pass
try:
config_overrides = imp.load_source('config_overrides', '/etc/trackitio.py')
for config_name in dir(config_overrides):
if config_name.upper() == config_name:
setattr(sys.modules[__name__], config_name, getattr(config_overrides, config_name))
except IOError:
pass
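# Illustrative note (not in the original file): the loop above overrides any
# listed option from a TRACKIT_-prefixed environment variable; options whose
# default is not a string are parsed with json.loads. For example (shell
# syntax is an assumption):
#
#   export TRACKIT_SQLALCHEMY_DATABASE_PASSWORD=secret   # plain string copy
#   export TRACKIT_ES_HOSTS='["es1", "es2"]'             # json.loads() applied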
HOST = "%s" % (HOST_DOMAIN, )
if not SQLALCHEMY_DATABASE_URI:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://root:%s@mysql/trackitio?charset=utf8' % SQLALCHEMY_DATABASE_PASSWORD
_TRUTHY_VALUES = ('y', 'yes', '1', 't', 'true', 'on', 'o', 'oui')
DEVELOPMENT = DEVELOPMENT.lower() in _TRUTHY_VALUES
REDIS_ENABLED = REDIS_ENABLED.lower() in _TRUTHY_VALUES
ACCOUNT_DEFAULT_ENABLED = ACCOUNT_DEFAULT_ENABLED.lower() in _TRUTHY_VALUES
for option in options:
print('{}={}'.format(option, getattr(config_module, option)))
if not IMPORT_BILLING_AWS_KEY or not IMPORT_BILLING_AWS_SECRET:
    IMPORT_BILLING_AWS_KEY = None
    IMPORT_BILLING_AWS_SECRET = None
if not CLIENT_BILLING_BUCKET:
    CLIENT_BILLING_BUCKET = None
if not SMTP_ADDRESS:
    SMTP_ADDRESS = None
|
giubil/trackit
|
api/files/api/config.py
|
Python
|
apache-2.0
| 4,470
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
|
FederatedAI/FATE
|
examples/pipeline/data_transform/pipeline-data-transform-dense.py
|
Python
|
apache-2.0
| 2,334
|
import os, sys
if sys.platform == 'darwin':
# Setup sys.path so we can build and test a built version without
    # installing in the system site-packages, which require root access.
builddir = [os.path.join('build', name) for name in os.listdir('build')
if name.startswith('lib.%s' % sys.platform)]
sys.path = builddir + sys.path
|
nirs/pyfribidi
|
syspath.py
|
Python
|
gpl-2.0
| 360
|
#########################################################################
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#########################################################################
import os, sys, re, string, math, datetime, time, pkgutil
import common_params as cp
import specific_params as sp
import common_functions as cf
output_format = []
for cat in range(cp.NUM_CATS-1):
output_format.append(cp.CAT_STR[cat])
workbook = ""
fname_prefix = ""
results_app_table = {} # app, igid, bfm, outcome,
num_injections_app_table = {} # app, igid, bfm, num_injections
runtime_app_table = {} # app, igid, bfm, runtime
runtime_app_nt_table = {} # app, igid, bfm, runtime without Timeouts
results_kname_table = {} # app, kname, igid, bfm, outcome,
results_kiid_table = {} # app, kname, kid, igid, bfm, outcome,
def check_and_create_nested_dict(dict_name, k1, k2, k3, k4="", k5="", k6=""):
if k1 not in dict_name:
dict_name[k1] = {}
if k2 not in dict_name[k1]:
dict_name[k1][k2] = {}
if k3 not in dict_name[k1][k2]:
dict_name[k1][k2][k3] = 0 if k4 == "" else {}
if k4 == "":
return
if k4 not in dict_name[k1][k2][k3]:
dict_name[k1][k2][k3][k4] = 0 if k5 == "" else {}
if k5 == "":
return
if k5 not in dict_name[k1][k2][k3][k4]:
dict_name[k1][k2][k3][k4][k5] = 0 if k6 == "" else {}
if k6 == "":
return
if k6 not in dict_name[k1][k2][k3][k4][k5]:
dict_name[k1][k2][k3][k4][k5][k6] = 0
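###############################################################################
# Illustrative example (not in the original file) of the dictionary shape the
# helper above produces:
#   d = {}
#   check_and_create_nested_dict(d, "app", 1, 2, "outcome")
#   # d == {"app": {1: {2: {"outcome": 0}}}}
###############################################################################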
###############################################################################
# Add the sassifi injection result to the results*table dictionary
###############################################################################
def add(app, kname, kiid, igid, bfm, outcome, runtime):
check_and_create_nested_dict(results_app_table, app, igid, bfm, outcome)
results_app_table[app][igid][bfm][outcome] += 1
check_and_create_nested_dict(num_injections_app_table, app, igid, bfm)
num_injections_app_table[app][igid][bfm] += 1
check_and_create_nested_dict(runtime_app_table, app, igid, bfm)
runtime_app_table[app][igid][bfm] += runtime
if outcome != cp.TIMEOUT:
check_and_create_nested_dict(runtime_app_nt_table, app, igid, bfm)
runtime_app_nt_table[app][igid][bfm] += runtime
check_and_create_nested_dict(results_kname_table, app, kname, igid, bfm, outcome)
results_kname_table[app][kname][igid][bfm][outcome] += 1
check_and_create_nested_dict(results_kiid_table, app, kname, kiid, igid, bfm, outcome)
results_kiid_table[app][kname][kiid][igid][bfm][outcome] += 1
###############################################################################
# inst_fraction contains the fraction of IADD, FADD, IMAD, FFMA, ISETP, etc.
# instructions per application
###############################################################################
inst_fraction = {}
inst_count = {}
def populate_inst_fraction():
global inst_fraction
for app in results_app_table:
inst_counts = cf.get_total_counts(cf.read_inst_counts(sp.app_dir[app], app))
total = cf.get_total_insts(cf.read_inst_counts(sp.app_dir[app], app), False)
inst_fraction[app] = [total] + [1.0*i/total for i in inst_counts]
inst_count[app] = inst_counts
###############################################################################
# Print instruction distribution to a worksheet in the xlsx file
###############################################################################
def print_inst_fractions_worksheet():
worksheet = workbook.add_worksheet("Instruction Fractions")
row = 0
worksheet.write_row(row, 0, ["App", "Total"] + cf.get_inst_count_format().split(':')[2:])
for app in inst_fraction:
row += 1
worksheet.write(row, 0, app)
worksheet.write_row(row, 1, inst_fraction[app])
###############################################################################
# Print instruction distribution to a txt file
###############################################################################
def print_inst_fractions_txt():
f = open(fname_prefix + "instruction-fractions.txt", "w")
f.write("\t".join(["App", "Total"] + cf.get_inst_count_format().split(':')[2:]) + "\n")
for app in inst_fraction:
f.write("\t".join([app] + map(str, inst_fraction[app])) + "\n")
f.close()
def parse_results_file(app, inj_mode, igid, bfm):
    fname = sp.app_log_dir[app] + "results-mode" + inj_mode + "-igid" + str(igid) + ".bfm" + str(bfm) + "." + str(sp.NUM_INJECTIONS) + ".txt"
    try:
        rf = open(fname, "r")
    except IOError:
        print "Error opening file: " + fname
        print "It is possible that no injections were performed for app=%s, inj_mode=%s, igid=%s, bfm=%s " % (app, inj_mode, str(igid), str(bfm))
        return
num_lines = 0
for line in rf: # for each injection site
#Example line: _Z22bpnn_layerforward_CUDAPfS_S_S_ii-0-26605491-0.506809798834-0.560204950825:..:MOV:773546:17:0.759537:3:dmesg:value_before_value_after,
#kname-kcount-iid-opid-bid:pc:opcode:tid:injBID:runtime_sec:outcome_category:dmesg:value_before:value_after
words = line.split(":")
inj_site_info = words[0].split("-")
[kname, invocation_index, opcode, injBID, runtime, outcome] = [inj_site_info[0], int(inj_site_info[1]), words[2], int(words[4]), float(words[5]), int(words[6])]
if igid == "rf":
add(app, kname, invocation_index, igid, bfm, outcome, runtime)
else:
if opcode != "":
add(app, kname, invocation_index, igid, bfm, outcome, runtime)
num_lines += 1
rf.close()
if num_lines == 0 and app in results_app_table and os.stat(sp.app_log_dir[app] + "injection-list/mode" + inj_mode + "-igid" + str(igid) + ".bfm" + str(bfm) + "." + str(sp.NUM_INJECTIONS) + ".txt").st_size != 0:
print "%s, inj_mode=%s, igid=%d, bfm=%d not done" %(app, inj_mode, igid, bfm)
###################################################################################
# Parse results files and populate summary to results table
###################################################################################
def parse_results_apps(typ):
for app in sp.parse_apps:
if typ == cp.INST_VALUE_MODE:
for igid in sp.parse_inst_value_igid_bfm_map:
for bfm in sp.parse_inst_value_igid_bfm_map[igid]:
parse_results_file(app, typ, igid, bfm)
elif typ == cp.INST_ADDRESS_MODE:
for igid in sp.parse_inst_address_igid_bfm_map:
for bfm in sp.parse_inst_address_igid_bfm_map[igid]:
parse_results_file(app, typ, igid, bfm)
else:
for bfm in sp.parse_rf_bfm_list:
parse_results_file(app, typ, "rf", bfm)
###############################################################################
# Convert a dictionary to list
# input: d (dictionary), s (size)
###############################################################################
def to_list(d, s):
# if a specific category is not found then make it zero
l = []
for i in range(1,s-1):
if i not in d:
d[i] = 0
l.append(d[i])
return l
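###############################################################################
# Illustrative example (not in the original file): to_list fills in missing
# outcome categories with zero, e.g.
#   to_list({1: 5, 3: 2}, 6)  ->  [5, 0, 2, 0]   (categories 1 through 4)
###############################################################################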
###############################################################################
# Helper function
###############################################################################
def get_igid_list(inj_mode):
if inj_mode == cp.INST_VALUE_MODE:
return sp.parse_inst_value_igid_bfm_map
elif inj_mode == cp.INST_ADDRESS_MODE:
return sp.parse_inst_address_igid_bfm_map
else: # if inj_mode == cp.RF_MODE:
return ["rf"]
def get_bfm_list(inj_mode, igid):
if inj_mode == cp.INST_VALUE_MODE:
return sp.parse_inst_value_igid_bfm_map[igid]
elif inj_mode == cp.INST_ADDRESS_MODE:
return sp.parse_inst_address_igid_bfm_map[igid]
else: # if inj_mode == cp.RF_MODE:
return sp.parse_rf_bfm_list
def get_igid_str(inj_mode, igid):
if inj_mode == cp.INST_VALUE_MODE or inj_mode == cp.INST_ADDRESS_MODE:
return cp.IGID_STR[igid]
else: # if inj_mode == cp.RF_MODE:
return "rf"
###############################################################################
# Print Stats to a worksheet in the xlsx file
###############################################################################
def print_stats_worksheet(typ):
ws2 = workbook.add_worksheet("Stats")
ws2.write_row(0, 0, ["App", "IGID", "Injection Model", "Num Jobs", "Total Runtime", "Total Runtime without Timeouts"])
row = 1
for app in num_injections_app_table:
ws2.write(row, 0, app)
igid_list = get_igid_list(typ)
for igid in igid_list:
ws2.write(row, 1, get_igid_str(typ, igid))
bfm_list = get_bfm_list(typ, igid)
for bfm in bfm_list:
if igid in num_injections_app_table[app]:
if bfm in num_injections_app_table[app][igid]:
ws2.write_row(row, 2, [cp.EM_STR[bfm], num_injections_app_table[app][igid][bfm], runtime_app_table[app][igid][bfm], runtime_app_nt_table[app][igid][bfm]])
else:
                        ws2.write_row(row, 2, [cp.EM_STR[bfm], 0, 0, 0])
row += 1
###############################################################################
# Print Stats to a txt file
###############################################################################
def print_stats_txt(typ):
f = open(fname_prefix + "stats.txt", "w")
f.write("\t".join(["App", "IGID", "Injection Model", "Num Jobs", "Total Runtime", "Total Runtime without Timeouts"]) + "\n")
for app in num_injections_app_table:
f.write(app + "\t")
igid_list = get_igid_list(typ)
for igid in igid_list:
f.write(get_igid_str(typ, igid) + "\t")
bfm_list = get_bfm_list(typ, igid)
for bfm in bfm_list:
if igid in num_injections_app_table[app]:
if bfm in num_injections_app_table[app][igid]:
f.write("\t".join([cp.EM_STR[bfm], str(num_injections_app_table[app][igid][bfm]), str(runtime_app_table[app][igid][bfm]), str(runtime_app_nt_table[app][igid][bfm])]) + "\n")
else:
f.write("\t".join([cp.EM_STR[bfm], "0", "0"] + "\n"))
f.close()
###############################################################################
# Print detailed SASSIFI Results for analysis to a worksheet in a text file
###############################################################################
def print_detailed_sassifi_results_txt(typ):
f = open(fname_prefix + "SASSIFI_details.txt", "w")
f.write("\t".join(["App", "IGID", "Injection Model"] + output_format) + "\n")
for app in results_app_table:
f.write(app + "\t") # write app name
igid_list = get_igid_list(typ)
for igid in igid_list:
f.write(get_igid_str(typ, igid) + "\t")
bfm_list = get_bfm_list(typ, igid)
for bfm in bfm_list:
written = False
if igid in results_app_table[app]:
if bfm in results_app_table[app][igid]:
f.write("\t".join([cp.EM_STR[bfm]] + map(str,to_list(results_app_table[app][igid][bfm], cp.NUM_CATS))) + "\n")
written = True
if not written:
f.write("\t".join([cp.EM_STR[bfm]] + map(str,to_list({}, cp.NUM_CATS))))
f.close()
###############################################################################
# Print detailed SASSIFI Results for analysis to a worksheet in the xlsx file
###############################################################################
def print_detailed_sassifi_results_worksheet(typ):
ws0 = workbook.add_worksheet("SASSIFI Details")
ws0.write_row(0, 0, ["App", "IGID", "Injection Model"] + output_format)
row0 = 1
for app in results_app_table:
ws0.write(row0, 0, app) # write app name
igid_list = get_igid_list(typ)
for igid in igid_list:
ws0.write(row0, 1, get_igid_str(typ, igid))
bfm_list = get_bfm_list(typ, igid)
for bfm in bfm_list:
written = False
if igid in results_app_table[app]:
if bfm in results_app_table[app][igid]:
ws0.write_row(row0, 2, [cp.EM_STR[bfm]] + to_list(results_app_table[app][igid][bfm], cp.NUM_CATS))
row0 += 1
written = True
if not written:
ws0.write_row(row0, 2, [cp.EM_STR[bfm]] + to_list({}, cp.NUM_CATS))
row0 += 1
###############################################################################
# Print detailed SASSIFI Results on per kernel basis for analysis to a
# worksheet in the xlsx file
###############################################################################
def print_detailed_sassifi_kernel_results_worksheet(typ):
ws0 = workbook.add_worksheet("SASSIFI Kernel Details")
ws0.write_row(0, 0, ["App", "kernel", "IGID", "Injection Model"] + output_format)
row0 = 1
for app in results_kname_table:
ws0.write(row0, 0, app) # write app name
for kname in results_kname_table[app]:
ws0.write(row0, 1, kname) # write app name
igid_list = get_igid_list(typ)
for igid in igid_list:
ws0.write(row0, 2, get_igid_str(typ, igid))
bfm_list = get_bfm_list(typ, igid)
for bfm in bfm_list:
written = False
if igid in results_kname_table[app][kname]:
if bfm in results_kname_table[app][kname][igid]:
ws0.write_row(row0, 3, [cp.EM_STR[bfm]] + to_list(results_kname_table[app][kname][igid][bfm], cp.NUM_CATS))
row0 += 1
written = True
if not written:
ws0.write_row(row0, 3, [cp.EM_STR[bfm]] + to_list({}, cp.NUM_CATS))
row0 += 1
def print_usage():
print "Usage: \n python parse_results.py rf/inst_value/inst_address"
exit(1)
###############################################################################
# Main function that processes files, analyzes results and prints them to an
# xlsx file
###############################################################################
def main():
if len(sys.argv) != 2:
print_usage()
inj_type = sys.argv[1] # inst_value or inst_address or rf
parse_results_apps(inj_type) # parse sassifi results into local data structures
# populate instruction fractions
if inj_type == "inst_value" or inj_type == "inst_address":
populate_inst_fraction()
if pkgutil.find_loader('xlsxwriter') is not None:
import xlsxwriter
workbook_name = sp.logs_base_dir + "results/results_" + inj_type + "_" + str(sp.NUM_INJECTIONS) + ".xlsx"
os.system("rm -f " + workbook_name)
global workbook
workbook = xlsxwriter.Workbook(workbook_name)
if inj_type == "inst_value" or inj_type == "inst_address":
print_inst_fractions_worksheet()
print_detailed_sassifi_results_worksheet(sys.argv[1])
print_detailed_sassifi_kernel_results_worksheet(sys.argv[1])
print_stats_worksheet(sys.argv[1])
workbook.close()
print "Results: %s" %workbook_name
else:
global fname_prefix
fname_prefix = sp.logs_base_dir + "results/results_" + inj_type + "_" + str(sp.NUM_INJECTIONS) + "_"
if inj_type == "inst_value" or inj_type == "inst_address":
print_inst_fractions_txt()
print_detailed_sassifi_results_txt(sys.argv[1])
print_stats_txt(sys.argv[1])
print "Results: %s" %(sp.logs_base_dir + "results/")
if __name__ == "__main__":
main()
|
NVlabs/sassifi
|
scripts/parse_results.py
|
Python
|
bsd-3-clause
| 16,150
|
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2013, 2016. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
import ssl
import requests
def get_unverified_context():
"""
Get an unverified ssl context. Used to disable the server certificate
verification.
@return: unverified ssl context.
"""
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
return context
def get_unverified_session():
"""
Get a requests session with cert verification disabled.
Also disable the insecure warnings message.
Note this is not recommended in production code.
@return: a requests session with verification disabled.
"""
session = requests.session()
session.verify = False
requests.packages.urllib3.disable_warnings()
return session
|
pgbidkar/vsphere-automation-sdk-python
|
samples/vsphere/common/ssl_helper.py
|
Python
|
mit
| 1,311
|
# -*- coding: utf-8 -*-
from ..config import ARGS_ALWAYS
from .logger import logger
def init_cmd(args):
cmd = [args.executor_path, args.subcommand] \
+ ARGS_ALWAYS.get(args.subcommand, [])
logger.debug("patch_through_args: %s", args.patch_through_args)
for pt_arg in args.patch_through_args:
if pt_arg not in cmd:
cmd.append(pt_arg)
return cmd
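# Illustrative sketch (not in the original file; the argument values are
# assumptions): with executor_path="docker", subcommand="run",
# ARGS_ALWAYS == {"run": ["--rm"]} and patch_through_args == ["-it", "--rm"],
# init_cmd returns ["docker", "run", "--rm", "-it"] -- the duplicate "--rm"
# is skipped by the membership check above.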
|
joernhees/userdocker
|
userdocker/helpers/cmd.py
|
Python
|
mit
| 393
|
# This file is part of cldoc. cldoc is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from .cclass import Class
from ..clang import cindex
class Struct(Class):
kind = cindex.CursorKind.STRUCT_DECL
def __init__(self, cursor, comment):
Class.__init__(self, cursor, comment)
self.typedef = None
self.current_access = cindex.AccessSpecifier.PUBLIC
@property
def is_anonymous(self):
return self.anonymous_id > 0 and self.typedef is None
@property
def comment(self):
ret = Class.comment.fget(self)
if not ret and self.typedef:
ret = self.typedef.comment
return ret
@property
def name(self):
        if self.typedef is not None:
# The name is really the one of the typedef
return self.typedef.name
else:
return Class.name.fget(self)
@property
def force_page(self):
return not self.is_anonymous
# vi:ts=4:et
|
jessevdk/cldoc
|
cldoc/nodes/cstruct.py
|
Python
|
gpl-2.0
| 1,548
|
import Numeric
import visual
class wiresphere:
def __init__(self,density):
self.density=density
self.stack_increment = Numeric.pi /(density+1)
self.slice_increment = Numeric.pi /density
def getstacks(self):
stackAngle = self.stack_increment
wireStack=[0]*self.density
for i in range(self.density):
            sliceAngle = 0.0
newstack = [0]*self.density
for j in range(self.density):
newstackvect = (Numeric.sin(stackAngle) * Numeric.sin(sliceAngle),
Numeric.sin(stackAngle) * Numeric.cos(sliceAngle),
Numeric.cos(stackAngle))
newstack[j] = newstackvect
sliceAngle -= self.slice_increment * 2.0
newstack.append(newstack[0])
wireStack[i] = newstack
stackAngle +=self.stack_increment
return Numeric.array(wireStack,'d')
def getslices(self):
        sliceAngle = 0.0
wireSlice=[0]*self.density
for i in range(self.density):
stackAngle = 0.0
newslice=[1]*(self.density+1)
for j in range(self.density+1):
newslicevect=visual.vector(Numeric.sin(stackAngle) * Numeric.sin(sliceAngle),
Numeric.sin(stackAngle) * Numeric.cos(sliceAngle),
Numeric.cos(stackAngle))
newslice[j] =newslicevect
stackAngle -= self.slice_increment
wireSlice[i] = newslice
sliceAngle += self.slice_increment * 2.0
return Numeric.array(wireSlice,'d')
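# Illustrative usage (not in the original file): for density n, both methods
# return an (n, n+1, 3) array of points on the unit sphere, e.g.
#   ws = wiresphere(8)
#   stacks = ws.getstacks()   # shape (8, 9, 3); each stack closed by repeating its first point
#   slices = ws.getslices()   # shape (8, 9, 3)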
|
eeue56/PyGeo2
|
pygeo/base/wiresphere.py
|
Python
|
gpl-2.0
| 1,555
|
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsProcessingProvider
from .carbon import TCSummary
from .utilities import GenerateMask, ClipRaster
class Provider(QgsProcessingProvider):
def loadAlgorithms(self, *args, **kwargs):
self.addAlgorithm(TCSummary())
self.addAlgorithm(GenerateMask())
self.addAlgorithm(ClipRaster())
def id(self, *args, **kwargs):
return 'trendsearth'
def name(self, *args, **kwargs):
return self.tr('Trends.Earth')
def icon(self):
"""Should return a QIcon which is used for your provider inside
the Processing toolbox.
"""
return QIcon(':/plugins/LDMP/trends_earth_logo_square_32x32.png')
|
ConservationInternational/ldmp-qgis-plugin
|
LDMP/processing_provider/provider.py
|
Python
|
gpl-2.0
| 720
|
from web3.utils.functional import compose
def test_composition_no_functions():
fn = compose()
assert fn(5) == 5
def test_composition_single_function():
def fn(x):
return x * 2
assert compose(fn)(5) == 10
def test_composition_multiple_function():
def fn(x):
return x + 1
assert compose(fn, fn, fn)(5) == 8
|
shravan-shandilya/web3.py
|
tests/utilities/test_functional_tools.py
|
Python
|
mit
| 353
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'plasmoggl_config.ui'
#
# Created: Sun Mar 8 19:52:06 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_PlasmogglConfigDialog(object):
def setupUi(self, PlasmogglConfigDialog):
PlasmogglConfigDialog.setObjectName(_fromUtf8("PlasmogglConfigDialog"))
PlasmogglConfigDialog.resize(222, 103)
self.formLayoutWidget = QtGui.QWidget(PlasmogglConfigDialog)
self.formLayoutWidget.setGeometry(QtCore.QRect(10, 5, 188, 69))
self.formLayoutWidget.setObjectName(_fromUtf8("formLayoutWidget"))
self.formLayout_2 = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout_2.setMargin(0)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.showElapsedLabel = QtGui.QLabel(self.formLayoutWidget)
self.showElapsedLabel.setObjectName(_fromUtf8("showElapsedLabel"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.showElapsedLabel)
self.showElapsed = QtGui.QCheckBox(self.formLayoutWidget)
self.showElapsed.setText(_fromUtf8(""))
self.showElapsed.setObjectName(_fromUtf8("showElapsed"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.showElapsed)
self.showSecondLabel = QtGui.QLabel(self.formLayoutWidget)
self.showSecondLabel.setObjectName(_fromUtf8("showSecondLabel"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.showSecondLabel)
self.showSeconds = QtGui.QCheckBox(self.formLayoutWidget)
self.showSeconds.setText(_fromUtf8(""))
self.showSeconds.setObjectName(_fromUtf8("showSeconds"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.showSeconds)
self.refreshPeriodLabel = QtGui.QLabel(self.formLayoutWidget)
self.refreshPeriodLabel.setObjectName(_fromUtf8("refreshPeriodLabel"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.refreshPeriodLabel)
self.refreshPeriod = QtGui.QSpinBox(self.formLayoutWidget)
self.refreshPeriod.setObjectName(_fromUtf8("refreshPeriod"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.refreshPeriod)
self.retranslateUi(PlasmogglConfigDialog)
QtCore.QMetaObject.connectSlotsByName(PlasmogglConfigDialog)
def retranslateUi(self, PlasmogglConfigDialog):
PlasmogglConfigDialog.setWindowTitle(_translate("PlasmogglConfigDialog", "Form", None))
self.showElapsedLabel.setText(_translate("PlasmogglConfigDialog", "Show elapsed time:", None))
self.showSecondLabel.setText(_translate("PlasmogglConfigDialog", "Show seconds", None))
self.refreshPeriodLabel.setText(_translate("PlasmogglConfigDialog", "Refresh period (s)", None))
|
FedericoVaga/plasmoggl
|
contents/code/config/plasmoggl_configui.py
|
Python
|
gpl-3.0
| 3,332
|
# polynom.py - Find the roots of polynomials using the Aberth method
# Copyright (C) 2017 Shiva Iyer <shiva.iyer AT g m a i l DOT c o m>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from math import cos,sin,pi
from numpy import abs,array,max,min,sum,zeros
from numpy.linalg import norm
from linalg.gausseli import gausseli
def polyval(ply, x):
val = 0.0
for i in range(ply.size - 1):
val = (val + ply[i,0]) * x
val += ply[-1,0]
return(val)
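# Illustrative example (not in the original file): coefficients are stored
# highest degree first in a column vector, and polyval applies Horner's
# scheme. For p(x) = x**2 + 2x + 3:
#   ply = array([[1.0], [2.0], [3.0]])
#   polyval(ply, 2.0)   # -> 11.0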
def polyadd(pl1, pl2):
if (pl1.size >= pl2.size):
x = pl1.copy()
y = pl2
else:
x = pl2.copy()
y = pl1
x[-y.size:,0] += y[:,0]
return(x)
def polymul(pl1, pl2):
if (pl1.dtype == "complex" or pl2.dtype == "complex"):
mul = zeros([pl1.size+pl2.size-1,1], dtype = "complex")
else:
mul = zeros([pl1.size+pl2.size-1,1])
for i in range(pl1.size):
mul[i:i+pl2.size,0] += pl1[i,0]*pl2[:,0]
return(mul)
def polydiv(num, den):
if (num.size < den.size):
return(array([]))
A = zeros([num.size,num.size], dtype = num.dtype)
for i in range(den.size):
for j in range(num.size-den.size+1):
A[i+j,j] = den[i,0]
for i in range(den.size-1):
A[-i-1,-i-1] = 1.0
b = gausseli(A, num)
quo = b[:num.size-den.size+1,0]
rem = b[num.size-den.size+1:,0]
return(quo, rem)
def polyder(ply):
der = array([[(ply.size-i-1.0) * ply[i,0]
for i in range(ply.size-1)]], dtype = ply.dtype).T
return(der)
def roots(ply, tol = 1E-3, maxiter = 20):
n = ply.size
a,b = abs(ply[n-1,0]), abs(ply[:n-1,0])
lb = max([a / (a + max(b)), a / max([a, sum(b)])])
a,b = abs(ply[0,0]), abs(ply[1:,0])
ub = min([1.0 + max(b) / a, max([1.0, sum(b) / a])])
rts = zeros([n-1,1], dtype = "complex")
for i in range(0, n - 1, 2):
mag = lb + (ub - lb) * (i + 1.0) / n
pha = pi * (i + 1.0) / n
if (i > n - 3):
rts[i,0] = mag
else:
rts[i,0] = mag * (cos(pha) + 1.0j * sin(pha))
rts[i+1,0] = rts[i,0].conj()
cor = zeros([n-1,1], dtype = "complex")
for iter in range(maxiter):
for i in range(n - 1):
rat = polyval(ply, rts[i,0]) / polyval(polyder(ply), rts[i,0])
s = 0.0
for j in range(n - 1):
if (rts[i,0] != rts[j,0]):
s += 1.0 / (rts[i,0] - rts[j,0])
cor[i,0] = rat / (rat * s - 1.0)
rts += cor
if (norm(cor, 2) <= tol):
break
else:
rts = array([])
return(rts, iter + 1)
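# Illustrative usage (not in the original file): the Aberth iteration above
# should recover the roots of p(x) = x**2 - 1 to within tol, e.g.
#   ply = array([[1.0], [0.0], [-1.0]])
#   rts, iters = roots(ply)   # rts approximately [[1+0j], [-1+0j]] (any order)
# If the iteration does not converge within maxiter steps, the for/else
# clause returns an empty array instead.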
|
Shiva-Iyer/euler
|
solver/polynom.py
|
Python
|
gpl-3.0
| 3,210
|
# Static class
class Lang:
    """Language pack configuration
>>Demo:
from lang import Lang
print Lang.getLang("ErrorCode")
"""
@staticmethod
def getLang(name):
"""获取语言包配置属性,参数name:属性名"""
return Lang.__langconfig[name]
__langconfig = {
"ErrorCode": 10000,
"ErrorInfo": "系统繁忙"
}
|
wenhulove333/ScutServer
|
Sample/GameRanking/Server/src/GameRanking.Web/Script/PyScript/Lib/lang.py
|
Python
|
mit
| 382
|
"""Natural numbers, modularly.
This is aspirational -- a lot of the features used below haven't been implemented yet.
"""
from .. import *
from . import boolean, opt, num
@component
def Nats():
case_rules(+t, +a) == {
zero : +a,
succ : +t > +a
}
@interface
def INat():
t [:: Type]
zero [: t]
succ [: t > t]
case [: t * case_rules(t, +a) > +a]
eq [: t * t > boolean]
of_num [: num > opt(t)]
to_num [: t > num]
@INat
def NumNat():
t == num
zero = 0
succ = _ [: num] + 1
def case(n, rules):
if n == 0: rules.zero
else: rules.succ(n - 1)
def eq(n1, n2):
n1 == n2
def of_num(n):
if n < 0: None
else: Some(n)
def to_num(n):
n
@INat
def UnaryNat():
t == Z + S(t)
zero = Z
succ = S(_)
def case(n, rules):
match[n]
with Z: rules.zero
with S(p): rules.succ(p)
def eq(*n):
match[n]
with (Z, Z): True
with (Z, _): False
with (S(p1), S(p2)): eq(p1, p2)
with (_, _): False
def of_num(n):
if n < 0: None
else: Some(S(of_num(n - 1)))
def to_num(n):
match[n]
with Z: 0
with S(p): to_num(p) + 1
@component
def TestNats():
@component
def TestINat(N):
Nat.INat > _
assert N.zero.succ.succ.succ.to_num == 3
assert {N.of_num(3)} is {Some(_): True, None: False}
assert {N.of_num(-4)} is {Some(_): False, None: True}
assert {N.of_num(3), N.of_num(4)} is {(Some(three), Some(four)): three != four, _: False}
TestINat(Nats.NumNat)
TestINat(Nats.UnaryNat)
|
cyrus-/tydy
|
tydy/experimental/nats.py
|
Python
|
mit
| 1,938
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Nadam."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.optimizers.optimizer_v2 import nadam
def get_beta_accumulators(opt, dtype):
local_step = tf.cast(opt.iterations + 1, dtype)
beta_1_t = tf.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_t = tf.cast(opt._get_hyper("beta_2"), dtype)
beta_2_power = tf.pow(beta_2_t, local_step)
return (beta_1_power, beta_2_power)
def update_m_cache(m_cache, t, beta1=0.9):
mu_t = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 1)))
m_cache_t = m_cache * mu_t
return m_cache_t
def nadam_update_numpy(param,
g_t,
t,
m,
v,
m_cache,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
mu_t = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 1)))
mu_t_1 = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 2)))
m_cache_t_1 = m_cache * mu_t_1
g_prime_t = g_t / (1 - m_cache)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
m_prime_t = m_t / (1 - m_cache_t_1)
v_prime_t = v_t / (1 - beta2**(t + 1))
m_bar_t = (1 - mu_t) * g_prime_t + mu_t_1 * m_prime_t
param_t = param - alpha * m_bar_t / (np.sqrt(v_prime_t) + epsilon)
return param_t, m_t, v_t
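# Annotation (not in the original file): the reference implementation above
# follows Nadam (T. Dozat, "Incorporating Nesterov Momentum into Adam", 2016),
# where the momentum schedule mu_t = beta1 * (1 - 0.5 * 0.96**(0.004 * t))
# yields the m_cache product used to bias-correct the Nesterov lookahead term.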
class NadamOptimizerTest(tf.test.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
sparse_epsilon = 1e-7
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1, mcache = 0.0, 0.0, 0.0, 0.0, 1.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np[grads0_np_indices]),
tf.constant(grads0_np_indices), tf.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np[grads1_np_indices]),
tf.constant(grads1_np_indices), tf.constant([3]))
opt = nadam.Nadam(epsilon=sparse_epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], var0)
self.assertAllClose([3.0, 3.0, 4.0], var1)
beta1_power, beta2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Nadam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power)
self.assertAllCloseAccordingToType(0.999**(t + 1), beta2_power)
update.run()
mcache = update_m_cache(mcache, t)
var0_np, m0, v0 = nadam_update_numpy(
var0_np, grads0_np, t, m0, v0, mcache, epsilon=sparse_epsilon)
var1_np, m1, v1 = nadam_update_numpy(
var1_np, grads1_np, t, m1, v1, mcache, epsilon=sparse_epsilon)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
def testBasic(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1, mcache = 0.0, 0.0, 0.0, 0.0, 1.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = nadam.Nadam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0)
self.assertAllClose([3.0, 4.0], var1)
# Run 3 steps of Nadam
for t in range(3):
update.run()
mcache = update_m_cache(mcache, t)
var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, t, m0, v0,
mcache)
var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, t, m1, v1,
mcache)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
def testConstructNAdamWithLR(self):
opt = nadam.Nadam(lr=1.0)
opt_2 = nadam.Nadam(learning_rate=0.1, lr=1.0)
opt_3 = nadam.Nadam(learning_rate=0.1)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testConstructNAdamWithScheduleDecay(self):
opt = nadam.Nadam(schedule_decay=0.2)
self.assertIsInstance(opt.decay, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.decay), (0.2))
if __name__ == "__main__":
tf.test.main()
|
keras-team/keras
|
keras/optimizers/optimizer_v2/nadam_test.py
|
Python
|
apache-2.0
| 6,798