code
stringlengths 1
199k
|
|---|
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class res_users(orm.Model):
    """
    Custom res_users object.
    Adds a CAFAT ID for use in New Caledonia.
    It is for the Odoo user, not the partner.
    For a partner you'll find the CAFAT ID in the res.partner object.
    """
    _inherit = "res.users"
    _columns = {
        # Doctor's CAFAT convention number; the help text stresses this is
        # NOT the CAFAT number a patient would have.
        'cafat_id':fields.char('CAFAT ID', size = 16, help = 'CAFAT ID of the doctor = convention number. This is not the CAFAT Number as for a patient'),
    }
|
import django_filters
from django.forms import TextInput
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from base.models.entity_version import EntityVersion
class EntityVersionFilter(django_filters.FilterSet):
    """Filter entity versions by acronym/title substring and entity type."""
    # Case-insensitive substring match; the widget uppercases user input
    # visually (CSS only - the submitted value is unchanged).
    acronym = django_filters.CharFilter(
        lookup_expr='icontains', label=_("Acronym"),
        widget=TextInput(attrs={'style': "text-transform:uppercase"})
    )
    title = django_filters.CharFilter(lookup_expr='icontains', label=_("Title"), )

    class Meta:
        model = EntityVersion
        # entity_type uses the default exact-match filter.
        fields = ["entity_type"]
class EntityListSerializer(serializers.Serializer):
    """Read-only representation of an entity version for list/autocomplete views."""
    acronym = serializers.CharField()
    title = serializers.CharField()
    entity_type = serializers.CharField()
    # Display human readable value
    entity_type_text = serializers.CharField(source='get_entity_type_display', read_only=True)
    organization = serializers.SerializerMethodField()
    select_url = serializers.SerializerMethodField()

    def get_organization(self, obj):
        # String form of the related organization (delegates to its __str__).
        return str(obj.entity.organization)

    def get_select_url(self, obj):
        # URL of the detail view for this entity version.
        return reverse(
            "entity_read",
            kwargs={'entity_version_id': obj.id}
        )
|
import os
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter
from mdtraj.testing import eq
# OpenMM is an optional dependency: when it is missing, skip every test in
# this module via the module-level pytestmark.
try:
    from simtk.unit import nanometers, kelvin, picoseconds, femtoseconds
    from simtk.openmm import LangevinIntegrator, Platform
    from simtk.openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds
    HAVE_OPENMM = True
except ImportError:
    HAVE_OPENMM = False
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
    """Run a short OpenMM simulation with HDF5, NetCDF and DCD reporters
    attached, then verify each file recorded the expected frames/fields."""
    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
                                     nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)
    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    # Report every 2 steps over 100 steps -> 50 frames in each file.
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
                            cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
                            velocities=True)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
    reporter3 = DCDReporter(dcdfile, 2)
    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.step(100)
    reporter.close()
    reporter2.close()
    reporter3.close()
    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, 22, 3))
        eq(got.velocities.shape, (50, 22, 3))
        # Non-periodic system: no unit cell should have been written.
        eq(got.cell_lengths, None)
        eq(got.cell_angles, None)
        # 2 fs timestep * 2 steps per frame = 0.004 ps per frame.
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb')).top
    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        eq(cell_lengths, None)
        eq(cell_angles, None)
        eq(time, 0.002 * 2 * (1 + np.arange(50)))
    # Cross-check the three formats against each other.
    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
    netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))
    # we don't have to convert units here, because md.load already
    # handles that
    assert hdf5_traj.unitcell_vectors is None
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)
    eq(dcd_traj.xyz, hdf5_traj.xyz)
    # yield lambda: eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_reporter_subset(tmpdir, get_fn):
    """Like test_reporter, but periodic and reporting only a subset of atoms."""
    pdb = PDBFile(get_fn('native2.pdb'))
    pdb.topology.setUnitCellDimensions([2, 2, 2])
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
                                     nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)
    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    atomSubset = [0, 1, 2, 4, 5]
    # Report every 2 steps over 100 steps -> 50 frames of just the subset.
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
                            cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
                            velocities=True, atomSubset=atomSubset)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
                               cell=True, atomSubset=atomSubset)
    reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)
    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.step(100)
    reporter.close()
    reporter2.close()
    reporter3.close()
    # NOTE(review): the simulation used native2.pdb but this loads
    # native.pdb; also 't' is never used afterwards -- confirm intended.
    t = md.load(get_fn('native.pdb'))
    t.restrict_atoms(atomSubset)
    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, len(atomSubset), 3))
        eq(got.velocities.shape, (50, len(atomSubset), 3))
        # HDF5 stores nanometers: the 2x2x2 nm box set above.
        eq(got.cell_lengths, 2 * np.ones((50, 3)))
        eq(got.cell_angles, 90 * np.ones((50, 3)))
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology
    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        # NetCDF stores angstroms: 2 nm == 20 A.
        eq(cell_lengths, 20 * np.ones((50, 3)))
        eq(cell_angles, 90 * np.ones((50, 3)))
        eq(time, 0.002 * 2 * (1 + np.arange(50)))
        eq(xyz.shape, (50, len(atomSubset), 3))
    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=hdf5_traj)
    netcdf_traj = md.load(ncfile, top=hdf5_traj)
    # we don't have to convert units here, because md.load already handles that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)
    eq(dcd_traj.xyz, hdf5_traj.xyz)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
|
import pytest
from spack.main import SpackCommand
# Smoke tests for `spack versions`: each test passes if the command runs
# without raising. Tests marked `network` hit remote package indexes.
versions = SpackCommand('versions')


def test_safe_only_versions():
    """Only test the safe versions of a package.

    (Using the deprecated command line argument)
    """
    versions('--safe-only', 'zlib')


def test_safe_versions():
    """Only test the safe versions of a package."""
    versions('--safe', 'zlib')


@pytest.mark.network
def test_remote_versions():
    """Test a package for which remote versions should be available."""
    versions('zlib')


@pytest.mark.network
def test_remote_versions_only():
    """Test a package for which remote versions should be available."""
    versions('--remote', 'zlib')


@pytest.mark.network
@pytest.mark.usefixtures('mock_packages')
def test_new_versions_only():
    """Test a package for which new versions should be available."""
    versions('--new', 'brillig')


@pytest.mark.network
def test_no_versions():
    """Test a package for which no remote versions are available."""
    versions('converge')


@pytest.mark.network
def test_no_unchecksummed_versions():
    """Test a package for which no unchecksummed versions are available."""
    versions('bzip2')


@pytest.mark.network
def test_versions_no_url():
    """Test a package with versions but without a ``url`` attribute."""
    versions('graphviz')


@pytest.mark.network
def test_no_versions_no_url():
    """Test a package without versions or a ``url`` attribute."""
    versions('opengl')
|
from dumper import *
def dumpLiteral(d, value):
    # A CPlusPlus literal stores its text in _chars with length _size.
    d.putSimpleCharArray(value["_chars"], value["_size"])
def qdump__Core__Id(d, value):
    # Try to resolve the id to its registered name by evaluating code in
    # the inferior; fall back to the raw numeric id on any failure.
    try:
        name = d.parseAndEvaluate("Core::nameForId(%d)" % value["m_id"])
        d.putSimpleCharArray(name)
    except:
        # Bare except is deliberate: in-debugger evaluation can fail in
        # many ways and a dumper must never propagate an exception.
        d.putValue(value["m_id"])
    d.putPlainChildren(value)
def qdump__Debugger__Internal__GdbMi(d, value):
    """Dump a GdbMi node as 'name: data'.

    "3a20" is the hex encoding of ': ' joining the two hex-encoded
    byte arrays; the result is shown as Hex2EncodedLatin1.
    """
    # Renamed from 'str', which shadowed the builtin of the same name.
    encoded = d.encodeByteArray(value["m_name"]) + "3a20" \
        + d.encodeByteArray(value["m_data"])
    d.putValue(encoded, Hex2EncodedLatin1)
    d.putPlainChildren(value)
def qdump__Debugger__Internal__DisassemblerLine(d, value):
    # Show the raw disassembly text.
    d.putByteArrayValue(value["m_data"])
    d.putPlainChildren(value)


def qdump__Debugger__Internal__WatchData(d, value):
    # Identify a watch item by its internal name (iname).
    d.putByteArrayValue(value["iname"])
    d.putPlainChildren(value)


def qdump__Debugger__Internal__WatchItem(d, value):
    # Same as WatchData, but iname lives in the private 'd' member.
    d.putByteArrayValue(value["d"]["iname"])
    d.putPlainChildren(value)


def qdump__Debugger__Internal__BreakpointModelId(d, value):
    # Display as "major.minor".
    d.putValue("%s.%s" % (int(value["m_majorPart"]), int(value["m_minorPart"])))
    d.putPlainChildren(value)


def qdump__Debugger__Internal__ThreadId(d, value):
    d.putValue("%s" % value["m_id"])
    d.putPlainChildren(value)


def qdump__CPlusPlus__ByteArrayRef(d, value):
    # Non-owning (start, length) view into a byte buffer.
    d.putSimpleCharArray(value["m_start"], value["m_length"])
    d.putPlainChildren(value)


def qdump__CPlusPlus__Identifier(d, value):
    d.putSimpleCharArray(value["_chars"], value["_size"])
    d.putPlainChildren(value)


def qdump__CPlusPlus__Symbol(d, value):
    # Show the symbol's name; downcast resolves the concrete Name subclass.
    name = d.downcast(value["_name"])
    dumpLiteral(d, name)
    d.putBetterType(value.type)
    d.putPlainChildren(value)


def qdump__CPlusPlus__IntegerType(d, value):
    d.putValue(value["_kind"])
    d.putPlainChildren(value)


def qdump__CPlusPlus__NamedType(d, value):
    literal = d.downcast(value["_name"])
    dumpLiteral(d, literal)
    d.putBetterType(value.type)
    d.putPlainChildren(value)


def qdump__CPlusPlus__TemplateNameId(d, value):
    dumpLiteral(d, value["_identifier"].dereference())
    d.putBetterType(value.type)
    d.putPlainChildren(value)


def qdump__CPlusPlus__Literal(d, value):
    dumpLiteral(d, value)
    d.putPlainChildren(value)


def qdump__CPlusPlus__StringLiteral(d, value):
    d.putSimpleCharArray(value["_chars"], value["_size"])
    d.putPlainChildren(value)


def qdump__CPlusPlus__Internal__Value(d, value):
    d.putValue(value["l"])
    d.putPlainChildren(value)


def qdump__Utils__FileName(d, value):
    d.putStringValue(value)
    d.putPlainChildren(value)


def qdump__Utils__ElfSection(d, value):
    d.putByteArrayValue(value["name"])
    d.putPlainChildren(value)


def qdump__CPlusPlus__Token(d, value):
    # Kinds 6 and 7 carry their text out-of-line, so show offset/length;
    # everything else is displayed via its CPlusPlus::Kind enumerator.
    k = value["f"]["kind"]
    if int(k) == 6:
        d.putValue("T_IDENTIFIER. offset: %d, len: %d"
                   % (value["utf16charOffset"], value["f"]["utf16chars"]))
    elif int(k) == 7:
        d.putValue("T_NUMERIC_LITERAL. offset: %d, len: %d"
                   % (value["utf16charOffset"], value["f"]["utf16chars"]))
    else:
        val = str(k.cast(d.lookupType("CPlusPlus::Kind")))
        d.putValue(val[11:])  # Strip "CPlusPlus::"
    d.putPlainChildren(value)


def qdump__CPlusPlus__Internal__PPToken(d, value):
    # Show the token's slice of the source buffer, truncated to 100 chars.
    data, size, alloc = d.byteArrayData(value["m_src"])
    length = int(value["f"]["utf16chars"])
    offset = int(value["utf16charOffset"])
    #warn("size: %s, alloc: %s, offset: %s, length: %s, data: %s"
    #    % (size, alloc, offset, length, data))
    d.putValue(d.readMemory(data + offset, min(100, length)),
               Hex2EncodedLatin1)
    d.putPlainChildren(value)
|
from basetest import BaseTest
import sys, tempfile, os, time
import unittest
import data
sys.path.insert(0, '..')
from zeroinstall.injector import model, gpg, trust
from zeroinstall.injector.namespaces import config_site
from zeroinstall.injector.iface_cache import PendingFeed
from zeroinstall.support import basedir
class TestIfaceCache(BaseTest):
    """Tests for the zeroinstall interface cache.

    NOTE(review): Python 2 era code -- uses file() and assertEquals;
    porting to Python 3 would be a behavior-affecting change.
    """

    def testList(self):
        # An empty cache lists nothing; dropping a cached feed file makes
        # its URL-unescaped URI appear.
        iface_cache = self.config.iface_cache
        self.assertEquals([], iface_cache.list_all_interfaces())
        iface_dir = basedir.save_cache_path(config_site, 'interfaces')
        file(os.path.join(iface_dir, 'http%3a%2f%2ffoo'), 'w').close()
        self.assertEquals(['http://foo'],
                          iface_cache.list_all_interfaces())
        # TODO: test overrides

    def testCheckSigned(self):
        # Unsigned data must be rejected; properly signed data from a
        # trusted key must be accepted and cached.
        iface_cache = self.config.iface_cache
        trust.trust_db.trust_key(
            '92429807C9853C0744A68B9AAE07828059A53CC1')
        feed_url = 'http://foo'
        src = tempfile.TemporaryFile()
        # Unsigned
        src.write("hello")
        src.flush()
        src.seek(0)
        try:
            PendingFeed(feed_url, src)
            assert 0
        except model.SafeException:
            pass
        stream = tempfile.TemporaryFile()
        stream.write(data.thomas_key)
        stream.seek(0)
        gpg.import_key(stream)
        # Signed
        src.seek(0)
        src.write(data.foo_signed_xml)
        src.flush()
        src.seek(0)
        pending = PendingFeed(feed_url, src)
        assert iface_cache.update_feed_if_trusted(feed_url, pending.sigs, pending.new_xml)
        self.assertEquals(['http://foo'],
                          iface_cache.list_all_interfaces())
        feed = iface_cache.get_feed(feed_url)
        # last_modified must come from the signature timestamp.
        self.assertEquals(1154850229, feed.last_modified)

    def testXMLupdate(self):
        iface_cache = self.config.iface_cache
        trust.trust_db.trust_key(
            '92429807C9853C0744A68B9AAE07828059A53CC1')
        stream = tempfile.TemporaryFile()
        stream.write(data.thomas_key)
        stream.seek(0)
        gpg.import_key(stream)
        iface = iface_cache.get_interface('http://foo')
        src = tempfile.TemporaryFile()
        src.write(data.foo_signed_xml)
        src.seek(0)
        pending = PendingFeed(iface.uri, src)
        assert iface_cache.update_feed_if_trusted(iface.uri, pending.sigs, pending.new_xml)
        # Re-initialise to drop in-memory state and re-read from disk.
        iface_cache.__init__()
        feed = iface_cache.get_feed('http://foo')
        assert feed.last_modified == 1154850229
        # mtimes are unreliable because copying often changes them -
        # check that we extract the time from the signature when upgrading
        upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
        cached = os.path.join(upstream_dir, model.escape(feed.url))
        os.utime(cached, None)
        iface_cache.__init__()
        feed = iface_cache.get_feed('http://foo')
        assert feed.last_modified > 1154850229
        src = tempfile.TemporaryFile()
        src.write(data.new_foo_signed_xml)
        src.seek(0)
        pending = PendingFeed(feed.url, src)
        assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
        # Can't 'update' to an older copy
        src = tempfile.TemporaryFile()
        src.write(data.foo_signed_xml)
        src.seek(0)
        try:
            pending = PendingFeed(feed.url, src)
            assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
            assert 0
        except model.SafeException:
            pass

    def testTimes(self):
        # The signature date is only reported once the key is trusted,
        # and None is returned for corrupt signatures.
        iface_cache = self.config.iface_cache
        stream = tempfile.TemporaryFile()
        stream.write(data.thomas_key)
        stream.seek(0)
        gpg.import_key(stream)
        upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
        cached = os.path.join(upstream_dir, model.escape('http://foo'))
        stream = file(cached, 'w')
        stream.write(data.foo_signed_xml)
        stream.close()
        signed = iface_cache._get_signature_date('http://foo')
        assert signed == None
        trust.trust_db.trust_key(
            '92429807C9853C0744A68B9AAE07828059A53CC1')
        signed = iface_cache._get_signature_date('http://foo')
        assert signed == 1154850229
        stream = file(cached, 'w+')
        stream.seek(0)
        stream.write('Hello')
        stream.close()
        # When the signature is invalid, we just return None.
        # This is because versions < 0.22 used to corrupt the signatue
        # by adding an attribute to the XML
        signed = iface_cache._get_signature_date('http://foo')
        assert signed == None

    def testCheckAttempt(self):
        iface_cache = self.config.iface_cache
        self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar.xml"))
        start_time = time.time() - 5  # Seems to be some odd rounding here
        iface_cache.mark_as_checking("http://foo/bar.xml")
        last_check = iface_cache.get_last_check_attempt("http://foo/bar.xml")
        assert last_check is not None
        assert last_check >= start_time, (last_check, start_time)
        # An unrelated URL is unaffected.
        self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar2.xml"))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from pyrtist.lib2d import Point, Tri
bbox1 = Point(0.0, 50.0); bbox2 = Point(100.0, 12.5838926174)
p1 = Point(3.15540458874, 46.942241204)
p2 = Point(3.23537580547, 42.1395946309)
p4 = Point(28.5119375629, 38.1285583893)
q1 = Point(73.1545885714, 21.8120805369)
q3 = Point(93.6244457143, 38.4228187919)
q5 = Point(66.4133738602, 33.8755592617)
q6 = Point(94.2249240122, 24.9089847651)
q7 = Point(84.8024316109, 26.7326948322)
q2 = Tri(q5, Point(70.1344457143, 37.2483221477))
q4 = Tri(q6, Point(90.4365171429, 20.1342281879), q7)
from pyrtist.lib2d import *
w = Window()
s = " "
font = Font(2, "Helvetica")
w << Args(
Text(p1, font, Offset(0, 1), Color.red,
"Pyrtist allows creating curves (cubic Bezier splines)."
"\nThis example explains how."),
Text(p2, font, Offset(0, 1),
"STEP 1: launch Pyrtist or create a new document (CTRL+N)\n",
"STEP 2: click on the button to create a new curved polygon\n",
"STEP 3: move the mouse where you want to create the first vertex.\n",
s, "Click on the left button of the mouse\n",
"STEP 4: repeat step 3 to create other 3 vertices. You should see\n",
s, "a black polygon with straight boundaries\n",
"STEP 5: move the mouse over one of the vertices. Press the CTRL\n",
s, "key and the left mouse button, simultaneously. Keep them\n",
s, "pressed while moving the mouse out of the vertex. A round\n",
s, "reference point appears and the polygon edge is rounded.\n",
"STEP 6: you can repeat step 5 for the same vertex or for other\n",
s, "vertices. You should obtain something similar to what shown on\n",
s, "the left")
)
w << Curve(q1, q2, q3, q4)
w << Image("curve.png", p4, 2.5)
w << BBox(bbox1, bbox2)
gui(w)
|
"""
Pychan extension widgets.
Extension widgets are partly experimental, partly rarely used widgets
which are added here. They are by default not included in the widgets
registry and thus cannot be loaded from XML files. Use L{pychan.widgets.registerWidget}
to enable that.
The same care to keep the API stable will not be taken for them, and
before they are added to (or replace) the standard widgets they
will have to be reviewed in detail.
"""
|
import sys
import requests
# NOTE(review): Python 2 script (print statements, %-formatting).
# Nagios-style URL check: exit 0 when the URL answers HTTP 200,
# exit 2 on timeout, error, or any other status code.
try:
    url = sys.argv[1]
    r = requests.get('http://%s' %url ,timeout=3)
except requests.exceptions.Timeout:
    print 'url timeout\n%s' %url
    sys.exit(2)
except:
    # NOTE(review): bare except also catches a missing argv[1]; in that
    # case 'url' is unbound and this print would raise NameError --
    # confirm whether that path is intended.
    print 'url error \n%s' %url
    sys.exit(2)
url_status = r.status_code
if url_status == 200:
    print 'url_status %s\n%s' %(url_status,url)
    sys.exit(0)
else:
    print 'url_status %s\n%s' %(url_status,url)
    sys.exit(2)
|
import os
import shutil
from cerbero.config import Platform
from cerbero.utils import shell
# Environment used for all git invocations: strip LD_LIBRARY_PATH so the
# system git does not pick up libraries from the build environment.
CLEAN_ENV = os.environ.copy()
# dict.pop with a default replaces the Python-2-only has_key() check and
# works on both Python 2 and Python 3.
CLEAN_ENV.pop('LD_LIBRARY_PATH', None)
GIT = 'git'
def init(git_dir):
    '''
    Initialize a git repository with 'git init', creating the directory
    first if needed
    @param git_dir: path of the git repository
    @type git_dir: str
    '''
    shell.call('mkdir -p %s' % git_dir)
    shell.call('%s init' % GIT, git_dir, env=CLEAN_ENV)


def clean(git_dir):
    '''
    Clean a git repository with 'git clean -dfx' (removes untracked and
    ignored files and directories)
    @param git_dir: path of the git repository
    @type git_dir: str
    '''
    return shell.call('%s clean -dfx' % GIT, git_dir, env=CLEAN_ENV)
def list_tags(git_dir, fail=True):
    '''
    List all tags
    @param git_dir: path of the git repository
    @type git_dir: str
    @param fail: raise an error if the command failed
    @type fail: bool
    @return: list of tag names (str); empty list when there are no tags
    @rtype: list
    '''
    output = shell.check_call('%s tag -l' % GIT, git_dir, fail=fail,
                              env=CLEAN_ENV)
    # splitlines() yields [] for empty output, so callers always receive
    # a list (previously '' was returned when the repository had no tags,
    # contradicting the documented return type).
    return output.strip().splitlines()
def create_tag(git_dir, tagname, tagdescription, commit, fail=True):
    '''
    Create a signed tag at a commit and push it to origin
    @param git_dir: path of the git repository
    @type git_dir: str
    @param tagname: name of the tag to create
    @type tagname: str
    @param tagdescription: the tag description
    @type tagdescription: str
    @param commit: the tag commit to use
    @type commit: str
    @param fail: raise an error if the command failed
    @type fail: bool
    '''
    shell.call('%s tag -s %s -m "%s" %s' %
               (GIT, tagname, tagdescription, commit), git_dir, fail=fail,
               env=CLEAN_ENV)
    # Immediately publish the new tag.
    return shell.call('%s push origin %s' % (GIT, tagname), git_dir, fail=fail,
                      env=CLEAN_ENV)


def delete_tag(git_dir, tagname, fail=True):
    '''
    Delete a local tag
    @param git_dir: path of the git repository
    @type git_dir: str
    @param tagname: name of the tag to delete
    @type tagname: str
    @param fail: raise an error if the command failed
    @type fail: bool
    '''
    return shell.call('%s tag -d %s' % (GIT, tagname), git_dir, fail=fail,
                      env=CLEAN_ENV)
def fetch(git_dir, fail=True):
    '''
    Fetch all refs from all the remotes
    @param git_dir: path of the git repository
    @type git_dir: str
    @param fail: raise an error if the command failed
    @type fail: bool
    '''
    return shell.call('%s fetch --all' % GIT, git_dir, fail=fail, env=CLEAN_ENV)


def submodules_update(git_dir, src_dir=None, fail=True):
    '''
    Update submodules, optionally redirecting their URLs to a local source
    directory for the update and restoring the recorded URLs afterwards
    @param git_dir: path of the git repository
    @type git_dir: str
    @param src_dir: path or base URI of the source directory
    @type src_dir: str
    @param fail: raise an error if the command failed
    @type fail: bool
    '''
    if src_dir:
        # Rewrite each submodule URL in .gitmodules to the local mirror.
        config = shell.check_call('%s config --file=.gitmodules --list' % GIT,
                                  git_dir)
        config_array = [s.split('=', 1) for s in config.split('\n')]
        for c in config_array:
            if c[0].startswith('submodule.') and c[0].endswith('.path'):
                submodule = c[0][len('submodule.'):-len('.path')]
                shell.call("%s config --file=.gitmodules submodule.%s.url %s" %
                           (GIT, submodule, os.path.join(src_dir, c[1])),
                           git_dir)
    shell.call("%s submodule init" % GIT, git_dir)
    shell.call("%s submodule sync" % GIT, git_dir)
    shell.call("%s submodule update" % GIT, git_dir, fail=fail)
    if src_dir:
        # Restore the original URLs that were recorded in .gitmodules.
        for c in config_array:
            if c[0].startswith('submodule.') and c[0].endswith('.url'):
                shell.call("%s config --file=.gitmodules %s %s" %
                           (GIT, c[0], c[1]), git_dir)
        shell.call("%s submodule sync" % GIT, git_dir)
def checkout(git_dir, commit):
    '''
    Reset a git repository to a given commit (hard reset: discards any
    local changes)
    @param git_dir: path of the git repository
    @type git_dir: str
    @param commit: the commit to checkout
    @type commit: str
    '''
    return shell.call('%s reset --hard %s' % (GIT, commit), git_dir,
                      env=CLEAN_ENV)


def get_hash(git_dir, commit):
    '''
    Get a commit hash from a valid commit.
    Can be used to check if a commit exists
    @param git_dir: path of the git repository
    @type git_dir: str
    @param commit: the commit to log
    @type commit: str
    '''
    return shell.check_call('%s show -s --pretty=%%H %s' %
                            (GIT, commit), git_dir, env=CLEAN_ENV)


def local_checkout(git_dir, local_git_dir, commit):
    '''
    Clone a repository for a given commit in a different location, using a
    shared clone (-s) of a temporary 'cerbero_build' branch
    @param git_dir: destination path of the git repository
    @type git_dir: str
    @param local_git_dir: path of the source git repository
    @type local_git_dir: str
    @param commit: the commit to checkout
    @type commit: str
    '''
    # reset to a commit in case it's the first checkout and the masterbranch is
    # missing
    branch_name = 'cerbero_build'
    shell.call('%s reset --hard %s' % (GIT, commit), local_git_dir,
               env=CLEAN_ENV)
    # fail=False: the branch may already exist from a previous build.
    shell.call('%s branch %s' % (GIT, branch_name), local_git_dir, fail=False,
               env=CLEAN_ENV)
    shell.call('%s checkout %s' % (GIT, branch_name), local_git_dir,
               env=CLEAN_ENV)
    shell.call('%s reset --hard %s' % (GIT, commit), local_git_dir,
               env=CLEAN_ENV)
    shell.call('%s clone %s -s -b %s .' % (GIT, local_git_dir,
                                           branch_name),
               git_dir, env=CLEAN_ENV)
    submodules_update(git_dir, local_git_dir)
def add_remote(git_dir, name, url):
    '''
    Add a remote to a git repository, updating its URL if it already exists
    @param git_dir: destination path of the git repository
    @type git_dir: str
    @param name: name of the remote
    @type name: str
    @param url: url of the remote
    @type url: str
    '''
    try:
        shell.call('%s remote add %s %s' % (GIT, name, url), git_dir,
                   env=CLEAN_ENV)
    except Exception:
        # 'remote add' fails when the remote already exists; update its
        # URL instead. Catching Exception (rather than a bare except)
        # lets KeyboardInterrupt/SystemExit still propagate.
        shell.call('%s remote set-url %s %s' % (GIT, name, url), git_dir,
                   env=CLEAN_ENV)
def check_line_endings(platform):
    '''
    Checks that git's automatic line-ending conversion is disabled on
    Windows, as it breaks everything
    @param platform: the host platform
    @type platform: L{cerbero.config.Platform}
    @return: True if not on Windows, or if core.autocrlf=false
    @rtype: bool
    '''
    # Line-ending conversion is only a concern on Windows.
    if platform != Platform.WINDOWS:
        return True
    val = shell.check_call('%s config --get core.autocrlf' % GIT, env=CLEAN_ENV)
    return 'false' in val.lower()
def init_directory(git_dir):
    '''
    Initialize a git repository with the contents
    of a directory
    @param git_dir: path of the git repository
    @type git_dir: str
    '''
    init(git_dir)
    try:
        shell.call('%s add --force -A .' % GIT, git_dir, env=CLEAN_ENV)
        shell.call('%s commit -m "Initial commit" > /dev/null 2>&1' % GIT,
                   git_dir, env=CLEAN_ENV)
    except:
        # Best-effort: e.g. the directory may already be committed.
        pass


def apply_patch(patch, git_dir):
    '''
    Applies a commit patch using 'git am'
    @param git_dir: path of the git repository
    @type git_dir: str
    @param patch: path of the patch file
    @type patch: str
    '''
    shell.call('%s am --ignore-whitespace %s' % (GIT, patch), git_dir,
               env=CLEAN_ENV)
|
"""Test of table output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("Up"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"1. Table Where Am I",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 3 bottles of coke'",
" VISIBLE: '3 bottles of coke', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Number.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'column 1 of 3'",
"SPEECH OUTPUT: 'row 1 of 5.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Next row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"SPEECH OUTPUT: '5 packages of noodles.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"3. Table Where Am I (again)",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Number.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'column 1 of 3'",
"SPEECH OUTPUT: 'row 2 of 5.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("F11"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"4. Turn row reading off",
["BRAILLE LINE: 'Speak cell'",
" VISIBLE: 'Speak cell', cursor=0",
"SPEECH OUTPUT: 'Speak cell'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"5. Table Right to the Product column in the packages of noodles row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header packages of noodles table cell'",
" VISIBLE: 'packages of noodles table cell', cursor=1",
"SPEECH OUTPUT: 'Product column header packages of noodles.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Table up to bottles of coke",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header bottles of coke table cell'",
" VISIBLE: 'bottles of coke table cell', cursor=1",
"SPEECH OUTPUT: 'bottles of coke.'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
import math
# Version-dependent imports and type aliases so the module runs unchanged
# on both Python 2 and Python 3.
if python_version_tuple() >= ("3", "3", "0"):
    from collections.abc import Iterable
else:
    from collections import Iterable
if python_version_tuple()[0] < "3":
    from itertools import izip_longest
    from functools import partial
    _none_type = type(None)
    _bool_type = bool
    _int_type = int
    _long_type = long # noqa
    _float_type = float
    _text_type = unicode # noqa
    _binary_type = str

    def _is_file(f):
        # Python 2 has a concrete 'file' type.
        return isinstance(f, file) # noqa
else:
    from itertools import zip_longest as izip_longest
    from functools import reduce, partial
    _none_type = type(None)
    _bool_type = bool
    _int_type = int
    _long_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes
    basestring = str
    import io

    def _is_file(f):
        # On Python 3, file objects derive from io.IOBase.
        return isinstance(f, io.IOBase)
try:
    import wcwidth  # optional wide-character (CJK) support
except ImportError:
    wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.4"
# minimum number of spaces between columns
MIN_PADDING = 2
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT = "g"
_DEFAULT_MISSINGVAL = ""
# Wide-character width handling is enabled only when wcwidth is installed.
WIDE_CHARS_MODE = wcwidth is not None
# Structural pieces of a table format: horizontal rules and data rows.
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w)
for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."}
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(
celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns,
header=''):
alignment = {"left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">'}
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
# Map of LaTeX special characters to their escaped forms; applied
# character-by-character by _latex_row (pass escrules={} to skip escaping,
# as the "latex_raw" format does).
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
                      r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
                      r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
                      r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
    """Render one LaTeX row, escaping every character of every cell
    through `escrules` (an empty mapping disables escaping)."""
    escaped_values = ["".join(escrules.get(ch, ch) for ch in cell)
                      for cell in cell_values]
    return _build_simple_row(escaped_values, DataRow("", "&", "\\\\"))
def _rst_escape_first_column(rows, headers):
    """Escape empty first-column cells for reStructuredText output: an
    all-whitespace string cell becomes ".." so the table grid stays valid."""
    def fix(cell):
        stringy = isinstance(cell, (_text_type, _binary_type))
        if stringy and not cell.strip():
            return ".."
        return cell

    escaped_headers = list(headers)
    if headers:
        escaped_headers[0] = fix(headers[0])
    escaped_rows = []
    for row in rows:
        cells = list(row)
        if cells:
            cells[0] = fix(row[0])
        escaped_rows.append(cells)
    return escaped_rows, escaped_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("â•’", "â•", "╤", "â••"),
linebelowheader=Line("╞", "â•", "╪", "â•¡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "â•", "â•§", "â•›"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"github":
TableFormat(lineabove=Line("|", "-", "|", "|"),
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"presto":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", "+", ""),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", ""),
datarow=DataRow("", "|", ""),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line(
"{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(
_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs, "||",
header="'''"),
datarow=partial(_moin_row_with_attrs, "||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "",
""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "",
""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular,
booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}",
"", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
# Sorted list of all supported format names (user-facing).
tabulate_formats = list(sorted(_table_formats.keys()))
# Formats able to render multiline cells, mapped to the format that is
# actually used when multiline content is detected (identity for now).
multiline_formats = {
    "plain": "plain",
    "simple": "simple",
    "grid": "grid",
    "fancy_grid": "fancy_grid",
    "pipe": "pipe",
    "orgtbl": "orgtbl",
    "jira": "jira",
    "presto": "presto",
    "psql": "psql",
    "rst": "rst",
}
# Line-break detectors (str and bytes variants).
# NOTE(review): in r"\r|\n|\r\n" the "\r\n" alternative is unreachable
# because "\r" matches first, so a CRLF pair counts as two separators when
# used with re.split — presumably legacy behavior; confirm before changing.
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
# ANSI SGR (color/style) escape sequences, treated as zero-width text.
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m")
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m")
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.
    >>> tsv = simple_separated_format("\\t") ; \
    tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """ # noqa
    row_format = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=row_format,
                       datarow=row_format,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
    """
    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    >>> _isnumber("123e45678")
    False
    >>> _isnumber("inf")
    True
    """
    if not _isconvertible(float, string):
        return False
    if isinstance(string, (_text_type, _binary_type)):
        value = float(string)
        if math.isinf(value) or math.isnan(value):
            # only explicitly spelled infinities/NaN count as numbers;
            # e.g. "123e45678" overflows to inf but is not accepted
            return string.lower() in ['inf', '-inf', 'nan']
    return True
def _isint(string, inttype=int):
    """
    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    if type(string) is inttype:
        return True
    stringy = isinstance(string, (_binary_type, _text_type))
    return stringy and _isconvertible(inttype, string)
def _isbool(string):
    """
    >>> _isbool(True)
    True
    >>> _isbool("False")
    True
    >>> _isbool(1)
    False
    """
    if type(string) is _bool_type:
        return True
    # note: bytes values never equal the text literals below
    return (isinstance(string, (_binary_type, _text_type))
            and string in ("True", "False"))
def _type(string, has_invisible=True, numparse=True):
    """The least generic type (type(None), int, float, str, unicode).
    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # strip ANSI color codes first so colored numbers still classify as
    # numbers
    if has_invisible and \
       (isinstance(string, _text_type) or isinstance(string, _binary_type)):
        string = _strip_invisible(string)
    # order matters: most specific type first; numparse=False suppresses
    # the numeric classifications entirely
    if string is None:
        return _none_type
    elif hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    elif _isbool(string):
        return _bool_type
    elif _isint(string) and numparse:
        return int
    elif _isint(string, _long_type) and numparse:
        return int
    elif _isnumber(string) and numparse:
        return float
    elif isinstance(string, _binary_type):
        return _binary_type
    else:
        return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.
    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string):
        return -1  # not a number
    if _isint(string):
        return -1  # integers have no fractional part
    # count characters after "." (or after "e" in exponent notation)
    marker = string.rfind(".")
    if marker < 0:
        marker = string.lower().rfind("e")
    if marker < 0:
        return -1  # no point
    return len(string) - marker - 1
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
""" # noqa
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _padnone(ignore_width, s):
    # "no alignment" pad function: returns the cell unchanged; the width
    # argument exists only to match the other pad functions' signature
    return s
def _strip_invisible(s):
    "Remove invisible ANSI color codes."
    if isinstance(s, _text_type):
        return _invisible_codes.sub("", s)
    # a bytestring
    return _invisible_codes_bytes.sub("", s)
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.
    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    # optional wide-character support
    if wcwidth is not None and WIDE_CHARS_MODE:
        measure = wcwidth.wcswidth
    else:
        measure = len
    if isinstance(s, (_text_type, _binary_type)):
        return measure(_strip_invisible(s))
    return measure(_text_type(s))
def _is_multiline(s):
    # True when the cell content contains a line break (str or bytes)
    if isinstance(s, _text_type):
        return _multiline_codes.search(s) is not None
    # a bytestring
    return _multiline_codes_bytes.search(s) is not None
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn) # noqa
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
    """Pick the pad function for `alignment` and pre-process `strings`
    (whitespace-stripping or decimal-point padding) accordingly."""
    def stripped(values):
        if PRESERVE_WHITESPACE:
            return values
        return [v.strip() for v in values]

    if alignment == "right":
        return stripped(strings), _padleft
    if alignment == "center":
        return stripped(strings), _padboth
    if alignment == "decimal":
        # line up decimal points by right-padding each value with spaces
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        padded = [s + (maxdecimals - decs) * " "
                  for s, decs in zip(strings, decimals)]
        return padded, _padleft
    if not alignment:
        return strings, _padnone
    # any other alignment value flushes left
    return stripped(strings), _padright
def _align_column(strings, alignment, minwidth=0,
                  has_invisible=True, enable_widechars=False,
                  is_multiline=False):
    """[string] -> [padded_string]"""
    strings, padfn = _align_column_choose_padfn(strings, alignment,
                                                has_invisible)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    s_widths = list(map(width_fn, strings))
    maxwidth = max(max(s_widths), minwidth)
    # TODO: refactor column alignment in single-line and multiline modes
    if is_multiline:
        if not enable_widechars and not has_invisible:
            # simple case: visible width == len(), pad every physical line
            padded_strings = [
                "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
                for ms in strings]
        else:
            # enable wide-character width corrections:
            # pad to a per-cell target width adjusted by the difference
            # between character count and visible width
            s_lens = [max((len(s) for s in re.split("[\r\n]", ms)))
                      for ms in strings]
            visible_widths = [maxwidth - (w - l)
                              for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            # NOTE(review): for an empty ms, splitlines() is [] and the
            # "or ms" fallback iterates the empty string (zero lines) —
            # confirm this is the intended behavior
            padded_strings = ["\n".join([padfn(w, s)
                                         for s in (ms.splitlines() or ms)])
                              for ms, w in zip(strings, visible_widths)]
    else:  # single-line cell values
        if not enable_widechars and not has_invisible:
            padded_strings = [padfn(maxwidth, s) for s in strings]
        else:
            # enable wide-character width corrections
            s_lens = list(map(len, strings))
            visible_widths = [maxwidth - (w - l)
                              for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            padded_strings = [padfn(w, s)
                              for s, w in zip(strings, visible_widths)]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more generic of two cell types; unknown types rank as
    text, the most generic."""
    # ranking from most specific to most generic
    ranking = [_none_type, _bool_type, int, float, _binary_type, _text_type]

    def rank(t):
        try:
            return ranking.index(t)
        except ValueError:
            return len(ranking) - 1

    return ranking[max(rank(type1), rank(type2))]
def _column_type(strings, has_invisible=True, numparse=True):
    """The least generic type all column values are convertible to.
    >>> _column_type([True, False]) is _bool_type
    True
    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    # classify each cell, then fold pairwise to the most generic type;
    # an empty column comes out as _bool_type (the reduce initializer)
    types = [_type(s, has_invisible, numparse)
             for s in strings]
    return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
    tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
    good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
    tabulate(tbl, headers=hrow) == good_result
    True
    """ # noqa
    if val is None:
        return missingval
    if valtype in [int, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        # decode as ASCII when possible; fall back to str() for values
        # that are not actually bytes
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        # a "colored number" is a numeric string wrapped in ANSI codes:
        # format the bare number, then splice it back between the codes
        is_a_colored_number = (has_invisible and
                               isinstance(val, (_text_type, _binary_type)))
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, is_multiline=False,
                  width_fn=None):
    "Pad string header to width chars given known visible_width of the header."
    if is_multiline:
        # align each physical line separately; the caller supplies
        # width_fn in this mode (it defaults to None otherwise)
        header_lines = re.split(_multiline_codes, header)
        padded_lines = [_align_header(h, alignment, width, width_fn(h))
                        for h in header_lines]
        return "\n".join(padded_lines)
    # else: not multiline
    # widen the target by the number of invisible (ANSI) characters so
    # the visible text still lines up
    ninvisible = len(header) - visible_width
    width += ninvisible
    if alignment == "left":
        return _padright(width, header)
    elif alignment == "center":
        return _padboth(width, header)
    elif not alignment:
        return "{0}".format(header)
    else:
        return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v, row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
    """Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts (usually used with headers="keys")
    * list of OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    If showindex="default", show row indices of the pandas.DataFrame.
    If showindex="always", show row indices for all types of data.
    If showindex="never", don't show row indices for all types of data.
    If showindex is an iterable, show its values as row indices.
    """
    try:
        bool(headers)
    except ValueError:  # numpy.ndarray, pandas.core.index.Index, ...
        headers = list(headers)
    index = None
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = tabular_data.keys()
            # columns have to be transposed
            rows = list(izip_longest(*tabular_data.values()))
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a
            # pandas.DataFrame (pandas 0.11.0)
            keys = list(tabular_data)
            if tabular_data.index.name is not None:
                # prepend the index name(s) so the index column is labeled
                if isinstance(tabular_data.index.name, list):
                    keys[:0] = tabular_data.index.name
                else:
                    keys[:0] = [tabular_data.index.name]
            # values matrix doesn't need to be transposed
            vals = tabular_data.values
            # for DataFrames add an index per default
            index = list(tabular_data.index)
            rows = [list(row) for row in vals]
        else:
            raise ValueError(
                "tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and not rows):
            # an empty table (issue #81)
            headers = []
        elif (headers == "keys" and
              hasattr(tabular_data, "dtype") and
              getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                if len(rows) > 0:
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError(
                    'headers for a list of dicts is not a dict or a keyword')
            # missing keys in a row become None cells
            rows = [[row.get(k) for k in keys] for row in rows]
        elif (headers == "keys"
              and hasattr(tabular_data, "description")
              and hasattr(tabular_data, "fetchone")
              and hasattr(tabular_data, "rowcount")):
            # Python Database API cursor object (PEP 0249)
            # print tabulate(cursor, headers='keys')
            headers = [column[0] for column in tabular_data.description]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        if index is not None:
            headers = [index[0]] + list(rows[0])
            index = index[1:]
        else:
            headers = rows[0]
        headers = list(map(_text_type, headers))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))
    # add or remove an index column
    showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
    if showindex == "default" and index is not None:
        rows = _prepend_row_index(rows, index)
    elif isinstance(showindex, Iterable) and not showindex_is_a_str:
        rows = _prepend_row_index(rows, list(showindex))
    elif showindex == "always" or (_bool(showindex) and
                                   not showindex_is_a_str):
        if index is None:
            index = list(range(len(rows)))
        rows = _prepend_row_index(rows, index)
    elif showindex == "never" or (not _bool(showindex) and
                                  not showindex_is_a_str):
        pass
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""]*(ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
             missingval=_DEFAULT_MISSINGVAL, showindex="default",
             disable_numparse=False, colalign=None):
    """Format a fixed width table for pretty printing.
    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------
    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.
    Table headers
    -------------
    To print nice column headers, supply the second argument (`headers`):
      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used
    Otherwise a headerless table is produced.
    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.
    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19
    By default, pandas.DataFrame data have an additional column called
    row index. To add a similar column to all other types of data,
    use `showindex="always"` or `showindex=True`. To suppress row indices
    for all types of data, pass `showindex="never" or `showindex=False`.
    To add a custom row index column, pass `showindex=some_iterable`.
    >>> print(tabulate([["F",24],["M",19]], showindex="always"))
    -  -  --
    0  F  24
    1  M  19
    -  -  --
    Column alignment
    ----------------
    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).
    Table formats
    -------------
    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point. This can also be
    a list or tuple of format strings, one per column.
    `None` values are replaced with a `missingval` string (like
    `floatfmt`, this can also be a list of values for different
    columns):
    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----
    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
    contains the list of currently supported formats.
    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451
    "simple" format is like Pandoc simple_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------
    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+
    "fancy_grid" draws a grid using box-drawing characters:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛
    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |
    "presto" is like tables produce by the Presto CLI:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "presto"))
     strings   |   numbers
    -----------+-----------
     spam      |   41.9999
     eggs      |  451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |
    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |
    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========
    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:
    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}
    "html" produces HTML markup:
    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <thead>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    </thead>
    <tbody>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </tbody>
    </table>
    "latex" produces a tabular environment of LaTeX document markup:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}
    "latex_raw" is similar to "latex", but doesn't escape special characters,
    such as backslash and underscore, so LaTeX commands may embedded into
    cells' values:
    >>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
    \\begin{tabular}{lr}
    \\hline
     spam$_9$    &  41.9999 \\\\
     \\emph{eggs} & 451      \\\\
    \\hline
    \\end{tabular}
    "latex_booktabs" produces a tabular environment of LaTeX document markup
    using the booktabs.sty package:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \end{tabular}
    Number parsing
    --------------
    By default, anything which can be parsed as a number is a number.
    This ensures numbers represented as strings are aligned properly.
    This can lead to weird results for particular strings such as
    specific git SHAs e.g. "42992e1" will be parsed into the number
    429920 and aligned as such.
    To completely disable number parsing (and alignment), use
    `disable_numparse=True`. For more fine grained control, a list column
    indices is used to disable number parsing only on those columns
    e.g. `disable_numparse=[0, 2]` would disable number parsing only on the
    first and third columns.
    """ # noqa
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(
        tabular_data, headers, showindex=showindex)
    # empty values in the first column of RST tables should be
    # escaped (issue #82)
    # "" should be escaped as "\\ " or ".."
    if tablefmt == 'rst':
        list_of_lists, headers = _rst_escape_first_column(list_of_lists,
                                                          headers)
    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] +
                           ['\t'.join(map(_text_type, row))
                            for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
    if tablefmt in multiline_formats and _is_multiline(plain_text):
        tablefmt = multiline_formats.get(tablefmt, tablefmt)
        is_multiline = True
    else:
        is_multiline = False
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    # format rows and columns, convert numeric values to strings
    cols = list(izip_longest(*list_of_lists))
    numparses = _expand_numparse(disable_numparse, len(cols))
    coltypes = [_column_type(col, numparse=np) for col, np in
                zip(cols, numparses)]
    # floatfmt / missingval may be per-column sequences; pad them out with
    # the defaults so every column has a value
    if isinstance(floatfmt, basestring):  # old version
        # just duplicate the string to use in each column
        float_formats = len(cols) * [floatfmt]
    else:  # if floatfmt is list, tuple etc we have one per column
        float_formats = list(floatfmt)
        if len(float_formats) < len(cols):
            float_formats.extend((len(cols)-len(float_formats)) *
                                 [_DEFAULT_FLOATFMT])
    if isinstance(missingval, basestring):
        missing_vals = len(cols) * [missingval]
    else:
        missing_vals = list(missingval)
        if len(missing_vals) < len(cols):
            missing_vals.extend((len(cols)-len(missing_vals)) *
                                [_DEFAULT_MISSINGVAL])
    cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
            for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats,
                                             missing_vals)]
    # align columns: numeric columns get numalign, the rest stralign;
    # explicit colalign entries override per column
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    if colalign is not None:
        assert isinstance(colalign, Iterable)
        for idx, align in enumerate(colalign):
            aligns[idx] = align
    minwidths = [width_fn(h) + MIN_PADDING
                 for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible, enable_widechars,
                          is_multiline)
            for c, a, minw in zip(cols, aligns, minwidths)]
    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, max(width_fn(cl)
                                   for cl in c)) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw, width_fn(h), is_multiline,
                                 width_fn)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [max(width_fn(cl) for cl in c) for c in cols]
        rows = list(zip(*cols))
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
    return _format_table(tablefmt, headers, rows, minwidths, aligns,
                         is_multiline)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are
False, and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    "Render one data row via _build_row, append it to `lines`, return `lines`."
    rendered = _build_row(padded_cells, colwidths, colaligns, rowfmt)
    lines.append(rendered)
    return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths,
                          colaligns, rowfmt, pad):
    """Append a row whose cells may span several physical lines.

    Each cell is split on newlines; shorter cells are topped up with
    blank lines so every cell has the same height, then one output line
    is emitted per visual line of the row.
    """
    colwidths = [w - 2 * pad for w in padded_widths]
    split_cells = [cell.splitlines() for cell in padded_multiline_cells]
    height = max(map(len, split_cells))  # tallest cell sets the row height
    # Vertically pad shorter cells with blank lines of the column width.
    equalized = [cl + [" " * w] * (height - len(cl))
                 for cl, w in zip(split_cells, colwidths)]
    for line_index in range(height):
        physical_line = [cl[line_index] for cl in equalized]
        _append_basic_row(lines, _pad_row(physical_line, pad),
                          colwidths, colaligns, rowfmt)
    return lines
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _append_line(lines, colwidths, colaligns, linefmt):
    "Render a horizontal rule via _build_line, append it, and return `lines`."
    rule = _build_line(colwidths, colaligns, linefmt)
    lines.append(rule)
    return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
    """Produce a plain-text representation of the table.

    `fmt` is a TableFormat; `rows` are pre-formatted string cells;
    `colwidths`/`colaligns` are per-column widths and alignments.
    Returns the full table as a single newline-joined string ("" when
    there are neither headers nor rows).
    """
    lines = []
    # Elements the format suppresses when a header row is present.
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow
    # Column widths including the per-cell padding on both sides.
    padded_widths = [(w + 2*pad) for w in colwidths]
    if is_multiline:
        # do it later, in _append_multiline_row
        pad_row = lambda row, _: row  # noqa
        append_row = partial(_append_multiline_row, pad=pad)
    else:
        pad_row = _pad_row
        append_row = _append_basic_row
    padded_headers = pad_row(headers, pad)
    padded_rows = [pad_row(row, pad) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.lineabove)
    if padded_headers:
        append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            _append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
            _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
        # the last row without a line below
        append_row(lines, padded_rows[-1], padded_widths, colaligns,
            fmt.datarow)
    else:
        for row in padded_rows:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
    if fmt.linebelow and "linebelow" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.linebelow)
    if headers or rows:
        return "\n".join(lines)
    else:  # a completely empty table
        return ""
def _main():
    """\
    Usage: tabulate [options] [FILE ...]

    Pretty-print tabular data.
    See also https://bitbucket.org/astanin/python-tabulate

    FILE                      a filename of the file with tabular data;
                              if "-" or missing, read data from stdin.

    Options:

    -h, --help                show this message
    -1, --header              use the first row of data as a table header
    -o FILE, --output FILE    print table to FILE (default: stdout)
    -s REGEXP, --sep REGEXP   use a custom column separator (default: whitespace)
    -F FPFMT, --float FPFMT   floating point number format (default: g)
    -A ALIGNS, --align ALIGNS space-separated list of column alignments
    -f FMT, --format FMT      set output table format; supported formats:
                              plain, simple, grid, fancy_grid, pipe, orgtbl,
                              rst, mediawiki, html, latex, latex_raw,
                              latex_booktabs, tsv
                              (default: simple)
    """  # noqa
    import getopt
    import sys
    import textwrap
    usage = textwrap.dedent(_main.__doc__)
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "h1o:s:F:A:f:",
                                   # BUG FIX: "output" previously lacked the
                                   # trailing "=", so getopt treated --output
                                   # as a flag and its FILE argument was lost.
                                   ["help", "header", "output=", "sep=",
                                    "float=", "align=", "format="])
    except getopt.GetoptError as e:
        print(e)
        print(usage)
        sys.exit(2)
    headers = []
    floatfmt = _DEFAULT_FLOATFMT
    colalign = None
    tablefmt = "simple"
    sep = r"\s+"
    outfile = "-"
    for opt, value in opts:
        if opt in ["-1", "--header"]:
            headers = "firstrow"
        elif opt in ["-o", "--output"]:
            outfile = value
        elif opt in ["-F", "--float"]:
            floatfmt = value
        # BUG FIX: this branch previously tested ["-C", "--colalign"], which
        # getopt never produces (only "-A"/"--align" are registered above),
        # so the alignment option was silently ignored.
        elif opt in ["-A", "--align"]:
            colalign = value.split()
        elif opt in ["-f", "--format"]:
            if value not in tabulate_formats:
                print("%s is not a supported table format" % value)
                print(usage)
                sys.exit(3)
            tablefmt = value
        elif opt in ["-s", "--sep"]:
            sep = value
        elif opt in ["-h", "--help"]:
            print(usage)
            sys.exit(0)
    files = [sys.stdin] if not args else args
    with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
        for f in files:
            if f == "-":
                f = sys.stdin
            if _is_file(f):
                _pprint_file(f, headers=headers, tablefmt=tablefmt,
                             sep=sep, floatfmt=floatfmt, file=out,
                             colalign=colalign)
            else:
                with open(f) as fobj:
                    _pprint_file(fobj, headers=headers, tablefmt=tablefmt,
                                 sep=sep, floatfmt=floatfmt, file=out,
                                 colalign=colalign)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign):
    "Read sep-delimited rows from an open file object and print them as a table."
    raw_lines = fobject.readlines()
    table = [re.split(sep, ln.rstrip()) for ln in raw_lines if ln.strip()]
    formatted = tabulate(table, headers, tablefmt, floatfmt=floatfmt,
                         colalign=colalign)
    print(formatted, file=file)
# Entry point when the module is run as a command-line script.
if __name__ == "__main__":
    _main()
|
{
    "name": "Delivery Sequence",
    # BUG FIX: key was misspelled "vesion", which Odoo ignores, leaving
    # the module without a declared version.
    "version": "12.0.1.0.0",
    "author": "IT-Projects LLC, Ivan Yelizariev",
    "license": "LGPL-3",
    "category": "Custom",
    "website": "https://yelizariev.github.io",
    "depends": ["delivery"],
    "data": ["views.xml"],
    "installable": False,
}
|
from setuptools import setup, Extension
import sys
import os
import psutil
def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
    """Monkey-patch replacement for distutils' CCompiler.compile that
    compiles the source files on a thread pool instead of serially.

    Signature matches distutils.ccompiler.CCompiler.compile; returns the
    list of object file names.  Pool size is the number of physical CPU
    cores as reported by psutil.
    """
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    # parallel code
    N = psutil.cpu_count(logical=False)  # number of parallel compilations
    import multiprocessing.pool
    def _single_compile(obj):
        # Compile one object file; objects absent from `build` are skipped.
        try: src, ext = build[obj]
        except KeyError: return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
    # convert to list, imap is evaluated on-demand
    list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
    return objects
''' Note:
to build Boost.Python on Windows with mingw
bjam target-os=windows/python=3.4 toolset=gcc variant=debug,release link=static,shared threading=multi runtime-link=shared cxxflags="-include cmath "
also insert this on top of boost/python.hpp :
'''
def getExtensions():
    """Assemble and return the list of setuptools Extension objects for
    the MultiNEAT native module.

    The build flavour is selected by the MN_BUILD environment variable:
    unset (use a pre-generated _MultiNEAT.cpp if present), 'cython', or
    'boost'.  Exits with an error when sources are missing; raises
    AttributeError for an unknown MN_BUILD value.

    Fix: removed leftover debug prints (dir(exx), exx, its compile args)
    that polluted every boost build's output.
    """
    platform = sys.platform
    extensionsList = []
    sources = ['src/Genome.cpp',
               'src/Innovation.cpp',
               'src/NeuralNetwork.cpp',
               'src/Parameters.cpp',
               'src/PhenotypeBehavior.cpp',
               'src/Population.cpp',
               'src/Random.cpp',
               'src/Species.cpp',
               'src/Substrate.cpp',
               'src/Utils.cpp']
    # NOTE(review): these are GCC/Clang flags, yet '/EHsc' (MSVC) is also
    # appended below for Windows - mixing looks suspect; confirm intended
    # toolchain before changing.
    extra = ['-march=native',
             '-mtune=native',
             '-g',
             ]
    if platform == 'darwin':
        extra += ['-stdlib=libc++',
                  '-std=c++11', ]
    else:
        extra += ['-std=gnu++11']
    # 'darwin' contains the substring 'win', hence the explicit exclusion.
    is_windows = 'win' in platform and platform != 'darwin'
    if is_windows:
        extra.append('/EHsc')
    else:
        extra.append('-w')
    prefix = os.getenv('PREFIX')
    if prefix and len(prefix) > 0:
        extra += ["-I{}/include".format(prefix)]
    build_sys = os.getenv('MN_BUILD')
    if build_sys is None:
        if os.path.exists('_MultiNEAT.cpp'):
            sources.insert(0, '_MultiNEAT.cpp')
            extra.append('-O3')
            extensionsList.extend([Extension('MultiNEAT._MultiNEAT',
                                             sources,
                                             extra_compile_args=extra)],
                                  )
        else:
            print('Source file is missing and MN_BUILD environment variable is not set.\n'
                  'Specify either \'cython\' or \'boost\'. Example to build in Linux with Cython:\n'
                  '\t$ export MN_BUILD=cython')
            exit(1)
    elif build_sys == 'cython':
        from Cython.Build import cythonize
        sources.insert(0, '_MultiNEAT.pyx')
        extra.append('-O3')
        extensionsList.extend(cythonize([Extension('MultiNEAT._MultiNEAT',
                                                   sources,
                                                   extra_compile_args=extra)],
                                        ))
    elif build_sys == 'boost':
        is_python_2 = sys.version_info[0] < 3
        sources.insert(0, 'src/PythonBindings.cpp')
        if is_windows:
            if is_python_2:
                raise RuntimeError("Python prior to version 3 is not supported on Windows due to limits of VC++ compiler version")
        libs = ['boost_system', 'boost_serialization']
        if is_python_2:
            libs += ['boost_python', "boost_numpy"]
        else:
            # with boost 1.67 you need boost_python3x and boost_numpy3x where x is python version 3.x
            libs += ['boost_python36', "boost_numpy36"]  # in Ubuntu 14 there is only 'boost_python-py34'
        # for Windows with mingw
        # libraries= ['libboost_python-mgw48-mt-1_58',
        #             'libboost_serialization-mgw48-mt-1_58'],
        # include_dirs = ['C:/MinGW/include', 'C:/Users/Peter/Desktop/boost_1_58_0'],
        # library_dirs = ['C:/MinGW/lib', 'C:/Users/Peter/Desktop/boost_1_58_0/stage/lib'],
        extra.extend(['-DUSE_BOOST_PYTHON', '-DUSE_BOOST_RANDOM',  # '-O0',
                      # '-DVDEBUG',
                      ])
        exx = Extension('MultiNEAT._MultiNEAT',
                        sources,
                        libraries=libs,
                        extra_compile_args=extra)
        extensionsList.append(exx)
    else:
        raise AttributeError('Unknown tool: {}'.format(build_sys))
    return extensionsList
# Package metadata; native extension modules are assembled by getExtensions().
setup(name='multineat',
      version='0.5',  # Update version in conda/meta.yaml as well
      packages=['MultiNEAT'],
      ext_modules=getExtensions())
|
"""A module for handling and accessing both the in-memory, and on-disk,
representation of a set of routes as a set of segments. Where each segment
specifies its start and end stop ids, and other data (see
topology_shapefile_data_model.py for more."""
import sys
import csv
import re
import operator
import itertools
import misc_utils
import topology_shapefile_data_model as tp_model
def get_route_order_key_from_name(route_def):
    """Derive a sort key for a route: the first number embedded in its
    short name when present, otherwise the short name itself; falls back
    to the long name when there is no short name."""
    short = route_def.short_name
    if not short:
        return route_def.long_name
    # Courtesy http://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
    numbers = re.findall(r'\d+', short)
    if numbers:
        return int(numbers[0])
    return short
def get_route_names_sorted(route_names):
    """Return route names sorted in a human-friendly order for writing.

    Short names (3 chars or fewer, e.g. 'R42') are sorted numerically by
    the digits after the leading letter; longer names sort as plain
    strings.  The first element decides which scheme is used.

    Fix: an empty input previously raised IndexError on route_names[0];
    it now returns [].
    """
    if not route_names:
        return []
    if len(route_names[0]) <= 3:
        # Dropping the 'R' for route, for short route names, and sort
        # by integer version of remaining string.
        keyfunc = lambda s: int(s[1:])
    else:
        # Just sort by the full route name string.
        keyfunc = lambda s: s
    return sorted(route_names, key=keyfunc)
class Route_Def:
    """In-memory definition of a route: its identifying names, direction
    names, and the ordered list of segment IDs that make it up."""
    def __init__(self, route_id, short_name, long_name, dir_names,
            ordered_seg_ids, gtfs_origin_id = None):
        # route_id: ID within this topology.
        self.id = route_id
        # gtfs_origin_id: ID of the GTFS route this was derived from, if any.
        self.gtfs_origin_id = gtfs_origin_id
        self.short_name = short_name
        self.long_name = long_name
        # dir_names: names of the route's directions of travel.
        self.dir_names = dir_names
        # ordered_seg_ids: segment IDs in route travel order.
        self.ordered_seg_ids = ordered_seg_ids
class Seg_Reference:
    """A small lightweight class for using as an in-memory storage of
    key segment topology information, and reference to actual segment
    feature in a shapefile layer.
    This is designed to save cost of reading actual
    shapefile frequently, e.g. for algorithms that need to search and/or
    add to segments list a lot."""
    def __init__(self, seg_id, first_stop_id, second_stop_id,
            route_dist_on_seg=None, routes=None):
        self.seg_id = seg_id  # Segment ID
        self.first_id = first_stop_id    # Stop ID at one end of the segment
        self.second_id = second_stop_id  # Stop ID at the other end
        # Route distance along this segment; may remain None until it is
        # read from the shapefile or calculated.
        self.route_dist_on_seg = route_dist_on_seg
        # IDs of routes traversing this segment. A fresh list is created
        # per instance (avoids the shared mutable-default pitfall).
        if routes is None:
            self.routes = []
        else:
            self.routes = routes
        self.seg_ii = None  # Index into segments layer shapefile -
class Route_Ext_Info:
    """Class for holding relevant info about extended routes."""
    def __init__(self, ext_id, ext_name, ext_type,
            exist_r_s_name, exist_r_l_name,
            exist_r_connect_stop_gtfs_id, exist_r_first_stop_gtfs_id,
            upd_r_short_name, upd_r_long_name, upd_dir_name):
        self.ext_id = ext_id      # ID of this route extension
        self.ext_name = ext_name  # Human-readable extension name
        # ext_type: must be one of tp_model.ROUTE_EXT_ALL_TYPES (asserted).
        self.ext_type = ext_type
        # Short/long names of the existing route being extended.
        self.exist_r_short_name = exist_r_s_name
        self.exist_r_long_name = exist_r_l_name
        # GTFS stop IDs: where the extension connects to the existing
        # route, and (for new-route extensions) its first stop.
        self.exist_r_connect_stop_gtfs_id = exist_r_connect_stop_gtfs_id
        self.exist_r_first_stop_gtfs_id = exist_r_first_stop_gtfs_id
        # Updated names and direction name for the resulting route.
        self.upd_r_short_name = upd_r_short_name
        self.upd_r_long_name = upd_r_long_name
        self.upd_dir_name = upd_dir_name
        # Sanity checks on required fields.
        assert ext_type in tp_model.ROUTE_EXT_ALL_TYPES
        assert self.exist_r_connect_stop_gtfs_id is not None
        if ext_type == tp_model.ROUTE_EXT_TYPE_NEW:
            # A brand-new route also needs the existing route's first stop.
            assert self.exist_r_first_stop_gtfs_id is not None
        assert upd_dir_name
        return
def get_print_name(route_def):
    """Return the human-readable display name for a route definition
    (delegates to misc_utils.get_route_print_name)."""
    return misc_utils.get_route_print_name(route_def.short_name,
        route_def.long_name)
def add_route_to_seg_ref(seg_ref, route_id):
    """Record that route_id traverses seg_ref; no-op if already listed."""
    if route_id in seg_ref.routes:
        return
    seg_ref.routes.append(route_id)
    return
def seg_has_stops(seg_ref, stop_id_1, stop_id_2):
    """True when seg_ref's endpoint stops are exactly the given pair,
    in either order."""
    endpoints = (seg_ref.first_id, seg_ref.second_id)
    return endpoints == (stop_id_1, stop_id_2) \
        or endpoints == (stop_id_2, stop_id_1)
def get_seg_dist_km(seg_ref):
    """Return the route distance of the segment in km, or None (with a
    warning) when it isn't available.

    Bug fix: the old code only tested `seg_ref is not None`, so a
    seg_ref whose route_dist_on_seg was still unset raised TypeError in
    the division, and the warning path dereferenced seg_ref.seg_id when
    seg_ref itself was None (AttributeError). Now both cases take the
    warning path safely."""
    if seg_ref is not None and seg_ref.route_dist_on_seg is not None:
        return seg_ref.route_dist_on_seg / tp_model.ROUTE_DIST_RATIO_TO_KM
    seg_id_str = str(seg_ref.seg_id) if seg_ref is not None else "(unknown)"
    print("Warning:- asked for distance of a seg_ref with ID %s, but "
        "route distance hasn't yet been read or calculated for this "
        "seg_ref." % seg_id_str)
    return None
def get_other_stop_id(seg_ref, stop_id):
    """Given one endpoint stop of a segment, return the opposite one.
    Asserts stop_id actually belongs to the segment."""
    if stop_id == seg_ref.first_id:
        return seg_ref.second_id
    assert stop_id == seg_ref.second_id
    return seg_ref.first_id
def get_seg_ref_with_id(seg_id, seg_refs):
    """Linear search for the seg_ref with the given ID; None if absent."""
    return next((sr for sr in seg_refs if sr.seg_id == seg_id), None)
def build_seg_refs_lookup_table(seg_refs):
    """Return a dict mapping seg_id -> seg_ref for O(1) lookups."""
    return dict((sr.seg_id, sr) for sr in seg_refs)
def find_seg_ref_matching_stops(all_seg_refs, stop_id_1, stop_id_2):
    """Return the first seg_ref joining the two given stops, else None."""
    for candidate in all_seg_refs:
        if seg_has_stops(candidate, stop_id_1, stop_id_2):
            return candidate
    return None
def add_update_seg_ref(start_stop_id, end_stop_id, route_id,
        route_dist_on_seg, all_seg_refs, seg_refs_this_route,
        possible_route_duplicates=False):
    """Add a new segment to the two pre-existing lists all_seg_refs, and
    seg_refs_this_route. If segment already exists, update its route list.

    Returns (seg_ref, new_status) where new_status is True when a brand
    new Seg_Reference was created.

    Bug fix: with possible_route_duplicates=True the duplicate guard
    previously tested `if not matched_seg_ref:` - always falsy inside
    that branch - so both paths appended unconditionally and the same
    segment could be added twice to seg_refs_this_route. It now tests
    the `matched_in_route` lookup it computes."""
    seg_id = None
    new_status = False
    seg_ref_to_return = None
    matched_seg_ref = find_seg_ref_matching_stops(all_seg_refs, start_stop_id,
        end_stop_id)
    if matched_seg_ref:
        new_status = False
        add_route_to_seg_ref(matched_seg_ref, route_id)
        seg_ref_to_return = matched_seg_ref
        if possible_route_duplicates:
            # Defensive case:- don't want to add a segment twice to the
            # same route.
            matched_in_route = find_seg_ref_matching_stops(seg_refs_this_route,
                start_stop_id, end_stop_id)
            if not matched_in_route:
                seg_refs_this_route.append(seg_ref_to_return)
        else:
            seg_refs_this_route.append(seg_ref_to_return)
    else:
        new_status = True
        # +1 since we want to start counter at 1
        seg_id = len(all_seg_refs)+1
        new_seg_ref = Seg_Reference(seg_id, start_stop_id, end_stop_id,
            route_dist_on_seg, routes = [route_id])
        # Its a new segment, so append to the list of all segments.
        all_seg_refs.append(new_seg_ref)
        seg_ref_to_return = new_seg_ref
        seg_refs_this_route.append(seg_ref_to_return)
    return seg_ref_to_return, new_status
def route_defs_match_statuses(route_def, route_def2):
    """Two route definitions 'match' when at least one of (id,
    short_name, long_name) is present on both, and every attribute
    present on both is equal."""
    checks = []
    if route_def.id is not None and route_def2.id is not None:
        checks.append(route_def.id == route_def2.id)
    if route_def.short_name and route_def2.short_name:
        checks.append(route_def.short_name == route_def2.short_name)
    if route_def.long_name and route_def2.long_name:
        checks.append(route_def.long_name == route_def2.long_name)
    # At least one comparable attribute, and no disagreement.
    return bool(checks) and all(checks)
def get_matching_route_defs(route_defs, search_route_def):
    """Return every route_def that matches search_route_def
    (see route_defs_match_statuses for the match rule)."""
    return [rdef for rdef in route_defs
        if route_defs_match_statuses(rdef, search_route_def)]
def route_def_matches_gtfs_route(route_def, gtfs_route):
    """True when the GTFS route agrees with every attribute route_def
    specifies (id, short name, long name), with at least one specified."""
    checks = []
    if route_def.id is not None:
        checks.append(route_def.id == gtfs_route.route_id)
    if route_def.short_name:
        checks.append(route_def.short_name == gtfs_route.route_short_name)
    if route_def.long_name:
        checks.append(route_def.long_name == gtfs_route.route_long_name)
    # At least one comparable attribute, and no disagreement.
    return bool(checks) and all(checks)
def get_gtfs_route_ids_matching_route_defs(route_defs_to_match, gtfs_routes):
    """Match each Route_Def in route_defs_to_match against gtfs_routes.

    Returns (matching_gtfs_ids, route_defs_match_status): the GTFS route
    IDs matched (first match kept on duplicates), and a list of bools
    aligned with route_defs_to_match. Warnings are printed for route
    defs that matched more than one GTFS route or none at all.
    NOTE: relies on Python 2 zip() returning a list (it is sliced and
    .remove()d below) - not directly portable to Python 3.
    """
    route_defs_to_check_match = zip(route_defs_to_match,
        itertools.count(0))
    matching_gtfs_ids = []
    route_defs_match_status = [False] * len(route_defs_to_match)
    all_matched = False
    for gtfs_route in gtfs_routes:
        matches = False
        # Note we take a copy of list here since we want to remove from it.
        for route_def, r_index in route_defs_to_check_match[:]:
            if route_def_matches_gtfs_route(route_def, gtfs_route):
                route_defs_match_status[r_index] = True
                gtfs_route_id = gtfs_route.route_id
                if gtfs_route_id not in matching_gtfs_ids:
                    matching_gtfs_ids.append(gtfs_route_id)
                else:
                    # Same GTFS route matched by a second route def -
                    # keep only the first match.
                    print "Warning: route def just matched, with ID "\
                        "%s, name %s, already matched a GTFS route. "\
                        "Ignoring 2nd match." \
                        % (gtfs_route_id, get_print_name(route_def))
                if route_def.id == gtfs_route_id:
                    # Only remove the route_def in this case, since we matched
                    # on ID. Otherwise there may be more matches.
                    route_defs_to_check_match.remove((route_def,r_index))
                    if len(route_defs_to_check_match) == 0:
                        all_matched = True
                        break
        if all_matched:
            # All routes matched, we're done.
            break
    for r_index, match_status in enumerate(route_defs_match_status):
        if not match_status:
            unmatched_r_def = route_defs_to_match[r_index]
            print "Warning: route given by ID %s, name %s, didn't match "\
                "any GTFS routes in given selection." \
                % (unmatched_r_def.id, get_print_name(unmatched_r_def))
    return matching_gtfs_ids, route_defs_match_status
def create_route_defs_list_from_route_segs(segs_by_route,
        route_dirs, mode_config, r_ids_output_order=None):
    """Turn a dict containing ordered lists of seg references that make up
    each route (segs_by_route) and related dictionary of route dir names
    (route_dirs) into a list of route definitions. If r_ids_output_order
    provided, routes defs in list will be ordered in that order."""
    if r_ids_output_order is None:
        r_ids_output_order = segs_by_route.keys()
    route_defs = []
    for r_id in r_ids_output_order:
        # Haven't yet implemented ability to create route long names
        short_name = tp_model.route_name_from_id(r_id, mode_config)
        seg_ids = [seg_ref.seg_id for seg_ref in segs_by_route[r_id]]
        route_defs.append(
            Route_Def(r_id, short_name, None, route_dirs[r_id], seg_ids))
    return route_defs
def find_linking_stop_id(seg1, seg2):
    """Checks if two segments are linked by a common stop. If true, returns
    the ID of the linking stop. If they don't link, returns None."""
    seg2_stops = (seg2.first_id, seg2.second_id)
    if seg1.first_id in seg2_stops:
        return seg1.first_id
    if seg1.second_id in seg2_stops:
        return seg1.second_id
    return None
def find_non_linking_stop_id(seg1, seg2):
    """Find the stop in seg1 that doesn't link to seg2; None when seg1
    doesn't touch seg2 at all."""
    seg2_stops = (seg2.first_id, seg2.second_id)
    if seg1.first_id in seg2_stops:
        return seg1.second_id
    if seg1.second_id in seg2_stops:
        return seg1.first_id
    return None
def get_stop_order(seg_ref, next_seg_ref):
    """Use the fact that for two segments, in the first segment, there must be
    a matching stop with the 2nd segment. Return the IDs of the 1st and 2nd
    stops in the first segment.

    Exits the process (sys.exit(1)) if the segments share no stop, as
    that indicates broken route topology."""
    linking_stop_id = find_linking_stop_id(seg_ref, next_seg_ref)
    if linking_stop_id is None:
        print "Error, in segment with id %d, next seg id is %d, "\
            "stop a is #%d, stop b is #%d, "\
            "next seg stop a is #%d, stop b is #%d, "\
            "couldn't work out stop order."\
            % (seg_ref.seg_id, next_seg_ref.seg_id, \
            seg_ref.first_id, seg_ref.second_id, \
            next_seg_ref.first_id, next_seg_ref.second_id)
        sys.exit(1)
    else:
        # In travel order, the non-shared stop comes first and the
        # shared (linking) stop second.
        first_stop_id = get_other_stop_id(seg_ref, linking_stop_id)
        second_stop_id = linking_stop_id
    return first_stop_id, second_stop_id
def get_stop_ids_in_travel_dir(route_seg_refs, seg_ii, dir_index):
    """Returns the stop ids of segment ii in route_route_seg_refs given
    order of travel by dir_index. (Assumes route_seg_refs ordered in
    direction of travel of dir_index 0.)"""
    seg_ref = route_seg_refs[seg_ii]
    # NOTE(review): this bounds assert runs *after* the index above, so an
    # out-of-range seg_ii raises IndexError before the assert fires.
    assert seg_ii >= 0 and seg_ii <= len(route_seg_refs) - 1
    if dir_index == 0:
        if seg_ii < len(route_seg_refs) - 1:
            # Normal case: order stops relative to the following segment.
            stop_ids = get_stop_order(seg_ref,
                route_seg_refs[seg_ii+1])
        else:
            # Special case for last seg - need to use prev seg.
            linking_id = find_linking_stop_id(seg_ref,
                route_seg_refs[seg_ii-1])
            other_id = get_other_stop_id(seg_ref, linking_id)
            stop_ids = (linking_id, other_id)
    else:
        if seg_ii > 0:
            # Reverse direction: order stops relative to the previous segment.
            stop_ids = get_stop_order(seg_ref,
                route_seg_refs[seg_ii-1])
        else:
            # Special case for first seg - need to use next seg.
            linking_id = find_linking_stop_id(seg_ref,
                route_seg_refs[seg_ii+1])
            other_id = get_other_stop_id(seg_ref, linking_id)
            # Remember we're going 'backwards' in this case
            stop_ids = (linking_id, other_id)
    return stop_ids
def build_seg_links(route_seg_refs):
    """Create a dictionary, which for each segment ID, gives the list
    of other segments linked to that id via a common stop."""
    seg_links = dict((seg.seg_id, []) for seg in route_seg_refs)
    # Examine each unordered pair of segments exactly once.
    for seg_a, seg_b in itertools.combinations(route_seg_refs, 2):
        if find_linking_stop_id(seg_a, seg_b) is not None:
            seg_links[seg_a.seg_id].append(seg_b.seg_id)
            seg_links[seg_b.seg_id].append(seg_a.seg_id)
    return seg_links
def order_segs_based_on_links(route_seg_refs, seg_links):
    """Construct and ordered list of all segments within a route
    (given in list route_seg_refs), based on their links via common stops.

    Exits the process on broken topology (no terminus segment, a
    branching segment with >2 links, or an incomplete chain).

    Bug fixes: the >2-links error message referenced an undefined name
    `currseg` (NameError when hit); the unlinked-segment report searched
    route_seg_refs - where every segment is trivially found - instead of
    the assembled ordered_seg_refs, so it always printed an empty list.
    """
    # Ok: start with one of the segments that only has one link,
    # i.e. a route terminus.
    start_seg_id = None
    for seg_id, links in seg_links.iteritems():
        if len(links) == 1:
            start_seg_id = seg_id
            break
    if start_seg_id is None:
        print("Error: no segment with 1 link.")
        sys.exit(1)
    ordered_seg_refs = [get_seg_ref_with_id(start_seg_id, route_seg_refs)]
    prev_seg_id = start_seg_id
    curr_seg_id = seg_links[start_seg_id][0]
    while True:
        curr_seg_ref = get_seg_ref_with_id(curr_seg_id, route_seg_refs)
        ordered_seg_refs.append(curr_seg_ref)
        links = seg_links[curr_seg_id]
        if len(links) > 2:
            # Branching routes aren't supported by this simple chaining.
            print("Error, segment %d is linked to %d other segments %s" %
                (curr_seg_id, len(links), links))
            sys.exit(1)
        if len(links) == 1:
            # We have reached the final segment in the route.
            break
        next_seg_id = None
        for link_seg_id in links:
            if link_seg_id != prev_seg_id:
                next_seg_id = link_seg_id
        assert next_seg_id is not None
        prev_seg_id = curr_seg_id
        curr_seg_id = next_seg_id
    if len(route_seg_refs) != len(ordered_seg_refs):
        print("Error: total # segments for this route is %d, but only "
            "found a linked chain of %d segments."
            % (len(route_seg_refs), len(ordered_seg_refs)))
        unlinked_seg_ids = []
        for seg in route_seg_refs:
            # Report the segments that never made it into the chain.
            if get_seg_ref_with_id(seg.seg_id, ordered_seg_refs) is None:
                unlinked_seg_ids.append(seg.seg_id)
        print("Unlinked segment IDs: %s" % unlinked_seg_ids)
        sys.exit(1)
    return ordered_seg_refs
def get_set_of_stops_in_route_so_far(segs_so_far):
    """Return the set of all stop IDs touched by the segments so far."""
    stop_ids = set()
    for seg_ref in segs_so_far:
        stop_ids.add(seg_ref.first_id)
        stop_ids.add(seg_ref.second_id)
    return stop_ids
def get_seg_id_with_shortest_dist(link_seg_ids, seg_refs,
        link_dest_stop_ids_disallowed):
    """Of the candidate link segment IDs, return the one with the
    smallest route distance, skipping segments that touch any disallowed
    stop. None when every candidate is disallowed."""
    best_id = None
    best_dist = float("inf")
    for cand_id in link_seg_ids:
        cand = get_seg_ref_with_id(cand_id, seg_refs)
        if cand.first_id in link_dest_stop_ids_disallowed \
                or cand.second_id in link_dest_stop_ids_disallowed:
            continue
        if cand.route_dist_on_seg < best_dist:
            best_dist = cand.route_dist_on_seg
            best_id = cand_id
    return best_id
def get_links_sorted_by_distance(link_seg_ids, seg_refs,
        link_dest_stop_ids_disallowed):
    """Return candidate link seg IDs sorted ascending by route distance,
    skipping segments that touch a disallowed stop. Returns None (not
    []) when no candidate survives the filter."""
    candidates = []
    for cand_id in link_seg_ids:
        cand = get_seg_ref_with_id(cand_id, seg_refs)
        if cand.first_id in link_dest_stop_ids_disallowed \
                or cand.second_id in link_dest_stop_ids_disallowed:
            continue
        candidates.append((cand_id, cand.route_dist_on_seg))
    if not candidates:
        return None
    # Stable sort on distance only, so equal-distance links keep their
    # original relative order.
    candidates.sort(key=lambda pair: pair[1])
    return [cand_id for cand_id, _dist in candidates]
def get_seg_id_with_stop_ids(seg_refs, stop_id_a, stop_id_b):
    """Return the ID of the (at most one) segment whose endpoints are
    exactly stop_id_a and stop_id_b, else None. Asserts uniqueness."""
    matches = [seg.seg_id for seg in seg_refs
        if stop_id_a in (seg.first_id, seg.second_id)
        and stop_id_b in (seg.first_id, seg.second_id)]
    assert len(matches) <= 1
    return matches[0] if matches else None
def get_seg_ids_that_include_stop_id(seg_refs, stop_id):
    """IDs of all segments having stop_id as one of their endpoints."""
    return [seg_ref.seg_id for seg_ref in seg_refs
        if stop_id in (seg_ref.first_id, seg_ref.second_id)]
def get_seg_ids_with_minimum_links(seg_ids, seg_links):
    """Return (ids, n): n is the smallest link count among seg_ids, ids
    are all segments having exactly n links."""
    min_links = min(len(seg_links[seg_id]) for seg_id in seg_ids)
    min_link_segs = [seg_id for seg_id in seg_ids
        if len(seg_links[seg_id]) == min_links]
    return min_link_segs, min_links
def get_seg_refs_for_ordered_stop_ids(stop_ids, seg_refs):
    """Map an ordered list of stop IDs to the list of seg_refs joining
    each consecutive stop pair.

    Returns [] (after printing a warning) as soon as any consecutive
    pair has no joining segment in seg_refs."""
    ordered_segs = []
    for stop_id_a, stop_id_b in misc_utils.pairs(stop_ids):
        seg_id = get_seg_id_with_stop_ids(seg_refs,
            stop_id_a, stop_id_b)
        if seg_id is None:
            # Chain is broken - discard any partial result.
            print "WARNING:- the pattern being processed contains no "\
                "segments with stop pair IDs %d, %d, in list of "\
                "ordered stop ids you requested."\
                % (stop_id_a, stop_id_b)
            ordered_segs = []
            break
        else:
            seg_ref = get_seg_ref_with_id(seg_id, seg_refs)
            ordered_segs.append(seg_ref)
    return ordered_segs
def get_full_stop_pattern_segs(all_pattern_segs, seg_links,
force_first_stop_ids=None):
"""More advanced function to build a list of segments into a route :-
this time by finding a 'full-stop' pattern linking all the segments.
(This is useful if you're trying to reconstruct a single full-stop pattern
from a set of all segments derived from a GTFS file with varying stop
patterns throughout the day.)
(Note: current implementation is unlikely to deal with branching routes
well. It will follow the branch with the most segments, won't include
other branches.)
Note re alg tuning and force_first_stop_ids argument:- after a fair bit
of effort I was able to make the algorithm produce sensible results for
the 'full stop' version of routes with expresses and a 'city loop' trains
in most cases. However a few cases such as the Belgrave line in Melbourne
are difficult to come up with a good outcome with no initial information.
Therefore there is a force_first_stop_ids argument that allows to force
beginning the segment-chain building algorithm at a particular stop(s), to
help get a good result.
"""
full_stop_pattern_segs = []
all_seg_ids = map(operator.attrgetter('seg_id'), all_pattern_segs)
if len(all_pattern_segs) == 1:
full_stop_pattern_segs = list(all_pattern_segs)
return full_stop_pattern_segs
if force_first_stop_ids and len(force_first_stop_ids) >= 3:
# In this case :- we have at least two segments to start from in a
# given order. Build these then add the longest chain at end.
# We know there's no need to extend/reverse from here.
print "Starting building chain with segs between stops %s ...." \
% (force_first_stop_ids)
full_stop_pattern_segs = get_seg_refs_for_ordered_stop_ids(
force_first_stop_ids, all_pattern_segs)
if not full_stop_pattern_segs: return []
first_link_seg_id = full_stop_pattern_segs.pop().seg_id
print "Added seg IDs b/w these stops: %s - next is %d" \
% (map(operator.attrgetter('seg_id'), full_stop_pattern_segs),\
first_link_seg_id)
seg_chain, chain_len = get_longest_seg_linked_chain(first_link_seg_id,
all_pattern_segs, full_stop_pattern_segs, seg_links, {})
full_stop_pattern_segs += seg_chain
return full_stop_pattern_segs
elif force_first_stop_ids and len(force_first_stop_ids) == 2:
# We've been given req'd first two stops, hence req'd first
# segment. So search all options with this segment in order.
print "Starting building chain with seg between stops %s ...." \
% (force_first_stop_ids)
full_stop_pattern_segs = get_seg_refs_for_ordered_stop_ids(
force_first_stop_ids, all_pattern_segs)
if not full_stop_pattern_segs: return []
first_seg_id = full_stop_pattern_segs[0].seg_id
print "First build seg is #%d" % first_seg_id
link_seg_ids = seg_links[first_seg_id]
link_segs = [get_seg_ref_with_id(seg_id, all_pattern_segs) for \
seg_id in link_seg_ids]
cand_init_link_seg_ids = get_seg_ids_that_include_stop_id(
link_segs, force_first_stop_ids[-1])
# Now we need to find the longest sub-chain for all of these
# init link candidates.
longest_chain = []
for init_link_seg_id in cand_init_link_seg_ids:
seg_chain, chain_len = get_longest_seg_linked_chain(
init_link_seg_id, all_pattern_segs, full_stop_pattern_segs,
seg_links, {})
if chain_len > len(longest_chain):
longest_chain = seg_chain
full_stop_pattern_segs += longest_chain
elif force_first_stop_ids and len(force_first_stop_ids) == 1:
# We have a first stop ID - but don't necessarily know which segment
# this stop belongs to to start at. Need to potentially try
# all combos passing through this stop.
first_stop_id = force_first_stop_ids[0]
print "Forcing start of building chain at stop ID %d" \
% first_stop_id
cand_start_seg_ids = get_seg_ids_that_include_stop_id(
all_pattern_segs, first_stop_id)
start_seg_ids_and_chains = []
for start_seg_id in cand_start_seg_ids:
start_seg_ref = get_seg_ref_with_id(start_seg_id, all_pattern_segs)
other_stop_id = get_other_stop_id(start_seg_ref, first_stop_id)
link_seg_ids = seg_links[start_seg_id]
link_segs = [get_seg_ref_with_id(seg_id, all_pattern_segs) for \
seg_id in link_seg_ids]
# We only want 'forward' links away from the first stop id
# work out longest of these.
cand_init_link_seg_ids = get_seg_ids_that_include_stop_id(
link_segs, other_stop_id)
longest_sub_chain = []
for link_seg_id in cand_init_link_seg_ids:
seg_chain, chain_len = get_longest_seg_linked_chain(
link_seg_id, all_pattern_segs, [start_seg_ref],
seg_links, {})
if chain_len > len(longest_sub_chain):
longest_sub_chain = seg_chain
start_seg_ids_and_chains.append([start_seg_ref] + longest_sub_chain)
# We need to get the longest chain
start_seg_ids_and_chains.sort(key = len)
full_stop_pattern_segs = start_seg_ids_and_chains[0]
else:
# We don't have a forced seg to start at.
# Ok: best bet in this case is search for one of the segments that
# only has one link - and is therefore an end of the route.
possible_reverse_links = False
start_seg_id = None
for seg_id, link_seg_ids in seg_links.iteritems():
if len(link_seg_ids) == 1:
start_seg_id = seg_id
break
if start_seg_id is not None:
print "No start stop specified, so starting with seg #%d "\
"that has only one link." % start_seg_id
else:
print "No start stop specified, and route has no "\
"segments with only one link."
possible_reverse_links = True
# Fallback case.
cand_start_seg_ids, min_links = get_seg_ids_with_minimum_links(
all_seg_ids, seg_links)
print "Minimum links of any seg is %d" % min_links
# Try the 'starts' and 'ends' first in order we read segs for this
# route.
min_dist_from_end = float("inf")
for seg_id in cand_start_seg_ids:
dist_from_end = min(seg_id - 1, len(all_pattern_segs) - seg_id)
if dist_from_end < min_dist_from_end:
min_dist_from_end = dist_from_end
start_seg_id = seg_id
if dist_from_end == 0:
break
print "Starting with seg to have this # of links closest to "\
"start or end = seg #%s" % start_seg_id
# Ok:- we've chosen a start seg ID, now need to choose best link seg
#print "Added start seg %d." % start_seg_id
start_seg_ref = get_seg_ref_with_id(start_seg_id, all_pattern_segs)
full_stop_pattern_segs.append(start_seg_ref)
init_link_seg_ids = seg_links[start_seg_id]
first_link_seg_id = get_seg_id_with_shortest_dist(init_link_seg_ids,
all_pattern_segs, [])
seg_chain, chain_len = get_longest_seg_linked_chain(first_link_seg_id,
all_pattern_segs, full_stop_pattern_segs, seg_links, {})
full_stop_pattern_segs += seg_chain
if possible_reverse_links:
# We want to try building other possible 'reverse' chains, given
# with this flag we may have started in the middle of a route.
rem_init_link_seg_ids = list(init_link_seg_ids)
rem_init_link_seg_ids.remove(first_link_seg_id)
first_stop_id = find_non_linking_stop_id(full_stop_pattern_segs[0],
full_stop_pattern_segs[1])
stop_ids_in_route_so_far = get_set_of_stops_in_route_so_far(
full_stop_pattern_segs)
rev_candidate_link_ids = []
for link_seg_id in rem_init_link_seg_ids:
link_seg_ref = get_seg_ref_with_id(link_seg_id, all_pattern_segs)
if first_stop_id not in \
(link_seg_ref.first_id, link_seg_ref.second_id):
# This must be a 'branch' from the first stop, not a
# possible reverse.
continue
non_link_stop = get_other_stop_id(link_seg_ref, first_stop_id)
# NOTE:- rules out some loops
if non_link_stop not in stop_ids_in_route_so_far:
# we have an unexplored section, not an express into
# already included chain.
rev_candidate_link_ids.append(link_seg_id)
if rev_candidate_link_ids:
print "Calling special reverse case ..."
full_stop_pattern_segs.reverse()
longest_chains_lookup_cache = {}
longest_sub_chain = []
longest_sub_chain_len = 0
for rev_link_seg_id in rev_candidate_link_ids:
seg_sub_chain, sub_chain_len = get_longest_seg_linked_chain(
rev_link_seg_id, all_pattern_segs,
full_stop_pattern_segs, seg_links,
#longest_chains_lookup_cache)
{})
if sub_chain_len > longest_sub_chain_len:
longest_sub_chain = seg_sub_chain
longest_sub_chain_len = sub_chain_len
full_stop_pattern_segs += longest_sub_chain
return full_stop_pattern_segs
def get_longest_seg_linked_chain(init_seg_id, all_segs, segs_visited_so_far,
        seg_links, longest_chains_lookup_cache):
    """Greedily build the longest chain of linked segments that starts by
    appending segment init_seg_id after the chain in segs_visited_so_far.

    init_seg_id: ID of the first segment to add to the new sub-chain.
    all_segs: list of all segment refs of the route pattern.
    segs_visited_so_far: seg refs already fixed in the route; the last
        entry is the segment that init_seg_id links from.
    seg_links: dict mapping each seg ID -> list of seg IDs sharing a stop.
    longest_chains_lookup_cache: memo dict of sub-chains keyed by initial
        seg ID. NOTE(review): the recursive call sites below currently pass
        a fresh {} (the cached variant is commented out), so memoisation is
        effectively disabled — confirm before re-enabling.

    Returns a tuple (seg_chain, len(seg_chain)).
    """
    # Special case for having visited all segments - esp for 1-segment routes
    if len(all_segs) == len(segs_visited_so_far):
        return [], 0
    seg_chain = []
    init_seg_ref = get_seg_ref_with_id(init_seg_id, all_segs)
    # The chain continues from the last already-visited segment, via the
    # stop it shares with init_seg_ref.
    prev_seg_ref = segs_visited_so_far[-1]
    prev_seg_id = prev_seg_ref.seg_id
    prev_stop_id = find_linking_stop_id(prev_seg_ref, init_seg_ref)
    stop_ids_in_route_so_far = get_set_of_stops_in_route_so_far(
        segs_visited_so_far)
    curr_seg_id = init_seg_id
    # Walk forward segment-by-segment until we hit a route end, a branch,
    # or a loop back into already-visited stops.
    while True:
        curr_seg_ref = get_seg_ref_with_id(curr_seg_id, all_segs)
        assert curr_seg_id not in map(operator.attrgetter('seg_id'), seg_chain)
        seg_chain.append(curr_seg_ref)
        #print "Appended seg %d to sub chain. - sub chain is now %s." % \
        #    (curr_seg_id, map(operator.attrgetter('seg_id'), seg_chain))
        curr_stop_id = find_non_linking_stop_id(curr_seg_ref, prev_seg_ref)
        stop_ids_in_route_so_far.add(curr_stop_id)
        link_seg_ids = seg_links[curr_seg_id]
        next_seg_id = None
        if len(link_seg_ids) == 1:
            # We have reached the final segment in the route.
            break
        elif len(link_seg_ids) == 2:
            # Exactly one 'forward' link: the one that isn't where we
            # came from.
            for link_seg_id in link_seg_ids:
                if link_seg_id != prev_seg_id:
                    next_seg_id = link_seg_id
            assert next_seg_id is not None
            next_seg_ref = get_seg_ref_with_id(next_seg_id, all_segs)
            linking_stop_id = find_linking_stop_id(next_seg_ref, curr_seg_ref)
            # Need this check to deal with single-segment branch cases.
            if linking_stop_id == prev_stop_id:
                #print "Warning:- single 'forward' link found from seg %d "\
                #    "to seg %d, but this next seg is actually a branch "\
                #    "from previous link. So breaking here."\
                #    % (curr_seg_id, next_seg_id)
                break
            # We need this extra check to avoid loops back into existing
            # stops.
            next_stop_id = get_other_stop_id(next_seg_ref, linking_stop_id)
            if next_stop_id in stop_ids_in_route_so_far:
                #print "Warning:- single forward link found from seg %d "\
                #    "to seg %d, but this next seg links back to an "\
                #    "already visited stop. So breaking here."\
                #    % (curr_seg_id, next_seg_id)
                break
        else:
            # This means there is either a 'branch', 'express' section,
            # or a loop.
            fwd_link_seg_ids = list(link_seg_ids)
            fwd_link_seg_ids.remove(prev_seg_id)
            # The stop we just arrived at is allowed as a link point; every
            # other already-used stop would create a loop.
            stops_disallowed = set(stop_ids_in_route_so_far)
            stops_disallowed.remove(curr_stop_id)
            fwd_link_seg_ids = get_links_sorted_by_distance(fwd_link_seg_ids,
                all_segs, stops_disallowed)
            if fwd_link_seg_ids is None:
                #print "Warning: multiple links from current segment, but "\
                #    "all of them looped back to an already used stop. "\
                #    "So breaking here (last added seg ID was %d)."\
                #    % curr_seg_id
                break
            longest_sub_chain = []
            longest_sub_chain_len = 0
            #print "*In recursive part*, curr_seg_id=%d" % curr_seg_id
            updated_segs_visited_so_far = segs_visited_so_far + seg_chain
            # We only want to cache lookup chains at the same depth level
            sub_longest_chains_lookup_cache = {}
            # Recurse down every candidate branch and keep the longest
            # resulting sub-chain.
            for link_seg_id in fwd_link_seg_ids:
                try:
                    sub_seg_chain = longest_chains_lookup_cache[link_seg_id]
                    sub_chain_len = len(sub_seg_chain)
                    #print "(lookup answer from cache for link %d was %d)" \
                    #    % (link_seg_id, sub_chain_len)
                except KeyError:
                    # Recursive call, to try all possible branches.
                    #print "*Launching recursive call on link seg id %d" \
                    #    % link_seg_id
                    sub_seg_chain, sub_chain_len = get_longest_seg_linked_chain(
                        link_seg_id, all_segs,
                        updated_segs_visited_so_far, seg_links,
                        #sub_longest_chains_lookup_cache)
                        {})
                    #print "...Sub-chain from link %d was %d long" \
                    #    % (link_seg_id, sub_chain_len)
                if sub_chain_len > longest_sub_chain_len:
                    longest_sub_chain = sub_seg_chain
                    longest_sub_chain_len = sub_chain_len
            assert len(set(longest_sub_chain)) == len(longest_sub_chain)
            seg_chain += longest_sub_chain
            assert len(set(seg_chain)) == len(seg_chain)
            break
        # Defensive check
        if next_seg_id in map(operator.attrgetter('seg_id'),
                segs_visited_so_far + seg_chain):
            #print "Warning, we found a loop in segments while constructing "\
            #    "full-stop pattern - breaking with loop seg id being %d."\
            #    % next_seg_id
            break
        # Advance one segment along the chain.
        prev_seg_id = curr_seg_id
        prev_stop_id = curr_stop_id
        prev_seg_ref = curr_seg_ref
        curr_seg_id = next_seg_id
    longest_chains_lookup_cache[init_seg_id] = seg_chain
    #print "sub-chain of ids calc was %s" \
    #    % (map(operator.attrgetter('seg_id'), seg_chain))
    # Sanity checks: no duplicate segments or stops in the combined route.
    assert len(set(seg_chain)) == len(seg_chain)
    all_segs_thus_far = segs_visited_so_far + seg_chain
    assert len(set(all_segs_thus_far)) == \
        len(all_segs_thus_far)
    stop_ids_in_route_thus_far = get_set_of_stops_in_route_so_far(
        all_segs_thus_far)
    assert len(set(stop_ids_in_route_thus_far)) == \
        len(stop_ids_in_route_thus_far)
    return seg_chain, len(seg_chain)
def order_all_route_segments(all_segs_by_route, r_ids_sorted=None):
# Now order each route properly ...
# for each route - find unique stop names
if r_ids_sorted == None:
r_ids_sorted = sorted(all_segs_by_route.keys())
segs_by_routes_ordered = {}
for r_id in r_ids_sorted:
print "Ordering segments by traversal for route ID %d:" \
% (r_id)
route_seg_refs = all_segs_by_route[r_id]
if len(route_seg_refs) == 1:
segs_by_routes_ordered[r_id] = route_seg_refs
else:
seg_links = build_seg_links(route_seg_refs)
ordered_seg_refs = order_segs_based_on_links(route_seg_refs,
seg_links)
segs_by_routes_ordered[r_id] = ordered_seg_refs
assert len(segs_by_routes_ordered) == len(all_segs_by_route)
return segs_by_routes_ordered
def create_basic_route_dir_names(all_segs_by_route, mode_config):
    """Creating basic direction names for routes :- based on first and last
    stop ids and names in each route.

    all_segs_by_route: dict mapping route ID -> seg refs in traversal order.
    mode_config: transport-mode config passed to stop name lookup.
    Returns a dict mapping route ID -> ("A->B", "B->A") direction names.
    Exits the program (status 1) if the ordered segments don't link.
    """
    route_dir_names = {}
    for r_id, route_seg_refs in all_segs_by_route.iteritems():
        if len(route_seg_refs) == 1:
            # Single-segment route: ends are just that segment's two stops.
            start_stop = route_seg_refs[0].first_id
            end_stop = route_seg_refs[0].second_id
        else:
            # Route start is the stop of the first segment NOT shared with
            # the second segment; similarly for the route end.
            first_seg, second_seg = route_seg_refs[0], route_seg_refs[1]
            start_stop = find_non_linking_stop_id(first_seg, second_seg)
            if start_stop is None:
                print "Error in working out directions for route ID %d:- "\
                    "first and second segments don't link via a common stop!"\
                    % r_id
                sys.exit(1)
            last_seg = route_seg_refs[-1]
            second_last_seg = route_seg_refs[-2]
            end_stop = find_non_linking_stop_id(last_seg, second_last_seg)
            if end_stop is None:
                print "Error in working out directions for route ID %d:- "\
                    "last and second last segments don't link via a "\
                    "common stop!"\
                    % r_id
                sys.exit(1)
        first_stop_name = tp_model.stop_default_name_from_id(start_stop,
            mode_config)
        last_stop_name = tp_model.stop_default_name_from_id(end_stop,
            mode_config)
        # Two direction names: one per direction of travel.
        dir1 = "%s->%s" % (first_stop_name, last_stop_name)
        dir2 = "%s->%s" % (last_stop_name, first_stop_name)
        route_dir_names[r_id] = (dir1, dir2)
    assert len(all_segs_by_route) == len(route_dir_names)
    return route_dir_names
def extract_stop_list_along_route(ordered_seg_refs):
    """Return the ordered list of stop IDs visited along ordered_seg_refs.

    The result has one entry per segment (its entry stop) plus the exit
    stop of the final segment.
    """
    if len(ordered_seg_refs) == 1:
        # Special case for a route with only one segment.
        only_seg = ordered_seg_refs[0]
        return [only_seg.first_id, only_seg.second_id]
    # Orientation of the first segment is determined by the second one.
    entry_stop_id, exit_stop_id = get_stop_order(
        ordered_seg_refs[0], ordered_seg_refs[1])
    stop_ids = [entry_stop_id]
    for seg_ref in ordered_seg_refs[1:]:
        # Each segment is entered via the previous segment's exit stop.
        entry_stop_id = exit_stop_id
        exit_stop_id = get_other_stop_id(seg_ref, entry_stop_id)
        stop_ids.append(entry_stop_id)
    # Finally, add the exit stop of the last segment.
    stop_ids.append(exit_stop_id)
    return stop_ids
def seg_ref_from_feature(seg_feature):
    """Build a Seg_Reference object from an OGR segment feature.

    Reads the segment ID, the IDs of its two stops, the route distance
    along the segment, and the list of routes that use it.
    """
    seg_id = int(seg_feature.GetField(tp_model.SEG_ID_FIELD))
    stop_id_a, stop_id_b = tp_model.get_stop_ids_of_seg(seg_feature)
    route_dist_on_seg = float(seg_feature.GetField(
        tp_model.SEG_ROUTE_DIST_FIELD))
    seg_rlist = tp_model.get_routes_on_seg(seg_feature)
    seg_ref = Seg_Reference(seg_id, stop_id_a, stop_id_b,
        route_dist_on_seg=route_dist_on_seg, routes=seg_rlist)
    return seg_ref
def route_ext_from_feature(route_ext_feat):
    """Build a Route_Ext_Info object from an OGR route-extension feature.

    Reads the extension's own ID/name/type, the identifying names of the
    existing route it attaches to, the connecting/first stop GTFS IDs, and
    the updated route/direction names to apply.
    """
    ext_id = route_ext_feat.GetField(tp_model.ROUTE_EXT_ID_FIELD)
    ext_name = route_ext_feat.GetField(tp_model.ROUTE_EXT_NAME_FIELD)
    ext_type = route_ext_feat.GetField(tp_model.ROUTE_EXT_TYPE_FIELD)
    exist_r_s_name = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_EXIST_S_NAME_FIELD)
    exist_r_l_name = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_EXIST_L_NAME_FIELD)
    exist_r_connect_stop_gtfs_id = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_CONNECTING_STOP_FIELD)
    exist_r_first_stop_gtfs_id = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_FIRST_STOP_FIELD)
    # Treat an empty/blank first-stop field as "not specified".
    if not exist_r_first_stop_gtfs_id:
        exist_r_first_stop_gtfs_id = None
    upd_r_short_name = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_UPD_S_NAME_FIELD)
    upd_r_long_name = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_UPD_L_NAME_FIELD)
    upd_dir_name = \
        route_ext_feat.GetField(tp_model.ROUTE_EXT_UPD_DIR_NAME_FIELD)
    route_ext_info = Route_Ext_Info(
        ext_id, ext_name, ext_type,
        exist_r_s_name, exist_r_l_name,
        exist_r_connect_stop_gtfs_id, exist_r_first_stop_gtfs_id,
        upd_r_short_name, upd_r_long_name, upd_dir_name)
    return route_ext_info
def read_route_ext_infos(route_exts_lyr):
    """Read every feature of route_exts_lyr into a Route_Ext_Info list,
    resetting the layer's read pointer afterwards."""
    ext_infos = [route_ext_from_feature(ext_feat)
        for ext_feat in route_exts_lyr]
    route_exts_lyr.ResetReading()
    return ext_infos
def get_routes_and_segments(segs_lyr):
    """Build a mapping of route ID -> list of seg refs used by that route,
    from the features of segs_lyr. Resets the layer read pointer when done."""
    segs_per_route = {}
    for seg_feat in segs_lyr:
        seg_ref = seg_ref_from_feature(seg_feat)
        # A segment can serve several routes; file it under each of them.
        for r_id in seg_ref.routes:
            segs_per_route.setdefault(r_id, []).append(seg_ref)
    segs_lyr.ResetReading()
    return segs_per_route
def get_all_seg_refs(segs_lyr):
    """Return a Seg_Reference for every feature of segs_lyr, resetting
    the layer's read pointer when finished."""
    seg_refs = [seg_ref_from_feature(seg_feat) for seg_feat in segs_lyr]
    segs_lyr.ResetReading()
    return seg_refs
def create_ordered_seg_refs_from_ids(ordered_seg_ids, segs_lookup_table):
    """Map an ordered list of segment IDs to Seg_Reference objects, using
    segs_lookup_table to resolve each ID to its layer feature."""
    return [seg_ref_from_feature(segs_lookup_table[seg_id])
        for seg_id in ordered_seg_ids]
def write_seg_ref_to_shp_file(seg_ref, segments_lyr, stop_feat_a, stop_feat_b,
        stops_srs, mode_config):
    """Write a single seg_ref to segments_lyr, building its line geometry
    from the two given stop features; returns the new feature's index."""
    # Create line geometry based on two stops.
    seg_geom = tp_model.create_seg_geom_from_stop_pair(stop_feat_a,
        stop_feat_b, stops_srs)
    seg_ii = tp_model.add_segment(segments_lyr,
        seg_ref.seg_id, seg_ref.routes, seg_ref.first_id, seg_ref.second_id,
        seg_ref.route_dist_on_seg, seg_geom, mode_config)
    # Free the OGR geometry now the layer holds its own copy.
    seg_geom.Destroy()
    return seg_ii
def write_segments_to_shp_file(segments_lyr, input_stops_lyr, seg_refs,
mode_config):
"""Write all segments defined by input seg_refs list to the segments_lyr.
Geometries of segments defined by stop pairs in input_stops_lyr.
"""
print "Writing segment references to shapefile:"
stops_srs = input_stops_lyr.GetSpatialRef()
# Build lookup table by stop ID into stops layer - for speed
stops_lookup_dict = tp_model.build_stops_lookup_table(input_stops_lyr)
for seg_ref in seg_refs:
# look up corresponding stops in lookup table, and build geometry
stop_feat_a = stops_lookup_dict[seg_ref.first_id]
stop_feat_b = stops_lookup_dict[seg_ref.second_id]
seg_ii = write_seg_ref_to_shp_file(seg_ref, segments_lyr,
stop_feat_a, stop_feat_b, stops_srs, mode_config)
print "...done writing."
return
def print_route_ext_infos(route_ext_infos, indent=4):
    """Pretty-print the given Route_Ext_Info objects to stdout.

    indent: number of spaces for the first indentation level (nested
        detail lines are indented at twice this width).
    """
    for re in route_ext_infos:
        print " " * indent + "Ext id:%s, '%s', of type %s"\
            % (re.ext_id, re.ext_name, re.ext_type)
        print " " * indent * 2 + "connects to existing route '%s' "\
            "('%s'), at GTFS stop ID %s" \
            % (re.exist_r_short_name, re.exist_r_long_name, \
               re.exist_r_connect_stop_gtfs_id)
        # Only new routes carry a copy-from first stop.
        if re.ext_type == tp_model.ROUTE_EXT_TYPE_NEW:
            print " " * indent * 2 + "(new route will copy starting from "\
                "stop with GTFS ID %s)"\
                % (re.exist_r_first_stop_gtfs_id)
        print " " * indent * 2 + "will update r name to '%s':'%s' "\
            "and new/updated dir name as '%s'." \
            % (re.upd_r_short_name, re.upd_r_long_name, \
               re.upd_dir_name)
    return
def get_matching_existing_route_info(
route_defs, segs_lyr, segs_lookup_table, stops_lyr,
route_ext_info):
# Find the route def, stops, etc of matching route in existing topology
search_route_def = Route_Def(
None,
route_ext_info.exist_r_short_name,
route_ext_info.exist_r_long_name,
None, None)
matching_r_defs = get_matching_route_defs(route_defs,
search_route_def)
if len(matching_r_defs) == 0:
print "Error:- for route extension %s with s name %s, l name %s: "\
"no matching existing routes!" \
% (route_ext_info.ext_name, route_ext_info.exist_r_short_name,\
route_ext_info.exist_r_long_name)
sys.exit(1)
elif len(matching_r_defs) > 1:
print "Error:- for route extension %s with s name %s, l name %s: "\
"matched multiple existing routes!" \
% (route_ext_info.ext_name, route_ext_info.exist_r_short_name,\
route_ext_info.exist_r_long_name)
sys.exit(1)
r_def_to_extend = matching_r_defs[0]
seg_refs_along_route = create_ordered_seg_refs_from_ids(
r_def_to_extend.ordered_seg_ids, segs_lookup_table)
stop_ids_along_route = extract_stop_list_along_route(
seg_refs_along_route)
connect_stop_id = tp_model.get_stop_id_with_gtfs_id(
stops_lyr, route_ext_info.exist_r_connect_stop_gtfs_id)
if connect_stop_id is None:
print "Error:- extension route with connecting stop spec. "\
"with GTFS ID %s :- couldn't find an existing stop with "\
"this GTFS ID."\
% (route_ext_info.exist_r_connect_stop_gtfs_id)
sys.exit()
elif connect_stop_id not in stop_ids_along_route:
print "Error:- extension route with connecting stop spec. "\
"with GTFS ID %s exists, but not found in route to extend." \
% (route_ext_info.exist_r_connect_stop_gtfs_id)
sys.exit()
if route_ext_info.ext_type == tp_model.ROUTE_EXT_TYPE_EXTENSION:
if connect_stop_id == stop_ids_along_route[-1]:
ext_dir_id = 0
elif connect_stop_id == stop_ids_along_route[0]:
ext_dir_id = -1
else:
print "Error:- extension route with connecting stop spec. "\
"with GTFS ID %s not found at end of route to extend."\
% (route_ext_info.exist_r_connect_stop_gtfs_id)
sys.exit(1)
# For new routes, the connecting stop can legitimately be
# anywhere along the route.
orig_route_first_stop_id = tp_model.get_stop_id_with_gtfs_id(
stops_lyr, route_ext_info.exist_r_first_stop_gtfs_id)
return r_def_to_extend, seg_refs_along_route, stop_ids_along_route, \
connect_stop_id, orig_route_first_stop_id
def get_route_infos_to_extend(route_ext_infos, route_defs, segs_lyr,
        segs_lookup_table, stops_lyr):
    """Returns the existing_route_infos_to_extend in the form:-
    (r_def_to_extend, seg_refs_along_route, stop_ids_along_route,
    connect_stop_id)"""
    return [get_matching_existing_route_info(route_defs, segs_lyr,
            segs_lookup_table, stops_lyr, r_ext_info)
        for r_ext_info in route_ext_infos]
# Column headers of the legacy ("00") route definition CSV format.
ROUTE_CSV_HEADERS_00 = ['Route', 'dir1', 'dir2', 'Segments']
# Column headers of the current ("01") format, which adds explicit route
# IDs, separate short/long names, and the originating GTFS ID.
ROUTE_CSV_HEADERS_01 = ['route_id', 'route_short_name', 'route_long_name',
    'gtfs_id', 'dir1', 'dir2', 'Segments']
def read_route_defs(csv_file_name, do_sort=True):
"""Reads a CSV of route_defs, into a list of 'route_defs'.
Each route_def is a dictionary, with following entries:
name: name of route.
directions: a tuple of two strings, the route directions.
segments: a list of (ordered) route segments IDs."""
route_defs = []
try:
csv_file = open(csv_file_name, 'r')
except IOError:
print "Error, route mapping CSV file given, %s , failed to open." \
% (csv_file_name)
sys.exit(1)
dict_reader = csv.DictReader(csv_file, delimiter=';', quotechar="'")
# Check old vs new format
if 'Route' in dict_reader.fieldnames:
format_version = "00"
else:
format_version = "01"
for ii, row in enumerate(dict_reader):
if format_version == "00":
r_id = ii
r_short_name = row['Route']
r_long_name = None
else:
r_id = int(row['route_id'])
r_short_name = row['route_short_name']
if r_short_name == 'None' or len(r_short_name) == 0:
r_short_name = None
r_long_name = row['route_long_name']
if r_long_name == 'None' or len(r_long_name) == 0:
r_long_name = None
assert r_short_name or r_long_name
try:
r_gtfs_id = row['gtfs_id']
if r_gtfs_id == 'None' or len(r_gtfs_id) == 0:
r_gtfs_id = None
except KeyError:
r_gtfs_id = None
dir1 = row['dir1']
dir2 = row['dir2']
segments_str = row['Segments'].split(',')
seg_ids = [int(segstr) for segstr in segments_str]
route_def = Route_Def(r_id, r_short_name, r_long_name,
(dir1, dir2), seg_ids, gtfs_origin_id=r_gtfs_id)
route_defs.append(route_def)
if do_sort == True:
route_defs.sort(key=get_route_order_key_from_name)
csv_file.close()
return route_defs
def write_route_defs(csv_file_name, route_defs):
    """Write the given route_defs to csv_file_name in the "01" CSV format
    (see ROUTE_CSV_HEADERS_01), using ';' as the field delimiter.

    Routes with missing or single direction names are padded with empty
    strings (with a warning) so every row has two direction columns.
    """
    # The csv module needs different open() modes on Python 2 vs 3.
    if sys.version_info >= (3,0,0):
        routesfile = open(csv_file_name, 'w', newline='')
    else:
        routesfile = open(csv_file_name, 'wb')
    rwriter = csv.writer(routesfile, delimiter=';')
    rwriter.writerow(ROUTE_CSV_HEADERS_01)
    for rdef in route_defs:
        dirs = tuple(rdef.dir_names)
        if not dirs:
            print "Warning:- no dirs listed for route %s to write. "\
                "writing as empty dirs." % rdef.short_name
            dirs = ("", "")
        if len(dirs) == 1:
            print "Warning:- only one dir listed for route %s to write. "\
                "writing other dir as empty." % rdef.short_name
            dirs = (dirs[0], "")
        # Segment IDs are serialised as a single comma-joined column.
        seg_str_all = ','.join(map(str, rdef.ordered_seg_ids))
        rwriter.writerow([rdef.id, rdef.short_name, rdef.long_name,
            rdef.gtfs_origin_id, dirs[0], dirs[1], seg_str_all])
    routesfile.close()
    print "Wrote output to %s" % (csv_file_name)
    return
|
"""Curses interface class."""
from __future__ import unicode_literals
import sys
from glances.compat import nativestr, u, itervalues, enable, disable
from glances.globals import MACOS, WINDOWS
from glances.logger import logger
from glances.events import glances_events
from glances.processes import glances_processes, sort_processes_key_list
from glances.outputs.glances_unicode import unicode_message
from glances.timer import Timer
try:
import curses
import curses.panel
from curses.textpad import Textbox
except ImportError:
logger.critical("Curses module not found. Glances cannot start in standalone mode.")
if WINDOWS:
logger.critical("For Windows you can try installing windows-curses with pip install.")
sys.exit(1)
class _GlancesCurses(object):
    """This class manages the curses display (and key pressed).
    Note: It is a private class, use GlancesCursesClient or GlancesCursesBrowser.
    """

    # Single-keystroke actions. 'switch' toggles the named boolean on
    # self.args (honouring enable_/disable_ prefixes); 'sort_key' sets the
    # process list sort. Keys only listed in comments are handled
    # explicitly in __catch_key().
    _hotkeys = {
        # 'ENTER' > Edit the process filter
        '0': {'switch': 'disable_irix'},
        '1': {'switch': 'percpu'},
        '2': {'switch': 'disable_left_sidebar'},
        '3': {'switch': 'disable_quicklook'},
        # '4' > Enable or disable quicklook
        # '5' > Enable or disable top menu
        '6': {'switch': 'meangpu'},
        '9': {'switch': 'theme_white'},
        '/': {'switch': 'process_short_name'},
        'a': {'sort_key': 'auto'},
        'A': {'switch': 'disable_amps'},
        'b': {'switch': 'byte'},
        'B': {'switch': 'diskio_iops'},
        'c': {'sort_key': 'cpu_percent'},
        'C': {'switch': 'disable_cloud'},
        'd': {'switch': 'disable_diskio'},
        'D': {'switch': 'disable_docker'},
        # 'e' > Enable/Disable process extended
        # 'E' > Erase the process filter
        # 'f' > Show/hide fs / folder stats
        'F': {'switch': 'fs_free_space'},
        'g': {'switch': 'generate_graph'},
        'G': {'switch': 'disable_gpu'},
        'h': {'switch': 'help_tag'},
        'i': {'sort_key': 'io_counters'},
        'I': {'switch': 'disable_ip'},
        # 'k' > Kill selected process
        'K': {'switch': 'disable_connections'},
        'l': {'switch': 'disable_alert'},
        'm': {'sort_key': 'memory_percent'},
        'M': {'switch': 'reset_minmax_tag'},
        'n': {'switch': 'disable_network'},
        'N': {'switch': 'disable_now'},
        'p': {'sort_key': 'name'},
        'P': {'switch': 'disable_ports'},
        # 'q' or ESCAPE > Quit
        'Q': {'switch': 'enable_irq'},
        'r': {'switch': 'disable_smart'},
        'R': {'switch': 'disable_raid'},
        's': {'switch': 'disable_sensors'},
        'S': {'switch': 'sparkline'},
        't': {'sort_key': 'cpu_times'},
        'T': {'switch': 'network_sum'},
        'u': {'sort_key': 'username'},
        'U': {'switch': 'network_cumul'},
        # 'w' > Delete finished warning logs
        'W': {'switch': 'disable_wifi'},
        # 'x' > Delete finished warning and critical logs
        # 'z' > Enable or disable processes
        # "<" (left arrow) navigation through process sort
        # ">" (right arrow) navigation through process sort
        # 'UP' > Up in the server list
        # 'DOWN' > Down in the server list
    }

    # Ordered sort keys cycled through with the left/right arrow keys.
    _sort_loop = sort_processes_key_list

    # Define top menu
    _top = ['quicklook', 'cpu', 'percpu', 'gpu', 'mem', 'memswap', 'load']
    _quicklook_max_width = 68

    # Define left sidebar
    _left_sidebar = [
        'network',
        'connections',
        'wifi',
        'ports',
        'diskio',
        'fs',
        'irq',
        'folders',
        'raid',
        'smart',
        'sensors',
        'now',
    ]
    _left_sidebar_min_width = 23
    _left_sidebar_max_width = 34

    # Define right sidebar
    _right_sidebar = ['docker', 'processcount', 'amps', 'processlist', 'alert']
    def __init__(self, config=None, args=None):
        """Initialise the curses screen, colors and keyboard handling.

        config: optional Glances configuration object; its 'outputs'
            section selects the curse theme.
        args: command-line arguments namespace - mutated here to carry
            UI state (reset_minmax_tag, cursor_position).
        """
        # Init
        self.config = config
        self.args = args

        # Init windows positions
        self.term_w = 80
        self.term_h = 24

        # Space between stats
        self.space_between_column = 3
        self.space_between_line = 2

        # Init the curses screen
        self.screen = curses.initscr()
        if not self.screen:
            logger.critical("Cannot init the curses library.\n")
            sys.exit(1)

        # Load the 'outputs' section of the configuration file
        # - Init the theme (default is black)
        self.theme = {'name': 'black'}

        # Load configuration file
        self.load_config(config)

        # Init cursor
        self._init_cursor()

        # Init the colors
        self._init_colors()

        # Init main window
        self.term_window = self.screen.subwin(0, 0)

        # Init edit filter tag
        self.edit_filter = False

        # Init kill process tag
        self.kill_process = False

        # Init the process min/max reset
        self.args.reset_minmax_tag = False

        # Init cursor
        self.args.cursor_position = 0

        # Catch key pressed with non blocking mode
        self.term_window.keypad(1)
        self.term_window.nodelay(1)
        self.pressedkey = -1

        # History tag
        self._init_history()
def load_config(self, config):
"""Load the outputs section of the configuration file."""
# Load the theme
if config is not None and config.has_section('outputs'):
logger.debug('Read the outputs section in the configuration file')
self.theme['name'] = config.get_value('outputs', 'curse_theme', default='black')
logger.debug('Theme for the curse interface: {}'.format(self.theme['name']))
def is_theme(self, name):
"""Return True if the theme *name* should be used."""
return getattr(self.args, 'theme_' + name) or self.theme['name'] == name
    def _init_history(self):
        """Init the history option."""
        # When True, the graph history is reset on the next refresh.
        self.reset_history_tag = False
def _init_cursor(self):
"""Init cursors."""
if hasattr(curses, 'noecho'):
curses.noecho()
if hasattr(curses, 'cbreak'):
curses.cbreak()
self.set_cursor(0)
    def _init_colors(self):
        """Init the Curses color layout.

        Populates the title/help/alert color attributes and the
        self.colors_list lookup used to style every stat, falling back
        to B&W text attributes when the terminal has no color support.
        """
        # Set curses options
        try:
            if hasattr(curses, 'start_color'):
                curses.start_color()
                logger.debug('Curses interface compatible with {} colors'.format(curses.COLORS))
            if hasattr(curses, 'use_default_colors'):
                curses.use_default_colors()
        except Exception as e:
            logger.warning('Error initializing terminal color ({})'.format(e))

        # Init colors
        if self.args.disable_bold:
            # No bold allowed: also force foreground-only alert colors.
            A_BOLD = 0
            self.args.disable_bg = True
        else:
            A_BOLD = curses.A_BOLD

        self.title_color = A_BOLD
        self.title_underline_color = A_BOLD | curses.A_UNDERLINE
        self.help_color = A_BOLD

        if curses.has_colors():
            # The screen is compatible with a colored design
            if self.is_theme('white'):
                # White theme: black ==> white
                curses.init_pair(1, curses.COLOR_BLACK, -1)
            else:
                curses.init_pair(1, curses.COLOR_WHITE, -1)
            if self.args.disable_bg:
                # Alert levels shown with colored text on default background.
                curses.init_pair(2, curses.COLOR_RED, -1)
                curses.init_pair(3, curses.COLOR_GREEN, -1)
                curses.init_pair(4, curses.COLOR_BLUE, -1)
                curses.init_pair(5, curses.COLOR_MAGENTA, -1)
            else:
                # Alert levels shown with white text on colored background.
                curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
                curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_GREEN)
                curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLUE)
                curses.init_pair(5, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
            curses.init_pair(6, curses.COLOR_RED, -1)
            curses.init_pair(7, curses.COLOR_GREEN, -1)
            curses.init_pair(8, curses.COLOR_BLUE, -1)

            # Colors text styles
            self.no_color = curses.color_pair(1)
            self.default_color = curses.color_pair(3) | A_BOLD
            self.nice_color = curses.color_pair(5)
            self.cpu_time_color = curses.color_pair(5)
            self.ifCAREFUL_color = curses.color_pair(4) | A_BOLD
            self.ifWARNING_color = curses.color_pair(5) | A_BOLD
            self.ifCRITICAL_color = curses.color_pair(2) | A_BOLD
            self.default_color2 = curses.color_pair(7)
            self.ifCAREFUL_color2 = curses.color_pair(8) | A_BOLD
            self.ifWARNING_color2 = curses.color_pair(5) | A_BOLD
            self.ifCRITICAL_color2 = curses.color_pair(6) | A_BOLD
            self.filter_color = A_BOLD
            self.selected_color = A_BOLD

            if curses.COLOR_PAIRS > 8:
                # Extra pairs available: use magenta/cyan/yellow accents,
                # falling back to the theme foreground on failure.
                colors_list = [curses.COLOR_MAGENTA, curses.COLOR_CYAN, curses.COLOR_YELLOW]
                for i in range(0, 3):
                    try:
                        curses.init_pair(i + 9, colors_list[i], -1)
                    except Exception:
                        if self.is_theme('white'):
                            curses.init_pair(i + 9, curses.COLOR_BLACK, -1)
                        else:
                            curses.init_pair(i + 9, curses.COLOR_WHITE, -1)
                self.nice_color = curses.color_pair(9)
                self.cpu_time_color = curses.color_pair(9)
                self.ifWARNING_color2 = curses.color_pair(9) | A_BOLD
                self.filter_color = curses.color_pair(10) | A_BOLD
                self.selected_color = curses.color_pair(11) | A_BOLD

        else:
            # The screen is NOT compatible with a colored design
            # switch to B&W text styles
            self.no_color = curses.A_NORMAL
            self.default_color = curses.A_NORMAL
            self.nice_color = A_BOLD
            self.cpu_time_color = A_BOLD
            self.ifCAREFUL_color = curses.A_UNDERLINE
            self.ifWARNING_color = A_BOLD
            self.ifCRITICAL_color = curses.A_REVERSE
            self.default_color2 = curses.A_NORMAL
            self.ifCAREFUL_color2 = curses.A_UNDERLINE
            self.ifWARNING_color2 = A_BOLD
            self.ifCRITICAL_color2 = curses.A_REVERSE
            self.filter_color = A_BOLD
            self.selected_color = A_BOLD

        # Define the colors list (hash table) for stats
        self.colors_list = {
            'DEFAULT': self.no_color,
            'UNDERLINE': curses.A_UNDERLINE,
            'BOLD': A_BOLD,
            'SORT': curses.A_UNDERLINE | A_BOLD,
            'OK': self.default_color2,
            'MAX': self.default_color2 | A_BOLD,
            'FILTER': self.filter_color,
            'TITLE': self.title_color,
            'PROCESS': self.default_color2,
            'PROCESS_SELECTED': self.default_color2 | curses.A_UNDERLINE,
            'STATUS': self.default_color2,
            'NICE': self.nice_color,
            'CPU_TIME': self.cpu_time_color,
            'CAREFUL': self.ifCAREFUL_color2,
            'WARNING': self.ifWARNING_color2,
            'CRITICAL': self.ifCRITICAL_color2,
            'OK_LOG': self.default_color,
            'CAREFUL_LOG': self.ifCAREFUL_color,
            'WARNING_LOG': self.ifWARNING_color,
            'CRITICAL_LOG': self.ifCRITICAL_color,
            'PASSWORD': curses.A_PROTECT,
            'SELECTED': self.selected_color,
        }
def set_cursor(self, value):
"""Configure the curse cursor appearance.
0: invisible
1: visible
2: very visible
"""
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(value)
except Exception:
pass
def get_key(self, window):
# @TODO: Check issue #163
ret = window.getch()
return ret
    def __catch_key(self, return_to_browser=False):
        """Catch the last key pressed and apply the matching action.

        return_to_browser: when True, quitting ('q'/ESC) returns to the
            Glances browser instead of stopping the program (only affects
            the log message here; the caller interprets the key code).
        Returns the pressed key code, or -1 if no key was pending.
        """
        # Catch the pressed key
        self.pressedkey = self.get_key(self.term_window)
        if self.pressedkey == -1:
            return -1

        # Actions (available in the global hotkey dict)...
        logger.debug("Keypressed (code: {})".format(self.pressedkey))
        # Table-driven hotkeys: 'switch' entries toggle an option on
        # self.args, 'sort_key' entries change the process sort.
        for hotkey in self._hotkeys:
            if self.pressedkey == ord(hotkey) and 'switch' in self._hotkeys[hotkey]:
                # Get the option name
                # Ex: disable_foo return foo
                #     enable_foo_bar return foo_bar
                option = '_'.join(self._hotkeys[hotkey]['switch'].split('_')[1:])
                if self._hotkeys[hotkey]['switch'].startswith('disable_'):
                    # disable_ switch
                    if getattr(self.args, self._hotkeys[hotkey]['switch']):
                        enable(self.args, option)
                    else:
                        disable(self.args, option)
                elif self._hotkeys[hotkey]['switch'].startswith('enable_'):
                    # enable_ switch
                    if getattr(self.args, self._hotkeys[hotkey]['switch']):
                        disable(self.args, option)
                    else:
                        enable(self.args, option)
                else:
                    # Others switchs options (with no enable_ or disable_)
                    setattr(
                        self.args,
                        self._hotkeys[hotkey]['switch'],
                        not getattr(self.args, self._hotkeys[hotkey]['switch']),
                    )
            if self.pressedkey == ord(hotkey) and 'sort_key' in self._hotkeys[hotkey]:
                glances_processes.set_sort_key(
                    self._hotkeys[hotkey]['sort_key'], self._hotkeys[hotkey]['sort_key'] == 'auto'
                )

        # Other actions...
        # Keys with side effects beyond a simple toggle are handled here.
        if self.pressedkey == ord('\n'):
            # 'ENTER' > Edit the process filter
            self.edit_filter = not self.edit_filter
        elif self.pressedkey == ord('4'):
            # '4' > Enable or disable quicklook
            self.args.full_quicklook = not self.args.full_quicklook
            if self.args.full_quicklook:
                self.enable_fullquicklook()
            else:
                self.disable_fullquicklook()
        elif self.pressedkey == ord('5'):
            # '5' > Enable or disable top menu
            self.args.disable_top = not self.args.disable_top
            if self.args.disable_top:
                self.disable_top()
            else:
                self.enable_top()
        elif self.pressedkey == ord('9'):
            # '9' > Theme from black to white and reverse
            # (the theme_white flag itself was toggled by the hotkey table
            # above; re-init the color pairs to apply it)
            self._init_colors()
        elif self.pressedkey == ord('e'):
            # 'e' > Enable/Disable process extended
            self.args.enable_process_extended = not self.args.enable_process_extended
            if not self.args.enable_process_extended:
                glances_processes.disable_extended()
            else:
                glances_processes.enable_extended()
        elif self.pressedkey == ord('E'):
            # 'E' > Erase the process filter
            glances_processes.process_filter = None
        elif self.pressedkey == ord('f'):
            # 'f' > Show/hide fs / folder stats
            self.args.disable_fs = not self.args.disable_fs
            self.args.disable_folders = not self.args.disable_folders
        elif self.pressedkey == ord('k'):
            # 'k' > Kill selected process (after confirmation)
            self.kill_process = not self.kill_process
        elif self.pressedkey == ord('w'):
            # 'w' > Delete finished warning logs
            glances_events.clean()
        elif self.pressedkey == ord('x'):
            # 'x' > Delete finished warning and critical logs
            glances_events.clean(critical=True)
        elif self.pressedkey == ord('z'):
            # 'z' > Enable or disable processes
            self.args.disable_process = not self.args.disable_process
            if self.args.disable_process:
                glances_processes.disable()
            else:
                glances_processes.enable()
        elif self.pressedkey == curses.KEY_LEFT:
            # "<" (left arrow) navigation through process sort
            next_sort = (self.loop_position() - 1) % len(self._sort_loop)
            glances_processes.set_sort_key(self._sort_loop[next_sort], False)
        elif self.pressedkey == curses.KEY_RIGHT:
            # ">" (right arrow) navigation through process sort
            next_sort = (self.loop_position() + 1) % len(self._sort_loop)
            glances_processes.set_sort_key(self._sort_loop[next_sort], False)
        elif self.pressedkey == curses.KEY_UP or self.pressedkey == 65:
            # 'UP' > Up in the server list
            if self.args.cursor_position > 0:
                self.args.cursor_position -= 1
        elif self.pressedkey == curses.KEY_DOWN or self.pressedkey == 66:
            # 'DOWN' > Down in the server list
            # if self.args.cursor_position < glances_processes.max_processes - 2:
            if self.args.cursor_position < glances_processes.processes_count:
                self.args.cursor_position += 1
        elif self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'):
            # 'ESC'|'q' > Quit
            if return_to_browser:
                logger.info("Stop Glances client and return to the browser")
            else:
                logger.info("Stop Glances (keypressed: {})".format(self.pressedkey))
        elif self.pressedkey == curses.KEY_F5:
            # "F5" manual refresh requested
            pass

        # Return the key code
        return self.pressedkey
def loop_position(self):
"""Return the current sort in the loop"""
for i, v in enumerate(self._sort_loop):
if v == glances_processes.sort_key:
return i
return 0
def disable_top(self):
"""Disable the top panel"""
for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap', 'load']:
setattr(self.args, 'disable_' + p, True)
def enable_top(self):
"""Enable the top panel"""
for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap', 'load']:
setattr(self.args, 'disable_' + p, False)
def disable_fullquicklook(self):
"""Disable the full quicklook mode"""
for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap']:
setattr(self.args, 'disable_' + p, False)
    def enable_fullquicklook(self):
        """Enable the full quicklook mode.

        Only the quicklook plugin stays visible; CPU/GPU/MEM/MEMSWAP are
        hidden so the quicklook bar can use the full terminal width.
        (Previous docstring said "Disable" — copy/paste error.)
        """
        self.args.disable_quicklook = False
        for p in ['cpu', 'gpu', 'mem', 'memswap']:
            setattr(self.args, 'disable_' + p, True)
    def end(self):
        """Shutdown the curses window and restore the terminal state.

        Each restore step is guarded with ``hasattr`` because some curses
        implementations (e.g. windows-curses) do not expose every function.
        """
        if hasattr(curses, 'echo'):
            # Restore terminal echo
            curses.echo()
        if hasattr(curses, 'nocbreak'):
            # Restore line-buffered input
            curses.nocbreak()
        if hasattr(curses, 'curs_set'):
            try:
                # Make the cursor visible again; may raise on terminals
                # without cursor-visibility support
                curses.curs_set(1)
            except Exception:
                pass
        # Hand the terminal back to the shell
        curses.endwin()
    def init_line_column(self):
        """Reset both the line and the column position for the curses interface."""
        self.init_line()
        self.init_column()
def init_line(self):
"""Init the line position for the curses interface."""
self.line = 0
self.next_line = 0
def init_column(self):
"""Init the column position for the curses interface."""
self.column = 0
self.next_column = 0
    def new_line(self, separator=False):
        """Move the drawing position to the next line.

        NOTE(review): the ``separator`` argument is currently unused.
        """
        self.line = self.next_line
    def new_column(self):
        """Move the drawing position to the next column."""
        self.column = self.next_column
    def separator_line(self, color='TITLE'):
        """Draw a full-width horizontal separator line.

        :param color: key into ``self.colors_list`` used for the line

        No-op when separators are disabled in the configuration.
        """
        if not self.args.enable_separator:
            return
        self.new_line()
        # Draw on the line just above the next content line
        self.line -= 1
        line_width = self.term_window.getmaxyx()[1] - self.column
        self.term_window.addnstr(self.line, self.column,
                                 unicode_message('MEDIUM_LINE', self.args) * line_width,
                                 line_width,
                                 self.colors_list[color])
    def __get_stat_display(self, stats, layer):
        """Return a dict of dict with all the stats display.

        # TODO: Drop extra parameter

        :param stats: Global stats dict
        :param layer: ~ cs_status
            "None": standalone or server mode
            "Connected": Client is connected to a Glances server
            "SNMP": Client is connected to a SNMP server
            "Disconnected": Client is disconnected from the server

        :returns: dict of dict
            * key: plugin name
            * value: dict returned by the get_stats_display Plugin method
        """
        ret = {}
        for p in stats.getPluginsList(enable=False):
            if p == 'quicklook' or p == 'processlist':
                # processlist is done later
                # because we need to know how many processes could be displayed
                continue
            # Compute the plugin max size (only constrained for the left sidebar)
            plugin_max_width = None
            if p in self._left_sidebar:
                plugin_max_width = max(self._left_sidebar_min_width, self.term_window.getmaxyx()[1] - 105)
                plugin_max_width = min(self._left_sidebar_max_width, plugin_max_width)
            # Get the view
            ret[p] = stats.get_plugin(p).get_stats_display(args=self.args, max_width=plugin_max_width)
        return ret
    def display(self, stats, cs_status=None):
        """Display stats on the screen.

        :param stats: Stats database to display
        :param cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to a Glances server
            "SNMP": Client is connected to a SNMP server
            "Disconnected": Client is disconnected from the server

        :return: True if the stats have been displayed else False if the help have been displayed
        """
        # Init the internal line/column for Glances Curses
        self.init_line_column()
        # Update the stats messages
        ###########################
        # Get all the plugins but quicklook and process list
        self.args.cs_status = cs_status
        __stat_display = self.__get_stat_display(stats, layer=cs_status)
        # Adapt number of processes to the available space:
        # screen height minus the header/top area (11 lines — NOTE(review):
        # empirical constant) minus the height of each right-sidebar section
        max_processes_displayed = (
            self.term_window.getmaxyx()[0]
            - 11
            - (0 if 'docker' not in __stat_display else self.get_stats_display_height(__stat_display["docker"]))
            - (
                0
                if 'processcount' not in __stat_display
                else self.get_stats_display_height(__stat_display["processcount"])
            )
            - (0 if 'amps' not in __stat_display else self.get_stats_display_height(__stat_display["amps"]))
            - (0 if 'alert' not in __stat_display else self.get_stats_display_height(__stat_display["alert"]))
        )
        try:
            if self.args.enable_process_extended:
                # The extended process panel takes 4 extra lines
                max_processes_displayed -= 4
        except AttributeError:
            pass
        if max_processes_displayed < 0:
            max_processes_displayed = 0
        if glances_processes.max_processes is None or glances_processes.max_processes != max_processes_displayed:
            logger.debug("Set number of displayed processes to {}".format(max_processes_displayed))
            glances_processes.max_processes = max_processes_displayed
        # Get the processlist (done after max_processes is known)
        __stat_display["processlist"] = stats.get_plugin('processlist').get_stats_display(args=self.args)
        # Display the stats on the curses interface
        ###########################################
        # Help screen (on top of the other stats)
        if self.args.help_tag:
            # Display the stats...
            self.display_plugin(stats.get_plugin('help').get_stats_display(args=self.args))
            # ... and exit
            return False
        # =====================================
        # Display first line (system+ip+uptime)
        # Optionally: Cloud on second line
        # =====================================
        self.__display_header(__stat_display)
        self.separator_line()
        # ==============================================================
        # Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP)
        # ==============================================================
        self.__display_top(__stat_display, stats)
        self.init_column()
        self.separator_line()
        # ==================================================================
        # Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time)
        # ==================================================================
        self.__display_left(__stat_display)
        # ====================================
        # Display right stats (process and co)
        # ====================================
        self.__display_right(__stat_display)
        # =====================
        # Others popup messages
        # =====================
        # Display edit filter popup
        # Only in standalone mode (cs_status is None)
        if self.edit_filter and cs_status is None:
            new_filter = self.display_popup(
                'Process filter pattern: \n\n'
                + 'Examples:\n'
                + '- python\n'
                + '- .*python.*\n'
                + '- /usr/lib.*\n'
                + '- name:.*nautilus.*\n'
                + '- cmdline:.*glances.*\n'
                + '- username:nicolargo\n'
                + '- username:^root ',
                popup_type='input',
                input_value=glances_processes.process_filter_input,
            )
            glances_processes.process_filter = new_filter
        elif self.edit_filter and cs_status is not None:
            self.display_popup('Process filter only available in standalone mode')
        # Reset the one-shot edit-filter flag
        self.edit_filter = False
        # Display kill process confirmation popup
        # Only in standalone mode (cs_status is None)
        if self.kill_process and cs_status is None:
            selected_process_raw = stats.get_plugin('processlist').get_raw()[self.args.cursor_position]
            confirm = self.display_popup(
                'Kill process: {} (pid: {}) ?\n\nConfirm ([y]es/[n]o): '.format(
                    selected_process_raw['name'], selected_process_raw['pid']
                ),
                popup_type='yesno',
            )
            if confirm.lower().startswith('y'):
                try:
                    ret_kill = glances_processes.kill(selected_process_raw['pid'])
                except Exception as e:
                    logger.error('Can not kill process {} ({})'.format(selected_process_raw['name'], e))
                else:
                    logger.info(
                        'Kill signal has been sent to process {} (return code: {})'.format(
                            selected_process_raw['name'], ret_kill
                        )
                    )
        elif self.kill_process and cs_status is not None:
            self.display_popup('Kill process only available in standalone mode')
        # Reset the one-shot kill-process flag
        self.kill_process = False
        # Display graph generation popup
        if self.args.generate_graph:
            self.display_popup('Generate graph in {}'.format(self.args.export_graph_path))
        return True
    def __display_header(self, stat_display):
        """Display the firsts lines (header) in the Curses interface.

        system + ip + uptime
        (cloud)
        """
        # First line
        self.new_line()
        self.space_between_column = 0
        # Width needed to display system + ip + uptime on one line
        l_uptime = 1
        for i in ['system', 'ip', 'uptime']:
            if i in stat_display:
                l_uptime += self.get_stats_display_width(stat_display[i])
        self.display_plugin(stat_display["system"], display_optional=(self.term_window.getmaxyx()[1] >= l_uptime))
        self.space_between_column = 3
        if 'ip' in stat_display:
            self.new_column()
            self.display_plugin(stat_display["ip"])
        self.new_column()
        self.display_plugin(
            stat_display["uptime"],
            # Remove the trailing space when a cloud line follows
            add_space=-(self.get_stats_display_width(stat_display["cloud"]) != 0)
        )
        self.init_column()
        if self.get_stats_display_width(stat_display["cloud"]) != 0:
            # Second line (optional)
            self.new_line()
            self.display_plugin(stat_display["cloud"])
    def __display_top(self, stat_display, stats):
        """Display the second line in the Curses interface.

        <QUICKLOOK> + CPU|PERCPU + <GPU> + MEM + SWAP + LOAD
        """
        self.init_column()
        self.new_line()
        # Init quicklook
        stat_display['quicklook'] = {'msgdict': []}
        # Dict for plugins width; a plugin only counts when it exposes a
        # 'disable_<name>' flag on args
        plugin_widths = {}
        for p in self._top:
            plugin_widths[p] = (
                self.get_stats_display_width(stat_display.get(p, 0)) if hasattr(self.args, 'disable_' + p) else 0
            )
        # Width of all plugins
        stats_width = sum(itervalues(plugin_widths))
        # Number of plugin but quicklook
        stats_number = sum(
            [int(stat_display[p]['msgdict'] != []) for p in self._top if not getattr(self.args, 'disable_' + p)]
        )
        if not self.args.disable_quicklook:
            # Quick look is in the place !
            if self.args.full_quicklook:
                quicklook_width = self.term_window.getmaxyx()[1] - (
                    stats_width + 8 + stats_number * self.space_between_column
                )
            else:
                quicklook_width = min(
                    self.term_window.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column),
                    self._quicklook_max_width - 5,
                )
            try:
                stat_display["quicklook"] = stats.get_plugin('quicklook').get_stats_display(
                    max_width=quicklook_width, args=self.args
                )
            except AttributeError as e:
                logger.debug("Quicklook plugin not available (%s)" % e)
            else:
                plugin_widths['quicklook'] = self.get_stats_display_width(stat_display["quicklook"])
                stats_width = sum(itervalues(plugin_widths)) + 1
            self.space_between_column = 1
            self.display_plugin(stat_display["quicklook"])
            self.new_column()
        # Compute spaces between plugins
        # Note: Only one space between Quicklook and others
        plugin_display_optional = {}
        for p in self._top:
            plugin_display_optional[p] = True
        if stats_number > 1:
            self.space_between_column = max(1, int((self.term_window.getmaxyx()[1] - stats_width) / (stats_number - 1)))
            for p in ['mem', 'cpu']:
                # No space ? Remove optional stats
                if self.space_between_column < 3:
                    plugin_display_optional[p] = False
                    plugin_widths[p] = (
                        self.get_stats_display_width(stat_display[p], without_option=True)
                        if hasattr(self.args, 'disable_' + p)
                        else 0
                    )
                    stats_width = sum(itervalues(plugin_widths)) + 1
                    self.space_between_column = max(
                        1, int((self.term_window.getmaxyx()[1] - stats_width) / (stats_number - 1))
                    )
        else:
            self.space_between_column = 0
        # Display CPU, MEM, SWAP and LOAD
        for p in self._top:
            if p == 'quicklook':
                continue
            if p in stat_display:
                self.display_plugin(stat_display[p], display_optional=plugin_display_optional[p])
            if p != 'load':
                # Skip last column
                self.new_column()
        # Space between column
        self.space_between_column = 3
        # Backup line position (restored by __display_right)
        self.saved_line = self.next_line
    def __display_left(self, stat_display):
        """Display the left sidebar in the Curses interface.

        Plugins are stacked vertically in ``self._left_sidebar`` order; a
        plugin is shown only when it has an enable/disable flag and a view.
        """
        self.init_column()
        if self.args.disable_left_sidebar:
            return
        for p in self._left_sidebar:
            if (hasattr(self.args, 'enable_' + p) or hasattr(self.args, 'disable_' + p)) and p in stat_display:
                self.new_line()
                self.display_plugin(stat_display[p])
def __display_right(self, stat_display):
"""Display the right sidebar in the Curses interface.
docker + processcount + amps + processlist + alert
"""
# Do not display anything if space is not available...
if self.term_window.getmaxyx()[1] < self._left_sidebar_min_width:
return
# Restore line position
self.next_line = self.saved_line
# Display right sidebar
self.new_column()
for p in self._right_sidebar:
if (hasattr(self.args, 'enable_' + p) or hasattr(self.args, 'disable_' + p)) and p in stat_display:
if p not in p:
# Catch for issue #1470
continue
self.new_line()
if p == 'processlist':
self.display_plugin(
stat_display['processlist'],
display_optional=(self.term_window.getmaxyx()[1] > 102),
display_additional=(not MACOS),
max_y=(
self.term_window.getmaxyx()[0] - self.get_stats_display_height(stat_display['alert']) - 2
),
)
else:
self.display_plugin(stat_display[p])
    def display_popup(
        self, message, size_x=None, size_y=None, duration=3, popup_type='info', input_size=30, input_value=None
    ):
        """
        Display a centered popup.

        :param message: text to display; lines are split on '\\n'
        :param size_x: popup width (auto-computed from the message if None)
        :param size_y: popup height (auto-computed from the message if None)
        :param duration: display time in seconds (popup_type == 'info' only)
        :param popup_type: 'info', 'input' or 'yesno'
        :param input_size: width of the text field (popup_type == 'input')
        :param input_value: initial content of the text field

        popup_type: ='info'
         Just an information popup, no user interaction
         Display a centered popup with the given message during duration seconds
         If size_x and size_y: set the popup size
         else set it automatically
         Return True if the popup could be displayed

        popup_type='input'
         Display a centered popup with the given message and a input field
         If size_x and size_y: set the popup size
         else set it automatically
         Return the input string or None if the field is empty

        popup_type='yesno'
         Display a centered popup with the given message
         If size_x and size_y: set the popup size
         else set it automatically
         Return True (yes) or False (no)
        """
        # Center the popup
        sentence_list = message.split('\n')
        if size_x is None:
            size_x = len(max(sentence_list, key=len)) + 4
            # Add space for the input field
            if popup_type == 'input':
                size_x += input_size
        if size_y is None:
            size_y = len(sentence_list) + 4
        screen_x = self.term_window.getmaxyx()[1]
        screen_y = self.term_window.getmaxyx()[0]
        if size_x > screen_x or size_y > screen_y:
            # No size to display the popup => abord
            return False
        pos_x = int((screen_x - size_x) / 2)
        pos_y = int((screen_y - size_y) / 2)
        # Create the popup
        popup = curses.newwin(size_y, size_x, pos_y, pos_x)
        # Fill the popup
        popup.border()
        # Add the message
        for y, m in enumerate(sentence_list):
            popup.addnstr(2 + y, 2, m, len(m))
        # NOTE(review): the branches below reuse the loop variable `m`, i.e.
        # the LAST line of the message, to position the input sub-window.
        if popup_type == 'info':
            # Display the popup
            popup.refresh()
            self.wait(duration * 1000)
            return True
        elif popup_type == 'input':
            # Create a sub-window for the text field
            sub_pop = popup.derwin(1, input_size, 2, 2 + len(m))
            sub_pop.attron(self.colors_list['FILTER'])
            # Init the field with the current value
            if input_value is not None:
                sub_pop.addnstr(0, 0, input_value, len(input_value))
            # Display the popup
            popup.refresh()
            sub_pop.refresh()
            # Create the textbox inside the sub-windows
            self.set_cursor(2)
            self.term_window.keypad(1)
            textbox = GlancesTextbox(sub_pop, insert_mode=True)
            textbox.edit()
            self.set_cursor(0)
            # self.term_window.keypad(0)
            if textbox.gather() != '':
                logger.debug("User enters the following string: %s" % textbox.gather())
                # Strip the trailing character appended by Textbox.gather()
                return textbox.gather()[:-1]
            else:
                logger.debug("User centers an empty string")
                return None
        elif popup_type == 'yesno':
            # # Create a sub-window for the text field
            sub_pop = popup.derwin(1, 2, len(sentence_list) + 1, len(m) + 2)
            sub_pop.attron(self.colors_list['FILTER'])
            # Init the field with the current value
            sub_pop.addnstr(0, 0, '', 0)
            # Display the popup
            popup.refresh()
            sub_pop.refresh()
            # Create the textbox inside the sub-windows
            self.set_cursor(2)
            self.term_window.keypad(1)
            textbox = GlancesTextboxYesNo(sub_pop, insert_mode=False)
            textbox.edit()
            self.set_cursor(0)
            # self.term_window.keypad(0)
            return textbox.gather()
    def display_plugin(self, plugin_stats, display_optional=True, display_additional=True, max_y=65535, add_space=0):
        """Display the plugin_stats on the screen.

        :param plugin_stats: dict with 'msgdict', 'align' and 'display' keys
        :param display_optional: display the optional stats if True
        :param display_additional: display additional stats if True
        :param max_y: do not display line > max_y
        :param add_space: add x space (line) after the plugin
        """
        # Exit if:
        # - the plugin_stats message is empty
        # - the display tag = False
        if plugin_stats is None or not plugin_stats['msgdict'] or not plugin_stats['display']:
            # Exit
            return 0
        # Get the screen size
        screen_x = self.term_window.getmaxyx()[1]
        screen_y = self.term_window.getmaxyx()[0]
        # Set the upper/left position of the message
        if plugin_stats['align'] == 'right':
            # Right align (last column)
            display_x = screen_x - self.get_stats_display_width(plugin_stats)
        else:
            display_x = self.column
        if plugin_stats['align'] == 'bottom':
            # Bottom (last line)
            display_y = screen_y - self.get_stats_display_height(plugin_stats)
        else:
            display_y = self.line
        # Display
        x = display_x
        x_max = x
        y = display_y
        for m in plugin_stats['msgdict']:
            # New line
            try:
                if m['msg'].startswith('\n'):
                    # Go to the next line
                    y += 1
                    # Return to the first column
                    x = display_x
                    continue
            except:
                # Avoid exception (see issue #1692)
                pass
            # Do not display outside the screen
            if x < 0:
                continue
            if not m['splittable'] and (x + len(m['msg']) > screen_x):
                continue
            if y < 0 or (y + 1 > screen_y) or (y > max_y):
                break
            # If display_optional = False do not display optional stats
            if not display_optional and m['optional']:
                continue
            # If display_additional = False do not display additional stats
            if not display_additional and m['additional']:
                continue
            # Is it possible to display the stat with the current screen size
            # !!! Crash if not try/except... Why ???
            try:
                self.term_window.addnstr(
                    y,
                    x,
                    m['msg'],
                    # Do not display outside the screen
                    screen_x - x,
                    self.colors_list[m['decoration']],
                )
            except Exception:
                pass
            else:
                # New column
                # Python 2: we need to decode to get real screen size because
                # UTF-8 special tree chars occupy several bytes.
                # Python 3: strings are strings and bytes are bytes, all is
                # good.
                try:
                    x += len(u(m['msg']))
                except UnicodeDecodeError:
                    # Quick and dirty hack for issue #745
                    pass
                if x > x_max:
                    x_max = x
        # Compute the next Glances column/line position
        self.next_column = max(self.next_column, x_max + self.space_between_column)
        self.next_line = max(self.next_line, y + self.space_between_line)
        # Have empty lines after the plugins
        self.next_line += add_space
    def erase(self):
        """Erase the whole content of the curses screen."""
        self.term_window.erase()
    def flush(self, stats, cs_status=None):
        """Clear and update the screen in one pass.

        :param stats: Stats database to display
        :param cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to the server
            "Disconnected": Client is disconnected from the server
        """
        self.erase()
        self.display(stats, cs_status=cs_status)
    def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
        """Update the screen and poll the keyboard until the refresh deadline.

        :param stats: Stats database to display
        :param duration: duration of the loop (seconds)
        :param cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to the server
            "Disconnected": Client is disconnected from the server
        :param return_to_browser:
            True: Do not exist, return to the browser list
            False: Exit and return to the shell
        :return: True if exit key has been pressed else False
        """
        # Flush display
        self.flush(stats, cs_status=cs_status)
        # If the duration is < 0 (update + export time > refresh_time)
        # Then display the interface and log a message
        if duration <= 0:
            logger.warning('Update and export time higher than refresh_time.')
            duration = 0.1
        # Wait duration (in s) time
        isexitkey = False
        countdown = Timer(duration)
        # Set the default timeout (in ms) between two getch
        self.term_window.timeout(100)
        while not countdown.finished() and not isexitkey:
            # Getkey
            pressedkey = self.__catch_key(return_to_browser=return_to_browser)
            # ESC or 'q' requests an exit
            isexitkey = pressedkey == ord('\x1b') or pressedkey == ord('q')
            if pressedkey == curses.KEY_F5:
                # Were asked to refresh (early return, screen already flushed)
                return isexitkey
            if isexitkey and self.args.help_tag:
                # Quit from help should return to main screen, not exit #1874
                self.args.help_tag = not self.args.help_tag
                isexitkey = False
                return isexitkey
            if not isexitkey and pressedkey > -1:
                # Redraw display
                self.flush(stats, cs_status=cs_status)
                # Overwrite the timeout with the countdown
                self.wait(delay=int(countdown.get() * 1000))
        return isexitkey
def wait(self, delay=100):
"""Wait delay in ms"""
curses.napms(100)
    def get_stats_display_width(self, curse_msg, without_option=False):
        """Return the width (in columns) of the formatted curses message.

        :param curse_msg: dict with a 'msgdict' list of message items
        :param without_option: if True, skip items flagged 'optional'
        :returns: length of the widest logical line, or 0 on any error
        """
        try:
            if without_option:
                # Size without options
                c = len(
                    max(
                        ''.join(
                            [
                                # Non-ASCII chars are replaced so len() counts
                                # one cell per character
                                (u(u(nativestr(i['msg'])).encode('ascii', 'replace')) if not i['optional'] else "")
                                for i in curse_msg['msgdict']
                            ]
                        ).split('\n'),
                        key=len,
                    )
                )
            else:
                # Size with all options
                c = len(
                    max(
                        ''.join(
                            [u(u(nativestr(i['msg'])).encode('ascii', 'replace')) for i in curse_msg['msgdict']]
                        ).split('\n'),
                        key=len,
                    )
                )
        except Exception as e:
            logger.debug('ERROR: Can not compute plugin width ({})'.format(e))
            return 0
        else:
            return c
def get_stats_display_height(self, curse_msg):
"""Return the height of the formatted curses message.
The height is defined by the number of '\n' (new line).
"""
try:
c = [i['msg'] for i in curse_msg['msgdict']].count('\n')
except Exception as e:
logger.debug('ERROR: Can not compute plugin height ({})'.format(e))
return 0
else:
return c + 1
class GlancesCursesStandalone(_GlancesCurses):
    """Curses UI variant used in standalone mode (no behaviour override yet)."""
    pass
class GlancesCursesClient(_GlancesCurses):
    """Curses UI variant used in client mode (no behaviour override yet)."""
    pass
class GlancesTextbox(Textbox, object):
    """Textbox used for the process-filter input popup.

    Remaps Enter (LF) to 'terminate edition' and DEL (127) to backspace so
    the field behaves like a plain one-line form input.
    """

    # ch -> command code: 10 (Enter) ends the edition, 127 (DEL) acts as BS
    _KEY_REMAP = {10: 0, 127: 8}

    def __init__(self, *args, **kwargs):
        super(GlancesTextbox, self).__init__(*args, **kwargs)

    def do_command(self, ch):
        remapped = self._KEY_REMAP.get(ch)
        if remapped is not None:
            return remapped
        return super(GlancesTextbox, self).do_command(ch)
class GlancesTextboxYesNo(Textbox, object):
    """Textbox used for the yes/no confirmation popup (kill process)."""

    def __init__(self, *args, **kwargs):
        super(GlancesTextboxYesNo, self).__init__(*args, **kwargs)

    def do_command(self, ch):
        # Delegate every key to the stock Textbox behaviour
        return super(GlancesTextboxYesNo, self).do_command(ch)
|
import pytest
from numpy import isclose
from dolfin import (assemble, dx, Function, FunctionSpace, grad, inner, solve, TestFunction, TrialFunction,
UnitSquareMesh)
from rbnics.backends import LinearSolver as FactoryLinearSolver
from rbnics.backends.dolfin import LinearSolver as DolfinLinearSolver
from test_dolfin_utils import RandomDolfinFunction
# Backend under test; rebound at module level by test_dolfin_linear_solver()
# before each non-builtin benchmark so Data.evaluate_backend can see it.
LinearSolver = None
# Maps the test_type parameter to the backend class to benchmark.
AllLinearSolver = {"dolfin": DolfinLinearSolver, "factory": FactoryLinearSolver}
class Data(object):
    """Benchmark fixture: a reaction-diffusion problem on the unit square.

    Builds the bilinear form a(u, v) = (grad u, grad v) + (u, v) on a P1
    Lagrange space and compares the backend LinearSolver against dolfin's
    built-in solve, either at the form level or the assembled-tensor level.
    """

    def __init__(self, Th, callback_type):
        # Create mesh and define function space
        mesh = UnitSquareMesh(Th, Th)
        self.V = FunctionSpace(mesh, "Lagrange", 1)
        # Define variational problem
        u = TrialFunction(self.V)
        v = TestFunction(self.V)
        self.a = inner(grad(u), grad(v)) * dx + inner(u, v) * dx
        # Right-hand side form factory: g is the (random) source function
        self.f = lambda g: g * v * dx
        # Define callback function depending on callback type:
        # "form callbacks" keeps UFL forms, "tensor callbacks" pre-assembles
        assert callback_type in ("form callbacks", "tensor callbacks")
        if callback_type == "form callbacks":
            def callback(arg):
                return arg
        elif callback_type == "tensor callbacks":
            def callback(arg):
                return assemble(arg)
        self.callback_type = callback_type
        self.callback = callback

    def generate_random(self):
        """Return a fresh (lhs form, rhs form) pair with a random source."""
        # Generate random rhs
        g = RandomDolfinFunction(self.V)
        # Return
        return (self.a, self.f(g))

    def evaluate_builtin(self, a, f):
        """Solve with dolfin's built-in solve() as the reference."""
        a = self.callback(a)
        f = self.callback(f)
        result_builtin = Function(self.V)
        if self.callback_type == "form callbacks":
            # Form overload of solve()
            solve(a == f, result_builtin, solver_parameters={"linear_solver": "mumps"})
        elif self.callback_type == "tensor callbacks":
            # Matrix/vector overload of solve()
            solve(a, result_builtin.vector(), f, "mumps")
        return result_builtin

    def evaluate_backend(self, a, f):
        """Solve with the LinearSolver backend selected at module level."""
        a = self.callback(a)
        f = self.callback(f)
        result_backend = Function(self.V)
        solver = LinearSolver(a, result_backend, f)
        solver.set_parameters({
            "linear_solver": "mumps"
        })
        solver.solve()
        return result_backend

    def assert_backend(self, a, f, result_backend):
        """Check the backend solution matches the builtin one (rel. l2 error)."""
        result_builtin = self.evaluate_builtin(a, f)
        error = Function(self.V)
        error.vector().add_local(+ result_backend.vector().get_local())
        error.vector().add_local(- result_builtin.vector().get_local())
        error.vector().apply("add")
        relative_error = error.vector().norm("l2") / result_builtin.vector().norm("l2")
        assert isclose(relative_error, 0., atol=1e-12)
@pytest.mark.parametrize("Th", [2**i for i in range(3, 9)])
@pytest.mark.parametrize("callback_type", ["form callbacks", "tensor callbacks"])
@pytest.mark.parametrize("test_type", ["builtin"] + list(AllLinearSolver.keys()))
def test_dolfin_linear_solver(Th, callback_type, test_type, benchmark):
    """Benchmark dolfin's builtin solve against each LinearSolver backend.

    Uses pytest-benchmark's `benchmark` fixture; for backend runs the result
    is additionally verified against the builtin solution in the teardown.
    """
    data = Data(Th, callback_type)
    print("Th = " + str(Th) + ", Nh = " + str(data.V.dim()))
    if test_type == "builtin":
        print("Testing " + test_type + ", callback_type = " + callback_type)
        benchmark(data.evaluate_builtin, setup=data.generate_random)
    else:
        print("Testing " + test_type + " backend" + ", callback_type = " + callback_type)
        # Rebind the module-level LinearSolver so Data.evaluate_backend
        # picks the backend under test
        global LinearSolver
        LinearSolver = AllLinearSolver[test_type]
        benchmark(data.evaluate_backend, setup=data.generate_random, teardown=data.assert_backend)
|
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
from DelogX.utils.plugin import Plugin
class DelogReadMore(Plugin):
    """DelogX plugin: split a post at its first <hr> into summary + more.

    The part before the first horizontal rule becomes the visible summary,
    followed by a localized "Read More" link; the remainder is wrapped in a
    hidden/styled "post-more" div.
    """

    # I18n instance used to translate the "Read More" label
    i18n = None

    def run(self):
        """Register the post-update hook and load the plugin locale."""
        conf = self.blog.default_conf
        self.i18n = I18n(
            Path.format_url(self.workspace, 'locale'), conf('local.locale'))
        self.manager.add_action('dx_post_update', self.parse_readmore)

    def parse_readmore(self, post):
        """Rewrite *post*.content with a summary + read-more structure.

        Splits on the first <hr>, <HR>, <hr/> or <hr /> tag (case-insensitive,
        at most one split). Posts without an <hr> keep their full content as
        the summary and get a 'no-more-content' CSS class on the link.
        """
        if not post:
            return
        content_split = re.split(r'<[Hh][Rr](?:\s+\/)?>', post.content, 1)
        if len(content_split) == 2:
            summary, more = content_split
        else:
            summary = content_split[0]
            more = ''
        post_url = self.blog.runtime.get('url_prefix.post')
        post_url = Path.format_url(post_url, Path.urlencode(post.url))
        content = '''{0}
        <div class="{1}"><a href="{2}">{3}</a></div>
        <div class="post-more">{4}</div>
        '''
        more_class = ['read-more']
        if not more:
            more_class.append('no-more-content')
        more_class = ' '.join(more_class)
        content = content.format(
            summary, more_class, post_url, self.i18n.get('Read More'), more)
        post.content = content
|
from django.db import migrations
class Migration(migrations.Migration):
    """Restrict Ticket default permissions to add/change/delete/view
    (drops any extra custom default permissions)."""

    dependencies = [
        ('tickets', '0008_auto_20180730_2035'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='ticket',
            options={'default_permissions': ('add', 'change', 'delete', 'view')},
        ),
    ]
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the core app.

    Creates the Category, Source, Prediction and Realisation tables.
    Auto-generated by South; do not hand-edit the `models` frozen ORM dict.
    """

    def forwards(self, orm):
        """Create the four core tables and emit post-create signals."""
        # Adding model 'Category'
        db.create_table(u'core_category', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=75)),
        ))
        db.send_create_signal(u'core', ['Category'])
        # Adding model 'Source'
        db.create_table(u'core_source', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=75)),
            ('author', self.gf('django.db.models.fields.CharField')(max_length=75)),
            ('year_published', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('series_season', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('series_episode', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
        ))
        db.send_create_signal(u'core', ['Source'])
        # Adding model 'Prediction'
        db.create_table(u'core_prediction', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Source'])),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Category'])),
            ('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('year_predicted', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('headline_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('headline_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
            ('username', self.gf('django.db.models.fields.CharField')(max_length=75)),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('edition_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'core', ['Prediction'])
        # Adding model 'Realisation'
        db.create_table(u'core_realisation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('prediction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Prediction'])),
            ('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
            ('year_introduced', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('edition_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'core', ['Realisation'])

    def backwards(self, orm):
        """Drop the four core tables (reverse of forwards)."""
        # Deleting model 'Category'
        db.delete_table(u'core_category')
        # Deleting model 'Source'
        db.delete_table(u'core_source')
        # Deleting model 'Prediction'
        db.delete_table(u'core_prediction')
        # Deleting model 'Realisation'
        db.delete_table(u'core_realisation')

    # Frozen ORM snapshot used by South; machine-generated.
    models = {
        u'core.category': {
            'Meta': {'object_name': 'Category'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75'})
        },
        u'core.prediction': {
            'Meta': {'object_name': 'Prediction'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Category']"}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            'description_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            'edition_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'headline_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            'headline_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'image_credit': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'more_info': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Source']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'year_predicted': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'core.realisation': {
            'Meta': {'object_name': 'Realisation'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            'description_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            'edition_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'image_credit': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'more_info': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'prediction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Prediction']"}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'year_introduced': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'core.source': {
            'Meta': {'object_name': 'Source'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'description_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            'description_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'image_credit': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'more_info': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'series_episode': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'series_season': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'year_published': ('django.db.models.fields.PositiveIntegerField', [], {})
        }
    }

    complete_apps = ['core']
|
from msaf.models import dbsession, Sample, Marker, Batch
from msaf.lib.analytics import SampleSet
from itertools import cycle
import yaml
def load_yaml(yaml_text):
    """Parse a YAML analysis specification into component instances.

    Returns a dict with any of the keys 'selector', 'filter' and
    'differentiation', each mapped to the object built via the matching
    class's ``from_dict`` constructor.

    Raises:
        RuntimeError: if the document contains an unknown top-level key.
    """
    # safe_load avoids arbitrary Python object construction from the YAML
    # text (yaml.load without an explicit Loader is unsafe and deprecated).
    d = yaml.safe_load(yaml_text)
    instances = {}
    for k in d:
        if k == 'selector':
            instances['selector'] = Selector.from_dict(d[k])
        elif k == 'filter':
            instances['filter'] = Filter.from_dict(d[k])
        elif k == 'differentiation':
            instances['differentiation'] = Differentiation.from_dict(d[k])
        else:
            # Previously raised a bare RuntimeError() with no message.
            raise RuntimeError('unknown top-level key in YAML document: %r' % k)
    return instances
def save_yaml( instances ):
    """Placeholder: serialise component instances back to YAML (unimplemented)."""
    # we don't really need to save to YAML yet
    pass
# Endless colour palette: get_sample_sets() pulls the next colour for each
# SampleSet it creates, repeating from the start after twelve sets.
colours = cycle( [ 'red', 'green', 'blue', 'orange', 'purple', 'black', 'magenta',
                   'wheat', 'cyan', 'brown', 'slateblue', 'lightgreen' ] )
class Selector(object):
    """Selects samples and markers for an analysis run.

    ``samples`` is a mapping of set label -> list of sample specs (each spec
    may contain 'query', 'batch' and/or 'codes' keys); ``markers`` is a list
    of marker names.
    """

    def __init__(self, samples=None, markers=None):
        # None sentinels avoid the shared-mutable-default pitfall.  The
        # previous version also silently discarded both arguments; they are
        # honoured now.
        self.samples = samples if samples is not None else []
        self.markers = markers if markers is not None else []

    @staticmethod
    def from_dict(d):
        """Build a Selector from a plain dict with 'samples'/'markers' keys."""
        selector = Selector()
        selector.samples = d['samples']
        selector.markers = d['markers']
        return selector

    def to_dict(self):
        """Inverse of :meth:`from_dict`."""
        return {'samples': self.samples, 'markers': self.markers}

    @staticmethod
    def load(yaml_text):
        """Build a Selector from YAML text."""
        # safe_load: never construct arbitrary Python objects from YAML.
        return Selector.from_dict(yaml.safe_load(yaml_text))

    def dump(self):
        """Serialise this Selector to YAML text."""
        return yaml.dump(self.to_dict())

    def get_sample_ids(self, db):
        """ return sample ids; db is SQLa dbsession handler """
        pass

    def get_marker_ids(self):
        """Return the database ids of the markers named in self.markers."""
        markers = [Marker.search(name) for name in self.markers]
        return [marker.id for marker in markers]

    def get_sample_sets(self, db=None):
        """Resolve self.samples into a list of SampleSet instances."""
        if not db:
            db = dbsession
        sample_set = []
        for label in self.samples:
            if label == '__ALL__':
                # single query
                pass
            sample_ids = []
            sample_selector = self.samples[label]
            for spec in sample_selector:
                if 'query' in spec:
                    if '$' in spec['query']:
                        raise RuntimeError('query must not be an advanced one')
                    if 'batch' in spec:
                        query = spec['batch'] + '[batch] & (' + spec['query'] + ')'
                elif 'codes' in spec:
                    batch = Batch.search(spec['batch'])
                    q = dbsession.query(Sample.id).join(Batch).filter(
                        Batch.id == batch.id).filter(
                        Sample.code.in_(spec['codes']))
                    sample_ids += list(q)
            if label == '__ALL__':
                label = '-'
            sample_set.append(SampleSet(location='', year=0,
                                        label=label,
                                        colour=next(colours),
                                        sample_ids=sample_ids))
        return sample_set
class Filter(object):
    """Holds allele-filtering thresholds parsed from a YAML spec."""

    def __init__(self):
        self.abs_threshold = 0              # minimum absolute peak height
        self.rel_threshold = 0              # minimum relative peak height
        self.rel_cutoff = 0
        self.sample_qual_threshold = 0
        self.marker_qual_threshold = 0
        self.sample_options = None

    @staticmethod
    def from_dict(d):
        """Build a Filter from a plain dict, coercing the numeric fields."""
        filter_params = Filter()
        filter_params.abs_threshold = int(d['abs_threshold'])
        filter_params.rel_threshold = float(d['rel_threshold'])
        filter_params.rel_cutoff = float(d['rel_cutoff'])
        filter_params.sample_qual_threshold = float(d['sample_qual_threshold'])
        filter_params.marker_qual_threshold = float(d['marker_qual_threshold'])
        # BUG FIX: this was assigned to a stray 'sample_option' attribute,
        # leaving the declared 'sample_options' permanently None.  The YAML
        # key 'sample_option' is kept for compatibility with existing files.
        filter_params.sample_options = d['sample_option']
        return filter_params

    def to_dict(self):
        pass

    @staticmethod
    def load(yaml_text):
        pass

    def dump(self):
        pass
class Differentiation(object):
    """Holds spatial/temporal/detection differentiation settings."""

    def __init__(self):
        self.spatial = 0
        self.temporal = 0
        self.differentiation = 0
        # NOTE(review): from_dict populates 'detection' while this class
        # originally declared only 'differentiation'; 'detection' is now
        # initialised too so the attribute exists regardless of how the
        # object was constructed — confirm which one downstream code reads.
        self.detection = 0

    @staticmethod
    def from_dict(d):
        """Build a Differentiation from a plain dict."""
        differentiation = Differentiation()
        differentiation.spatial = d['spatial']
        differentiation.temporal = d['temporal']
        differentiation.detection = d['detection']
        return differentiation

    def to_dict(self):
        pass

    @staticmethod
    def load(yaml_text):
        pass

    def dump(self):
        pass
def create_group( selector ):
    """Placeholder: group samples from a Selector (not implemented yet)."""
    pass
|
"""This module contains an object that represents a Telegram Message Parse Modes."""
from typing import ClassVar
from telegram import constants
from telegram.utils.deprecate import set_new_attribute_deprecated
class ParseMode:
    """This object represents a Telegram Message Parse Modes."""

    # Keeping __dict__ inside __slots__ lets set_new_attribute_deprecated
    # warn on (rather than forbid) assignment of new attributes.
    __slots__ = ('__dict__',)

    MARKDOWN: ClassVar[str] = constants.PARSEMODE_MARKDOWN
    """:const:`telegram.constants.PARSEMODE_MARKDOWN`\n

    Note:
        :attr:`MARKDOWN` is a legacy mode, retained by Telegram for backward compatibility.
        You should use :attr:`MARKDOWN_V2` instead.
    """
    MARKDOWN_V2: ClassVar[str] = constants.PARSEMODE_MARKDOWN_V2
    """:const:`telegram.constants.PARSEMODE_MARKDOWN_V2`"""
    HTML: ClassVar[str] = constants.PARSEMODE_HTML
    """:const:`telegram.constants.PARSEMODE_HTML`"""

    def __setattr__(self, key: str, value: object) -> None:
        """Route new-attribute assignment through the deprecation helper."""
        set_new_attribute_deprecated(self, key, value)
|
"""
Base version of package/tasks.py, created by version 0.3.0 of
package/root/dir> dk-tasklib install
(it should reside in the root directory of your package)
This file defines tasks for the Invoke tool: http://www.pyinvoke.org
Basic usage::
inv -l # list all available tasks
inv build -f # build everything, forcefully
inv build --docs # only build the docs
dk-tasklib is a library of basic tasks that tries to automate common tasks.
dk-tasklib will attempt to install any tools/libraries/etc. that are required,
e.g. when running the task to compile x.less to x.css, it will check that
the lessc compiler is installed (and if not it will attempt to install it).
This file is an initial skeleton, you are supposed to edit and add to it so it
will fit your use case.
"""
from __future__ import print_function
import os
import warnings
from dkfileutils.changed import changed
from dkfileutils.path import Path
from dktasklib.wintask import task
from invoke import Collection
from dktasklib import docs as doctools
from dktasklib import jstools
from dktasklib import lessc
from dktasklib import version, upversion
from dktasklib.manage import collectstatic
from dktasklib.package import Package, package
from dktasklib.watch import Watcher
from dktasklib.publish import publish
# Root directory of this package checkout.
DIRNAME = Path(os.path.dirname(__file__))

# Django settings module used by collectstatic/autodoc; empty = autodetect.
DJANGO_SETTINGS_MODULE = ''

# Filenames (relative to {pkg.source_js}) that build_js compiles with babel.
JSX_FILENAMES = []

HAVE_SETTINGS = bool(DJANGO_SETTINGS_MODULE)
if not HAVE_SETTINGS and (DIRNAME / 'settings.py').exists():
    # look for a dummy settings.py module in the root of the package.
    # NOTE(review): HAVE_SETTINGS is not refreshed after this autodetect,
    # so collectstatic in build() stays disabled even when settings.py is
    # found — confirm whether that is intentional.
    DJANGO_SETTINGS_MODULE = 'settings'
if DJANGO_SETTINGS_MODULE:
    os.environ['DJANGO_SETTINGS_MODULE'] = DJANGO_SETTINGS_MODULE
WARN_ABOUT_SETTINGS = not bool(DJANGO_SETTINGS_MODULE)
@task
def build_js(ctx, force=False):
    """Compile every JSX source listed in JSX_FILENAMES with babel."""
    src_template = '{pkg.source_js}/%s'
    dst_template = '{pkg.django_static}/{pkg.name}/js/%s.js'
    for jsx_name in JSX_FILENAMES:
        jstools.babel(
            ctx,
            src_template % jsx_name,
            dst_template % jsx_name,
            force=force
        )
@task
def build(ctx, less=False, docs=False, js=False, force=False):
    """Build everything and collectstatic.

    With no flags set, build less, docs and js; with any flag given, build
    only the selected artifact(s).  Finally run collectstatic when a django
    settings module is available and the static directory changed.
    """
    specified = any([less, docs, js])
    buildall = not specified

    if buildall or less:
        less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
        if less_fname.exists():
            lessc.LessRule(
                ctx,
                src='{pkg.source_less}/{pkg.name}.less',
                dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
                force=force
            )
        elif less:
            # only warn when --less was requested explicitly
            warnings.warn(
                "WARNING: build --less specified, but no file at: " + less_fname
            )

    if buildall or docs:
        if WARN_ABOUT_SETTINGS:
            # BUG FIX: the two string fragments used to join as "cannotuse".
            warnings.warn(
                "autodoc might need a dummy settings file in the root of "
                "your package. Since it runs in a separate process you cannot "
                "use settings.configure()"
            )
        doctools.build(ctx, force=force)

    if buildall or js:
        build_js(ctx, force)

    if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
        collectstatic(ctx, DJANGO_SETTINGS_MODULE, force=force)
@task
def watch(ctx):
    """Automatically rebuild whenever a relevant source file changes."""
    watcher = Watcher(ctx)
    # Map each watched (directory, extension) pair to the build flag it triggers.
    rules = [
        ('{pkg.source_less}', '.less', dict(less=True)),
        ('{pkg.source_js}', '.jsx', dict(js=True)),
        ('{pkg.docs}', '.rst', dict(docs=True)),
    ]
    for directory, extension, flags in rules:
        watcher.watch_directory(
            path=directory, ext=extension,
            # default-arg binding avoids the late-binding closure pitfall
            action=lambda e, flags=flags: build(ctx, **flags)
        )
    watcher.start()
# Task namespace exposed to invoke: local tasks plus the sub-collections
# re-exported from dktasklib.
ns = Collection(
    build,
    watch,
    build_js,
    lessc,
    doctools,
    version, upversion,
    package,
    collectstatic,
    publish,
)
# Default configuration: package metadata plus echoing of shell commands.
ns.configure({
    'pkg': Package(),
    'run': {
        'echo': True
    }
})
|
from typing import Optional
from UM.Logger import Logger
from cura.CuraApplication import CuraApplication
from cura.PrinterOutput.Models.MaterialOutputModel import MaterialOutputModel
from .BaseCloudModel import BaseCloudModel
class CloudClusterPrinterConfigurationMaterial(BaseCloudModel):
    """Cloud-cluster model describing the material loaded in one print core."""

    ## Creates a new material configuration model.
    #  \param brand: The brand of material in this print core, e.g. 'Ultimaker'.
    #  \param color: The color of material in this print core, e.g. 'Blue'.
    #  \param guid: The GUID of the material in this print core, e.g. '506c9f0d-e3aa-4bd4-b2d2-23e2425b1aa9'.
    #  \param material: The type of material in this print core, e.g. 'PLA'.
    def __init__(self, brand: Optional[str] = None, color: Optional[str] = None, guid: Optional[str] = None,
                 material: Optional[str] = None, **kwargs) -> None:
        self.guid = guid
        self.brand = brand
        self.color = color
        self.material = material
        super().__init__(**kwargs)

    ## Creates a material output model based on this cloud printer material.
    #  Looks up the GUID in the local material manager, preferring read-only
    #  (bundled) material groups ordered by name; falls back to the values
    #  the cluster sent when the GUID is unknown locally.
    def createOutputModel(self) -> MaterialOutputModel:
        material_manager = CuraApplication.getInstance().getMaterialManager()
        material_group_list = material_manager.getMaterialGroupListByGUID(self.guid) or []

        # Sort the material groups by "is_read_only = True" first, and then the name alphabetically.
        read_only_material_group_list = list(filter(lambda x: x.is_read_only, material_group_list))
        non_read_only_material_group_list = list(filter(lambda x: not x.is_read_only, material_group_list))
        material_group = None
        if read_only_material_group_list:
            read_only_material_group_list = sorted(read_only_material_group_list, key = lambda x: x.name)
            material_group = read_only_material_group_list[0]
        elif non_read_only_material_group_list:
            non_read_only_material_group_list = sorted(non_read_only_material_group_list, key = lambda x: x.name)
            material_group = non_read_only_material_group_list[0]

        if material_group:
            # Local match found: take the metadata from the root container.
            container = material_group.root_material_node.getContainer()
            color = container.getMetaDataEntry("color_code")
            brand = container.getMetaDataEntry("brand")
            material_type = container.getMetaDataEntry("material")
            name = container.getName()
        else:
            Logger.log("w", "Unable to find material with guid {guid}. Using data as provided by cluster"
                       .format(guid = self.guid))
            color = self.color
            brand = self.brand
            material_type = self.material
            name = "Empty" if self.material == "empty" else "Unknown"
        return MaterialOutputModel(guid = self.guid, type = material_type, brand = brand, color = color, name = name)
|
import weakref
import logging
logger = logging.getLogger(__name__)
import core.cons as cons
from core.api import api
from core.config import conf
from qt import signals
OPTION_IP_RENEW_ACTIVE = "ip_renew_active"
OPTION_RENEW_SCRIPT_ACTIVE = "renew_script_active"
class IPRenewerGUI:
    """GUI-side coordinator for renewing the external IP address.

    Stops all active downloads, triggers the renew (shell script or the
    default method), then restarts the stopped downloads once finished.
    NOTE(review): this is Python-2 code — it concatenates dict.values()
    results and calls itervalues(); porting to Python 3 requires changes.
    """

    def __init__(self, parent, ip_renewer):
        """parent: GUI window (held weakly); ip_renewer: renew backend."""
        self.ip_renewer = ip_renewer
        # weakref avoids a reference cycle keeping the GUI window alive.
        self.weak_parent = weakref.ref(parent)
        self.id_item_list = []
        self.is_working = True
        if self.can_change_ip():
            # Remember every active and queued download so update() can
            # restart them after the renew completes.
            self.id_item_list = [download_item.id for download_item in api.get_active_downloads().values() + api.get_queue_downloads().values()]
            signals.on_stop_all.emit()
            if conf.get_addon_option(OPTION_RENEW_SCRIPT_ACTIVE, default=False, is_bool=True):
                self.ip_renewer.start_shell_script()
            else:
                self.ip_renewer.start_default_ip_renew()
            # _ is presumably a globally-installed gettext — TODO confirm.
            self.status_msg = _("Changing IP...")
            signals.status_bar_push_msg.emit(self.status_msg)
            # poll the renewer once per second
            self.timer = self.parent.idle_timeout(1000, self.update)
        else:
            self.is_working = False

    @property
    def parent(self):
        # May return None once the parent window has been garbage-collected.
        return self.weak_parent()

    def can_change_ip(self):
        """Return False if any active download has already started."""
        for download_item in api.get_active_downloads().itervalues():
            if download_item.start_time:
                return False
        return True

    def update(self):
        """Timer callback: once the renew finished, restart the downloads."""
        if not self.ip_renewer.is_running():
            signals.status_bar_pop_msg.emit(self.status_msg)
            for id_item in self.id_item_list:
                api.start_download(id_item)
                try:
                    # Refresh the row icon; the row may already be gone.
                    self.parent.downloads.rows_buffer[id_item][1] = self.parent.downloads.icons_dict[cons.STATUS_QUEUE]
                except Exception as err:
                    logger.debug(err)
            self.timer.stop()
            self.is_working = False
|
"""
A script to convert the standard names information from the provided XML
file into a Python dictionary format.
Takes two arguments: the first is the XML file to process and the second
is the name of the file to write the Python dictionary file into.
By default, Iris will use the source XML file:
etc/cf-standard-name-table.xml
as obtained from:
http://cf-pcmdi.llnl.gov/documents/cf-standard-names
"""
from __future__ import (absolute_import, division, print_function)
import argparse
import pprint
import xml.etree.ElementTree as ET
# Header text for the generated module; to_dict() appends the pprint'ed
# dict of standard names directly after the trailing "STD_NAMES = ".
STD_VALUES_FILE_TEMPLATE = '''
"""
This file contains a dictionary of standard value names that are mapped
to another dictionary of other standard name attributes. Currently only
the `canonical_unit` exists in these attribute dictionaries.
This file is automatically generated. Do not edit this file by hand.
The file will be generated during a standard build/installation:
python setup.py build
python setup.py install
Also, the file can be re-generated in the source distribution via:
python setup.py std_names
Or for more control (e.g. to use an alternative XML file) via:
python tools/generate_std_names.py XML_FILE MODULE_FILE
"""
from __future__ import (absolute_import, division, print_function)
STD_NAMES = '''.lstrip()
def process_name_table(tree, element_name, *child_elements):
    """
    Yield one {id: attributes} dict per *element_name* element found in *tree*.

    The attributes dict maps each requested child element name to that
    child's text (or None when the child is absent), e.g. units,
    description, grib value etc.
    """
    for entry in tree.iterfind(element_name):
        attributes = {}
        for child_name in child_elements:
            child = entry.find(child_name)
            attributes[child_name] = None if child is None else child.text
        yield {entry.get("id"): attributes}
def to_dict(infile, outfile):
    """Convert the CF standard-names XML in *infile* into a Python module.

    Writes STD_VALUES_FILE_TEMPLATE followed by a pprint'ed dict that maps
    every standard name (and every alias) to its attribute dict.
    """
    values = {}
    aliases = {}
    tree = ET.parse(infile)

    for section in process_name_table(tree, 'entry', 'canonical_units'):
        values.update(section)

    for section in process_name_table(tree, 'alias', 'entry_id'):
        aliases.update(section)

    # BUG FIX: .items() works on Python 2 and 3; the Python-2-only
    # iteritems() contradicted this file's __future__ py3-compat imports.
    for key, valued in aliases.items():
        values.update({
            key: {'canonical_units': values.get(valued['entry_id']).get('canonical_units')}
        })

    outfile.write(STD_VALUES_FILE_TEMPLATE + pprint.pformat(values))
if __name__ == "__main__":
    # CLI entry point: generate_std_names.py INPUT_XML OUTPUT_PY
    parser = argparse.ArgumentParser(
        description='Create Python code from CF standard name XML.')
    parser.add_argument('input', type=argparse.FileType(),
                        metavar='INPUT',
                        help='Path to CF standard name XML')
    parser.add_argument('output', type=argparse.FileType('w'),
                        metavar='OUTPUT',
                        help='Path to resulting Python code')
    args = parser.parse_args()
    to_dict(args.input, args.output)
|
"""Launcher of the Mikado pick step."""
import argparse
import re
import sys
import os
from typing import Union, Dict
from ._utils import check_log_settings_and_create_logger, _set_pick_mode
import marshmallow
from ..configuration import DaijinConfiguration, MikadoConfiguration
from ..exceptions import InvalidConfiguration
from ..utilities.log_utils import create_default_logger, create_null_logger
from ..utilities import to_region, percentage
from ..utilities import IntervalTree, Interval
from ..configuration.configurator import load_and_validate_config
from ..picking import Picker
def _parse_regions(regions_string: Union[None, str]) -> Union[None, Dict[str, IntervalTree]]:
    """Parse a region string, or a file of region strings, into interval trees.

    Accepts either a single <chrom>:<start>..<end> region or the path of a
    file holding one such region per line; returns a dict mapping each
    chromosome to an IntervalTree, or None when no regions were given.
    """
    if regions_string is None:
        return None

    regions = dict()
    if not os.path.exists(regions_string):
        # Not a file: treat the string itself as a single region.
        chrom, start, end = to_region(regions_string)
        regions[chrom] = IntervalTree.from_intervals([Interval(start, end)])
        return regions

    with open(regions_string) as f_regions:
        for counter, line in enumerate(f_regions, start=1):
            try:
                chrom, start, end = to_region(line)
            except ValueError:
                raise ValueError(f"Invalid region line, no. {counter}: {line}")
            regions.setdefault(chrom, IntervalTree()).add(Interval(start, end))
    return regions
def _set_pick_output_options(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
                             logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Override output-related settings in *conf* with CLI values from *args*.

    Each option only replaces the configured value when it was provided on
    the command line; the loci/monoloci/subloci output names are forced to
    carry a .gff/.gff3 extension.
    """
    conf.pick.output_format.source = args.source if args.source is not None else conf.pick.output_format.source
    conf.pick.output_format.id_prefix = args.prefix if args.prefix is not None else conf.pick.output_format.id_prefix
    conf.pick.output_format.report_all_external_metrics = True if args.report_all_external_metrics is True else \
        conf.pick.output_format.report_all_external_metrics
    conf.pick.output_format.report_all_orfs = True if args.report_all_orfs is True else \
        conf.pick.output_format.report_all_orfs
    conf.pick.files.log = args.log if args.log else conf.pick.files.log
    # Names that do not already contain .gff/.gff3 get ".gff3" appended.
    pat = re.compile(r"\.(gff3|gff)")
    if args.loci_out:
        conf.pick.files.loci_out = args.loci_out if pat.search(args.loci_out) else "{0}.gff3".format(args.loci_out)
    if args.monoloci_out:
        conf.pick.files.monoloci_out = args.monoloci_out if pat.search(args.monoloci_out) else "{0}.gff3".format(
            args.monoloci_out)
    if args.subloci_out:
        conf.pick.files.subloci_out = args.subloci_out if pat.search(args.subloci_out) else "{0}.gff3".format(
            args.subloci_out)
    return conf
def _set_pick_run_options(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
                          logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Override run-time options in *conf* with explicitly provided CLI values.

    Handles single-threaded mode, CDS exclusion, the intron range, SHM usage
    and the reference-update family of flags.
    """
    conf.pick.run_options.single_thread = args.single
    conf.pick.run_options.exclude_cds = True if args.no_cds is True else conf.pick.run_options.exclude_cds
    # Store the intron range sorted as (low, high).
    conf.pick.run_options.intron_range = tuple(sorted(args.intron_range)) if args.intron_range is not None \
        else conf.pick.run_options.intron_range
    # BUG FIX: --shm is store_true with default False, so the previous test
    # "args.shm is not None" was always true and forced shm=True for every
    # run, even without the flag.
    conf.pick.run_options.shm = True if args.shm is True else conf.pick.run_options.shm
    if args.only_reference_update is True:
        # only-reference-update implies reference-update.
        conf.pick.run_options.only_reference_update = True
        conf.pick.run_options.reference_update = True
    conf.pick.run_options.reference_update = True if args.reference_update is True else \
        conf.pick.run_options.reference_update
    conf.pick.run_options.check_references = True if args.check_references is True else \
        conf.pick.run_options.check_references
    return conf
def _set_pick_clustering_options(conf: Union[DaijinConfiguration, MikadoConfiguration],
                                 args) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Override clustering settings in *conf* with CLI values from *args*."""
    clustering = conf.pick.clustering
    if args.no_purge is True:
        # --no-purge disables suppression of loci that fail the requirements.
        clustering.purge = False
    if args.flank is not None:
        clustering.flank = args.flank
    if args.min_clustering_cds_overlap:
        clustering.min_cds_overlap = args.min_clustering_cds_overlap
    if args.cds_only:
        clustering.cds_only = True
    if args.min_clustering_cdna_overlap is not None:
        clustering.min_cdna_overlap = args.min_clustering_cdna_overlap
        if args.min_clustering_cds_overlap is None:
            # The cDNA overlap doubles as the CDS overlap default when the
            # latter was not given explicitly.
            clustering.min_cds_overlap = args.min_clustering_cdna_overlap
    return conf
def _set_pick_as_options(conf: Union[DaijinConfiguration, MikadoConfiguration],
                         args) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Override alternative-splicing settings in *conf* from the CLI args.

    BUG FIXES: --pad-max-splices and --pad-max-distance are integer options
    (type=int in pick_parser) but the previous code assigned the literal
    True to ts_max_splices / ts_distance instead of the provided values;
    additionally --no-pad (args.pad is False) was silently ignored because
    of the "args.pad is True" test.
    """
    asp = conf.pick.alternative_splicing
    # --pad / --no-pad both set args.pad (True/False); None means "not given".
    asp.pad = args.pad if args.pad is not None else asp.pad
    # Assign the provided integer values, not a boolean flag.
    asp.ts_max_splices = args.pad_max_splices if args.pad_max_splices is not None \
        else asp.ts_max_splices
    asp.ts_distance = args.pad_max_distance if args.pad_max_distance is not None \
        else asp.ts_distance
    asp.cds_only = True if args.as_cds_only is True else asp.cds_only
    asp.keep_cds_disrupted_by_ri = True if args.keep_disrupted_cds is True \
        else asp.keep_cds_disrupted_by_ri
    asp.keep_retained_introns = False if args.exclude_retained_introns is True \
        else asp.keep_retained_introns
    return conf
def _set_conf_values_from_args(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
                               logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Fold every CLI override from *args* into *conf* and re-validate it.

    Delegates section-specific overrides to the _set_pick_* helpers, then
    runs the result through load_and_validate_config.

    Raises:
        marshmallow.exceptions.MarshmallowError: when the resulting
            configuration fails validation.
    """
    conf.multiprocessing_method = args.start_method if args.start_method else conf.multiprocessing_method
    conf.threads = args.procs if args.procs is not None else conf.threads
    if args.random_seed is True:
        # None asks the downstream machinery to generate a fresh seed.
        conf.seed = None
    elif args.seed is not None:
        conf.seed = args.seed
    else:
        pass
    conf.pick.scoring_file = args.scoring_file if args.scoring_file is not None else conf.pick.scoring_file
    conf.prepare.max_intron_length = args.max_intron_length if args.max_intron_length is not None else \
        conf.prepare.max_intron_length
    # args.codon_table may legitimately be 0; only the sentinels are rejected.
    conf.serialise.codon_table = str(args.codon_table) if args.codon_table not in (False, None, True) \
        else conf.serialise.codon_table
    conf = _set_pick_output_options(conf, args)
    conf = _set_pick_mode(conf, args.mode)
    conf = _set_pick_run_options(conf, args)
    conf = _set_pick_clustering_options(conf, args)
    conf = _set_pick_as_options(conf, args)
    try:
        conf = load_and_validate_config(conf, logger=logger)
    except marshmallow.exceptions.MarshmallowError as exc:
        logger.critical("Invalid options specified for the configuration: {}".format(exc))
        raise exc
    return conf
def _check_db(conf: Union[MikadoConfiguration, DaijinConfiguration], args,
              logger=create_null_logger()) -> Union[MikadoConfiguration, DaijinConfiguration]:
    """Verify that the Mikado database exists, honouring a CLI override.

    When --sqlite-db is given it must exist and replaces the configured DB.
    For SQLite databases both the raw configured path and the path relative
    to the pick output directory are tried before giving up.

    Raises:
        InvalidConfiguration: when no candidate database file exists.
    """
    logger.debug("Checking the database")
    if args.sqlite_db is not None:
        if not os.path.exists(args.sqlite_db):
            exc = InvalidConfiguration(f"Mikado database {args.sqlite_db} not found. Exiting.")
            logger.critical(exc)
            raise exc
        logger.debug(f"Setting the database from the CLI to {args.sqlite_db}")
        conf.db_settings.db = args.sqlite_db
        conf.db_settings.dbtype = "sqlite"

    if conf.db_settings.dbtype == "sqlite":
        raw = conf.db_settings.db
        db_basename = os.path.basename(conf.db_settings.db)
        # The original computed the same output-dir candidate twice
        # (__compound and __base were identical); collapsed into one.
        candidates = (raw, os.path.join(conf.pick.files.output_dir, db_basename))
        for option in candidates:
            if os.path.exists(option):
                conf.db_settings.db = option
                break
        else:
            exc = InvalidConfiguration(f"Mikado database {conf.db_settings.db} not found. Exiting.")
            logger.critical(exc)
            raise exc

    logger.debug(f"Found database: {conf.db_settings.dbtype}:///{conf.db_settings.db}")
    return conf
def _check_pick_input(conf: Union[MikadoConfiguration, DaijinConfiguration], args,
                      logger=create_null_logger()) -> Union[MikadoConfiguration, DaijinConfiguration]:
    """Validate and normalise the pick input files (GFF, genome, database).

    When no input GFF is given explicitly it is inferred from the prepare
    stage's configured output; the genome file is a hard requirement when
    transcript padding is enabled.

    Raises:
        InvalidConfiguration: when a required input file cannot be located.
    """
    if args.gff:
        conf.pick.files.input = args.gff
        if not os.path.exists(args.gff):
            raise InvalidConfiguration("The input file {} does not exist. Please double check!".format(args.gff))
    # Candidate produced by the "prepare" stage inside its output directory.
    prep_gtf = os.path.join(conf.prepare.files.output_dir, conf.prepare.files.out)
    if not os.path.exists(conf.pick.files.input):
        if os.path.exists(prep_gtf):
            conf.pick.files.input = prep_gtf
        elif os.path.exists(conf.prepare.files.out):
            conf.pick.files.input = conf.prepare.files.out
        else:
            exc = InvalidConfiguration("I tried to infer the input file from the prepare option, but failed. Please "
                                       "point me to the correct file through the command line or by correcting the "
                                       "configuration file.")
            logger.critical(exc)
            raise exc
    if args.genome:
        if not os.path.exists(args.genome):
            raise InvalidConfiguration(f"The requested genome FASTA file does not seem to exist: {args.genome}")
        conf.reference.genome = args.genome
    if conf.pick.alternative_splicing.pad and not os.path.exists(conf.reference.genome):
        # Padding reads the genome sequence, so it is mandatory here.
        exc = InvalidConfiguration("Transcript padding cannot function unless the genome file is specified. \
Please either provide a valid genome file or disable the padding.")
        logger.critical(exc)
        raise exc
    conf = _check_db(conf, args, logger)
    return conf
def check_run_options(mikado_configuration: Union[MikadoConfiguration, DaijinConfiguration],
                      args: argparse.Namespace, logger=create_null_logger()):
    """
    Quick method to check the consistency of run option settings
    from the namespace.

    :param mikado_configuration: the loaded Mikado/Daijin configuration.
    :param args: a Namespace
    :param logger: a logger instance.
    :return: the updated, validated configuration
    """
    mikado_configuration = _set_conf_values_from_args(mikado_configuration, args, logger=logger)
    mikado_configuration = _check_pick_input(mikado_configuration, args, logger)
    # Final validation pass after all CLI overrides have been applied.
    mikado_configuration = load_and_validate_config(mikado_configuration, logger=logger)
    return mikado_configuration
def pick(args):
    """
    This function launches the pick step, using the data derived from the Namespace.

    :param args: argparse Namespace with the configuration for the run.
    """
    logger = create_default_logger("pick", level="WARNING")
    mikado_configuration = load_and_validate_config(args.configuration, logger=logger)
    # Create the output directory. Necessary to do it here to avoid the logger being put in the wrong place.
    if args.output_dir is not None:
        mikado_configuration.pick.files.output_dir = os.path.abspath(args.output_dir)
    else:
        mikado_configuration.pick.files.output_dir = os.path.abspath(mikado_configuration.pick.files.output_dir)
    try:
        os.makedirs(mikado_configuration.pick.files.output_dir, exist_ok=True)
    except OSError:
        exc = OSError("I cannot create the output directory {}. Aborting.".format(
            mikado_configuration.pick.files.output_dir))
        logger.critical(exc)
        raise exc
    # Re-create the logger against the run's own log file/level before
    # applying the remaining CLI overrides.
    mikado_configuration, logger = check_log_settings_and_create_logger(mikado_configuration, args.log, args.log_level,
                                                                        section="pick")
    mikado_configuration = check_run_options(mikado_configuration, args, logger=logger)
    regions = _parse_regions(args.regions)
    creator = Picker(mikado_configuration, commandline=" ".join(sys.argv), regions=regions)
    creator()
def pick_parser():
    """
    Parser for the picking step.

    Builds and returns the argparse parser for "mikado pick"; the parsed
    namespace is consumed by pick() via set_defaults(func=pick).
    """
    parser = argparse.ArgumentParser(description="Launcher of the Mikado pipeline.")
    parser.add_argument("--fasta", "--genome", default=None, dest="genome",
                        help="Genome FASTA file. Required for transcript padding.")
    parser.add_argument("--start-method", dest="start_method",
                        choices=["fork", "spawn", "forkserver"],
                        default=None, help="Multiprocessing start method.")
    parser.add_argument("--shm", default=False, action="store_true",
                        help="Flag. If switched, Mikado pick will copy the database to RAM (ie SHM) for faster access \
during the run.")
    parser.add_argument("-p", "--procs", type=int, default=None,
                        help="""Number of processors to use. \
Default: look in the configuration file (1 if undefined)""")
    parser.add_argument("--configuration", "--json-conf", dest="configuration", required=True,
                        help="Configuration file for Mikado.")
    parser.add_argument("--scoring-file", dest="scoring_file",
                        type=str, default=None,
                        required=False,
                        help="Optional scoring file for the run. It will override the value set in the configuration.")
    parser.add_argument("-i", "--intron-range",
                        dest="intron_range", type=int, nargs=2,
                        default=None,
                        help="""Range into which intron lengths should fall, as a couple of integers. \
Transcripts with intron lengths outside of this range will be penalised. Default: (60, 900)""")
    padding = parser.add_mutually_exclusive_group()
    padding.add_argument("--no-pad", dest="pad", default=None, action="store_false", help="Disable transcript padding.")
    padding.add_argument("--pad", default=None,
                         action="store_true",
                         help="Whether to pad transcripts in loci.")
    # BUG FIX: --codon-table is unrelated to padding and used to be added to
    # the mutually exclusive "padding" group, which made it impossible to
    # combine with --pad/--no-pad. It now lives directly on the parser.
    parser.add_argument("--codon-table", dest="codon_table", default=None,
                        help="""Codon table to use. Default: 0 (ie Standard, NCBI #1, but only ATG is considered \
a valid start codon).""")
    parser.add_argument("--pad-max-splices", default=None, dest="pad_max_splices",
                        type=int, help="Maximum splice sites that can be crossed during transcript padding.")
    parser.add_argument("--pad-max-distance", default=None, dest="pad_max_distance",
                        type=int, help="Maximum amount of bps that transcripts can be padded with (per side).")
    parser.add_argument("-r", "--regions",
                        help="""Either a single region on the CLI or a file listing a series of target regions.
Mikado pick will only consider regions included in this string/file.
Regions should be provided in a WebApollo-like format: <chrom>:<start>..<end>""")
    output = parser.add_argument_group("Options related to the output files.")
    output.add_argument("-od", "--output-dir", dest="output_dir",
                        type=str, default=None,
                        help="Output directory. Default: current working directory")
    output.add_argument("--subloci-out", type=str, default=None, dest="subloci_out")
    output.add_argument("--monoloci-out", type=str, default=None, dest="monoloci_out")
    output.add_argument("--loci-out", type=str, default=None, dest="loci_out",
                        help="""This output file is mandatory.
If it is not specified in the configuration file,
it must be provided here.""")
    output.add_argument("--prefix", type=str, default=None,
                        help="Prefix for the genes. Default: Mikado")
    output.add_argument('--source', type=str, default=None,
                        help='Source field to use for the output files.')
    output.add_argument("--report-all-external-metrics", default=None,
                        action="store_true",
                        help="Boolean switch. If activated, Mikado will report all available external metrics, not just \
those requested for in the scoring configuration. This might affect speed in Minos analyses.")
    parser.add_argument("--no_cds", action="store_true", default=False,
                        help="""Flag. If set, not CDS information will be printed out in the GFF output files.""")
    parser.add_argument("--flank", default=None, type=int,
                        help="""Flanking distance (in bps) to group non-overlapping transcripts into a single \
superlocus. Default: determined by the configuration file.""")
    parser.add_argument("--max-intron-length", default=None, type=int,
                        help="""Maximum intron length for a transcript. Default: inferred from the configuration \
file (default value there is 1,000,000 bps).""")
    parser.add_argument('--no-purge', action='store_true', default=False,
                        dest="no_purge",
                        help='''Flag. If set, the pipeline will NOT suppress any loci \
whose transcripts do not pass the requirements set in the JSON file.''')
    parser.add_argument("--cds-only", dest="cds_only",
                        default=None, action="store_true",
                        help=""""Flag. If set, Mikado will only look for overlap in the coding features \
when clustering transcripts (unless one transcript is non-coding, in which case the whole transcript will \
be considered). Please note that Mikado will only consider the **best** ORF for this. \
Default: False, Mikado will consider transcripts in their entirety.""")
    parser.add_argument("--as-cds-only", dest="as_cds_only", default=None, action="store_true",
                        help="""Flag. If set, Mikado will only consider the CDS to determine whether a transcript
is a valid alternative splicing event in a locus.""")
    parser.add_argument("--reference-update", dest="reference_update", default=None,
                        action="store_true",
                        help="""Flag. If switched on, Mikado will prioritise transcripts marked as reference and will \
consider any other transcipt within loci only in reference to these reference transcripts. Novel loci will still be reported.""")
    parser.add_argument("--report-all-orfs", default=False, action="store_true",
                        help="Boolean switch. If set to true, all ORFs will be reported, not just the primary.")
    parser.add_argument("--only-reference-update", dest="only_reference_update", default=None,
                        action="store_true",
                        help="""Flag. If switched on, Mikado will only keep loci where at least one of the transcripts \
is marked as "reference". CAUTION: if no transcript has been marked as reference, the output will be completely empty!""")
    parser.add_argument("-eri", "--exclude-retained-introns", default=None, action="store_true",
                        help="""Exclude all retained intron alternative splicing events from the final output. \
Default: False. Retained intron events that do not dirsupt the CDS are kept by Mikado in the final output.""")
    parser.add_argument("-kdc", "--keep-disrupted-cds", default=None, action="store_true",
                        help="""Keep in the final output transcripts whose CDS is most probably disrupted by a \
retained intron event. Default: False. Mikado will try to detect these instances and exclude them from the \
final output.""")
    parser.add_argument("-mco", "--min-clustering-cdna-overlap", default=None, type=percentage,
                        help="Minimum cDNA overlap between two transcripts for them to be considered part of the same \
locus during the late picking stages. \
NOTE: if --min-cds-overlap is not specified, it will be set to this value! \
Default: 20%%.")
    parser.add_argument("-mcso", "--min-clustering-cds-overlap", default=None, type=percentage,
                        help="Minimum CDS overlap between two transcripts for them to be considered part of the same \
locus during the late picking stages. \
NOTE: if not specified, and --min-cdna-overlap is specified on the command line, min-cds-overlap will be set to this value! \
Default: 20%%.")
    parser.add_argument("--check-references", dest="check_references", default=None,
                        action="store_true",
                        help="""Flag. If switched on, Mikado will also check reference models against the general
transcript requirements, and will also consider them as potential fragments. This is useful in the context of e.g.
updating an *ab-initio* results with data from RNASeq, protein alignments, etc.
""")
    parser.add_argument("-db", "--sqlite-db", dest="sqlite_db",
                        default=None, type=str,
                        help="Location of an SQLite database to overwrite what is specified \
in the configuration file.")
    parser.add_argument("--single", action="store_true", default=False,
                        help="""Flag. If set, Creator will be launched with a single process, without involving the
multithreading apparatus. Useful for debugging purposes only.""")
    log_options = parser.add_argument_group("Log options")
    log_options.add_argument("-l", "--log", default=None,
                             help="""File to write the log to.
Default: decided by the configuration file.""")
    verbosity = log_options.add_mutually_exclusive_group()
    verbosity.add_argument("--verbose", default=None, dest="log_level", action="store_const", const="DEBUG")
    verbosity.add_argument("--quiet", default=None, dest="log_level", action="store_const", const="WARNING")
    verbosity.add_argument("-lv", "--log-level", dest="log_level",
                           choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default=None,
                           help="Logging level. Default: retrieved by the configuration file.")
    # parser.formatter_class = argparse.RawTextHelpFormatter
    parser.add_argument("--mode", default=None,
                        choices=["nosplit", "stringent", "lenient", "permissive", "split"],
                        help="""Mode in which Mikado will treat transcripts with multiple ORFs.
- nosplit: keep the transcripts whole.
- stringent: split multi-orf transcripts if two consecutive ORFs have both BLAST hits
and none of those hits is against the same target.
- lenient: split multi-orf transcripts as in stringent, and additionally, also when
either of the ORFs lacks a BLAST hit (but not both).
- permissive: like lenient, but also split when both ORFs lack BLAST hits
- split: split multi-orf transcripts regardless of what BLAST data is available.""")
    seed_group = parser.add_mutually_exclusive_group()
    seed_group.add_argument("--seed", type=int, default=None, help="Random seed number. Default: 0.")
    seed_group.add_argument("--random-seed", action="store_true", default=False,
                            help="Generate a new random seed number (instead of the default of 0)")
    parser.add_argument("gff", nargs="?", default=None)
    parser.set_defaults(func=pick)
    return parser
|
import sys
def main():
    """Group the file names given on the command line by suffix and print
    each suffix (repr'd) together with the number of files carrying it,
    sorted by suffix."""
    # Counter avoids building throwaway lists just to take their length.
    from collections import Counter
    counts = Counter(getsuffix(filename) for filename in sys.argv[1:])
    for suff, total in sorted(counts.items()):
        print(repr(suff), total)
def getsuffix(filename):
    """Return the suffix of *filename* including its leading dot, or the
    empty string when the name contains no dot at all."""
    ponto = filename.rfind('.')
    if ponto == -1:
        return ''
    return filename[ponto:]
# Script entry point: report suffix counts for the command-line arguments.
if __name__ == '__main__':
    main()
|
import sys
# Bail out early on Python 2: the rest of the script relies on Python 3
# semantics (print as a function, urllib.request, native Unicode strings).
if sys.version_info[0] < 3:
    print('CXXII exige Python 3 ou mais recente.')
    sys.exit(1)
import os
import time
import datetime
import unicodedata
import urllib.request
import tempfile
import zipfile
import re
from xml.etree.ElementTree import ElementTree as CXXII_XML_Arvore
class CXXII_XML_Arquivo:
    """Represents one input XML file: its path, base name and parsed tree."""
    def __init__(self, endereco):
        # Keep both the raw path and the file's base name around.
        self.endereco = endereco
        self.nome = NomeDoArquivo(endereco)
        # Parse eagerly so malformed documents fail at construction time.
        arvore = CXXII_XML_Arvore()
        arvore.parse(endereco)
        self.arvore = arvore
    def raiz(self):
        """Return the root element of the parsed document."""
        return self.arvore.getroot()
def CXXII_Baixar( url, destino=None, nome=None, forcar=False ):
    """Download *url* into *destino* (default: CXXII_Diretorio) as *nome*.

    The file is only (re-)downloaded when *forcar* is true, when the local
    copy does not exist yet, or when it is older than
    CXXII_Gerador_TempoMaximo seconds.  Returns the local file path.
    """
    if url[-1] == '/' : url = url[0:-1]
    if destino is None :
        global CXXII_Diretorio
        destino = CXXII_Diretorio
    if destino[-1] == os.sep : destino = destino[0:-1]
    # Derive a filesystem-safe cache name from the URL when none is given.
    if nome is None : nome = url.replace('/', '_').replace(':', '_')
    endereco = destino + os.sep + nome
    existe = os.path.exists(endereco)
    baixar = forcar or not existe
    if not baixar:
        global CXXII_Gerador_TempoMaximo
        # Refresh the cached copy once it exceeds the allowed age.
        baixar = ( time.time() - os.path.getmtime(endereco) ) > CXXII_Gerador_TempoMaximo
    if baixar:
        try:
            urllib.request.urlretrieve(url, endereco)
        except Exception as erro:
            # Narrowed from a bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained so the underlying
            # network failure is preserved in the traceback.
            raise Exception('Não foi possível baixar o gerador.') from erro
    return endereco
def CXXII_XML_Adicionar( endereco ):
    """Parse *endereco* and append it to the global CXXII_XML_Arquivos list."""
    global CXXII_XML_Arquivos
    arquivo = CXXII_XML_Arquivo(endereco)
    CXXII_XML_Arquivos.append(arquivo)
def CXXII_Separadores( endereco ):
    """Return *endereco* with "/" replaced by the real OS path separator."""
    # On POSIX systems the separator already is "/", so nothing changes.
    if os.sep == '/':
        return endereco
    return endereco.replace('/', os.sep)
def CXXII_Python_Formato( arquivo ):
    """Return the encoding declared in a file's "# coding=" header line.

    arquivo -- File object or file path ("/" accepted as separator).
    Falls back to 'utf-8' when no declaration is found in the first two
    lines.  Note: the file is always closed on return, even when a
    caller-supplied file object was passed in.
    """
    if type(arquivo) is str: arquivo = open( CXXII_Separadores(arquivo), 'r', encoding='iso-8859-1' )
    arquivo.seek(0)
    # PEP 263 allows the declaration only on one of the first two lines.
    primeiras = [arquivo.readline(), arquivo.readline()]
    arquivo.close()
    for linha in primeiras:
        if linha.startswith('# coding='):
            # Drop the trailing newline of the header line.
            return linha[9:-1]
    return 'utf-8'
def CXXII_Abrir_Python( endereco ):
    """Open a ".py" file honouring the encoding declared in its
    "# coding=" header.

    endereco -- File path; "/" may be used as separator.
    """
    caminho = CXXII_Separadores(endereco)
    codificacao = CXXII_Python_Formato(caminho)
    return open( caminho, 'r', encoding=codificacao )
def CXXII_Atual( endereco, modo='w', formato='utf-8' ):
    """Select the file currently being generated.

    endereco -- Desired file path, relative to CXXII_Destino; "/" may be
    used as separator.  The previously selected output file is closed and
    any missing directories are created on demand.
    """
    global CXXII_Saida
    global CXXII_Destino
    endereco = CXXII_Separadores(endereco)
    if endereco[0] != os.sep:
        endereco = os.sep + endereco
    # Close the previous output file, but never the interactive stdout.
    if CXXII_Saida not in (None, sys.stdout):
        CXXII_Saida.close()
    arquivo = CXXII_Texto(CXXII_Destino + endereco)
    diretorio = CXXII_Texto(os.path.dirname(arquivo))
    if not os.path.exists(diretorio):
        os.makedirs(diretorio)
    CXXII_Saida = open( arquivo, modo, encoding=formato )
def CXXII_Escrever( texto ):
    """Write *texto* to the currently selected output file (see CXXII_Atual())."""
    global CXXII_Saida
    if CXXII_Saida is None:
        return
    CXXII_Saida.write(texto)
def CXXII_ContarIdentacao( linha ):
    """Return the indentation depth of *linha*: tabs count one level per
    character, spaces count one level per four spaces."""
    comprimento = len(linha)
    if comprimento == 0: return 0
    tamanho = comprimento - len(linha.lstrip())
    # Use floor division: the old ``/= 4`` produced a float level under
    # Python 3 (e.g. 0.5 for two spaces), which then leaked into
    # CXXII_Identar's countdown loop.
    if linha[0] == ' ': tamanho //= 4
    return tamanho
def CXXII_Identar( linha, total=1 ):
    """Prefix *linha* with *total* levels of indentation, reusing the
    line's own style: a tab if it already starts with one, four spaces
    otherwise."""
    recuo = '\t' if linha.startswith('\t') else '    '
    restantes = total
    while restantes > 0:
        linha = recuo + linha
        restantes -= 1
    return linha
def CXXII_EscreverArquivo( endereco, inicio=1, fim=None, quebraFinal=True, formato='utf-8', dicGlobal=None, dicLocal=None ):
    """Write a template file's content to the current output file (CXXII_Atual()).

    A template file is any content containing embedded CXXII instructions.
    If the path is relative it is first searched for in CXXII_Gerador_Diretorio.
    endereco -- Template file path. "/" may be used as separator.
    inicio -- First line number of the desired line range. Default: 1
    fim -- Last line number of the desired line range. Default: None (last line)
    quebraFinal -- Keep the line break on the last line?
    dicGlobal -- See globals()
    dicLocal -- See locals()
    """
    global CXXII_Saida
    if CXXII_Saida is None: return
    if dicGlobal is None: dicGlobal = globals()
    if dicLocal is None: dicLocal = locals()
    endereco = CXXII_Separadores(endereco)
    # Relative paths are resolved against the generator's own directory.
    if endereco[0] != os.sep and os.path.exists(CXXII_Gerador_Diretorio + os.sep + endereco):
        endereco = CXXII_Gerador_Diretorio + os.sep + endereco
    codigo = []
    modelo = open( endereco, 'r', encoding=formato )
    linhas = list(modelo)
    modelo.close()
    total = len(linhas)
    # Optionally restrict processing to the requested line range.
    if inicio != 1 or fim != None:
        inicio = inicio - 1 if inicio != None else 0
        fim = fim if fim != None else total
        linhas = linhas[inicio:fim]
        if not quebraFinal and linhas[-1][-1] == '\n': linhas[-1] = linhas[-1][0:-1]
        total = len(linhas)
    identacao = 0
    i = 0
    # '@@@' lines toggle between embedded Python code and literal template
    # text; literal runs are wrapped in writer blocks ("""~ ... """) that
    # CXXII_CompilarPython() later turns into CXXII_Escrever() calls.
    while i < total:
        linha = linhas[i]
        if linha == '@@@\n':
            i += 1
            if i < total and identacao > 0 and linhas[i] == '@@@\n':
                # An immediately repeated '@@@' closes one indentation level.
                identacao -= 1
            else:
                # Copy embedded Python code verbatim, tracking indentation so
                # literal blocks inside for/while bodies stay aligned.
                while i < total and linhas[i] != '@@@\n':
                    linha = linhas[i]
                    identacao = CXXII_ContarIdentacao(linha)
                    codigo.append(linha)
                    linha = linha.strip()
                    if len(linha) > 0 and linha[-1] == ':':
                        if linha.startswith('for ') or linha.startswith('while '):
                            identacao += 1
                    i += 1
        else:
            codigo.append(CXXII_Identar('"""~\n', identacao))
            finalComQuebra = False
            while i < total and linhas[i] != '@@@\n':
                linha = linhas[i]
                finalComQuebra = linha.endswith('\n')
                if not finalComQuebra: linha += '\n'
                codigo.append(linha)
                i += 1
            if finalComQuebra: codigo.append('\n')
            codigo.append(CXXII_Identar('"""\n', identacao))
            # Step back so the terminating '@@@' is re-examined by the outer loop.
            i -= 1
        i += 1
    CXXII_Executar( CXXII_CompilarPython(codigo), dicGlobal, dicLocal )
def CXXII_Texto( texto, decodificar=False ):
    """Normalize *texto* to Unicode NFC form; when *decodificar* is true,
    bytes input is first decoded using the filesystem encoding."""
    if decodificar and type(texto) is bytes:
        texto = texto.decode(sys.getfilesystemencoding())
    return unicodedata.normalize('NFC', texto)
def CXXII_EscapeParaTexto( texto ):
    """Escape newline, carriage return, tab and single quote so *texto*
    can be embedded inside a single-quoted Python string literal."""
    tabela = str.maketrans({'\n': '\\n', '\r': '\\r', '\t': '\\t', '\'': '\\\''})
    return texto.translate(tabela)
def CXXII_TextoParaEscape( texto ):
    """Inverse of CXXII_EscapeParaTexto: turn the textual escapes \\n,
    \\r, \\t and \\' back into their control/quote characters."""
    # Order matters and matches the original chained replaces.
    for escape, caractere in (('\\n', '\n'), ('\\r', '\r'), ('\\t', '\t'), ('\\\'', '\'')):
        texto = texto.replace(escape, caractere)
    return texto
def NomeDoArquivo( endereco, extensao=True ):
    """Return the file-name component of *endereco*.

    extensao -- When False, strip the final extension (the text after the
    last dot); names without a dot are returned unchanged.  A trailing
    path separator is ignored.
    """
    if endereco[-1] == os.sep: endereco = endereco[0:-1]
    nome = endereco[endereco.rfind(os.sep)+1:]
    if not extensao:
        # Fixed: the old slice ``nome[0:len(nome)-nome.rfind('.')]`` only
        # yielded the stem when stem and extension happened to be the same
        # length (e.g. "file.txt"); every other name was truncated wrongly.
        ponto = nome.rfind('.')
        if ponto != -1:
            nome = nome[0:ponto]
    return nome
def CXXII_Compilar( endereco ):
    """Compile a CXXII template file written in Python.

    If the path is relative, it is first looked for inside
    CXXII_Gerador_Diretorio.
    endereco -- Path of the ".py" file; "/" may be used as separator.
    """
    endereco = CXXII_Separadores(endereco)
    relativo = endereco[0] != os.sep
    if relativo and os.path.exists(CXXII_Gerador_Diretorio + os.sep + endereco):
        endereco = CXXII_Gerador_Diretorio + os.sep + endereco
    # CXXII_Abrir_Python honours the file's "# coding=" declaration.
    with CXXII_Abrir_Python(endereco) as py_arquivo:
        linhas = list(py_arquivo)
    return CXXII_CompilarPython(linhas)
def CXXII_CompilarPython( codigoFonte ):
    """Compile CXXII template source code written in Python.

    A line ending in a triple double-quote followed by "~" opens a
    writer block: all lines up to the closing triple double-quote are
    rewritten into a single CXXII_Escrever('...') call.  Occurrences of
    {{{ expression }}} inside the block are spliced in as concatenated
    Python expressions.  Returns a code object ready for exec().
    """
    py = list(codigoFonte) if type(codigoFonte) != list else codigoFonte
    # Discard an optional "# coding=" header (possibly preceded by one line).
    if py[0].startswith('# coding='):
        py = py[1:]
    elif py[1].startswith('# coding='):
        py = py[2:]
    py[-1] += '\n'
    i = 0
    total = len(py)
    # Matches one embedded {{{ ... }}} expression (no nested braces).
    embutido = re.compile('({{{[^{}]*}}})')
    while i < total:
        linha = py[i]
        passo = 1
        if linha.endswith('"""~\n'):
            # Scan the line to make sure the marker is real code and not
            # inside a string literal or after a '#' comment.
            desconsiderar = False
            tokenstr = None
            cpre = None
            for c in linha:
                if tokenstr != None:
                    if c == tokenstr and cpre != '\\': tokenstr = None
                elif c == '#':
                    desconsiderar = True
                    break
                elif c == '\'' or c == '\"':
                    tokenstr = c
                    # NOTE(review): cpre is only updated here, on quote
                    # characters, so the cpre != '\\' escape test above can
                    # never actually see a backslash — verify whether escaped
                    # quotes inside strings are handled as intended.
                    cpre = c
            if desconsiderar:
                i += passo
                continue
            # Replace the opening marker with the start of a writer call.
            linha = linha[:-5] + 'CXXII_Escrever(\''
            a = i
            b = a + 1
            # Find the closing triple-quote line of the writer block.
            while b < total and not py[b].lstrip().startswith('"""'): b += 1
            if b >= total: raise Exception('Bloco de escrita não finalizado: linha ' + str(i))
            py[b] = py[b][py[b].index('"""')+3:]
            passo = b - a
            if (b-a) > 1:
                # Fold the block's literal lines into one escaped string,
                # blanking the consumed source lines.
                primeiro = True
                a += 1
                while a < b:
                    linha += ( '\\n' if not primeiro else '' ) + CXXII_EscapeParaTexto( py[a][:-1] )
                    py[a] = ''
                    primeiro = False
                    a += 1
            # Splice every {{{ ... }}} occurrence in as '+expr+'.
            linhapos = 0
            while True:
                codigo = embutido.search(linha, linhapos)
                if not codigo is None:
                    parte1 = \
                    linha[0:codigo.start(0)] +\
                    '\'+' +\
                    CXXII_TextoParaEscape(codigo.group(0)[3:-3]) +\
                    '+\''
                    parte2 = linha[codigo.end(0):]
                    linha = parte1 + parte2
                    linhapos = len(parte1)
                else:
                    break
            linha += '\');'
            py[i] = linha
        i += passo
    return compile( ''.join(py), 'CXXII_Python', 'exec' )
def CXXII_Executar( python, dicGlobal=None, dicLocal=None ):
    """Execute code pre-compiled with CXXII_Compilar() or a Python file.

    If a relative file path is given, it is first looked for inside
    CXXII_Gerador_Diretorio.
    python -- Pre-compiled code object or file path. "/" may be used as separator.
    dicGlobal -- See globals()
    dicLocal -- See locals()
    """
    if dicGlobal is None: dicGlobal = globals()
    if dicLocal is None: dicLocal = locals()
    # Strings are treated as template paths and compiled on the fly.
    exec( CXXII_Compilar(python) if type(python) is str else python, dicGlobal, dicLocal )
# --- Module-level configuration and mutable state --------------------------
# Remote repository used to resolve generator names given without a path.
CXXII_Repositorio = 'http://www.joseflavio.com/cxxii/'
CXXII_Inicio = datetime.datetime.today()
CXXII_Gerador_Endereco = None
CXXII_Gerador_Diretorio = None
# Maximum cache age of a downloaded generator before it is re-fetched.
CXXII_Gerador_TempoMaximo = 6*60*60 # 6 hours
CXXII_Gerador_Baixar = False
CXXII_Destino = None
CXXII_XML_Arquivos = []
CXXII_Extensao = 'xml'
# Current output stream; CXXII_Atual() redirects it to generated files.
CXXII_Saida = sys.stdout
CXXII_Diretorio = CXXII_Texto(os.path.expanduser('~')) + os.sep + 'CXXII'
CXXII_Geradores = CXXII_Diretorio + os.sep + 'Geradores'
if not os.path.exists(CXXII_Geradores): os.makedirs(CXXII_Geradores)
try:
    # Ad-hoc argument parsing: option markers are rewritten to '###' so the
    # whole command line can be split into "letter + value" chunks.
    argumentos = CXXII_Texto(' '.join(sys.argv), True)
    argumentos = argumentos.replace(' -g', '###g')
    argumentos = argumentos.replace(' -f', '###fSIM')
    argumentos = argumentos.replace(' -t', '###tSIM')
    argumentos = argumentos.replace(' -d', '###d')
    argumentos = argumentos.replace(' -e', '###e')
    argumentos = argumentos.replace(' -a', '###a')
    argumentos = argumentos.split('###')
    argumento_g = None
    argumento_f = None
    argumento_t = None
    argumento_d = None
    argumento_e = None
    argumento_a = None
    # Assign each chunk to the matching argumento_<letter> variable.
    for argumento in argumentos[1:]:
        valor = argumento[1:].strip()
        if len(valor) == 0: continue
        exec( 'argumento_' + argumento[0] + '=\'' + valor + '\'' )
    # -g (generator) and -a (inputs) are mandatory; otherwise print usage.
    if argumento_g is None or argumento_a is None:
        print('\nCXXII 1.0-A1 : Gerador de arquivos a partir de XML\n')
        print('cxxii -g GERADOR [-f] [-t] [-d DESTINO] [-e EXTENSAO] -a ARQUIVOS\n')
        print('Argumentos:')
        print('  -g   URL ou endereço local do gerador a utilizar: .py ou .zip')
        print('       Nome sem extensão = ' + CXXII_Repositorio + 'Nome.zip')
        print('  -f   Forçar download do gerador')
        print('  -t   Imprimir detalhes do erro que possa ocorrer')
        print('  -d   Destino dos arquivos gerados')
        print('  -e   Extensão padrão dos arquivos de entrada: xml')
        print('  -a   Arquivos XML de entrada ou diretórios que os contenham\n')
        sys.exit(1)
    if argumento_e != None: CXXII_Extensao = argumento_e.lower()
    # Split the -a value into individual paths after each input extension.
    argumento_a = argumento_a.replace('.' + CXXII_Extensao, '.' + CXXII_Extensao + '###')
    argumento_a = argumento_a.split('###')
    for xml in argumento_a:
        xml = xml.strip()
        if len(xml) == 0: continue
        xml = CXXII_Texto(os.path.abspath(xml))
        # Directories contribute every file carrying the input extension.
        if os.path.isdir(xml):
            for arquivo in os.listdir(xml):
                arquivo = CXXII_Texto(arquivo)
                if arquivo.lower().endswith('.' + CXXII_Extensao):
                    CXXII_XML_Adicionar(xml + os.sep + arquivo)
        else:
            CXXII_XML_Adicionar(xml)
    if len(CXXII_XML_Arquivos) == 0:
        sys.exit(0)
    # Resolve the generator: a URL, a local .py/.zip, or a bare repository name.
    try:
        CXXII_Gerador_Baixar = not argumento_f is None
        gerurl = argumento_g.startswith('http://')
        if( gerurl and argumento_g[-1] == '/' ): argumento_g = argumento_g[0:-1]
        gernome = argumento_g[argumento_g.rfind('/' if gerurl else os.sep)+1:]
        gerpy = gernome.endswith('.py')
        gerzip = gernome.endswith('.zip')
        if gerurl:
            argumento_g = CXXII_Baixar(url=argumento_g, destino=CXXII_Geradores, forcar=CXXII_Gerador_Baixar)
        elif gerpy or gerzip:
            argumento_g = CXXII_Texto(os.path.abspath(argumento_g))
        else:
            # Bare name: fetch "<name>.zip" from the public repository.
            gerurl = True
            gernome += '.zip'
            gerzip = True
            argumento_g = CXXII_Baixar(url=CXXII_Repositorio + gernome, destino=CXXII_Geradores, forcar=CXXII_Gerador_Baixar)
        if gerzip:
            # Unpack (or refresh) the zipped generator next to its cache file.
            destino = argumento_g[0:-4]
            if not os.path.exists(destino): os.makedirs(destino)
            CXXII_Gerador_Endereco = destino + os.sep + gernome[0:-4] + '.py'
            descompactar = not os.path.exists(CXXII_Gerador_Endereco)
            if not descompactar:
                descompactar = os.path.getmtime(argumento_g) > os.path.getmtime(CXXII_Gerador_Endereco)
            if descompactar:
                zip = zipfile.ZipFile(argumento_g, 'r')
                zip.extractall(destino)
                del zip
        else:
            CXXII_Gerador_Endereco = argumento_g
        CXXII_Gerador_Diretorio = CXXII_Texto(os.path.dirname(CXXII_Gerador_Endereco))
    except:
        raise Exception('Gerador inválido.')
    # Output directory: -d value or a timestamped default.
    CXXII_Destino = argumento_d if not argumento_d is None else 'CXXII_' + CXXII_Inicio.strftime('%Y%m%d%H%M%S')
    CXXII_Destino = CXXII_Texto(os.path.abspath(CXXII_Destino))
    if not os.path.exists(CXXII_Destino): os.makedirs(CXXII_Destino)
    # Read the generator's optional metadata block delimited by the lines
    # '### CXXII' and '###'; its content is executed and may set
    # gerador_nome, gerador_versao and gerador_multiplo.
    gerador_nome = ''
    gerador_versao = ''
    gerador_multiplo = True
    cxxii_con = CXXII_Abrir_Python(CXXII_Gerador_Endereco)
    cxxii_lin = list(cxxii_con)
    cxxii_ini = 0
    cxxii_tot = len(cxxii_lin)
    while cxxii_ini < cxxii_tot and cxxii_lin[cxxii_ini] != '### CXXII\n': cxxii_ini += 1
    if cxxii_ini < cxxii_tot:
        fim = cxxii_ini + 1
        while fim < cxxii_tot and cxxii_lin[fim] != '###\n': fim += 1
        if fim < cxxii_tot: exec(''.join(cxxii_lin[(cxxii_ini+1):fim]))
    cxxii_con.close()
    del cxxii_con
    del cxxii_lin
    del cxxii_ini
    del cxxii_tot
    gerador_nome = gerador_nome if gerador_nome != None and len(gerador_nome) > 0 else NomeDoArquivo(argumento_g)
    if gerador_versao == None: gerador_versao = 'Desconhecida'
    if not type(gerador_versao) is str: gerador_versao = str(gerador_versao)
    print( 'Gerador: ' + gerador_nome )
    print( 'Versão: ' + gerador_versao )
    # Compile once, then run the generator for each input (or once for all,
    # depending on gerador_multiplo); CXXII_XML exposes the current input.
    CXXII_Gerador_Compilado = CXXII_Compilar( CXXII_Gerador_Endereco )
    CXXII_XML = CXXII_XML_Arquivos[0]
    if gerador_multiplo:
        for xml in CXXII_XML_Arquivos:
            print(xml.endereco)
            CXXII_XML = xml
            CXXII_Executar( CXXII_Gerador_Compilado, globals(), locals() )
    else:
        CXXII_Executar( CXXII_Gerador_Compilado, globals(), locals() )
except Exception as e:
    # -t requests a full traceback; otherwise only the message is shown.
    if not argumento_t is None:
        import traceback
        traceback.print_exc()
    print('Erro: ' + str(e))
|
from . import res_country
|
"""
Test the Fieldsfile file loading plugin and FFHeader.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import zip
import iris.tests as tests
import collections
import mock
import numpy as np
import iris
import iris.fileformats.ff as ff
import iris.fileformats.pp as pp
# Minimal stand-in for a PP field: just the header words _payload() reads.
_MockField = collections.namedtuple('_MockField',
                                    'lbext lblrec lbnrec raw_lbpack '
                                    'lbuser boundary_packing')
# LBPACK packing codes.
_UNPACKED = 0
_WGDOS = 1
_CRAY = 2
_GRIB = 3  # Not implemented.
_RLE = 4  # Not supported, deprecated FF format.
# LBUSER data-type codes.
_REAL = 1
_INTEGER = 2
_LOGICAL = 3  # Not implemented.
class TestFF_HEADER(tests.IrisTest):
    """Sanity checks on the FF_HEADER definition table itself."""
    def test_initialisation(self):
        # Spot-check two entries: (component name, header word offsets).
        self.assertEqual(ff.FF_HEADER[0], ('data_set_format_version', (0,)))
        self.assertEqual(ff.FF_HEADER[17], ('integer_constants', (99, 100)))
    def test_size(self):
        # The fixed-length header defines exactly 31 named components.
        self.assertEqual(len(ff.FF_HEADER), 31)
@tests.skip_data
class TestFFHeader(tests.IrisTest):
    """Check FFHeader attribute values against the n48_multi_field fixture."""
    def setUp(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
        self.ff_header = ff.FFHeader(self.filename)
        # Pointer-style components present in this file ...
        self.valid_headers = (
            'integer_constants', 'real_constants', 'level_dependent_constants',
            'lookup_table', 'data'
        )
        # ... and the ones absent from it (expected to resolve to None).
        self.invalid_headers = (
            'row_dependent_constants', 'column_dependent_constants',
            'fields_of_constants', 'extra_constants', 'temp_historyfile',
            'compressed_field_index1', 'compressed_field_index2',
            'compressed_field_index3'
        )
    def test_constructor(self):
        # Test FieldsFile header attribute lookup.
        # Expected values here are the known contents of the fixture file.
        self.assertEqual(self.ff_header.data_set_format_version, 20)
        self.assertEqual(self.ff_header.sub_model, 1)
        self.assertEqual(self.ff_header.vert_coord_type, 5)
        self.assertEqual(self.ff_header.horiz_grid_type, 0)
        self.assertEqual(self.ff_header.dataset_type, 3)
        self.assertEqual(self.ff_header.run_identifier, 0)
        self.assertEqual(self.ff_header.experiment_number, -32768)
        self.assertEqual(self.ff_header.calendar, 1)
        self.assertEqual(self.ff_header.grid_staggering, 3)
        self.assertEqual(self.ff_header.time_type, -32768)
        self.assertEqual(self.ff_header.projection_number, -32768)
        self.assertEqual(self.ff_header.model_version, 802)
        self.assertEqual(self.ff_header.obs_file_type, -32768)
        self.assertEqual(self.ff_header.last_fieldop_type, -32768)
        self.assertEqual(self.ff_header.first_validity_time,
                         (2011, 7, 10, 18, 0, 0, 191))
        self.assertEqual(self.ff_header.last_validity_time,
                         (2011, 7, 10, 21, 0, 0, 191))
        self.assertEqual(self.ff_header.misc_validity_time,
                         (2012, 4, 30, 18, 12, 13, -32768))
        self.assertEqual(self.ff_header.integer_constants.shape, (46, ))
        self.assertEqual(self.ff_header.real_constants.shape, (38, ))
        self.assertEqual(self.ff_header.level_dependent_constants.shape,
                         (71, 8))
        self.assertIsNone(self.ff_header.row_dependent_constants)
        self.assertIsNone(self.ff_header.column_dependent_constants)
        self.assertIsNone(self.ff_header.fields_of_constants)
        self.assertIsNone(self.ff_header.extra_constants)
        self.assertIsNone(self.ff_header.temp_historyfile)
        self.assertIsNone(self.ff_header.compressed_field_index1)
        self.assertIsNone(self.ff_header.compressed_field_index2)
        self.assertIsNone(self.ff_header.compressed_field_index3)
        self.assertEqual(self.ff_header.lookup_table, (909, 64, 5))
        self.assertEqual(self.ff_header.total_prognostic_fields, 3119)
        self.assertEqual(self.ff_header.data, (2049, 2961, -32768))
    def test_str(self):
        # The printable form is compared against a stored reference file.
        self.assertString(str(self.ff_header), ('FF', 'ffheader.txt'))
    def test_repr(self):
        target = "FFHeader('" + self.filename + "')"
        self.assertEqual(repr(self.ff_header), target)
    def test_shape(self):
        self.assertEqual(self.ff_header.shape('data'), (2961, -32768))
@tests.skip_data
class TestFF2PP2Cube(tests.IrisTest):
    """Load a FieldsFile through the PP pathway and compare against CML."""
    def setUp(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
    def test_unit_pass_0(self):
        # Test FieldsFile to PPFields cube load.
        # Number each cube per standard_name so reference CML files are unique.
        cube_by_name = collections.defaultdict(int)
        cubes = iris.load(self.filename)
        while cubes:
            cube = cubes.pop(0)
            standard_name = cube.standard_name
            cube_by_name[standard_name] += 1
            filename = '{}_{}.cml'.format(standard_name,
                                          cube_by_name[standard_name])
            self.assertCML(cube, ('FF', filename))
    def test_raw_to_table_count(self):
        # The number of raw fields loaded must match the header's own count.
        filename = tests.get_data_path(('FF', 'n48_multi_field_table_count'))
        cubes = iris.load_raw(filename)
        ff_header = ff.FFHeader(filename)
        table_count = ff_header.lookup_table[2]
        self.assertEqual(len(cubes), table_count)
@tests.skip_data
class TestFFieee32(tests.IrisTest):
    """32-bit IEEE FieldsFiles must load identically to their 64-bit twins."""
    def test_iris_loading(self):
        ff32_fname = tests.get_data_path(('FF', 'n48_multi_field.ieee32'))
        ff64_fname = tests.get_data_path(('FF', 'n48_multi_field'))
        ff32_cubes = iris.load(ff32_fname)
        ff64_cubes = iris.load(ff64_fname)
        for ff32, ff64 in zip(ff32_cubes, ff64_cubes):
            # load the data (realises the lazy payload before comparing)
            _, _ = ff32.data, ff64.data
            self.assertEqual(ff32, ff64)
@tests.skip_data
class TestFFVariableResolutionGrid(tests.IrisTest):
    """Check grid selection per STASH code on a variable-resolution grid.

    Synthetic row/column dependent constants are injected into the header,
    and pp.make_pp_field is monkey-patched so every generated field carries
    a chosen STASH code and missing-data grid deltas.
    """
    def setUp(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
        self.ff2pp = ff.FF2PP(self.filename)
        self.ff_header = self.ff2pp._ff_header
        data_shape = (73, 96)
        # Build wobbly (non-uniform) longitudes: column 0 holds the P-grid
        # points, column 1 the midpoints (staggered U-grid positions).
        delta = np.sin(np.linspace(0, np.pi * 5, data_shape[1])) * 5
        lons = np.linspace(0, 180, data_shape[1]) + delta
        lons = np.vstack([lons[:-1], lons[:-1] + 0.5 * np.diff(lons)]).T
        lons = np.reshape(lons, lons.shape, order='F')
        # Same construction for latitudes (P-grid and staggered V-grid).
        delta = np.sin(np.linspace(0, np.pi * 5, data_shape[0])) * 5
        lats = np.linspace(-90, 90, data_shape[0]) + delta
        lats = np.vstack([lats[:-1], lats[:-1] + 0.5 * np.diff(lats)]).T
        lats = np.reshape(lats, lats.shape, order='F')
        self.ff_header.column_dependent_constants = lons
        self.ff_header.row_dependent_constants = lats
        self.U_grid_x = lons[:-1, 1]
        self.V_grid_y = lats[:-1, 1]
        self.P_grid_x = lons[:, 0]
        self.P_grid_y = lats[:, 0]
        self.orig_make_pp_field = pp.make_pp_field
        def new_make_pp_field(header):
            # Wrap the real factory: force the STASH under test and make the
            # grid deltas "missing" so the variable-resolution path is taken.
            field = self.orig_make_pp_field(header)
            field.stash = self.ff2pp._custom_stash
            field.bdx = field.bdy = field.bmdi
            return field
        # Replace the pp module function with this new function;
        # this gets called in PP2FF.
        pp.make_pp_field = new_make_pp_field
    def tearDown(self):
        # Undo the monkey-patch so other tests see the real factory.
        pp.make_pp_field = self.orig_make_pp_field
    def _check_stash(self, stash, x_coord, y_coord):
        # Generate one field for *stash* and verify the grid it was given.
        self.ff2pp._custom_stash = stash
        field = next(iter(self.ff2pp))
        self.assertArrayEqual(x_coord, field.x, ('x_coord was incorrect for '
                                                 'stash {}'.format(stash)))
        self.assertArrayEqual(y_coord, field.y, ('y_coord was incorrect for '
                                                 'stash {}'.format(stash)))
    def test_p(self):
        self._check_stash('m01s00i001', self.P_grid_x, self.P_grid_y)
    def test_u(self):
        self._check_stash('m01s00i002', self.U_grid_x, self.P_grid_y)
    def test_v(self):
        self._check_stash('m01s00i003', self.P_grid_x, self.V_grid_y)
class TestFFPayload(tests.IrisTest):
    """Check FF2PP._payload's (depth, dtype) for each packing/type combo."""
    def _test_payload(self, mock_field, expected_depth, expected_type):
        # Bypass real header parsing: _payload only needs the field words.
        with mock.patch('iris.fileformats.ff.FFHeader') as mock_header:
            mock_header.return_value = None
            ff2pp = ff.FF2PP('Not real')
            data_depth, data_type = ff2pp._payload(mock_field)
            self.assertEqual(data_depth, expected_depth)
            self.assertEqual(data_type, expected_type)
    def test_payload_unpacked_real(self):
        # Unpacked data: depth = lblrec words * 8 bytes (minus lbext words).
        mock_field = _MockField(lbext=0, lblrec=100, lbnrec=-1,
                                raw_lbpack=_UNPACKED,
                                lbuser=[_REAL], boundary_packing=None)
        expected_type = ff._LBUSER_DTYPE_LOOKUP[_REAL].format(word_depth=8)
        expected_type = np.dtype(expected_type)
        self._test_payload(mock_field, 800, expected_type)
    def test_payload_unpacked_real_ext(self):
        mock_field = _MockField(lbext=50, lblrec=100, lbnrec=-1,
                                raw_lbpack=_UNPACKED,
                                lbuser=[_REAL], boundary_packing=None)
        expected_type = ff._LBUSER_DTYPE_LOOKUP[_REAL].format(word_depth=8)
        expected_type = np.dtype(expected_type)
        self._test_payload(mock_field, 400, expected_type)
    def test_payload_unpacked_integer(self):
        mock_field = _MockField(lbext=0, lblrec=200, lbnrec=-1,
                                raw_lbpack=_UNPACKED,
                                lbuser=[_INTEGER], boundary_packing=None)
        expected_type = ff._LBUSER_DTYPE_LOOKUP[_INTEGER].format(word_depth=8)
        expected_type = np.dtype(expected_type)
        self._test_payload(mock_field, 1600, expected_type)
    def test_payload_unpacked_integer_ext(self):
        mock_field = _MockField(lbext=100, lblrec=200, lbnrec=-1,
                                raw_lbpack=_UNPACKED,
                                lbuser=[_INTEGER], boundary_packing=None)
        expected_type = ff._LBUSER_DTYPE_LOOKUP[_INTEGER].format(word_depth=8)
        expected_type = np.dtype(expected_type)
        self._test_payload(mock_field, 800, expected_type)
    def test_payload_wgdos_real(self):
        # WGDOS packing: depth derives from lbnrec instead of lblrec.
        mock_field = _MockField(lbext=0, lblrec=-1, lbnrec=100,
                                raw_lbpack=_WGDOS,
                                lbuser=[_REAL], boundary_packing=None)
        self._test_payload(mock_field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])
    def test_payload_wgdos_real_ext(self):
        # lbext has no effect on WGDOS payload depth.
        mock_field = _MockField(lbext=50, lblrec=-1, lbnrec=100,
                                raw_lbpack=_WGDOS,
                                lbuser=[_REAL], boundary_packing=None)
        self._test_payload(mock_field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])
    def test_payload_wgdos_integer(self):
        mock_field = _MockField(lbext=0, lblrec=-1, lbnrec=200,
                                raw_lbpack=_WGDOS,
                                lbuser=[_INTEGER], boundary_packing=None)
        self._test_payload(mock_field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
    def test_payload_wgdos_integer_ext(self):
        mock_field = _MockField(lbext=100, lblrec=-1, lbnrec=200,
                                raw_lbpack=_WGDOS,
                                lbuser=[_INTEGER], boundary_packing=None)
        self._test_payload(mock_field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
    def test_payload_cray_real(self):
        # CRAY 32-bit packing: 4 bytes per word rather than 8.
        mock_field = _MockField(lbext=0, lblrec=100, lbnrec=-1,
                                raw_lbpack=_CRAY,
                                lbuser=[_REAL], boundary_packing=None)
        self._test_payload(mock_field, 400, pp.LBUSER_DTYPE_LOOKUP[_REAL])
    def test_payload_cray_real_ext(self):
        mock_field = _MockField(lbext=50, lblrec=100, lbnrec=-1,
                                raw_lbpack=_CRAY,
                                lbuser=[_REAL], boundary_packing=None)
        self._test_payload(mock_field, 200, pp.LBUSER_DTYPE_LOOKUP[_REAL])
    def test_payload_cray_integer(self):
        mock_field = _MockField(lbext=0, lblrec=200, lbnrec=-1,
                                raw_lbpack=_CRAY,
                                lbuser=[_INTEGER], boundary_packing=None)
        self._test_payload(mock_field, 800, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
    def test_payload_cray_integer_ext(self):
        mock_field = _MockField(lbext=100, lblrec=200, lbnrec=-1,
                                raw_lbpack=_CRAY,
                                lbuser=[_INTEGER], boundary_packing=None)
        self._test_payload(mock_field, 400, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
# Allow running this test module directly.
if __name__ == '__main__':
    tests.main()
|
"""
Testing for the tree module (sklearn.tree).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from sklearn import tree
from sklearn import datasets
# Toy sample: two linearly separable classes in 2-D, plus held-out points T
# with their expected predictions.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Real datasets, shuffled with a fixed seed for reproducibility.
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset, with and without feature
    subsampling."""
    for params in ({}, {"max_features": 1, "random_state": 1}):
        clf = tree.DecisionTreeClassifier(**params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
def test_regression_toy():
    """Check regression on a toy dataset, with and without feature
    subsampling."""
    for params in ({}, {"max_features": 1, "random_state": 1}):
        reg = tree.DecisionTreeRegressor(**params)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result)
def test_xor():
    """Check on a XOR problem"""
    # Build a 10x10 checkerboard-style XOR target over a grid of points.
    y = np.zeros((10, 10))
    y[:5, :5] = 1
    y[5:, 5:] = 1
    gridx, gridy = np.indices(y.shape)
    X = np.vstack([gridx.ravel(), gridy.ravel()]).T
    y = y.ravel()
    # Both tree variants must fit XOR perfectly, with and without
    # feature subsampling.
    for factory in (tree.DecisionTreeClassifier, tree.ExtraTreeClassifier):
        for params in ({}, {"max_features": 1}):
            clf = factory(**params)
            clf.fit(X, y)
            assert_equal(clf.score(X, y), 1.0)
def test_graphviz_toy():
    """Check correctness of graphviz output on a toy dataset."""
    clf = tree.DecisionTreeClassifier(max_depth=3, min_samples_split=1)
    clf.fit(X, y)
    # NOTE(review): Python-2-only import; under Python 3 this would need
    # io.StringIO instead.
    from StringIO import StringIO
    # test export code
    out = StringIO()
    tree.export_graphviz(clf, out_file=out)
    contents1 = out.getvalue()
    # Expected dot output for the single split on X[0].
    tree_toy = StringIO("digraph Tree {\n"
                        "0 [label=\"X[0] <= 0.0000\\nerror = 0.5"
                        "\\nsamples = 6\\nvalue = [ 3.  3.]\", shape=\"box\"] ;\n"
                        "1 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 3.  0.]\", shape=\"box\"] ;\n"
                        "0 -> 1 ;\n"
                        "2 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 0.  3.]\", shape=\"box\"] ;\n"
                        "0 -> 2 ;\n"
                        "}")
    contents2 = tree_toy.getvalue()
    assert contents1 == contents2, \
        "graphviz output test failed\n: %s != %s" % (contents1, contents2)
    # test with feature_names
    out = StringIO()
    # export_graphviz returns the handle it wrote to.
    out = tree.export_graphviz(clf, out_file=out,
                               feature_names=["feature1", ""])
    contents1 = out.getvalue()
    tree_toy = StringIO("digraph Tree {\n"
                        "0 [label=\"feature1 <= 0.0000\\nerror = 0.5"
                        "\\nsamples = 6\\nvalue = [ 3.  3.]\", shape=\"box\"] ;\n"
                        "1 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 3.  0.]\", shape=\"box\"] ;\n"
                        "0 -> 1 ;\n"
                        "2 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 0.  3.]\", shape=\"box\"] ;\n"
                        "0 -> 2 ;\n"
                        "}")
    contents2 = tree_toy.getvalue()
    assert contents1 == contents2, \
        "graphviz output test failed\n: %s != %s" % (contents1, contents2)
    # test improperly formed feature_names
    out = StringIO()
    assert_raises(IndexError, tree.export_graphviz,
                  clf, out, feature_names=[])
def test_iris():
    """Check consistency on dataset iris."""
    for criterion in ('gini', 'entropy'):
        # Full-feature tree: should fit the training data almost perfectly.
        clf = tree.DecisionTreeClassifier(criterion=criterion)
        clf.fit(iris.data, iris.target)
        accuracy = np.mean(clf.predict(iris.data) == iris.target)
        assert accuracy > 0.9, ("Failed with criterion " + criterion +
                                " and score = " + str(accuracy))
        # Subsampled features: weaker fit, but still better than chance.
        clf = tree.DecisionTreeClassifier(criterion=criterion,
                                          max_features=2,
                                          random_state=1)
        clf.fit(iris.data, iris.target)
        accuracy = np.mean(clf.predict(iris.data) == iris.target)
        assert accuracy > 0.5, ("Failed with criterion " + criterion +
                                " and score = " + str(accuracy))
def test_boston():
    """Check consistency on dataset boston house prices."""
    for criterion in ('mse',):
        # Full-feature tree: near-zero training error expected.
        reg = tree.DecisionTreeRegressor(criterion=criterion)
        reg.fit(boston.data, boston.target)
        mse = np.mean(np.power(reg.predict(boston.data) - boston.target, 2))
        assert mse < 1, ("Failed with criterion " + criterion +
                         " and score = " + str(mse))
        # Using fewer features reduces the learning ability of this tree,
        # but reduces training time.
        reg = tree.DecisionTreeRegressor(criterion=criterion,
                                         max_features=6,
                                         random_state=1)
        reg.fit(boston.data, boston.target)
        mse = np.mean(np.power(reg.predict(boston.data) - boston.target, 2))
        assert mse < 2, ("Failed with criterion " + criterion +
                         " and score = " + str(mse))
def test_probability():
    """Predict probabilities using DecisionTreeClassifier."""
    clf = tree.DecisionTreeClassifier(max_depth=1, max_features=1,
                                      random_state=42)
    clf.fit(iris.data, iris.target)
    proba = clf.predict_proba(iris.data)
    # Each row of class probabilities must sum to one.
    assert_array_almost_equal(np.sum(proba, 1),
                              np.ones(iris.data.shape[0]))
    # argmax of the probabilities should agree with predict() most of the time.
    agreement = np.mean(np.argmax(proba, 1) == clf.predict(iris.data))
    assert agreement > 0.9
    # predict_log_proba is just the elementwise log of predict_proba.
    assert_almost_equal(clf.predict_proba(iris.data),
                        np.exp(clf.predict_log_proba(iris.data)), 8)
def test_arrayrepr():
    """Check the array representation."""
    # Fitting 10000 distinct targets forces the internal arrays to resize.
    X = np.arange(10000).reshape(-1, 1)
    y = np.arange(10000)
    reg = tree.DecisionTreeRegressor(max_depth=None)
    reg.fit(X, y)
def test_pure_set():
    """Check when y is pure."""
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]
    # A constant target must be reproduced exactly by both estimators.
    for Model in (tree.DecisionTreeClassifier, tree.DecisionTreeRegressor):
        est = Model().fit(X, y)
        assert_array_equal(est.predict(X), y)
def test_numerical_stability():
    """Check numerical stability: fitting near-identical feature values must
    not trigger any floating-point warning."""
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    # Escalate FP warnings to errors so any instability fails the test.
    # errstate restores the previous settings even when a fit raises;
    # the old seterr/seterr pair leaked the "raise" mode on failure.
    with np.errstate(all="raise"):
        dt = tree.DecisionTreeRegressor()
        dt.fit(X, y)
        dt.fit(X, -y)
        dt.fit(-X, y)
        dt.fit(-X, -y)
def test_importances():
    """Check variable importances."""
    # Three informative features out of ten, in a fixed order.
    X, y = datasets.make_classification(n_samples=1000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    clf = tree.DecisionTreeClassifier(compute_importances=True)
    clf.fit(X, y)
    importances = clf.feature_importances_
    # Exactly the three informative features should stand out.
    assert_equal(importances.shape[0], 10)
    assert_equal(sum(importances > 0.1), 3)
    # Thresholded transform keeps some, but not all, features.
    X_new = clf.transform(X, threshold="mean")
    assert 0 < X_new.shape[1] < X.shape[1]
    # Without compute_importances the attribute stays unset.
    clf = tree.DecisionTreeClassifier()
    clf.fit(X, y)
    assert_true(clf.feature_importances_ is None)
def test_error():
    """Test that it gives proper exception on deficient input."""
    # Invalid values for parameters
    assert_raises(ValueError,
                  tree.DecisionTreeClassifier(min_samples_leaf=-1).fit,
                  X, y)
    assert_raises(ValueError,
                  tree.DecisionTreeClassifier(max_depth=-1).fit,
                  X, y)
    assert_raises(ValueError,
                  tree.DecisionTreeClassifier(min_density=2.0).fit,
                  X, y)
    assert_raises(ValueError,
                  tree.DecisionTreeClassifier(max_features=42).fit,
                  X, y)
    # Wrong dimensions
    clf = tree.DecisionTreeClassifier()
    y2 = y[:-1]  # one label fewer than samples
    assert_raises(ValueError, clf.fit, X, y2)
    # Test with arrays that are non-contiguous.
    Xf = np.asfortranarray(X)
    clf = tree.DecisionTreeClassifier()
    clf.fit(Xf, y)
    assert_array_equal(clf.predict(T), true_result)
    # predict before fitting
    clf = tree.DecisionTreeClassifier()
    assert_raises(Exception, clf.predict, T)
    # predict on vector with different dims
    clf.fit(X, y)
    t = np.asarray(T)
    assert_raises(ValueError, clf.predict, t[:, 1:])
    # use values of max_features that are invalid
    clf = tree.DecisionTreeClassifier(max_features=10)  # > n_features
    assert_raises(ValueError, clf.fit, X, y)
    clf = tree.DecisionTreeClassifier(max_features=-1)
    assert_raises(ValueError, clf.fit, X, y)
    clf = tree.DecisionTreeClassifier(max_features="foobar")
    assert_raises(ValueError, clf.fit, X, y)
    # all the named strategies must be accepted
    tree.DecisionTreeClassifier(max_features="auto").fit(X, y)
    tree.DecisionTreeClassifier(max_features="sqrt").fit(X, y)
    tree.DecisionTreeClassifier(max_features="log2").fit(X, y)
    tree.DecisionTreeClassifier(max_features=None).fit(X, y)
    # predict before fit
    clf = tree.DecisionTreeClassifier()
    assert_raises(Exception, clf.predict_proba, X)
    clf.fit(X, y)
    X2 = [-2, -1, 1]  # wrong feature shape for sample
    assert_raises(ValueError, clf.predict_proba, X2)
    # wrong sample shape
    Xt = np.array(X).T
    clf = tree.DecisionTreeClassifier()
    clf.fit(np.dot(X, Xt), y)
    assert_raises(ValueError, clf.predict, X)
    clf = tree.DecisionTreeClassifier()
    clf.fit(X, y)
    assert_raises(ValueError, clf.predict, Xt)
    # wrong length of sample mask
    clf = tree.DecisionTreeClassifier()
    sample_mask = np.array([1])
    assert_raises(ValueError, clf.fit, X, y, sample_mask=sample_mask)
    # wrong length of X_argsorted
    clf = tree.DecisionTreeClassifier()
    X_argsorted = np.array([1])
    assert_raises(ValueError, clf.fit, X, y, X_argsorted=X_argsorted)
def test_min_samples_leaf():
    """Test if leaves contain more than leaf_count training examples"""
    data = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    target = iris.target
    for klass in [tree.DecisionTreeClassifier, tree.ExtraTreeClassifier]:
        estimator = klass(min_samples_leaf=5).fit(data, target)
        leaf_ids = estimator.tree_.apply(data)
        counts = np.bincount(leaf_ids)
        # inner nodes never receive samples via apply(), so their count
        # is zero; drop them before checking the leaf occupancy floor
        occupied = counts[counts != 0]
        assert np.min(occupied) >= 5
def test_pickle():
    """Check that fitted trees survive a pickle round-trip unchanged."""
    import pickle
    # classification
    clf = tree.DecisionTreeClassifier()
    clf.fit(iris.data, iris.target)
    original_score = clf.score(iris.data, iris.target)
    restored = pickle.loads(pickle.dumps(clf))
    assert_equal(type(restored), clf.__class__)
    assert original_score == restored.score(iris.data, iris.target), \
        "Failed to generate same score " + \
        " after pickling (classification) "
    # regression
    reg = tree.DecisionTreeRegressor()
    reg.fit(boston.data, boston.target)
    original_score = reg.score(boston.data, boston.target)
    restored = pickle.loads(pickle.dumps(reg))
    assert_equal(type(restored), reg.__class__)
    assert original_score == restored.score(boston.data, boston.target), \
        "Failed to generate same score " + \
        " after pickling (regression) "
def test_multioutput():
    """Check estimators on multi-output problems."""
    # 12 samples in the four quadrants of the plane
    X = [[-2, -1],
         [-1, -1],
         [-1, -2],
         [1, 1],
         [1, 2],
         [2, 1],
         [-2, 1],
         [-1, 1],
         [-1, 2],
         [2, -1],
         [1, -1],
         [1, -2]]
    # two output columns: sign of x (binary) and quadrant id (4 classes)
    y = [[-1, 0],
         [-1, 0],
         [-1, 0],
         [1, 1],
         [1, 1],
         [1, 1],
         [-1, 2],
         [-1, 2],
         [-1, 2],
         [1, 3],
         [1, 3],
         [1, 3]]
    # one probe point per quadrant, with its expected two-column output
    T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    # toy classification problem
    clf = tree.DecisionTreeClassifier()
    y_hat = clf.fit(X, y).predict(T)
    assert_array_equal(y_hat, y_true)
    assert_equal(y_hat.shape, (4, 2))
    # predict_proba returns one array per output: 2-class then 4-class
    proba = clf.predict_proba(T)
    assert_equal(len(proba), 2)
    assert_equal(proba[0].shape, (4, 2))
    assert_equal(proba[1].shape, (4, 4))
    log_proba = clf.predict_log_proba(T)
    assert_equal(len(log_proba), 2)
    assert_equal(log_proba[0].shape, (4, 2))
    assert_equal(log_proba[1].shape, (4, 4))
    # toy regression problem
    clf = tree.DecisionTreeRegressor()
    y_hat = clf.fit(X, y).predict(T)
    assert_almost_equal(y_hat, y_true)
    assert_equal(y_hat.shape, (4, 2))
def test_sample_mask():
    """Test sample_mask argument. """
    # a plain Python list must be accepted as a mask
    estimator = tree.DecisionTreeClassifier()
    estimator.fit(X, y, sample_mask=[1] * len(X))
    assert_array_equal(estimator.predict(T), true_result)
    # a non-bool dtype (int32 ndarray) must be accepted as well
    estimator = tree.DecisionTreeClassifier()
    estimator.fit(X, y, sample_mask=np.ones((len(X),), dtype=np.int32))
    assert_array_equal(estimator.predict(T), true_result)
def test_X_argsorted():
    """Test X_argsorted argument. """
    # a precomputed argsort built through a transposed (and therefore
    # non-contiguous) view must be usable by fit
    estimator = tree.DecisionTreeClassifier()
    presorted = np.argsort(np.array(X).T, axis=1).T
    estimator.fit(X, y, X_argsorted=presorted)
    assert_array_equal(estimator.predict(T), true_result)
def test_classes_shape():
    """Test that n_classes_ and classes_ have proper shape."""
    # Classification, single output: scalar count, flat class list
    estimator = tree.DecisionTreeClassifier()
    estimator.fit(X, y)
    assert_equal(estimator.n_classes_, 2)
    assert_equal(estimator.classes_, [-1, 1])
    # Classification, multi-output: stack y with a doubled copy so each
    # of the two output columns carries its own binary class set
    stacked = np.vstack((y, np.array(y) * 2)).T
    estimator = tree.DecisionTreeClassifier()
    estimator.fit(X, stacked)
    assert_equal(len(estimator.n_classes_), 2)
    assert_equal(len(estimator.classes_), 2)
    assert_equal(estimator.n_classes_, [2, 2])
    assert_equal(estimator.classes_, [[-1, 1], [-2, 2]])
if __name__ == "__main__":
    # Allow running this test module directly through nose's runner.
    import nose
    nose.runmodule()
|
def bytes_to_long(foo):
    """Convert a big-endian byte string to its integer value.

    The original body was a stub that returned 0 for every input; this
    implements the conversion following the ``Crypto.Util.number``
    convention the name mimics (an empty string converts to 0, so the
    stub's old return value for trivial input is preserved).
    """
    value = 0
    # bytearray() yields ints under both Python 2 and Python 3
    for octet in bytearray(foo):
        value = (value << 8) | octet
    return value
def long_to_bytes(foo):
    """Convert a non-negative integer to its big-endian string form.

    The original body was a stub that returned '\\0' unconditionally;
    this implements the inverse of ``bytes_to_long`` while keeping the
    stub's return value ('\\0') for input 0 so existing callers see no
    change for that case.
    """
    chunks = []
    while foo > 0:
        chunks.append(chr(foo & 0xFF))
        foo >>= 8
    # chunks were collected least-significant first; reverse for big-endian
    return ''.join(reversed(chunks)) or '\0'
|
# Largest rectangle in a histogram, computed with a monotonic stack.
# n (the bar count) is read to consume the first input line but the
# algorithm itself does not need it.
n = int(input())
# Stack of (start_index, height); the sentinel height -2 is below any
# real bar so the pop loop below can never empty the stack.
st = [(-1, -2)]
s = 0  # best rectangle area found so far
# ' -1' is appended as a trailing sentinel bar so every bar still on the
# stack is popped and scored after the real input ends.
for i, h in enumerate(map(int, input().split() + [' -1'])):
    if h > st[-1][1]:
        # rising edge: open a new candidate rectangle at this index
        st.append((i, h))
    else:
        # falling edge: close every taller pending rectangle and score it,
        # then reopen at the popped start index with the lower height
        while st[-1][1] >= h:
            r = st.pop()
            s = max(s, (i - r[0]) * r[1])
        st.append((r[0], h))
print(s)
|
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django_redis import get_redis_connection
from users.models import PastebinUser
from users.forms import RegisterForm, LoginForm, ChangePreferencesForm, ChangePasswordForm, VerifyPasswordForm
from users.models import Favorite, SiteSettings
from pastes.models import Paste
from pastebin.util import Paginator
import math
def register_view(request):
    """
    Register a new user
    """
    if request.user.is_authenticated():
        # A logged-in user cannot register another account from here.
        return render(request, 'users/register/already_logged_in.html')
    register_form = RegisterForm(request.POST or None)
    if request.method == 'POST' and register_form.is_valid():
        # Create the Django user and its pastebin profile atomically so a
        # failure in either leaves no half-created account behind.
        with transaction.atomic():
            user = User.objects.create_user(register_form.cleaned_data['username'],
                                            "N/A",  # we don't deal with email addresses
                                            register_form.cleaned_data['password'])
            PastebinUser.create_user(user)
        # TODO: Show a different message if the registration fails
        return render(request, 'users/register/register_success.html')
    # Show the registration page
    return render(request, "users/register/register.html", {"form": register_form})
def login_view(request):
    """
    Log the user in
    """
    if request.user.is_authenticated():
        # Already authenticated: nothing to do.
        return render(request, "users/login/logged_in.html")
    login_form = LoginForm(request.POST or None)
    if request.method == 'POST' and login_form.is_valid():
        user = authenticate(username=login_form.cleaned_data['username'],
                            password=login_form.cleaned_data['password'])
        if user is not None and user.is_active:
            login(request, user)
            return render(request, "users/login/logged_in.html")
        # Couldn't authenticate, either the username or password is wrong
        error = "User doesn't exist or the password is incorrect."
        login_form._errors['password'] = login_form.error_class([error])
    # Show the login form
    return render(request, "users/login/login.html", {"form": login_form})
def logout_view(request):
    """
    Logout the user and show the logout page
    """
    # Only end the session when one is actually authenticated; anonymous
    # visitors just see the logged-out page.
    if request.user.is_authenticated():
        logout(request)
    return render(request, 'users/logout/logged_out.html')
def profile(request, username, tab="home", page=1):
    """
    Show a publicly visible profile page

    Cache convention used throughout: a cached value of None means
    "not cached yet", while a cached value of False marks a username
    already known not to exist (negative cache).
    """
    page = int(page)
    try:
        profile_user = cache.get("user:%s" % username)
        if profile_user == None:
            # not cached yet: hit the database and populate the cache
            profile_user = User.objects.get(username=username)
            cache.set("user:%s" % username, profile_user)
        elif profile_user == False:
            # negative-cache hit: user is known not to exist
            return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
    except ObjectDoesNotExist:
        # remember the miss so later requests skip the DB lookup
        cache.set("user:%s" % username, False)
        return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
    # Get user's settings
    profile_settings = cache.get("site_settings:%s" % username)
    if profile_settings == None:
        try:
            profile_settings = SiteSettings.objects.get(user=profile_user)
        except ObjectDoesNotExist:
            # first visit: create default settings on the fly
            profile_settings = SiteSettings(user=profile_user)
            profile_settings.save()
        cache.set("site_settings:%s" % username, profile_settings)
    if not profile_user.is_active:
        # deactivated accounts are presented as missing
        return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
    if request.user != profile_user:
        total_paste_count = cache.get("user_public_paste_count:%s" % profile_user.username)
    else:
        total_paste_count = cache.get("user_paste_count:%s" % profile_user.username)
    # If user is viewing his own profile, also include hidden pastes
    if total_paste_count == None and request.user != profile_user:
        total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).filter(hidden=False).count()
        cache.set("user_public_paste_count:%s" % profile_user.username, total_paste_count)
    elif total_paste_count == None and request.user == profile_user:
        total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).count()
        cache.set("user_paste_count:%s" % profile_user.username, total_paste_count)
    total_favorite_count = cache.get("user_favorite_count:%s" % profile_user.username)
    if total_favorite_count == None:
        total_favorite_count = Favorite.objects.filter(user=profile_user).count()
        cache.set("user_favorite_count:%s" % profile_user.username, total_favorite_count)
    # common template context shared by all profile tabs
    args = {"profile_user": profile_user,
            "profile_settings": profile_settings,
            "current_page": page,
            "tab": tab,
            "total_favorite_count": total_favorite_count,
            "total_paste_count": total_paste_count}
    if tab == "home":
        return home(request, args)
    elif tab == "pastes":
        return pastes(request, profile_user, args, page)
    elif tab == "favorites":
        return favorites(request, profile_user, args, page)
    # The remaining pages require authentication, so redirect through settings()
    else:
        return settings(request, profile_user, args, tab)
def settings(request, username, args=None, tab="change_password"):
    """
    Show a page which allows the user to change his settings

    :param args: optional pre-filled template context. Bug fix: the old
        signature used a mutable default (``args={}``), which is shared
        between calls and could leak context across requests; a fresh
        dict is now created per call.
    :param tab: which settings sub-page to dispatch to
    """
    if args is None:
        args = {}
    if not request.user.is_authenticated():
        return render(request, "users/settings/settings_error.html", {"reason": "not_logged_in"})
    profile_user = User.objects.get(username=username)
    if request.user.id != profile_user.id:
        # users may only change their own settings
        return render(request, "users/settings/settings_error.html", {"reason": "incorrect_user"})
    if tab == "change_preferences":
        return change_preferences(request, args)
    if tab == "change_password":
        return change_password(request, args)
    elif tab == "delete_account":
        return delete_account(request, args)
    # NOTE(review): an unrecognized tab falls through and returns None,
    # which Django reports as an error page -- confirm every caller only
    # passes one of the three tabs above.
def home(request, args):
    """
    Display user profile's home with the most recent pastes and favorites

    Cache convention: None means "not cached yet".
    """
    # Get favorites only if user has made them public
    if args["profile_settings"].public_favorites or request.user == args["profile_user"]:
        args["favorites"] = cache.get("profile_favorites:%s" % args["profile_user"].username)
        if args["favorites"] == None:
            # newest 10 favorites, with their pastes prefetched
            args["favorites"] = Favorite.objects.filter(user=args["profile_user"]).order_by('-added').select_related('paste')[:10]
            cache.set("profile_favorites:%s" % args["profile_user"].username, args["favorites"])
    if request.user == args["profile_user"]:
        # owner sees hidden pastes too, cached under a separate key
        args["pastes"] = cache.get("profile_pastes:%s" % args["profile_user"].username)
        if args["pastes"] == None:
            args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=True, count=10)
            cache.set("profile_pastes:%s" % args["profile_user"].username, args["pastes"])
    else:
        args["pastes"] = cache.get("profile_public_pastes:%s" % args["profile_user"].username)
        if args["pastes"] == None:
            args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=False, count=10)
            cache.set("profile_public_pastes:%s" % args["profile_user"].username, args["pastes"])
    return render(request, "users/profile/home/home.html", args)
def pastes(request, user, args, page=1):
    """
    Show all of user's pastes

    :param page: 1-based page number; out-of-range values are clamped
        to the last page (or page 1 when there are no pastes).
    """
    PASTES_PER_PAGE = 15
    args["total_pages"] = int(math.ceil(float(args["total_paste_count"]) / float(PASTES_PER_PAGE)))
    if page > args["total_pages"]:
        page = max(int(args["total_pages"]), 1)
    offset = (page-1) * PASTES_PER_PAGE
    if request.user == user:
        # owner view includes hidden pastes; cached per (user, page)
        args["pastes"] = cache.get("user_pastes:%s:%s" % (user.username, page))
        if args["pastes"] == None:
            args["pastes"] = Paste.objects.get_pastes(user, count=PASTES_PER_PAGE, include_hidden=True, offset=offset)
            cache.set("user_pastes:%s:%s" % (user.username, page), args["pastes"])
    else:
        args["pastes"] = cache.get("user_public_pastes:%s:%s" % (user.username, page))
        if args["pastes"] == None:
            args["pastes"] = Paste.objects.get_pastes(user, count=PASTES_PER_PAGE, include_hidden=False, offset=offset)
            cache.set("user_public_pastes:%s:%s" % (user.username, page), args["pastes"])
    args["pages"] = Paginator.get_pages(page, PASTES_PER_PAGE, args["total_paste_count"])
    args["current_page"] = page
    return render(request, "users/profile/pastes/pastes.html", args)
def favorites(request, user, args, page=1):
    """
    Show all of user's favorites

    :param page: 1-based page number; out-of-range values are clamped.
    """
    FAVORITES_PER_PAGE = 15
    if not args["profile_settings"].public_favorites and request.user != args["profile_user"]:
        # Don't show pastes to other users if the user doesn't want to
        return render(request, "users/profile/favorites/favorites_hidden.html", args)
    args["total_pages"] = int(math.ceil(float(args["total_favorite_count"]) / float(FAVORITES_PER_PAGE)))
    if page > args["total_pages"]:
        page = max(int(args["total_pages"]), 1)
    # slice bounds for this page
    start = (page-1) * FAVORITES_PER_PAGE
    end = start + FAVORITES_PER_PAGE
    args["favorites"] = cache.get("user_favorites:%s:%s" % (user.username, page))
    if args["favorites"] == None:
        args["favorites"] = Favorite.objects.filter(user=user).select_related("paste")[start:end]
        cache.set("user_favorites:%s:%s" % (user.username, page), args["favorites"])
    args["pages"] = Paginator.get_pages(page, FAVORITES_PER_PAGE, args["total_favorite_count"])
    args["current_page"] = page
    return render(request, "users/profile/favorites/favorites.html", args)
def remove_favorite(request):
    """
    Remove a favorite and redirect the user back to the favorite listing

    Bug fixes over the original:
    - authentication is checked before touching the database;
    - non-numeric POST values now return 422 instead of raising
      ValueError (HTTP 500) from int();
    - a missing Favorite row now returns 422 instead of an unhandled
      DoesNotExist (HTTP 500).
    """
    if not request.user.is_authenticated():
        return HttpResponse("You are not authenticated", status=422)
    try:
        favorite_id = int(request.POST["favorite_id"])
    except (KeyError, ValueError):
        return HttpResponse("Favorite ID was not valid.", status=422)
    if not favorite_id:
        # id 0 was rejected by the original "not int(...)" check as well
        return HttpResponse("Favorite ID was not valid.", status=422)
    try:
        page = int(request.POST["page"])
    except (KeyError, ValueError):
        return HttpResponse("Page was not valid.", status=422)
    if not page:
        return HttpResponse("Page was not valid.", status=422)
    try:
        favorite = Favorite.objects.get(id=favorite_id)
    except ObjectDoesNotExist:
        return HttpResponse("Favorite does not exist.", status=422)
    if favorite.user != request.user:
        return HttpResponse("You can't delete someone else's favorites.", status=422)
    favorite.delete()
    # invalidate the cached favorite list and count for this user
    cache.delete("profile_favorites:%s" % request.user.username)
    cache.delete("user_favorite_count:%s" % request.user.username)
    return HttpResponseRedirect(reverse("users:favorites", kwargs={"username": request.user.username,
                                                                   "page": page}))
def change_preferences(request, args):
    """
    Change various profile-related preferences
    """
    site_settings = SiteSettings.objects.get(user=request.user)
    form = ChangePreferencesForm(request.POST or None,
                                 initial={"public_favorites": site_settings.public_favorites})
    preferences_changed = False
    if form.is_valid():
        site_settings.public_favorites = form.cleaned_data["public_favorites"]
        site_settings.save()
        # keep the cached copy in sync with the database
        cache.set("site_settings:%s" % request.user.username, site_settings)
        preferences_changed = True
    args["form"] = form
    args["preferences_changed"] = preferences_changed
    return render(request, "users/settings/change_preferences/change_preferences.html", args)
def change_password(request, args):
    """
    Change the user's password
    """
    form = ChangePasswordForm(request.POST or None, user=request.user)
    password_changed = False
    if form.is_valid():
        request.user.set_password(form.cleaned_data["new_password"])
        request.user.save()
        # Session auth hash needs to be updated after changing the password
        # or the user will be logged out
        update_session_auth_hash(request, request.user)
        password_changed = True
    args["form"] = form
    args["password_changed"] = password_changed
    return render(request, "users/settings/change_password/change_password.html", args)
def delete_account(request, args):
    """
    Delete the user's account
    """
    form = VerifyPasswordForm(request.POST or None, user=request.user)
    if not form.is_valid():
        # password not verified (or GET): show the confirmation page
        args["form"] = form
        return render(request, "users/settings/delete_account/delete_account.html", args)
    PastebinUser.delete_user(request.user)
    logout(request)
    return render(request, "users/settings/delete_account/account_deleted.html")
|
# This script will count the number of tweets within an output.txt file.
# Tweets in the file are separated by blank lines, so the number of
# "\n\n" occurrences approximates the tweet count.
import re

# Bug fix: the original opened the file and never closed it; a context
# manager guarantees the handle is released even on error.
with open("output.txt", "r") as output:
    newlinenewline = re.findall("\n\n", output.read())

# Parenthesized print works under both Python 2 and Python 3 (the
# original `print len(...)` statement is a syntax error on Python 3).
print(len(newlinenewline))
|
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import sys
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
# MultiDark Planck cosmology used to convert redshift to luminosity distance.
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
# 'create' adds the output dataset the first time; 'update' overwrites it in place.
status = 'update'
path_to_lc = sys.argv[1]  # path to the lightcone HDF5 file (opened read/write)
f = h5py.File(path_to_lc, 'r+')
# Selection mask: objects flagged in the lightcone and below redshift 3.
is_gal = (f['/sky_position/selection'].value)&(f['/sky_position/redshift_R'].value<3.)
z = f['/sky_position/redshift_S'].value[is_gal]
lx = f['/cluster_data/log_LceX_05_24'].value[is_gal]  # log10 X-ray luminosity, 0.5-2.4 keV
percent_observed = 1.
lx_absorbed_05_24 = n.log10(10**lx * percent_observed)
d_L = cosmoMD.luminosity_distance(z)
dl_cm = (d_L.to(u.cm)).value
adjusting_factor = 0.35 # accounts for absorption for now !
# flux = L / (4 pi d_L^2), with the ad-hoc absorption factor applied in log space
fx_05_24 = 10**(lx_absorbed_05_24-adjusting_factor) / (4 * n.pi * dl_cm**2.)
# Full-length output array; non-selected rows carry the -9999 sentinel.
fx_05_24_out = n.ones_like(f['/sky_position/redshift_S'].value)*-9999.
fx_05_24_out[is_gal] = fx_05_24
# NOTE(review): 'rxay_flux_05_24' looks like a typo for 'xray', but it is a
# stored dataset key that readers depend on, so it is kept as-is.
if status == 'create':
    f['/cluster_data'].create_dataset('rxay_flux_05_24', data = fx_05_24_out )
if status == 'update':
    f['/cluster_data/rxay_flux_05_24'][:] = fx_05_24_out
f.close()
|
import operator
def pozicijaSprite(broj, x_velicina):
    """Return the x pixel at which sprite number *broj* starts."""
    # each sprite occupies its width plus a one-pixel spacer column
    return broj * (x_velicina + 1)
# Glyph order in the sprite sheet; a character's list index is its sprite slot.
spriteSlova = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "s", ",", "'", "1", "2", "4", "8", "6", "3", ".", "5", "7", "9", "0", "M", "B", "I", "N", "S", "E", "R", "T", " ", "-", "V","U" ,"A", "L", "O", "D", ":", "m", "j", "n", "u", "C", "H", "k", "l", "o", "p", "r", "t", "v", "z", "K", "P", "%", "/"]
def pixel2Ton(pixel):
    """Map a screen y pixel to a tone index (key into ``kljucevi``)."""
    rezolucija = 90
    indent = -12  # extra pixeli
    height = 3    # pixel rows per tone step
    return int((rezolucija - pixel - indent) / height)
# Accidental codes -> semitone-direction hints (0 natural, 1 flat, 2 sharp).
predikati = {
    0 : 0,
    1 : -1,
    2 : 1,
    3 : 0
}
# Tone index -> (note letter, LilyPond octave marks), from D,, up to h'''.
kljucevi = {
    0 : ("d", ",,"),
    1 : ("e", ",,"),
    2 : ("f", ",,"),
    3 : ("g", ",,"),
    4 : ("a", ",,"),
    5 : ("h", ",,"),
    6 : ("c", ","),
    7 : ("d", ","),
    8 : ("e", ","),
    9 : ("f", ","),
    10 : ("g", ","),
    11 : ("a", ","),
    12 : ("h", ","),
    13 : ("c", ""),
    14 : ("d", ""),
    15 : ("e", ""),
    16 : ("f", ""),
    17 : ("g", ""),
    18 : ("a", ""),
    19 : ("h", ""),
    20 : ("c", "'"),
    21 : ("d", "'"),
    22 : ("e", "'"),
    23 : ("f", "'"),
    24 : ("g", "'"),
    25 : ("a", "'"),
    26 : ("h", "'"),
    27 : ("c", "''"),
    28 : ("d", "''"),
    29 : ("e", "''"),
    30 : ("f", "''"),
    31 : ("g", "''"),
    32 : ("a", "''"),
    33 : ("h", "''"),
    34 : ("c", "'''"),
    35 : ("d", "'''"),
    36 : ("e", "'''"),
    37 : ("f", "'''"),
    38 : ("g", "'''"),
    39 : ("a", "'''"),
    40 : ("h", "'''")
}
def removeLily(slovo):
    """Strip LilyPond octave marks (commas/apostrophes) and uppercase."""
    bare = slovo.replace(',', '')
    bare = bare.replace('\'', '')
    return bare.upper()
def slovoPozicija(slovo):
    """Return the sprite-sheet index of *slovo* (None when absent)."""
    for indeks, znak in enumerate(spriteSlova):
        if znak == slovo:
            # first occurrence wins, matching the original behaviour
            return indeks
# Duration code -> LilyPond-style note-length token(s); compound entries
# such as "4.16" encode a dotted value plus a tied shorter value.
rijecnikNotnihVrijednosti = {
    0 : "16",
    1 : "8",
    2 : "8.",
    3 : "4",
    4 : "416",
    5 : "4.",
    6 : "4.16",
    7 : "2",
    8 : "216",
    9 : "28",
    10 : "28.",
    11 : "2.",
    12 : "2.16",
    13 : "2.8",
    14 : "2.8.",
    15 : "1"
}
def pixel2Pozicija(pixel):
    """Map a screen x pixel to a grid column (time position)."""
    rezolucija = 90
    indent = 19  # extra pixeli
    width = 6    # pixels per grid column
    return int((pixel - indent) / width)
def pixel2Trajanje(pixel):
    """Map a pixel width to a duration in grid columns."""
    indent = 4
    width = 6  # pixels per grid column
    return int((pixel - indent) / width)
def ton2Pixel(ton):
    """Inverse of pixel2Ton: map a tone index back to a screen y pixel."""
    rezolucija = 90
    indent = -12
    height = 3
    return rezolucija - indent - ton * height
def pozicija2Pixel(pozicija):
    """Inverse of pixel2Pozicija: map a grid column to a screen x pixel."""
    rezolucija = 90
    indent = 19  # extra pixeli
    width = 6
    return pozicija * width + indent
def trajanje2Pixel(trajanje):
    """Inverse of pixel2Trajanje: map a duration to a pixel width."""
    indent = 4
    width = 6
    return trajanje * width + indent
class dodaj_notu(object):
    """A note event on the grid: position, tone, duration and accidental."""
    def __init__(self, pozicija, ton, trajanje, predikat):
        # ties are off until explicitly enabled by the editor
        self.ligatura = False
        self.pozicija = pozicija
        self.ton = ton
        self.trajanje = trajanje
        self.predikat = predikat
class add_chord(object):
    """A chord event on the grid; same fields as dodaj_notu."""
    def __init__(self, pozicija, ton, trajanje, predikat):
        # ties are off until explicitly enabled by the editor
        self.ligatura = False
        self.pozicija = pozicija
        self.ton = ton
        self.trajanje = trajanje
        self.predikat = predikat
class add_markup(object):
    """A markup/annotation event on the grid; same fields as dodaj_notu."""
    def __init__(self, pozicija, ton, trajanje, predikat):
        # ties are off until explicitly enabled by the editor
        self.ligatura = False
        self.pozicija = pozicija
        self.ton = ton
        self.trajanje = trajanje
        self.predikat = predikat
class cursor(object):
    """Editing cursor: grid location plus background-scroll bookkeeping."""
    def __init__(self, pozicija, ton, trajanje):
        self.pozicija = pozicija
        self.ton = ton
        self.trajanje = trajanje
        self.sprite = 0
        # scroll state of the background layer
        self.bg_scroll_x = 0
        self.bg_scroll_y = 0
        self.bg_scroll_x_offset = 0 #used for cursor follow efect
        self.bg_scroll_y_offset = 0 #used for cursor follow efect
        self.apsolute_x = 0 #used for cursor follow efect
        self.apsolute_y = 0 #used for cursor follow efect
def checkXColision(nota, cursorLeft, trajanje):
    """Return True when the cursor span [cursorLeft, cursorLeft+trajanje]
    overlaps the note span [nota.pozicija, nota.pozicija+nota.trajanje].

    Each branch prints which edge collided (CL = cursor left edge,
    CR = cursor right edge); the branch order is significant because
    only the first matching case is reported.
    """
    if ( nota.pozicija == cursorLeft):
        print("kolizija na pocetku note s CL")
        return(True)
    elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
        print("kolizija na sredini note s CL")
        return(True)
    elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
        print("kolizija na kraju note s CL")
        return(True)
    elif ( nota.pozicija == ( cursorLeft + trajanje)):
        print("kolizija na pocetku note s CR")
        return(True)
    elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
        print("kolizija na sredini note sa CR")
        return(True)
    elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
        print("kolizija na kraju note s CR")
        return(True)
    elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
        # note lies strictly inside the cursor span
        print("kolizija note unutar Cursora")
        return(True)
    else:
        return(False)
def findNote(nota, cursorLeft, trajanje):
    """Like checkXColision, but return a numeric code (1-7) identifying
    WHICH kind of overlap occurred, or False when there is none.

    Codes: 1-3 = cursor-left edge at note start/middle/end,
    4-6 = cursor-right edge at note start/middle/end,
    7 = note entirely inside the cursor span.
    """
    if ( nota.pozicija == cursorLeft):
        print("na pocetku note s CL")
        return(1)
    elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
        print("na sredini note s CL")
        return(2)
    elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
        print("na kraju note s CL")
        return(3)
    elif ( nota.pozicija == ( cursorLeft + trajanje)):
        print("na pocetku note s CR")
        return(4)
    elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
        print("na sredini note sa CR")
        return(5)
    elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
        print("na kraju note s CR")
        return(6)
    elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
        print("note unutar Cursora")
        return(7)
    else:
        return(False)
# Note letter -> semitone offset within an octave (German convention: h = B).
letter2MidiNumberPrefix = {
    "c" : "0",
    "d" : "2",
    "e" : "4",
    "f" : "5",
    "g" : "7",
    "a" : "9",
    "h" : "11",
}
# LilyPond octave marks -> MIDI number of that octave's C.
letter2MidiOctave = {
    ",," : "24",
    "," : "36",
    "" : "48",
    "'" : "60",
    "''" : "72",
    "'''" : "84",
}
# Accidental code -> semitone shift applied to the MIDI number.
predikat2Midi = {
    0 : 0,
    1 : 1,
    2 : -1,
}
def nota2MidiNumber(nota):
    """Translate a note object into an absolute MIDI note number."""
    slovo, oktava = kljucevi[nota.ton]
    baza = int(letter2MidiNumberPrefix[slovo])
    pomak_oktave = int(letter2MidiOctave[oktava])
    alteracija = int(predikat2Midi[nota.predikat])
    return baza + pomak_oktave + alteracija
def get_git_revision_short_hash():
    """Return the current HEAD's abbreviated commit hash.

    Note: ``check_output`` returns raw bytes including a trailing
    newline; callers must decode/strip as needed.
    """
    import subprocess
    return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
|
"""
Mastermind without kivy - by Luis
merciless edited by hans
"""
import random
import re
class G():
    """Global game configuration (used as a namespace of constants)."""
    valid_chars = '123456'  # symbols a secret/guess may contain
    secret_len = 5          # length of the secret sequence
    solved = '+' * secret_len  # score string signalling a full match
    # e.g. "^[123456]{5,5}$": exactly secret_len valid symbols
    regex_str = "^[{0}]{{{1},{1}}}$".format(valid_chars, secret_len)
    valid_input = re.compile(regex_str) # regular expression for user input
def main():
    """Run the guess/score loop until the secret is fully matched."""
    secret = answer_generator()
    print('Enter your guess of {} of these symbols: ({})'
          .format(G.secret_len, G.valid_chars))
    while True:
        user_seq = user_guess()
        output = handle_game(secret, user_seq)
        # echo the guess together with its +/- score
        print('{} -> {}'.format(user_seq, output))
        if output == G.solved:
            break
    print('You have found the answer! Goodbye!')
def handle_game(answer, guess):
    """Score *guess* against *answer*.

    Returns one '+' per exact-position hit followed by one '-' per
    correct symbol in the wrong position; each answer symbol is
    consumed at most once.
    """
    answer_chars = list(answer)
    guess_chars = list(guess)
    score = ''
    for pos, symbol in enumerate(guess_chars):
        if symbol == answer_chars[pos]:
            # neutralize both sides with characters outside valid_chars so
            # the misplaced-symbol pass below cannot re-count this pair
            guess_chars[pos] = '°'
            answer_chars[pos] = '^'
            score += '+'
    for symbol in guess_chars:
        if symbol in answer_chars:
            # consume one matching answer symbol; position no longer matters
            answer_chars.remove(symbol)
            score += '-'
    return score
def user_guess():
    """Prompt repeatedly until the input matches the guess pattern."""
    while True:
        response = input()
        if not G.valid_input.match(response):
            print("wrong input...")
            continue
        return response
def answer_generator():  # Creates random sequence of n characters
    """Build a secret of G.secret_len randomly chosen valid symbols."""
    return ''.join(random.choice(G.valid_chars)
                   for _ in range(G.secret_len))
if __name__ == '__main__':
    # Start the game only when run as a script, not on import.
    main()
|
from __future__ import print_function
import httplib2
import io
import os
import sys
import time
import dateutil.parser
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient.http import MediaIoBaseDownload
import pprint
# Date the Osiris ransomware hit: only file revisions stamped with this
# exact day are treated as infected and rolled back.
YEAR_OF_INFECTION = 2017
# Bug fix: the original used leading-zero literals (01), which are a
# syntax error on Python 3; the values are unchanged.
MONTH_OF_INFECTION = 1
DAY_OF_INFECTION = 1
# tools.argparser supplies the standard oauth2client command-line flags;
# fall back to None when argparse is unavailable (very old Pythons).
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None
# Full read/write Drive scope: required to delete revisions and rename files.
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'revert_osiris.json'
APPLICATION_NAME = 'Revert Osiris'
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    # credentials are persisted under ~/.credentials so the OAuth flow
    # only runs on the first invocation
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'drive-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def main():
    """Find Osiris-renamed Drive files, download a local backup of their
    pre-infection revision, then print (without executing) the API calls
    that would delete the infected head revision and restore the name.

    Bug fixes over the original:
    - ``dt.month = MONTH_OF_INFECTION`` (assignment inside a condition,
      a syntax error) is now ``==``;
    - the file-listing loop now stops when ``nextPageToken`` runs out
      (previously ``pageToken=None`` restarted at page one forever);
    - the two "Skipping {}" messages now actually call ``.format``;
    - a single-revision file in the backup pass is skipped (as in the
      restore pass) instead of aborting the whole run via ``assert``.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    pp = pprint.PrettyPrinter()
    # Collect every file whose name contains 'osiris' (the ransomware's
    # rename marker). The original's pageSize=1 probe was dead code whose
    # results were immediately discarded, so it is removed.
    bad_files = []
    next_page = None
    while True:
        results = service.files().list(pageToken=next_page, pageSize=100,
                                       fields="nextPageToken, files(id, name)").execute()
        items = results.get('files', [])
        if not items:
            print('No files found.')
            break
        for item in items:
            # Only act on files with osiris in the name.
            if 'osiris' in item['name']:
                bad_files.append(item)
        next_page = results.get('nextPageToken', None)
        if next_page is None:
            # no more pages; without this the loop never terminated
            break
    print("Found {} bad files".format(len(bad_files)))
    # Download a backup of all files just in case
    for bad_item in bad_files:
        revisions = service.revisions().list(fileId=bad_item['id'], fields='*').execute()
        if len(revisions['revisions']) < 2:
            print("File has only 1 revision, skipping: {}".format(bad_item))
            continue
        dt = dateutil.parser.parse(revisions['revisions'][-1]['modifiedTime'])
        if dt.day == DAY_OF_INFECTION and dt.month == MONTH_OF_INFECTION and dt.year == YEAR_OF_INFECTION:
            print("Last revision dates from virus day")
        else:
            print("Skipping {}, datastamp on file isn't from virus day".format(bad_item['name']))
            continue
        dt = dateutil.parser.parse(revisions['revisions'][-2]['modifiedTime'])
        print("Date of second to last revision is: {}".format(dt))
        request = service.revisions().get_media(fileId=bad_item['id'],
                                                revisionId=revisions['revisions'][-2]['id'])
        # Filenames are not unique in gdrive so append with file ID as well
        new_filename = os.path.join('backup',
                                    revisions['revisions'][-2]['originalFilename'] + '_' + bad_item['id'])
        # If we are re-running script see if we already downloaded this file
        if os.path.isfile(new_filename):
            print("File {} already backed up, skipping".format(new_filename))
            continue
        fh = io.FileIO(new_filename, 'wb')
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()
            print("Download {}".format(int(status.progress() * 100)))
    # Restore pass: print the reverting API calls; the destructive lines
    # stay commented out on purpose.
    count = 0
    for bad_item in bad_files:
        count = count + 1
        # Do in batches just to be kind of safe.
        if count > 50:
            break
        file_id = bad_item['id']
        revisions = service.revisions().list(fileId=file_id, fields='*').execute()
        if len(revisions['revisions']) < 2:
            print("File has only 1 revision, skipping: {}".format(bad_item))
            continue
        file_meta = service.files().get(fileId=file_id, fields='*').execute()
        dt_last = dateutil.parser.parse(revisions['revisions'][-1]['modifiedTime'])
        if dt_last.day == DAY_OF_INFECTION and dt_last.month == MONTH_OF_INFECTION and dt_last.year == YEAR_OF_INFECTION:
            print("Last revision dates from virus day")
        else:
            print("Skipping {}, datestamp on file isn't from virus day".format(bad_item['name']))
            continue
        target_rev_name = revisions['revisions'][-2]['originalFilename']
        # If the 2nd to last revision is also osiris, we can't simply revert
        if 'osiris' in target_rev_name:
            print("2nd to last rev filename has osiris in the name, skipping: ({})".format(target_rev_name))
            # print out some debug info so we can figure out why we have
            # multiple revisions with osiris in the name
            pp.pprint(file_meta)
            print(' ')
            pp.pprint(revisions)
            continue
        print("{}: {} revisions found".format(target_rev_name, len(revisions['revisions'])))
        # THESE ARE THE REALLY DANGEROUS STEPS, ONLY UNCOMMMENT IF YOU KNOW WHAT YOU ARE DOING!!!
        rev_id_to_delete = revisions['revisions'][-1]['id']
        print("service.revisions().delete(fileId={}, revisionId={}).execute()".format(file_id, rev_id_to_delete))
        #del_rev = service.revisions().delete(fileId=file_id, revisionId=rev_id_to_delete).execute()
        update_body = { 'name': target_rev_name }
        print("service.files().update(fileId={}, body={}).execute()".format(file_id, update_body))
        #update_name = service.files().update(fileId=file_id, body=update_body).execute()
# Script entry point: run the revision-recovery pass when executed directly.
if __name__ == '__main__':
    main()
|
"""
This script computes bounds on the privacy cost of training the
student model from noisy aggregation of labels predicted by teachers.
It should be used only after training the student (and therefore the
teachers as well). We however include the label files required to
reproduce key results from our paper (https://arxiv.org/abs/1610.05755):
the epsilon bounds for MNIST and SVHN students.
The command that computes the epsilon bound associated
with the training of the MNIST student model (100 label queries
with a (1/20)*2=0.1 epsilon bound each) is:
python analysis.py
--counts_file=mnist_250_teachers_labels.npy
--indices_file=mnist_250_teachers_100_indices_used_by_student.npy
The command that computes the epsilon bound associated
with the training of the SVHN student model (1000 label queries
with a (1/20)*2=0.1 epsilon bound each) is:
python analysis.py
--counts_file=svhn_250_teachers_labels.npy
--max_examples=1000
--delta=1e-6
"""
import os
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.multiple_teachers.input import maybe_download
# Command-line configuration for the privacy-cost analysis.
tf.flags.DEFINE_integer("moments", 8, "Number of moments")
tf.flags.DEFINE_float("noise_eps", 0.1, "Eps value for each call to noisymax.")
tf.flags.DEFINE_float("delta", 1e-5, "Target value of delta.")
tf.flags.DEFINE_float("beta", 0.09, "Value of beta for smooth sensitivity")
tf.flags.DEFINE_string("counts_file", "", "Numpy matrix with raw counts")
tf.flags.DEFINE_string("indices_file", "",
                       "File containting a numpy matrix with indices used."
                       "Optional. Use the first max_examples indices if this is not provided.")
tf.flags.DEFINE_integer("max_examples", 1000,
                        "Number of examples to use. We will use the first"
                        " max_examples many examples from the counts_file"
                        " or indices_file to do the privacy cost estimate")
tf.flags.DEFINE_float("too_small", 1e-10, "Small threshold to avoid log of 0")
tf.flags.DEFINE_bool("input_is_counts", False, "False if labels, True if counts")

FLAGS = tf.flags.FLAGS
def compute_q_noisy_max(counts, noise_eps):
  """Upper-bound Pr[outcome != winner] for the noisy-max mechanism.

  Args:
    counts: a list/array of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: upper bound on the probability that the outcome differs from the
      true winner.
  """
  # For noisy max only an upper bound is available:
  #   Pr[j beats i*] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  winner = np.argmax(counts)
  scaled = noise_eps * (counts - counts[winner])
  q = 0.0
  for idx, value in enumerate(scaled):
    if idx == winner:
      continue
    gap = -value
    q += (gap + 2.0) / (4.0 * math.exp(gap))
  # q can never exceed the trivial bound 1 - 1/k for k outcomes.
  return min(q, 1.0 - (1.0 / len(counts)))
def compute_q_noisy_max_approx(counts, noise_eps):
  """Fast approximate upper bound on Pr[outcome != winner] for noisy max.

  Uses only the single smallest gap (runner-up vs winner), which is faster
  than summing over all losers and easier to bound local sensitivity on.

  Args:
    counts: a list/array of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # Same Laplace tail bound as compute_q_noisy_max:
  #   Pr[j beats i*] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  winner = np.argmax(counts)
  scaled = noise_eps * (counts - counts[winner])
  runner_up_gap = -max(
      value for idx, value in enumerate(scaled) if idx != winner)
  q = (len(counts) - 1) * (runner_up_gap + 2.0) / (4.0 * math.exp(runner_up_gap))
  return min(q, 1.0 - (1.0 / len(counts)))
def logmgf_exact(q, priv_eps, l):
  """Computes the log moment generating function bound given q and eps.

  The returned value is the minimum of three terms:
    1. a Gaussian-style term 0.5 * eps^2 * l * (l + 1);
    2. the data-dependent bound from https://arxiv.org/pdf/1605.02065.pdf,
       which exploits that for q close to 0 the (1 - q) event can only
       change by a small multiplicative factor under an eps change;
    3. the generic eps * l bound implied directly by the privacy guarantee.

  Args:
    q: pr of non-optimal outcome
    priv_eps: eps parameter for DP
    l: moment to compute.
  Returns:
    Upper bound on logmgf
  """
  if q < 0.5:
    stay_term = (1 - q) * math.pow((1 - q) / (1 - math.exp(priv_eps) * q), l)
    switch_term = q * math.exp(priv_eps * l)
    total = stay_term + switch_term
    try:
      log_t = math.log(total)
    except ValueError:
      # total can be non-positive when exp(eps) * q approaches 1.
      print("Got ValueError in math.log for values :" + str((q, priv_eps, l, total)))
      log_t = priv_eps * l
  else:
    # For large q the data-dependent term gives nothing better than eps * l.
    log_t = priv_eps * l
  gaussian_term = 0.5 * priv_eps * priv_eps * l * (l + 1)
  return min(gaussian_term, log_t, priv_eps * l)
def logmgf_from_counts(counts, noise_eps, l):
  """Log-mgf bound for one ReportNoisyMax query over `counts`.

  ReportNoisyMax with parameter noise_eps is 2*noise_eps-DP in our setting,
  where one count can go up by one while another goes down by one.
  """
  q = compute_q_noisy_max(counts, noise_eps)
  return logmgf_exact(q, 2.0 * noise_eps, l)
def sens_at_k(counts, noise_eps, l, k):
  """Return sensitivity of the log-mgf at distance k.

  Args:
    counts: an array of scores
    noise_eps: noise parameter used
    l: moment whose sensitivity is being computed
    k: distance (number of single-vote changes away from `counts`)
  Returns:
    sensitivity: at distance k
  """
  counts_sorted = sorted(counts, reverse=True)
  if 0.5 * noise_eps * l > 1:
    print("l too large to compute sensitivity")
    return 0
  # Now we can assume that at distance k the gap remains positive, or we
  # have reached the point where logmgf_exact is determined by the first
  # term and independent of q.
  # BUG FIX: the guard previously compared the *unsorted* counts[0] and
  # counts[1], while every other step operates on counts_sorted; use the
  # sorted winner/runner-up so the check matches the reshuffling below.
  if counts_sorted[0] < counts_sorted[1] + k:
    return 0
  # Worst case at distance k: move k votes from the winner to the runner-up.
  counts_sorted[0] -= k
  counts_sorted[1] += k
  val = logmgf_from_counts(counts_sorted, noise_eps, l)
  # One additional swapped vote yields the neighbouring database.
  counts_sorted[0] -= 1
  counts_sorted[1] += 1
  val_changed = logmgf_from_counts(counts_sorted, noise_eps, l)
  return val_changed - val
def smoothed_sens(counts, noise_eps, l, beta):
  """Compute the beta-smooth sensitivity of the log-mgf.

  Args:
    counts: array of scores
    noise_eps: noise parameter
    l: moment of interest
    beta: smoothness parameter
  Returns:
    smooth_sensitivity: a beta-smooth upper bound
  """
  best = sens_at_k(counts, noise_eps, l, 0)
  k = 0
  while k < max(counts):
    k += 1
    sens_k = sens_at_k(counts, noise_eps, l, k)
    best = max(best, math.exp(-beta * k) * sens_k)
    # Once the sensitivity hits zero it stays zero for larger k.
    if sens_k == 0.0:
      break
  return best
def main(unused_argv):
  """Compute moments-accountant epsilon bounds for a trained student.

  Reads per-teacher label counts (or raw predictions) from FLAGS.counts_file,
  accumulates the log moment generating function over the queried examples,
  and prints epsilon bounds plus smooth-sensitivity diagnostics.
  """
  ##################################################################
  # If we are reproducing results from paper https://arxiv.org/abs/1610.05755,
  # download the required binaries with label information.
  ##################################################################
  # Binaries for MNIST results
  paper_binaries_mnist = \
      ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_labels.npy?raw=true",
       "https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_100_indices_used_by_student.npy?raw=true"]
  if FLAGS.counts_file == "mnist_250_teachers_labels.npy" \
      or FLAGS.indices_file == "mnist_250_teachers_100_indices_used_by_student.npy":
    maybe_download(paper_binaries_mnist, os.getcwd())
  # Binaries for SVHN results
  paper_binaries_svhn = ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/svhn_250_teachers_labels.npy?raw=true"]
  if FLAGS.counts_file == "svhn_250_teachers_labels.npy":
    maybe_download(paper_binaries_svhn, os.getcwd())
  input_mat = np.load(FLAGS.counts_file)
  if FLAGS.input_is_counts:
    counts_mat = input_mat
  else:
    # In this case, the input is the raw predictions. Transform
    # them into a per-example vote-count matrix.
    # NOTE(review): 10 output classes are hard-coded here — confirm for
    # datasets other than MNIST/SVHN.
    num_teachers, n = input_mat.shape
    counts_mat = np.zeros((n, 10)).astype(np.int32)
    for i in range(n):
      for j in range(num_teachers):
        counts_mat[i, int(input_mat[j, i])] += 1
  n = counts_mat.shape[0]
  num_examples = min(n, FLAGS.max_examples)

  # Without an indices file, use the first num_examples queries.
  if not FLAGS.indices_file:
    indices = np.array(range(num_examples))
  else:
    index_list = np.load(FLAGS.indices_file)
    indices = index_list[:num_examples]

  # Moments 1..FLAGS.moments of the privacy loss random variable.
  l_list = 1.0 + np.array(xrange(FLAGS.moments))
  beta = FLAGS.beta
  total_log_mgf_nm = np.array([0.0 for _ in l_list])
  total_ss_nm = np.array([0.0 for _ in l_list])
  noise_eps = FLAGS.noise_eps

  # Accumulate log-mgf and smooth sensitivity over the queried examples.
  for i in indices:
    total_log_mgf_nm += np.array(
        [logmgf_from_counts(counts_mat[i], noise_eps, l)
         for l in l_list])
    total_ss_nm += np.array(
        [smoothed_sens(counts_mat[i], noise_eps, l, beta)
         for l in l_list])
  delta = FLAGS.delta

  # We want delta = exp(alpha - eps l).
  # Solving gives eps = (alpha - ln (delta))/l
  eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list

  print("Epsilons (Noisy Max): " + str(eps_list_nm))
  print("Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list))

  # If beta < eps / 2 ln (1/delta), then adding noise Lap(1) * 2 SS/eps
  # is eps,delta DP
  # Also if beta < eps / 2(gamma +1), then adding noise 2(gamma+1) SS eta / eps
  # where eta has density proportional to 1 / (1+|z|^gamma) is eps-DP
  # Both from Corolloary 2.4 in
  # http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf
  # Print the first one's scale
  ss_eps = 2.0 * beta * math.log(1/delta)
  ss_scale = 2.0 / ss_eps
  print("To get an " + str(ss_eps) + "-DP estimate of epsilon, ")
  print("..add noise ~ " + str(ss_scale))
  print("... times " + str(total_ss_nm / l_list))
  print("Epsilon = " + str(min(eps_list_nm)) + ".")
  if min(eps_list_nm) == eps_list_nm[-1]:
    print("Warning: May not have used enough values of l")

  # Data independent bound, as mechanism is
  # 2*noise_eps DP.
  data_ind_log_mgf = np.array([0.0 for _ in l_list])
  data_ind_log_mgf += num_examples * np.array(
      [logmgf_exact(1.0, 2.0 * noise_eps, l) for l in l_list])

  data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list
  print("Data independent bound = " + str(min(data_ind_eps_list)) + ".")

  return
# tf.app.run parses the flags above and then invokes main(unused_argv).
if __name__ == "__main__":
  tf.app.run()
|
from nova import flags
from nova import log as logging
from nova import utils
from nova.network import linux_net
from nova.openstack.common import cfg
from ryu.app.client import OFPClient
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)

# REST endpoint of the Ryu OpenFlow controller used by linux_net.
ryu_linux_net_opt = cfg.StrOpt('linuxnet_ovs_ryu_api_host',
                               default='127.0.0.1:8080',
                               help='Openflow Ryu REST API host:port')

FLAGS = flags.FLAGS
# Register the option so FLAGS.linuxnet_ovs_ryu_api_host is usable below.
FLAGS.register_opt(ryu_linux_net_opt)
def _get_datapath_id(bridge_name):
    """Return the OVS datapath id of `bridge_name` as a plain string."""
    stdout, _stderr = utils.execute('ovs-vsctl', 'get', 'Bridge',
                                    bridge_name, 'datapath_id', run_as_root=True)
    # ovs-vsctl wraps the id in double quotes; drop newline then quotes.
    return stdout.strip().strip('"')
def _get_port_no(dev):
    """Return the OpenFlow port number of interface `dev` as an int."""
    stdout, _stderr = utils.execute('ovs-vsctl', 'get', 'Interface', dev,
                                    'ofport', run_as_root=True)
    return int(stdout.strip())
class LinuxOVSRyuInterfaceDriver(linux_net.LinuxOVSInterfaceDriver):
    """OVS interface driver that also registers ports with a Ryu controller."""

    def __init__(self):
        super(LinuxOVSRyuInterfaceDriver, self).__init__()

        LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
        self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
        self.datapath_id = _get_datapath_id(
            FLAGS.linuxnet_ovs_integration_bridge)

        # Only when running inside nova-network: drop forwarding between
        # gateway interfaces (gw-+) for both IPv4 and IPv6.
        if linux_net.binary_name == 'nova-network':
            for tables in [linux_net.iptables_manager.ipv4,
                           linux_net.iptables_manager.ipv6]:
                tables['filter'].add_rule('FORWARD',
                                          '--in-interface gw-+ --out-interface gw-+ -j DROP')
            linux_net.iptables_manager.apply()

    def plug(self, network, mac_address, gateway=True):
        """Plug the vif as usual, then announce the new port to Ryu."""
        LOG.debug("network %s mac_adress %s gateway %s",
                  network, mac_address, gateway)
        ret = super(LinuxOVSRyuInterfaceDriver, self).plug(
            network, mac_address, gateway)
        # Look up the OpenFlow port number just created and register it.
        port_no = _get_port_no(self.get_dev(network))
        self.ryu_client.create_port(network['uuid'], self.datapath_id, port_no)
        return ret
|
from random import choice
from suds import *
from ws import *
import time
def load_kam(client, kam_name):
'''
Loads a KAM by name. This function will sleep until the KAM's
loadStatus is 'COMPLETE'.
'''
def call():
'''
Load the KAM and return result. Exit with error if 'loadStatus'
is FAILED.
'''
kam = client.create('Kam')
kam.name = kam_name
result = client.service.LoadKam(kam)
status = result['loadStatus']
if status == 'FAILED':
print 'FAILED!'
print sys.exc_info()[1]
exit_failure()
return result
# load kam and wait for completion
result = call()
while result['loadStatus'] != 'COMPLETE':
time.sleep(0.5)
result = call()
return result['handle']
if __name__ == '__main__':
from sys import argv, exit, stderr
if len(argv) != 3:
msg = 'usage: find-kam-node.py <kam name> <source_bel_term>\n'
stderr.write(msg)
exit(1)
# unpack command-line arguments; except the first script name argument
(kam_name, source_term) = argv[1:]
client = WS('http://localhost:8080/openbel-ws/belframework.wsdl')
handle = load_kam(client, kam_name)
print "loaded kam '%s', handle '%s'" % (kam_name, handle.handle)
# create nodes using BEL term labels from command-line
node = client.create("Node")
node.label = source_term
# resolve node
result = client.service.ResolveNodes(handle, [node], None)
if len(result) == 1 and result[0]:
the_node = result[0]
print "found node, id: %s" % (the_node.id)
terms = client.service.GetSupportingTerms(the_node, None)
for t in terms:
print t
else:
print "edge not found"
exit_success()
|
"""
Weather component that handles meteorological data for your location.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/weather/
"""
import asyncio
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.const import PRECISION_WHOLE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

DEPENDENCIES = []

# Component domain and the entity_id template derived from it.
DOMAIN = 'weather'

ENTITY_ID_FORMAT = DOMAIN + '.{}'

ATTR_CONDITION_CLASS = 'condition_class'
# Keys used inside each forecast entry dict.
ATTR_FORECAST = 'forecast'
ATTR_FORECAST_CONDITION = 'condition'
ATTR_FORECAST_PRECIPITATION = 'precipitation'
ATTR_FORECAST_TEMP = 'temperature'
ATTR_FORECAST_TEMP_LOW = 'templow'
ATTR_FORECAST_TIME = 'datetime'
# Keys exposed in the entity's state attributes.
ATTR_WEATHER_ATTRIBUTION = 'attribution'
ATTR_WEATHER_HUMIDITY = 'humidity'
ATTR_WEATHER_OZONE = 'ozone'
ATTR_WEATHER_PRESSURE = 'pressure'
ATTR_WEATHER_TEMPERATURE = 'temperature'
ATTR_WEATHER_VISIBILITY = 'visibility'
ATTR_WEATHER_WIND_BEARING = 'wind_bearing'
ATTR_WEATHER_WIND_SPEED = 'wind_speed'
@asyncio.coroutine
def async_setup(hass, config):
    """Set up the weather component."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    # Delegate platform discovery and entity setup to the shared helper.
    yield from component.async_setup(config)
    return True
class WeatherEntity(Entity):
    """ABC for weather data.

    Platforms must implement `temperature`, `temperature_unit`, `humidity`
    and `condition`; the remaining properties are optional and default to
    None (omitted from the state attributes).
    """

    @property
    def temperature(self):
        """Return the platform temperature."""
        raise NotImplementedError()

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        raise NotImplementedError()

    @property
    def pressure(self):
        """Return the pressure."""
        return None

    @property
    def humidity(self):
        """Return the humidity."""
        raise NotImplementedError()

    @property
    def wind_speed(self):
        """Return the wind speed."""
        return None

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return None

    @property
    def ozone(self):
        """Return the ozone level."""
        return None

    @property
    def attribution(self):
        """Return the attribution."""
        return None

    @property
    def visibility(self):
        """Return the visibility."""
        return None

    @property
    def forecast(self):
        """Return the forecast."""
        return None

    @property
    def precision(self):
        """Return the precision used to display temperatures."""
        # Celsius platforms report tenths; everything else whole degrees.
        return PRECISION_TENTHS if self.temperature_unit == TEMP_CELSIUS \
            else PRECISION_WHOLE

    @property
    def state_attributes(self):
        """Return the state attributes."""
        # Temperature is always present, converted for display.
        data = {
            ATTR_WEATHER_TEMPERATURE: show_temp(
                self.hass, self.temperature, self.temperature_unit,
                self.precision),
        }

        # Optional readings are exposed only when the platform provides them.
        humidity = self.humidity
        if humidity is not None:
            data[ATTR_WEATHER_HUMIDITY] = round(humidity)

        ozone = self.ozone
        if ozone is not None:
            data[ATTR_WEATHER_OZONE] = ozone

        pressure = self.pressure
        if pressure is not None:
            data[ATTR_WEATHER_PRESSURE] = pressure

        wind_bearing = self.wind_bearing
        if wind_bearing is not None:
            data[ATTR_WEATHER_WIND_BEARING] = wind_bearing

        wind_speed = self.wind_speed
        if wind_speed is not None:
            data[ATTR_WEATHER_WIND_SPEED] = wind_speed

        visibility = self.visibility
        if visibility is not None:
            data[ATTR_WEATHER_VISIBILITY] = visibility

        attribution = self.attribution
        if attribution is not None:
            data[ATTR_WEATHER_ATTRIBUTION] = attribution

        if self.forecast is not None:
            forecast = []
            for forecast_entry in self.forecast:
                # Copy so the platform's own forecast dicts are not mutated.
                forecast_entry = dict(forecast_entry)
                forecast_entry[ATTR_FORECAST_TEMP] = show_temp(
                    self.hass, forecast_entry[ATTR_FORECAST_TEMP],
                    self.temperature_unit, self.precision)
                if ATTR_FORECAST_TEMP_LOW in forecast_entry:
                    forecast_entry[ATTR_FORECAST_TEMP_LOW] = show_temp(
                        self.hass, forecast_entry[ATTR_FORECAST_TEMP_LOW],
                        self.temperature_unit, self.precision)
                forecast.append(forecast_entry)

            data[ATTR_FORECAST] = forecast

        return data

    @property
    def state(self):
        """Return the current state."""
        return self.condition

    @property
    def condition(self):
        """Return the current condition."""
        raise NotImplementedError()
|
import argparse
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
@python_app(cache=True)
def random_uuid(x, cache=True):
    """Return a fresh random UUID string; `x` only feeds the memoizer key."""
    import uuid
    fresh = uuid.uuid4()
    return str(fresh)
def test_python_memoization(n=2):
    """Testing python memoization disable
    """
    baseline = random_uuid(0)
    print(baseline.result())

    # Every repeat call with the same argument must hit the memoizer.
    for _ in range(n):
        repeat = random_uuid(0)
        print(repeat.result())
        assert repeat.result() == baseline.result(), "Memoized results were not used"
if __name__ == '__main__':
    # Reset and load the thread-local parsl configuration before running.
    parsl.clear()
    parsl.load(config)

    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--count", default="10",
                        help="Count of apps to launch")
    # NOTE(review): the help string below duplicates --count's; it likely
    # should describe debug logging instead.
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Count of apps to launch")
    args = parser.parse_args()

    if args.debug:
        parsl.set_stream_logger()

    x = test_python_memoization(n=4)
|
import argparse
import logging
import os
import sys
from typing import List, Union
import numpy as np
from ludwig.api import LudwigModel
from ludwig.backend import ALL_BACKENDS, LOCAL, Backend
from ludwig.constants import FULL, TEST, TRAINING, VALIDATION
from ludwig.contrib import contrib_command
from ludwig.globals import LUDWIG_VERSION
from ludwig.utils.print_utils import (logging_level_registry, print_boxed,
print_ludwig)
from ludwig.utils.strings_utils import make_safe_filename
logger = logging.getLogger(__name__)
def collect_activations(
        model_path: str,
        layers: List[str],
        dataset: str,
        data_format: str = None,
        split: str = FULL,
        batch_size: int = 128,
        output_directory: str = 'results',
        gpus: List[str] = None,
        gpu_memory_limit: int = None,
        allow_parallel_threads: bool = True,
        backend: Union[Backend, str] = None,
        debug: bool = False,
        **kwargs
) -> List[str]:
    """
    Uses the pretrained model to collect the tensors corresponding to a
    datapoint in the dataset. Saves the tensors to the experiment directory

    # Inputs

    :param model_path: (str) filepath to pre-trained model.
    :param layers: (List[str]) list of strings for layer names in the model
        to collect activations.
    :param dataset: (str) source
        containing the data to make predictions.
    :param data_format: (str, default: `None`) format to interpret data
        sources. Will be inferred automatically if not specified. Valid
        formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
        `'fwf'`, `'hdf5'` (cache file produced during previous training),
        `'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param split: (str, default: `full`) split on which
        to perform predictions. Valid values are `'training'`, `'validation'`,
        `'test'` and `'full'`.
    :param batch_size: (int, default `128`) size of batches for processing.
    :param output_directory: (str, default: `'results'`) the directory that
        will contain the training statistics, TensorBoard logs, the saved
        model and the training progress files.
    :param gpus: (list, default: `None`) list of GPUs that are available
        for training.
    :param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
        allocate per GPU device.
    :param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
        to use multithreading parallelism to improve performance at
        the cost of determinism.
    :param backend: (Union[Backend, str]) `Backend` or string name
        of backend to use to execute preprocessing / training steps.
    :param debug: (bool, default: `False) if `True` turns on `tfdbg` with
        `inf_or_nan` checks.

    # Return

    :return: (List[str]) list of filepath to `*.npy` files containing
        the activations.
    """
    logger.info('Dataset path: {}'.format(dataset))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Output path: {}'.format(output_directory))
    logger.info('\n')

    model = LudwigModel.load(
        model_path,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        backend=backend
    )

    # collect activations
    print_boxed('COLLECT ACTIVATIONS')
    collected_tensors = model.collect_activations(
        layers,
        dataset,
        data_format=data_format,
        split=split,
        batch_size=batch_size,
        debug=debug
    )

    # saving one .npy file per collected tensor
    os.makedirs(output_directory, exist_ok=True)
    saved_filenames = save_tensors(collected_tensors, output_directory)

    logger.info('Saved to: {0}'.format(output_directory))
    return saved_filenames
def collect_weights(
        model_path: str,
        tensors: List[str],
        output_directory: str = 'results',
        debug: bool = False,
        **kwargs
) -> List[str]:
    """
    Loads a pretrained model and collects weights.

    # Inputs
    :param model_path: (str) filepath to pre-trained model.
    :param tensors: (list, default: `None`) List of tensor names to collect
        weights
    :param output_directory: (str, default: `'results'`) the directory where
        collected weights will be stored.
    :param debug: (bool, default: `False) if `True` turns on `tfdbg` with
        `inf_or_nan` checks.

    # Return

    :return: (List[str]) list of filepath to `*.npy` files containing
        the weights.
    """
    logger.info('Model path: {}'.format(model_path))
    logger.info('Output path: {}'.format(output_directory))
    logger.info('\n')

    # NOTE(review): `debug` is accepted but not used in this function —
    # confirm whether it should be forwarded to the model.
    model = LudwigModel.load(model_path)

    # collect weights
    print_boxed('COLLECT WEIGHTS')
    collected_tensors = model.collect_weights(tensors)

    # saving one .npy file per collected tensor
    os.makedirs(output_directory, exist_ok=True)
    saved_filenames = save_tensors(collected_tensors, output_directory)

    logger.info('Saved to: {0}'.format(output_directory))
    return saved_filenames
def save_tensors(collected_tensors, output_directory):
    """Write each (name, tensor) pair to `<output_directory>/<safe_name>.npy`.

    Returns the list of file paths written, in input order.
    """
    saved_paths = []
    for tensor_name, tensor_value in collected_tensors:
        target = os.path.join(
            output_directory,
            make_safe_filename(tensor_name) + '.npy'
        )
        np.save(target, tensor_value.numpy())
        saved_paths.append(target)
    return saved_paths
def print_model_summary(
        model_path: str,
        **kwargs
) -> None:
    """
    Loads a pretrained model and prints names of weights and layers activations.

    # Inputs
    :param model_path: (str) filepath to pre-trained model.

    # Return
    :return: (`None`)
    """
    model = LudwigModel.load(model_path)
    # Weight names come from collecting all weights (no filter argument).
    collected_tensors = model.collect_weights()
    names = [name for name, w in collected_tensors]

    # Build the connected Keras model to obtain layer-level structure.
    keras_model = model.model.get_connected_model(training=False)
    keras_model.summary()

    print('\nLayers:\n')
    for layer in keras_model.layers:
        print(layer.name)

    print('\nWeights:\n')
    for name in names:
        print(name)
def cli_collect_activations(sys_argv):
    """Command Line Interface to communicate with the collection of tensors and
    there are several options that can specified when calling this function:

    --data_csv: Filepath for the input csv
    --data_hdf5: Filepath for the input hdf5 file, if there is a csv file, this
        is not read
    --d: Refers to the dataset type of the file being read, by default is
        *generic*
    --s: Refers to the split of the data, can be one of: train, test,
        validation, full
    --m: Input model that is necessary to collect to the tensors, this is a
        required *option*
    --t: Tensors to collect
    --od: Output directory of the model, defaults to results
    --bs: Batch size
    --g: Number of gpus that are to be used
    --gf: Fraction of each GPUs memory to use.
    --dbg: Debug if the model is to be started with python debugger
    --v: Verbose: Defines the logging level that the user will be exposed to
    """
    parser = argparse.ArgumentParser(
        description='This script loads a pretrained model and uses it collect '
                    'tensors for each datapoint in the dataset.',
        prog='ludwig collect_activations',
        usage='%(prog)s [options]')

    # ---------------
    # Data parameters
    # ---------------
    parser.add_argument(
        '--dataset',
        help='input data file path',
        required=True
    )
    parser.add_argument(
        '--data_format',
        help='format of the input data',
        default='auto',
        # BUG FIX: "'html' 'tables'" lacked a comma, so implicit string
        # concatenation produced the single bogus choice 'htmltables' and
        # neither 'html' nor 'tables' was accepted.
        choices=['auto', 'csv', 'excel', 'feather', 'fwf', 'hdf5',
                 'html', 'tables', 'json', 'jsonl', 'parquet', 'pickle', 'sas',
                 'spss', 'stata', 'tsv']
    )
    parser.add_argument(
        '-s',
        '--split',
        default=FULL,
        choices=[TRAINING, VALIDATION, TEST, FULL],
        help='the split to obtain the model activations from'
    )

    # ----------------
    # Model parameters
    # ----------------
    parser.add_argument(
        '-m',
        '--model_path',
        help='model to load',
        required=True
    )
    parser.add_argument(
        '-lyr',
        '--layers',
        help='tensors to collect',
        nargs='+',
        required=True
    )

    # -------------------------
    # Output results parameters
    # -------------------------
    parser.add_argument(
        '-od',
        '--output_directory',
        type=str,
        default='results',
        help='directory that contains the results'
    )

    # ------------------
    # Generic parameters
    # ------------------
    parser.add_argument(
        '-bs',
        '--batch_size',
        type=int,
        default=128,
        help='size of batches'
    )

    # ------------------
    # Runtime parameters
    # ------------------
    # NOTE(review): collect_activations() documents `gpus` as a list of
    # strings, while this flag parses a single int defaulting to 0 — confirm
    # the intended type before changing the CLI surface.
    parser.add_argument(
        '-g',
        '--gpus',
        type=int,
        default=0,
        help='list of gpu to use'
    )
    parser.add_argument(
        '-gml',
        '--gpu_memory_limit',
        type=int,
        default=None,
        help='maximum memory in MB to allocate per GPU device'
    )
    parser.add_argument(
        '-dpt',
        '--disable_parallel_threads',
        action='store_false',
        dest='allow_parallel_threads',
        help='disable TensorFlow from using multithreading for reproducibility'
    )
    parser.add_argument(
        "-b",
        "--backend",
        help='specifies backend to use for parallel / distributed execution, '
             'defaults to local execution or Horovod if called using horovodrun',
        choices=ALL_BACKENDS,
    )
    parser.add_argument(
        '-dbg',
        '--debug',
        action='store_true',
        default=False,
        help='enables debugging mode'
    )
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
    )

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(
        args.logging_level
    )
    global logger
    logger = logging.getLogger('ludwig.collect')

    print_ludwig('Collect Activations', LUDWIG_VERSION)

    collect_activations(**vars(args))
def cli_collect_weights(sys_argv):
    """Command Line Interface to collecting the weights for the model

    --m: Input model that is necessary to collect to the tensors, this is a
        required *option*
    --t: Tensors to collect
    --od: Output directory of the model, defaults to results
    --dbg: Debug if the model is to be started with python debugger
    --v: Verbose: Defines the logging level that the user will be exposed to
    """
    parser = argparse.ArgumentParser(
        description='This script loads a pretrained model '
                    'and uses it collect weights.',
        prog='ludwig collect_weights',
        usage='%(prog)s [options]'
    )

    # ----------------
    # Model parameters
    # ----------------
    parser.add_argument(
        '-m',
        '--model_path',
        help='model to load',
        required=True
    )
    parser.add_argument(
        '-t',
        '--tensors',
        help='tensors to collect',
        nargs='+',
        required=True
    )

    # -------------------------
    # Output results parameters
    # -------------------------
    parser.add_argument(
        '-od',
        '--output_directory',
        type=str,
        default='results',
        help='directory that contains the results'
    )

    # ------------------
    # Runtime parameters
    # ------------------
    parser.add_argument(
        '-dbg',
        '--debug',
        action='store_true',
        default=False,
        help='enables debugging mode'
    )
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
    )

    args = parser.parse_args(sys_argv)

    # Map the textual level to the numeric one and configure ludwig's logger.
    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(
        args.logging_level
    )
    global logger
    logger = logging.getLogger('ludwig.collect')

    print_ludwig('Collect Weights', LUDWIG_VERSION)

    collect_weights(**vars(args))
def cli_collect_summary(sys_argv):
    """Command Line Interface to collecting a summary of the model layers and weights.

    --m: Input model that is necessary to collect to the tensors, this is a
        required *option*
    --v: Verbose: Defines the logging level that the user will be exposed to
    """
    parser = argparse.ArgumentParser(
        description='This script loads a pretrained model '
                    'and prints names of weights and layers activations '
                    'to use with other collect commands',
        prog='ludwig collect_summary',
        usage='%(prog)s [options]'
    )

    # ----------------
    # Model parameters
    # ----------------
    parser.add_argument(
        '-m',
        '--model_path',
        help='model to load',
        required=True
    )

    # ------------------
    # Runtime parameters
    # ------------------
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
    )

    args = parser.parse_args(sys_argv)

    # Map the textual level to the numeric one and configure ludwig's logger.
    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(
        args.logging_level
    )
    global logger
    logger = logging.getLogger('ludwig.collect')

    print_ludwig('Collect Summary', LUDWIG_VERSION)

    print_model_summary(**vars(args))
if __name__ == '__main__':
    # Dispatch on the sub-command name; contrib hooks are notified first.
    if len(sys.argv) > 1:
        if sys.argv[1] == 'activations':
            contrib_command("collect_activations", *sys.argv)
            cli_collect_activations(sys.argv[2:])
        elif sys.argv[1] == 'weights':
            contrib_command("collect_weights", *sys.argv)
            cli_collect_weights(sys.argv[2:])
        elif sys.argv[1] == 'names':
            contrib_command("collect_summary", *sys.argv)
            cli_collect_summary(sys.argv[2:])
        else:
            print('Unrecognized command')
    else:
        print('Unrecognized command')
|
"""Formatter for Android contacts2.db database events."""
from plaso.lib import eventdata
class AndroidCallFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for Android call history events."""

  # Event data type this formatter handles.
  DATA_TYPE = 'android:event:call'

  # Conditional pieces: each is emitted only if its attribute is present.
  FORMAT_STRING_PIECES = [
      u'{call_type}',
      u'Number: {number}',
      u'Name: {name}',
      u'Duration: {duration} seconds']

  FORMAT_STRING_SHORT_PIECES = [u'{call_type} Call']

  # Source descriptions shown in output.
  SOURCE_LONG = 'Android Call History'
  SOURCE_SHORT = 'LOG'
|
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'with_shape',
'with_same_shape']
def _assert_same_base_type(items, expected_type=None):
  r"""Asserts all items are of the same base type.

  Args:
    items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
        `Operation`, or `IndexedSlices`). Can include `None` elements, which
        will be ignored.
    expected_type: Expected type. If not specified, assert all items are
        of the same base type.
  Returns:
    Validated type, or none if neither expected_type nor items provided.
  Raises:
    ValueError: If any types do not match.
  """
  first_item_str = None
  for item in items:
    if item is None:
      continue
    base = item.dtype.base_dtype
    if not expected_type:
      # First typed item seen defines the expected type.
      expected_type = base
      first_item_str = item.name if hasattr(item, 'name') else str(item)
    elif expected_type != base:
      label = item.name if hasattr(item, 'name') else str(item)
      suffix = (' as %s' % first_item_str) if first_item_str else ''
      raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
          label, base, expected_type, suffix))
  return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
    """Validate and return float type based on `tensors` and `dtype`.

    For ops such as matrix multiplication, inputs and weights must be of the
    same float type. This function validates that all `tensors` are the same
    type, validates that type is `dtype` (if supplied), and returns the type.
    Type must be `dtypes.float32` or `dtypes.float64`. If neither `tensors`
    nor `dtype` is supplied, default to `dtypes.float32`.

    Args:
        tensors: Tensors of input values. Can include `None` elements, which
            will be ignored.
        dtype: Expected type.

    Returns:
        Validated type.

    Raises:
        ValueError: if neither `tensors` nor `dtype` is supplied, or result is
            not float.
    """
    if tensors:
        dtype = _assert_same_base_type(tensors, dtype)
    if not dtype:
        # Nothing constrained the type: fall back to the default float type.
        return dtypes.float32
    if not dtype.is_floating:
        raise ValueError('Expected float, got %s.' % dtype)
    return dtype
def assert_scalar_int(tensor):
    """Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.

    Args:
        tensor: Tensor to test.

    Returns:
        `tensor`, for chaining.

    Raises:
        ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
    """
    # Check dtype first, then rank; both checks are static (graph-build time).
    if tensor.dtype.base_dtype not in (dtypes.int32, dtypes.int64):
        raise ValueError('Unexpected type %s for %s.' % (tensor.dtype, tensor.name))
    shape = tensor.get_shape()
    if shape.ndims != 0:
        raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
    return tensor
def reduce_sum_n(tensors, name=None):
    """Reduce tensors to a scalar sum.

    This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`,
    then adds them via `tf.add_n`.

    Args:
        tensors: List of tensors, all of the same numeric type.
        name: Tensor name, and scope for all other ops.

    Returns:
        Total loss tensor, or None if no losses have been configured.

    Raises:
        ValueError: if `losses` is missing or empty.
    """
    if not tensors:
        raise ValueError('No tensors provided.')
    # Collapse each tensor to a scalar before summing across tensors.
    summed = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
    if len(summed) == 1:
        # Single input: no add_n (or extra name scope) needed.
        return summed[0]
    with ops.name_scope(name, 'reduce_sum_n', summed) as scope:
        return math_ops.add_n(summed, name=scope)
def _all_equal(tensor0, tensor1):
    """Returns a scalar bool tensor that is True iff all elements are equal."""
    with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
        # Element-wise equality, reduced over every axis to a single boolean.
        return math_ops.reduce_all(
            math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
    """Returns whether actual_tensor's rank is expected_rank.

    Args:
        expected_rank: Integer defining the expected rank, or tensor of same.
        actual_tensor: Tensor to test.
    Returns:
        New tensor (scalar bool).
    """
    with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
        expected = ops.convert_to_tensor(expected_rank, name='expected')
        # Rank is computed dynamically so this works for partially-known shapes.
        actual = array_ops.rank(actual_tensor, name='actual')
        return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
    """Returns whether actual_tensor's shape is expected_shape.

    Args:
        expected_shape: Integer list defining the expected shape, or tensor of
            same.
        actual_tensor: Tensor to test.
        actual_shape: Shape of actual_tensor, if we already have it.
    Returns:
        New tensor (scalar bool).
    """
    with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
        # Rank must match first; comparing shape vectors of different lengths
        # would be meaningless.
        is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
        if actual_shape is None:
            actual_shape = array_ops.shape(actual_tensor, name='actual')
        shape_equal = _all_equal(
            ops.convert_to_tensor(expected_shape, name='expected'),
            actual_shape)
        return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
    """Asserts actual_tensor's shape is expected_shape.

    Args:
        expected_shape: List of integers defining the expected shape, or tensor
            of same.
        actual_tensor: Tensor to test.
    Returns:
        New assert tensor (fails at run time if the shape does not match).
    """
    with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
        actual_shape = array_ops.shape(actual_tensor, name='actual')
        is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
        # The Assert op evaluates is_shape at run time and raises with the
        # message + both shapes when it is False.
        return control_flow_ops.Assert(
            is_shape, [
                'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
                expected_shape,
                actual_shape
            ], name=scope)
def with_same_shape(expected_tensor, tensor):
    """Assert tensors are the same shape, from the same graph.

    Args:
        expected_tensor: Tensor with expected shape.
        tensor: Tensor of actual values.
    Returns:
        Tuple of (actual_tensor, label_tensor), possibly with assert ops added.
    """
    with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
        tensor_shape = expected_tensor.get_shape()
        # Prefer the statically-known shape; fall back to a dynamic shape
        # tensor when it is not fully defined at graph-build time.
        expected_shape = (
            tensor_shape.as_list() if tensor_shape.is_fully_defined()
            else array_ops.shape(expected_tensor, name='expected_shape'))
        return with_shape(expected_shape, tensor)
def is_tensor(x):
    """Check for tensor types.

    Check whether an object is a tensor. Equivalent to
    `isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.

    Args:
        x: An python object to check.

    Returns:
        `True` if `x` is a tensor, `False` if not.
    """
    return isinstance(x, (ops.Tensor, ops.SparseTensor, variables.Variable))
def with_shape(expected_shape, tensor):
    """Asserts tensor has expected shape.

    If tensor shape and expected_shape, are fully defined, assert they match.
    Otherwise, add assert op that will validate the shape when tensor is
    evaluated, and set shape on tensor.

    Args:
        expected_shape: Expected shape to assert, as a 1D array of ints, or
            tensor of same.
        tensor: Tensor whose shape we're validating.
    Returns:
        tensor, perhaps with a dependent assert operation.
    Raises:
        ValueError: if tensor has an invalid shape.
    """
    if isinstance(tensor, ops.SparseTensor):
        raise ValueError('SparseTensor not supported.')

    # Shape type must be 1D int32.
    if is_tensor(expected_shape):
        if expected_shape.dtype.base_dtype != dtypes.int32:
            raise ValueError(
                'Invalid dtype %s for shape %s expected of tensor %s.' % (
                    expected_shape.dtype, expected_shape, tensor.name))
    if isinstance(expected_shape, (list, tuple)):
        if not expected_shape:
            expected_shape = np.asarray([], dtype=np.int32)
        else:
            # Normalize int64 shape lists to int32; other dtypes pass through
            # and are validated by the ndarray checks below.
            np_expected_shape = np.asarray(expected_shape)
            expected_shape = (
                np.asarray(expected_shape, dtype=np.int32)
                if np_expected_shape.dtype == np.int64 else np_expected_shape)
    if isinstance(expected_shape, np.ndarray):
        if expected_shape.ndim > 1:
            raise ValueError(
                'Invalid rank %s for shape %s expected of tensor %s.' % (
                    expected_shape.ndim, expected_shape, tensor.name))
        if expected_shape.dtype != np.int32:
            raise ValueError(
                'Invalid dtype %s for shape %s expected of tensor %s.' % (
                    expected_shape.dtype, expected_shape, tensor.name))

    actual_shape = tensor.get_shape()

    # Dynamic case: shape unknown at graph-build time (or expected shape is a
    # tensor) -- attach a runtime assert instead of checking statically.
    if not actual_shape.is_fully_defined() or is_tensor(expected_shape):
        with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
            if not is_tensor(expected_shape) and (len(expected_shape) < 1):
                # TODO(irving): Remove scalar special case
                return array_ops.reshape(tensor, [])
            with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
                result = array_ops.identity(tensor)
            if not is_tensor(expected_shape):
                result.set_shape(expected_shape)
            return result

    # Static case: both shapes fully known -- validate immediately.
    if (not is_tensor(expected_shape) and
            not actual_shape.is_compatible_with(expected_shape)):
        if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
            # TODO(irving): Remove scalar special case.
            with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
                return array_ops.reshape(tensor, [])
        raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
            tensor.name, expected_shape, actual_shape))

    return tensor
def convert_to_tensor_or_sparse_tensor(
        value, dtype=None, name=None, as_ref=False):
    """Converts value to a `SparseTensor` or `Tensor`.

    Args:
        value: A `SparseTensor`, `SparseTensorValue`, or an object whose type
            has a registered `Tensor` conversion function.
        dtype: Optional element type for the returned tensor. If missing, the
            type is inferred from the type of `value`.
        name: Optional name to use if a new `Tensor` is created.
        as_ref: True if we want the result as a ref tensor. Only used if a new
            `Tensor` is created.

    Returns:
        A `SparseTensor` or `Tensor` based on `value`.

    Raises:
        RuntimeError: If result type is incompatible with `dtype`.
    """
    if dtype is not None:
        dtype = dtypes.as_dtype(dtype)
    if isinstance(value, ops.SparseTensorValue):
        value = ops.SparseTensor.from_value(value)
    if isinstance(value, ops.SparseTensor):
        # Sparse tensors are returned as-is; only the dtype is validated.
        if dtype and not dtype.is_compatible_with(value.dtype):
            raise RuntimeError(
                'Sparse dtype: requested = %s, actual = %s' % (
                    dtype.name, value.dtype.name))
        return value
    return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
|
import os
import unittest
import synapse
import synapse.lib.datfile as s_datfile
from synapse.tests.common import *
syndir = os.path.dirname(synapse.__file__)
class DatFileTest(SynTest):
    """Tests for synapse.lib.datfile data-file helpers."""

    def test_datfile_basic(self):
        # openDatFile resolves a data file shipped inside a package
        # (here: synapse/tests/test.dat) and yields an open file object.
        with s_datfile.openDatFile('synapse.tests/test.dat') as fd:
            self.nn(fd)
            self.eq(fd.read(), b'woot\n')
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from working_waterfronts.working_waterfronts_api.models import Video
from django.contrib.auth.models import User
class EditVideoTestCase(TestCase):
    """
    Test that the Edit Video page works as expected.

    Things tested:
        URLs reverse correctly
        The outputted page has the correct form fields
        POSTing "correct" data will result in the update of the video
        object with the specified ID
    """
    # Database fixtures loaded before each test (provides Video id 1 and 2).
    fixtures = ['test_fixtures']

    def setUp(self):
        # All edit views require an authenticated session.
        user = User.objects.create_user(
            'temporary', 'temporary@gmail.com', 'temporary')
        user.save()
        response = self.client.login(
            username='temporary', password='temporary')
        self.assertEqual(response, True)

    def test_not_logged_in(self):
        """An anonymous request must be redirected to the login page."""
        self.client.logout()
        response = self.client.get(
            reverse('edit-video', kwargs={'id': '1'}))
        self.assertRedirects(response, '/login?next=/entry/videos/1')

    def test_url_endpoint(self):
        """The named URL must reverse to the expected path."""
        url = reverse('edit-video', kwargs={'id': '1'})
        self.assertEqual(url, '/entry/videos/1')

    def test_successful_video_update(self):
        """
        POST a proper "update video" command to the server, and see if
        the update appears in the database
        """
        # Data that we'll post to the server to get the new video created
        new_video = {
            'caption': "A thrilling display of utmost might",
            'name': "You won't believe number 3!",
            'video': 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'}
        self.client.post(
            reverse('edit-video', kwargs={'id': '1'}),
            new_video)
        # Every posted field must now be stored on the Video row.
        video = Video.objects.get(id=1)
        for field in new_video:
            self.assertEqual(
                getattr(video, field), new_video[field])

    def test_form_fields(self):
        """
        Tests to see if the form contains all of the right fields
        """
        response = self.client.get(
            reverse('edit-video', kwargs={'id': '1'}))
        # Expected initial values come from the loaded fixture.
        fields = {
            'name': 'A Starship',
            'caption': "Traveling at the speed of light!",
            'video': 'http://www.youtube.com/watch?v=efgDdSWDg0g'
        }
        form = response.context['video_form']
        for field in fields:
            self.assertEqual(fields[field], form[field].value())

    def test_delete_video(self):
        """
        Tests that DELETing entry/videos/<id> deletes the item
        """
        response = self.client.delete(
            reverse('edit-video', kwargs={'id': '2'}))
        self.assertEqual(response.status_code, 200)
        with self.assertRaises(Video.DoesNotExist):
            Video.objects.get(id=2)
        # Deleting the same id again must 404 (already gone).
        response = self.client.delete(
            reverse('edit-video', kwargs={'id': '2'}))
        self.assertEqual(response.status_code, 404)
|
from textwrap import dedent
import pytest
import salt.modules.pdbedit as pdbedit
from tests.support.mock import MagicMock, patch
@pytest.fixture(autouse=True)
def setup_loader(request):
    """Wire the salt loader dunders (__salt__ etc.) into pdbedit for every test."""
    setup_loader_modules = {pdbedit: {}}
    with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
        yield loader_mock
@pytest.mark.parametrize("verbose", [True, False])
def test_when_no_users_returned_no_data_should_be_returned(verbose):
    """Empty pdbedit output yields {} in verbose mode, [] otherwise."""
    expected_users = {} if verbose else []
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": "", "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_users = pdbedit.list_users(verbose=verbose)
        assert actual_users == expected_users
def test_when_verbose_and_retcode_is_nonzero_output_should_be_had():
    """A failing pdbedit command must log its stderr via log.error."""
    expected_stderr = "this is something fnord"
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": "", "stderr": expected_stderr, "retcode": 1}
            )
        },
    ), patch("salt.modules.pdbedit.log.error", autospec=True) as fake_error_log:
        pdbedit.list_users(verbose=True)
        # First positional argument of the first log.error call is the stderr.
        actual_error = fake_error_log.mock_calls[0].args[0]
        assert actual_error == expected_stderr
def test_when_verbose_and_single_good_output_expected_data_should_be_parsed():
    """One well-formed pdbedit record parses into lower-cased key/value pairs."""
    expected_data = {
        "roscivs": {
            "unix username": "roscivs",
            "nt username": "bottia",
            "full name": "Roscivs Bottia",
            "user sid": "42",
            "primary group sid": "99",
            "home directory": r"\\samba\roscivs",
            "account desc": "separators! xxx so long and thanks for all the fish",
            "logoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
            "kickoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
            "password must change": "never",
        }
    }
    # Raw pdbedit -Lv style output; keys should be case-normalised by the parser.
    pdb_output = dedent(
        r"""
        Unix username: roscivs
        NT username: bottia
        User SID: 42
        Primary Group SID: 99
        Full Name: Roscivs Bottia
        Home Directory: \\samba\roscivs
        Account desc: separators! xxx so long and thanks for all the fish
        Logoff time: Sat, 14 Aug 2010 15:06:39 UTC
        Kickoff time: Sat, 14 Aug 2010 15:06:39 UTC
        Password must change: never
        """
    ).strip()
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_data = pdbedit.list_users(verbose=True)
        assert actual_data == expected_data
def test_when_verbose_and_multiple_records_present_data_should_be_correctly_parsed():
    """Multiple dash-separated records parse into one dict per user;
    empty separator runs at the end must be ignored."""
    expected_data = {
        "roscivs": {
            "unix username": "roscivs",
            "nt username": "bottia",
            "user sid": "42",
        },
        "srilyk": {
            "unix username": "srilyk",
            "nt username": "srilyk",
            "account desc": "trololollol",
            "user sid": "99",
        },
        "jewlz": {
            "unix username": "jewlz",
            "nt username": "flutterbies",
            "user sid": "4",
        },
    }
    # NOTE: the \x1d (group separator) control char inside "trololol\x1dlol"
    # is expected to be stripped by the parser ("trololollol" above).
    pdb_output = dedent(
        """
        -------------
        Unix username: roscivs
        NT username: bottia
        User SID: 42
        -------------
        Unix username: srilyk
        NT username: srilyk
        User SID: 99
        Account desc: trololol\x1dlol
        -------------
        Unix username: jewlz
        NT username: flutterbies
        User SID: 4
        -------------
        -------------
        -------------
        """
    ).strip()
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_data = pdbedit.list_users(verbose=True)
        assert actual_data == expected_data
|
from nose.tools import *
from leela.client.sensors.linux import disk_usage
def test_disk_usage_sensor_is_stateless():
    """A fresh DiskUsage sensor must produce measurements immediately."""
    sensor = disk_usage.DiskUsage()
    # measure() should return a non-empty list even on the first call.
    ok_([] != sensor.measure())
def test_disk_usage_sensor_produces_core_metrics():
    """Every measurement batch must contain .total, .used and .free events."""
    sensor = disk_usage.DiskUsage()
    events = [e.name() for e in sensor.measure()]
    # any() replaces the previous bare `reduce` (a Python-2-only builtin),
    # is equivalent for this "does any name end with X" check, and
    # short-circuits on the first match.
    ok_(any(name.endswith(".total") for name in events))
    ok_(any(name.endswith(".used") for name in events))
    ok_(any(name.endswith(".free") for name in events))
|
from enum import Enum
from math import *
__version__ = "0.3.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 4
VARIABLE_COUNT = 18
class VariableType(Enum):
    """Classification of each model quantity (mirrors libCellML's generator)."""
    VARIABLE_OF_INTEGRATION = 1
    STATE = 2
    CONSTANT = 3
    COMPUTED_CONSTANT = 4
    ALGEBRAIC = 5
    # EXTERNAL values are supplied at run time via the external_variable callback.
    EXTERNAL = 6
# Metadata for the variable of integration (time).
VOI_INFO = {"name": "time", "units": "millisecond", "component": "environment", "type": VariableType.VARIABLE_OF_INTEGRATION}

# Metadata for the state vector; list index matches the index into `states`.
STATE_INFO = [
    {"name": "m", "units": "dimensionless", "component": "sodium_channel_m_gate", "type": VariableType.STATE},
    {"name": "h", "units": "dimensionless", "component": "sodium_channel_h_gate", "type": VariableType.STATE},
    {"name": "n", "units": "dimensionless", "component": "potassium_channel_n_gate", "type": VariableType.STATE},
    {"name": "V", "units": "millivolt", "component": "membrane", "type": VariableType.STATE}
]

# Metadata for the algebraic/constant vector; list index matches `variables`.
VARIABLE_INFO = [
    {"name": "g_L", "units": "milliS_per_cm2", "component": "leakage_current", "type": VariableType.CONSTANT},
    {"name": "Cm", "units": "microF_per_cm2", "component": "membrane", "type": VariableType.CONSTANT},
    {"name": "E_R", "units": "millivolt", "component": "membrane", "type": VariableType.CONSTANT},
    {"name": "g_K", "units": "milliS_per_cm2", "component": "potassium_channel", "type": VariableType.CONSTANT},
    {"name": "g_Na", "units": "milliS_per_cm2", "component": "sodium_channel", "type": VariableType.CONSTANT},
    {"name": "i_Stim", "units": "microA_per_cm2", "component": "membrane", "type": VariableType.ALGEBRAIC},
    {"name": "E_L", "units": "millivolt", "component": "leakage_current", "type": VariableType.EXTERNAL},
    {"name": "i_L", "units": "microA_per_cm2", "component": "leakage_current", "type": VariableType.ALGEBRAIC},
    {"name": "E_Na", "units": "millivolt", "component": "sodium_channel", "type": VariableType.COMPUTED_CONSTANT},
    {"name": "i_Na", "units": "microA_per_cm2", "component": "sodium_channel", "type": VariableType.ALGEBRAIC},
    {"name": "alpha_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
    {"name": "beta_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
    {"name": "alpha_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
    {"name": "beta_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
    {"name": "E_K", "units": "millivolt", "component": "potassium_channel", "type": VariableType.COMPUTED_CONSTANT},
    {"name": "i_K", "units": "microA_per_cm2", "component": "potassium_channel", "type": VariableType.ALGEBRAIC},
    {"name": "alpha_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.ALGEBRAIC},
    {"name": "beta_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.ALGEBRAIC}
]
def leq_func(x, y):
    """Return 1.0 when x <= y, otherwise 0.0 (numeric piecewise helper)."""
    return float(x <= y)
def geq_func(x, y):
    """Return 1.0 when x >= y, otherwise 0.0 (numeric piecewise helper)."""
    return float(x >= y)
def and_func(x, y):
    """Return 1.0 when both x and y are truthy, otherwise 0.0."""
    return float(bool(x) and bool(y))
def create_states_array():
    """Return a fresh state vector of length STATE_COUNT, initialised to NaN."""
    return [nan]*STATE_COUNT
def create_variables_array():
    """Return a fresh variable vector of length VARIABLE_COUNT, initialised to NaN."""
    return [nan]*VARIABLE_COUNT
def initialise_states_and_constants(states, variables):
    """Set model constants and initial state values in place.

    Indices follow VARIABLE_INFO / STATE_INFO above.
    """
    variables[0] = 0.3    # g_L (milliS_per_cm2)
    variables[1] = 1.0    # Cm (microF_per_cm2)
    variables[2] = 0.0    # E_R (millivolt)
    variables[3] = 36.0   # g_K (milliS_per_cm2)
    variables[4] = 120.0  # g_Na (milliS_per_cm2)
    states[0] = 0.05      # m gate
    states[1] = 0.6       # h gate
    states[2] = 0.325     # n gate
    states[3] = 0.0       # V (membrane potential, millivolt)
def compute_computed_constants(variables):
    """Compute constants that depend only on other constants."""
    variables[8] = variables[2]-115.0   # E_Na = E_R - 115 (millivolt)
    variables[14] = variables[2]+12.0   # E_K = E_R + 12 (millivolt)
def compute_rates(voi, states, rates, variables, external_variable):
    """Compute state derivatives (Hodgkin-Huxley) at time `voi`, in place.

    `external_variable` supplies the value of E_L (variables[6], declared
    EXTERNAL in VARIABLE_INFO). Statement order matters: each rate uses
    the gate coefficients computed immediately before it.
    """
    # Sodium m gate: rate = alpha_m*(1-m) - beta_m*m.
    variables[10] = 0.1*(states[3]+25.0)/(exp((states[3]+25.0)/10.0)-1.0)
    variables[11] = 4.0*exp(states[3]/18.0)
    rates[0] = variables[10]*(1.0-states[0])-variables[11]*states[0]
    # Sodium h gate.
    variables[12] = 0.07*exp(states[3]/20.0)
    variables[13] = 1.0/(exp((states[3]+30.0)/10.0)+1.0)
    rates[1] = variables[12]*(1.0-states[1])-variables[13]*states[1]
    # Potassium n gate.
    variables[16] = 0.01*(states[3]+10.0)/(exp((states[3]+10.0)/10.0)-1.0)
    variables[17] = 0.125*exp(states[3]/80.0)
    rates[2] = variables[16]*(1.0-states[2])-variables[17]*states[2]
    # Stimulus current: -20 uA/cm2 during the window 10 <= t <= 10.5 ms.
    variables[5] = -20.0 if and_func(geq_func(voi, 10.0), leq_func(voi, 10.5)) else 0.0
    # E_L is external; fetched via the callback.
    variables[6] = external_variable(voi, states, rates, variables, 6)
    # Membrane currents: leakage, potassium, sodium.
    variables[7] = variables[0]*(states[3]-variables[6])
    variables[15] = variables[3]*pow(states[2], 4.0)*(states[3]-variables[14])
    variables[9] = variables[4]*pow(states[0], 3.0)*states[1]*(states[3]-variables[8])
    # dV/dt from total membrane current divided by capacitance.
    rates[3] = -(-variables[5]+variables[9]+variables[15]+variables[7])/variables[1]
def compute_variables(voi, states, rates, variables, external_variable):
    """Recompute algebraic variables for output at time `voi`, in place.

    NOTE(review): `external_variable` is accepted but not called here, and
    variables[6] (E_L) is read without being refreshed -- this assumes
    compute_rates() ran first at the same `voi`; confirm against the caller.
    """
    variables[7] = variables[0]*(states[3]-variables[6])
    variables[9] = variables[4]*pow(states[0], 3.0)*states[1]*(states[3]-variables[8])
    variables[10] = 0.1*(states[3]+25.0)/(exp((states[3]+25.0)/10.0)-1.0)
    variables[11] = 4.0*exp(states[3]/18.0)
    variables[12] = 0.07*exp(states[3]/20.0)
    variables[13] = 1.0/(exp((states[3]+30.0)/10.0)+1.0)
    variables[15] = variables[3]*pow(states[2], 4.0)*(states[3]-variables[14])
    variables[16] = 0.01*(states[3]+10.0)/(exp((states[3]+10.0)/10.0)-1.0)
    variables[17] = 0.125*exp(states[3]/80.0)
|
from setuptools import setup
from setuptools.command.test import test
class TestHook(test):
    """setuptools `test` command override that delegates to nose."""

    def run_tests(self):
        # Imported lazily so nose is only needed when actually running tests.
        import nose
        nose.main(argv=['nosetests', 'tests/', '-v', '--logging-clear-handlers'])
# Packaging metadata for the lxml-asserts library; `python setup.py test`
# is routed through TestHook (nose) via cmdclass below.
setup(
    name='lxml-asserts',
    version='0.1.2',
    description='Handy functions for testing lxml etree objects for equality and compatibility',
    url='https://github.com/SuminAndrew/lxml-asserts',
    author='Andrew Sumin',
    author_email='sumin.andrew@gmail.com',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Testing',
    ],
    license="http://www.apache.org/licenses/LICENSE-2.0",
    cmdclass={
        'test': TestHook
    },
    packages=[
        'lxml_asserts'
    ],
    install_requires=[
        'lxml',
    ],
    test_suite='tests',
    tests_require=[
        'nose',
        'pycodestyle == 2.3.1'
    ],
    zip_safe=False
)
|
"""
Initialization script for restapi for the application.
"""
from flask import Blueprint
from app.common.logging import setup_logging

# Blueprint object registered by the application factory.
api = Blueprint('api', __name__)

# Imported after `api` is defined so these modules can import it without a
# circular-import error. NOTE(review): setup_logging is imported but not
# called here -- confirm it is used elsewhere or remove the import.
from . import views, errors
|
from google.cloud import appengine_admin_v1
def sample_delete_instance():
    """Delete an App Engine instance via the Admin API (generated sample).

    NOTE(review): DeleteInstanceRequest is constructed without a resource
    name; a real call must populate the request's `name` field.
    """
    # Create a client
    client = appengine_admin_v1.InstancesClient()

    # Initialize request argument(s)
    request = appengine_admin_v1.DeleteInstanceRequest(
    )

    # Make the request (returns a long-running operation)
    operation = client.delete_instance(request=request)

    print("Waiting for operation to complete...")

    # Block until the long-running operation finishes.
    response = operation.result()

    # Handle the response
    print(response)
|
class HeapBuilder:
    """Converts an array into a binary min-heap, recording the swaps made.

    Reads `n` and then `n` integers from stdin, heapifies them in place,
    and prints the number of swaps followed by each swap as "i j".
    """

    def __init__(self):
        self._swaps = []   # list of (i, j) index pairs, in the order applied
        self._data = []    # the array being heapified

    def ReadData(self):
        """Read the array size and the array itself from stdin."""
        n = int(input())
        self._data = [int(s) for s in input().split()]
        assert n == len(self._data)

    def WriteResponse(self):
        """Print the swap count and each recorded swap."""
        print(len(self._swaps))
        for swap in self._swaps:
            print(swap[0], swap[1])

    def _sift_down(self, i):
        """Restore the min-heap property below index i, recording swaps."""
        n = len(self._data)
        while True:
            smallest = i
            left = 2 * i + 1
            right = 2 * i + 2
            if left < n and self._data[left] < self._data[smallest]:
                smallest = left
            if right < n and self._data[right] < self._data[smallest]:
                smallest = right
            if smallest == i:
                return
            self._swaps.append((i, smallest))
            self._data[i], self._data[smallest] = self._data[smallest], self._data[i]
            i = smallest

    def GenerateSwaps(self):
        # Standard bottom-up heapify (Floyd's algorithm): sift down every
        # internal node from the last parent to the root. This performs
        # O(n) swaps in total, replacing the previous selection-sort
        # implementation which used a quadratic number of swaps.
        for i in range(len(self._data) // 2 - 1, -1, -1):
            self._sift_down(i)

    def Solve(self):
        self.ReadData()
        self.GenerateSwaps()
        self.WriteResponse()

if __name__ == '__main__':
    heap_builder = HeapBuilder()
    heap_builder.Solve()
|
"""Class that is responsible for building and assessing proposed.
bonding patterns.
"""
import operator
from typing import List, Optional
import numpy as np
from smu import dataset_pb2
from smu.parser import smu_utils_lib
class MatchingParameters:
    """A class to specify optional matching parameters for SmuMolecule.place_bonds.

    Every option is a plain read/write attribute; the previous trivial
    property getter/setter pairs added no validation or logic, so they
    were replaced by direct attributes (the public get/set interface is
    unchanged for callers).
    """

    def __init__(self):
        # Require every heavy atom to reach its maximum bond count
        # (checked by SmuMolecule.place_bonds).
        self.must_match_all_bonds: bool = True
        # Presumably controls explicit hydrogens in generated SMILES --
        # not used within this module; confirm against callers.
        self.smiles_with_h: bool = False
        # Presumably controls atom labels in generated SMILES -- not used
        # within this module; confirm against callers.
        self.smiles_with_labels: bool = True
        # A variant on matching is to consider all N and O as neutral forms
        # during matching, and then as a post processing step, see whether a
        # valid, neutral, molecule can be formed.
        self.neutral_forms_during_bond_matching: bool = False
        # If not a bond is being considered during matching.
        self.consider_not_bonded = False
        # Avoid destroying rings if not bonded is enabled.
        # Note that only the ring atom count is considered.
        self.ring_atom_count_cannot_decrease = True
def add_bond(a1, a2, btype, destination):
    """Add a new Bond to `destination`.

    Args:
        a1: atom (index of the first endpoint)
        a2: atom (index of the second endpoint)
        btype: bond type, as an integer key of INTEGER_TO_BOND_TYPE.
        destination: BondTopology whose `bonds` list is appended to in place.
    """
    destination.bonds.append(
        dataset_pb2.BondTopology.Bond(
            atom_a=a1,
            atom_b=a2,
            bond_type=smu_utils_lib.INTEGER_TO_BOND_TYPE[btype]))
class SmuMolecule:
    """Holds information about partially built molecules."""

    def __init__(self, hydrogens_attached, bonds_to_scores, matching_parameters):
        """Class to perform bonding assessments.

        Args:
            hydrogens_attached: a BondTopology that has all atoms, and the bonds
                associated with the Hydrogen atoms.
            bonds_to_scores: A dict that maps tuples of pairs of atoms, to a numpy
                array of scores [0,3], for each possible bond type.
            matching_parameters: contains possible optional behaviour modifiers.
        """
        self._starting_bond_topology = hydrogens_attached
        self._natoms = len(hydrogens_attached.atoms)
        # Heavy atoms = all non-hydrogen atoms.
        self._heavy_atoms = sum(1 for atom in hydrogens_attached.atoms
                                if atom != dataset_pb2.BondTopology.ATOM_H)
        self._contains_both_oxygen_and_nitrogen = False
        # If the molecule contains both N and O atoms, then we can
        # do more extensive atom type matching if requested.
        if matching_parameters.neutral_forms_during_bond_matching:
            self.set_contains_both_oxygen_and_nitrogen(hydrogens_attached)
        # For each atom, the maximum number of bonds that can be attached.
        self._max_bonds = np.zeros(self._natoms, dtype=np.int32)
        if matching_parameters.neutral_forms_during_bond_matching and self._contains_both_oxygen_and_nitrogen:
            for i in range(0, self._natoms):
                self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS_ANY_FORM[
                    hydrogens_attached.atoms[i]]
        else:
            for i in range(0, self._natoms):
                self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS[
                    hydrogens_attached.atoms[i]]
        # With the Hydrogens attached, the number of bonds to each atom.
        self._bonds_with_hydrogens_attached = np.zeros((self._natoms),
                                                       dtype=np.int32)
        for bond in hydrogens_attached.bonds:
            self._bonds_with_hydrogens_attached[bond.atom_a] += 1
            self._bonds_with_hydrogens_attached[bond.atom_b] += 1
        self._current_bonds_attached = np.zeros((self._natoms), dtype=np.int32)
        # We turn bonds_to_scores into two arrays. So they can be iterated
        # via itertools.
        self._bonds = list(bonds_to_scores.keys())
        self._scores = list(bonds_to_scores.values())
        # Initialize for probability type accumulation
        self._initial_score = 1.0
        self._accumulate_score = operator.mul
        # For testing, it can be convenient to allow for partial matches
        # For example this allows matching C-C and C=C without the need
        # to add explicit hydrogens
        self._must_match_all_bonds = matching_parameters.must_match_all_bonds

    def set_contains_both_oxygen_and_nitrogen(self, bt):
        """Examine `bt` and set self._contains_both_oxygen_and_nitrogen.

        Args:
            bt: BondTopology
        """
        self._contains_both_oxygen_and_nitrogen = False
        oxygen_count = 0
        nitrogen_count = 0
        # Both charged and neutral forms count towards each element.
        for atom in bt.atoms:
            if atom in [
                    dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_NPOS
            ]:
                nitrogen_count += 1
            elif atom in [
                    dataset_pb2.BondTopology.ATOM_O, dataset_pb2.BondTopology.ATOM_ONEG
            ]:
                oxygen_count += 1
        if oxygen_count > 0 and nitrogen_count > 0:
            self._contains_both_oxygen_and_nitrogen = True

    def set_initial_score_and_incrementer(self, initial_score, op):
        """Update values used for computing scores."""
        self._initial_score = initial_score
        self._accumulate_score = op

    def _initialize(self):
        """Make the molecule ready for adding bonds between heavy atoms."""
        self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)

    def _place_bond(self, a1, a2, btype):
        """Possibly add a new bond to the current config.

        If the bond can be placed, updates self._current_bonds_attached for
        both `a1` and `a2`.

        Args:
            a1: index of the first atom.
            a2: index of the second atom.
            btype: bond order (added to each atom's attached-bond count).
        Returns:
            Bool: True if the bond fits within both atoms' maximum bond counts.
        """
        if self._current_bonds_attached[a1] + btype > self._max_bonds[a1]:
            return False
        if self._current_bonds_attached[a2] + btype > self._max_bonds[a2]:
            return False
        self._current_bonds_attached[a1] += btype
        self._current_bonds_attached[a2] += btype
        return True

    def generate_search_state(self):
        """For each pair of atoms, return a list of plausible bond types.

        This will be passed to itertools.product, which thereby enumerates all
        possible bonding combinations.

        Returns:
            List of lists - one for each atom pair.
        """
        result: List[List[int]] = []
        for ndx in range(0, len(self._bonds)):
            # For each pair of atoms, the plausible bond types - non zero score.
            plausible_types: List[int] = []
            for i, score in enumerate(self._scores[ndx]):
                if score > 0.0:
                    plausible_types.append(i)
            result.append(plausible_types)
        return result

    def place_bonds_inner(self, state):
        """Place bonds corresponding to `state`.

        No validity checking is done, the calling function is responsible
        for that.

        Args:
            state: for each pair of atoms, the kind of bond to be placed.
        Returns:
            If successful, a BondTopology; None otherwise.
        """
        # Reset counts so each candidate state starts from the H-only bonds.
        self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)
        result = dataset_pb2.BondTopology()
        result.CopyFrom(self._starting_bond_topology)  # only Hydrogens attached.
        result.score = self._initial_score
        # Make sure each atom gets at least one bond.
        # NOTE(review): this array is sized by heavy-atom count but indexed by
        # raw atom index -- assumes heavy atoms precede hydrogens; confirm.
        atom_got_bond = np.zeros(self._heavy_atoms)
        for i, btype in enumerate(state):
            if btype != dataset_pb2.BondTopology.BOND_UNDEFINED:
                a1 = self._bonds[i][0]
                a2 = self._bonds[i][1]
                if not self._place_bond(a1, a2, btype):
                    return None
                add_bond(a1, a2, btype, result)
                atom_got_bond[a1] = 1
                atom_got_bond[a2] = 1
                result.score = self._accumulate_score(result.score,
                                                      self._scores[i][btype])
        if not np.all(atom_got_bond):
            return None
        return result

    def place_bonds(
            self, state, matching_parameters
    ):
        """Place bonds corresponding to `state`.

        Args:
            state: bonding pattern to be placed.
            matching_parameters: optional settings
        Returns:
            If successful, a BondTopology; None otherwise.
        """
        bt = self.place_bonds_inner(state)
        if not bt:
            return None
        if matching_parameters.neutral_forms_during_bond_matching and self._contains_both_oxygen_and_nitrogen:
            if not self.assign_charged_atoms(bt):
                return None
            # all bonds matched has already been checked.
            return bt
        # Optionally check whether all bonds have been matched
        if not self._must_match_all_bonds:
            return bt
        if not np.array_equal(self._current_bonds_attached, self._max_bonds):
            return None
        return bt

    def assign_charged_atoms(self, bt):
        """Assign (N, N+) and (O, O-) possibilities in `bt`.

        bt must contain both N and O atoms.
        Note that we assume _must_match_all_bonds, and return None if that cannot
        be achieved.

        Args:
            bt: BondTopology, bt.atoms are updated in place
        Returns:
            True if successful, False otherwise
        """
        carbon = dataset_pb2.BondTopology.ATOM_C
        hydrogen = dataset_pb2.BondTopology.ATOM_H
        fluorine = dataset_pb2.BondTopology.ATOM_F
        nitrogen = dataset_pb2.BondTopology.ATOM_N
        npos = dataset_pb2.BondTopology.ATOM_NPOS
        oxygen = dataset_pb2.BondTopology.ATOM_O
        oneg = dataset_pb2.BondTopology.ATOM_ONEG
        net_charge = 0
        for i, atom in enumerate(bt.atoms):
            if atom in [carbon, hydrogen, fluorine]:
                # Fixed-valence atoms: must already be fully bonded.
                if self._max_bonds[i] != self._current_bonds_attached[i]:
                    return False
            elif atom in [nitrogen, npos]:
                # 4 bonds -> N+, 3 bonds -> neutral N; anything else fails.
                if self._current_bonds_attached[i] == 4:
                    bt.atoms[i] = npos
                    net_charge += 1
                elif self._current_bonds_attached[i] == 3:
                    bt.atoms[i] = nitrogen
                else:
                    return False
            elif atom in [oxygen, oneg]:
                # 2 bonds -> neutral O, 1 bond -> O-; anything else fails.
                if self._current_bonds_attached[i] == 2:
                    bt.atoms[i] = oxygen
                elif self._current_bonds_attached[i] == 1:
                    bt.atoms[i] = oneg
                    net_charge -= 1
                else:  # not attached.
                    return False
        # Only overall-neutral assignments are accepted.
        if net_charge != 0:
            return False
        return True
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import sys, os
from os.path import abspath, dirname, join
from warnings import warn
# Default settings for the graphite webapp. Every value below may be
# overridden by graphite.local_settings (imported further down this module).
GRAPHITE_WEB_APP_SETTINGS_LOADED = False
WEBAPP_VERSION = '0.10.0-alpha'
DEBUG = False
JAVASCRIPT_DEBUG = False

# Filesystem layout. Blank string values are filled in with computed
# defaults after local_settings has had a chance to override them.
WEB_DIR = dirname( abspath(__file__) )
WEBAPP_DIR = dirname(WEB_DIR)
GRAPHITE_ROOT = dirname(WEBAPP_DIR)
CONTENT_DIR = ''
CSS_DIR = ''
CONF_DIR = ''
DASHBOARD_CONF = ''
GRAPHTEMPLATES_CONF = ''
STORAGE_DIR = ''
WHITELIST_FILE = ''
INDEX_FILE = ''
LOG_DIR = ''
CERES_DIR = ''
WHISPER_DIR = ''
RRD_DIR = ''
STANDARD_DIRS = []

# Cluster / remote-fetch configuration.
# (This was accidentally assigned twice; the duplicate line was removed.)
CLUSTER_SERVERS = []
REMOTE_FIND_TIMEOUT = 3.0
REMOTE_FETCH_TIMEOUT = 6.0
REMOTE_RETRY_DELAY = 60.0
REMOTE_READER_CACHE_SIZE_LIMIT = 1000
CARBON_METRIC_PREFIX='carbon'
CARBONLINK_HOSTS = ["127.0.0.1:7002"]
CARBONLINK_TIMEOUT = 1.0
CARBONLINK_HASHING_KEYFUNC = None
CARBONLINK_RETRY_DELAY = 15
REPLICATION_FACTOR = 1

# Caching.
MEMCACHE_HOSTS = []
MEMCACHE_KEY_PREFIX = ''
FIND_CACHE_DURATION = 300
FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
DEFAULT_CACHE_DURATION = 60 #metric data and graphs are cached for one minute by default

# Logging / fetching behaviour.
LOG_CACHE_PERFORMANCE = False
LOG_ROTATE = True
MAX_FETCH_RETRIES = 2

# Rendering.
REMOTE_RENDERING = False #if True, rendering is delegated to RENDERING_HOSTS
RENDERING_HOSTS = []
REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
LOG_RENDERING_PERFORMANCE = False

# Miscellaneous webapp settings.
SMTP_SERVER = "localhost"
DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
ALLOW_ANONYMOUS_CLI = True
LOG_METRIC_ACCESS = False
LEGEND_MAX_ITEMS = 10
RRD_CF = 'AVERAGE'
STORAGE_FINDERS = (
    'graphite.finders.standard.StandardFinder',
)

# LDAP authentication (disabled by default).
USE_LDAP_AUTH = False
LDAP_SERVER = "" # "ldapserver.mydomain.com"
LDAP_PORT = 389
LDAP_USE_TLS = False
LDAP_SEARCH_BASE = "" # "OU=users,DC=mydomain,DC=com"
LDAP_BASE_USER = "" # "CN=some_readonly_account,DC=mydomain,DC=com"
LDAP_BASE_PASS = "" # "my_password"
LDAP_USER_QUERY = "" # "(username=%s)"  For Active Directory use "(sAMAccountName=%s)"
LDAP_URI = None

# Authentication / authorization.
USE_REMOTE_USER_AUTHENTICATION = False
SECRET_KEY = 'UNSAFE_DEFAULT'
ALLOWED_HOSTS = [ '*' ]
LOGIN_URL = '/account/login'
DASHBOARD_REQUIRE_AUTHENTICATION = False
DASHBOARD_REQUIRE_PERMISSIONS = False
DASHBOARD_REQUIRE_EDIT_GROUP = None

# Database (sqlite by default; NAME is re-derived from STORAGE_DIR below
# when left unset by local_settings).
DATABASES = {
    'default': {
        'NAME': '/opt/graphite/storage/graphite.db',
        'ENGINE': 'django.db.backends.sqlite3',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    },
}

FLUSHRRDCACHED = ''
# Pull in operator overrides, then fill in computed defaults for any
# setting that was left blank.
try:
    from graphite.local_settings import *
except ImportError:
    # sys.stderr.write is used instead of the Python-2-only
    # ``print >> sys.stderr`` statement so this module stays parseable
    # under Python 3 as well.
    sys.stderr.write("Could not import graphite.local_settings, using defaults!\n")

if not GRAPHITE_WEB_APP_SETTINGS_LOADED:
    from graphite.app_settings import *

# Filesystem defaults, derived from GRAPHITE_ROOT unless overridden above.
if not CONTENT_DIR:
    CONTENT_DIR = join(WEBAPP_DIR, 'content')
if not CSS_DIR:
    CSS_DIR = join(CONTENT_DIR, 'css')
if not CONF_DIR:
    CONF_DIR = os.environ.get('GRAPHITE_CONF_DIR', join(GRAPHITE_ROOT, 'conf'))
if not DASHBOARD_CONF:
    DASHBOARD_CONF = join(CONF_DIR, 'dashboard.conf')
if not GRAPHTEMPLATES_CONF:
    GRAPHTEMPLATES_CONF = join(CONF_DIR, 'graphTemplates.conf')
if not STORAGE_DIR:
    STORAGE_DIR = os.environ.get('GRAPHITE_STORAGE_DIR', join(GRAPHITE_ROOT, 'storage'))
if not WHITELIST_FILE:
    WHITELIST_FILE = join(STORAGE_DIR, 'lists', 'whitelist')
if not INDEX_FILE:
    INDEX_FILE = join(STORAGE_DIR, 'index')
if not LOG_DIR:
    LOG_DIR = join(STORAGE_DIR, 'log', 'webapp')
if not WHISPER_DIR:
    WHISPER_DIR = join(STORAGE_DIR, 'whisper/')
if not CERES_DIR:
    CERES_DIR = join(STORAGE_DIR, 'ceres/')
if not RRD_DIR:
    RRD_DIR = join(STORAGE_DIR, 'rrd/')

# Register data directories for the standard finder, but only for storage
# backends whose modules are actually importable.
if not STANDARD_DIRS:
    try:
        import whisper
        if os.path.exists(WHISPER_DIR):
            STANDARD_DIRS.append(WHISPER_DIR)
    except ImportError:
        sys.stderr.write("WARNING: whisper module could not be loaded, whisper support disabled\n")
    try:
        import rrdtool
        if os.path.exists(RRD_DIR):
            STANDARD_DIRS.append(RRD_DIR)
    except ImportError:
        pass

# Default the sqlite database file into STORAGE_DIR when no NAME was given.
if 'sqlite3' in DATABASES.get('default',{}).get('ENGINE','') \
        and not DATABASES.get('default',{}).get('NAME'):
    DATABASES['default']['NAME'] = join(STORAGE_DIR, 'graphite.db')

# Use memcached for the default cache when hosts are configured.
# NOTE(review): CACHES is assumed to be defined by the app_settings /
# local_settings star-imports above — confirm before relying on it here.
if MEMCACHE_HOSTS:
    CACHES['default'] = {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': MEMCACHE_HOSTS,
        'TIMEOUT': DEFAULT_CACHE_DURATION,
        'KEY_PREFIX': MEMCACHE_KEY_PREFIX,
    }

# Authentication wiring.
if USE_LDAP_AUTH and LDAP_URI is None:
    LDAP_URI = "ldap://%s:%d/" % (LDAP_SERVER, LDAP_PORT)
if USE_REMOTE_USER_AUTHENTICATION:
    MIDDLEWARE_CLASSES += ('django.contrib.auth.middleware.RemoteUserMiddleware',)
    AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.RemoteUserBackend')
if USE_LDAP_AUTH:
    AUTHENTICATION_BACKENDS.insert(0,'graphite.account.ldapBackend.LDAPBackend')

if SECRET_KEY == 'UNSAFE_DEFAULT':
    warn('SECRET_KEY is set to an unsafe default. This should be set in local_settings.py for better security')
|
import time


def current_millis():
    """Return the current wall-clock time as an integer number of milliseconds."""
    milliseconds = time.time() * 1000
    return int(round(milliseconds))
|
import mock
import openstack.common.context
from openstack.common.middleware import context
from openstack.common import test
class ContextMiddlewareTest(test.BaseTestCase):
    """Unit tests for context.ContextMiddleware."""

    def test_process_request(self):
        # process_request must attach the context built by make_context.
        request = mock.Mock()
        wsgi_app = mock.Mock()
        conf = mock.MagicMock()
        fake_ctx = mock.sentinel.context
        patched = mock.patch.object(context.ContextMiddleware,
                                    'make_context',
                                    mock.Mock(return_value=fake_ctx))
        with patched:
            middleware = context.ContextMiddleware(wsgi_app, conf)
            middleware.process_request(request)
            self.assertEqual(request.context, fake_ctx)

    def test_make_context(self):
        # By default make_context forwards its arguments to RequestContext.
        wsgi_app = mock.Mock()
        conf = mock.MagicMock()
        patched = mock.patch.object(openstack.common.context.RequestContext,
                                    '__init__',
                                    mock.Mock(return_value=None))
        with patched as ctx_init:
            middleware = context.ContextMiddleware(wsgi_app, conf)
            middleware.make_context(mock.sentinel.arg)
            ctx_init.assert_called_with(mock.sentinel.arg)

    def test_make_explicit_context(self):
        # A 'context_class' option routes construction through import_class.
        wsgi_app = mock.Mock()
        ctx_factory = mock.Mock()
        conf = {'context_class': mock.sentinel.context_class}
        with mock.patch('openstack.common.importutils.import_class',
                        mock.Mock(return_value=ctx_factory)):
            middleware = context.ContextMiddleware(wsgi_app, conf)
            middleware.make_context(mock.sentinel.arg)
            ctx_factory.assert_called_with(mock.sentinel.arg)
class FilterFactoryTest(test.BaseTestCase):
    """Unit tests for context.filter_factory."""

    def test_filter_factory(self):
        conf = dict(sentinel=mock.sentinel.global_conf)
        wsgi_app = mock.sentinel.app
        target = 'openstack.common.middleware.context.ContextMiddleware'

        def verify_ctor_args(arg_app, arg_conf):
            # The factory must forward both the app and the global conf.
            self.assertEqual(wsgi_app, arg_app)
            self.assertEqual(conf['sentinel'], arg_conf['sentinel'])
            return mock.DEFAULT

        with mock.patch(target,
                        mock.Mock(return_value=mock.sentinel.ctx)) as middleware_cls:
            middleware_cls.side_effect = verify_ctor_args
            app_filter = context.filter_factory(conf)
            self.assertEqual(app_filter(wsgi_app), mock.sentinel.ctx)
|
import unittest
import config_test
from backupcmd.commands import backupCommands
class BackupCommandsTestCase(unittest.TestCase):
    """Test commands passed to main script"""
    def test_hyphen_r_option(self):
        # Placeholder test: prints a "pending" notice (Python 2 print
        # statement) and asserts a tautology so the suite passes until
        # the real behaviour of the -r option is implemented.
        print 'Pending BackupCommandsTestCase'
        self.assertEqual(1,1)
|
"""
Simulate DSR over a network of nodes.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-26 21:51:40 -0500 (Wed, 26 Oct 2011) $
* $LastChangedRevision: 5314 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from SimPy.Simulation import *
from scapy.all import *
from wins import *
from wins.ieee80211 import *
from copy import copy, deepcopy
from wins.backend import RNG_init
from wins.backend import *
from wins.mac import RBAR, ARF
from wins.net import DSR
from wins.traffic import Agent
import sys
from optparse import OptionParser
import numpy as np
import struct
import gc
import time
RNG_INIT = 1
EXIT_WITH_TRACE = 1
class Node(Element):
    """A simulated wireless node: radio, PHY, MAC, DSR routing, ARP,
    traffic agent, and a motion element, wired together as a stack."""
    name = "node"
    tracename = "NODE"

    def __init__(self, **kwargs):
        Element.__init__(self, **kwargs)

    def configure(self, pos=None,                                   # motion \
                  useshared=False,                                  # arp \
                  cfocorrection=True,                               # phy \
                  usecsma=False,                                    # mac \
                  rreqrate=None, datarate=None,                     # net \
                  dest=None, plen=None, delay=None, mode=None,      # agent \
                  **kwargs):
        # Build the protocol stack from the radio interface upward.
        radio = self.newchild('cif', Dot11NRadio)
        physical = self.newchild('phy', Dot11NPHY, radio=radio,
                                 cfocorrection=cfocorrection)
        maclayer = self.newchild('mac', DCF, usecsma=usecsma, phy=physical)
        routing = self.newchild('net', DSR, rreqrate=rreqrate, datarate=datarate)
        addrproto = self.newchild('arp', ARP, useshared=useshared)
        traffic = self.newchild('agent', Agent, dest=dest, plen=plen,
                                delay=delay, mode=mode)
        self.newchild('motion', Motion, pos=pos)
        # Wire the ports: agent -> net, arp -> (net, mac), mac -> phy -> radio.
        traffic.connect(routing)
        addrproto.connect(routing, maclayer)
        maclayer.connect(physical)
        physical.connect(radio)
def read_topo(options, topofile):
    """Read topology layout from file.

    Each line of the file is expected to be the repr of a dict containing
    both 'border' and 'layout' keys; lines that do not parse into such a
    dict are skipped, and later lines override earlier ones.

    :param options: parsed command-line options (unused here).
    :param topofile: path of the topology file to read.
    :return: dict with 'border' and 'layout' entries (None when absent).
    """
    topo = {'border': None, 'layout': None}
    # 'with open(...)' replaces the Python-2-only file() builtin and
    # guarantees the file is closed even if parsing raises.
    with open(topofile, 'r') as f:
        for s in f:
            # SECURITY: eval() executes arbitrary code from the file;
            # only load topology files from trusted sources.
            try:
                d = eval(s)
                assert isinstance(d, dict)
                assert ('border' in d) and ('layout' in d)
            except Exception:
                d = None
            # add dict to topo
            if d:
                topo.update(d)
    return topo
def read_route(options, routefile):
    """Read routing tables from file.

    Each line is expected to be the repr of a dict mapping a source
    address to a routing table; the routing table maps each destination
    address to an info dict containing 'index' and 'list' keys. Lines
    that fail validation are skipped.

    :param options: parsed command-line options (unused here).
    :param routefile: path of the routing file to read.
    :return: dict mapping source address -> routing table.
    """
    routedata = {}
    # 'with open(...)' replaces the Python-2-only file() builtin and
    # guarantees the file is closed even if parsing raises.
    with open(routefile, 'r') as f:
        for s in f:
            # SECURITY: eval() executes arbitrary code; only read routing
            # files produced by a trusted run (see --save-route).
            try:
                d = eval(s)
                assert isinstance(d, dict)
                for x, y in d.items():
                    # maps src x -> routing table y
                    assert isinstance(y, dict)
                    for a, b in y.items():
                        # maps dst a -> info b (for route table y)
                        assert ('index' in b)
                        assert ('list' in b)
            except Exception:
                d = None
            # add dict to routedata
            if d:
                routedata.update(d)
    return routedata
def get_topology(options, numnodes):
    """Get or create a node topology.

    When ``options.usetopo`` is set, the layout is loaded from that file;
    otherwise ``numnodes`` positions are drawn uniformly at random inside
    the configured bounding box.

    :return: dict with 'border' (xmin, xmax, ymin, ymax) and 'layout' keys.
    """
    if options.usetopo:
        # Load an existing topology and sanity-check it.
        topo = read_topo(options, options.usetopo)
        border = topo['border']
        layout = topo['layout']
        # Unpacking also validates that border has at least four entries.
        xmin, xmax, ymin, ymax = border[:4]
        assert (len(layout) >= numnodes)
        return topo
    # Generate a fresh random topology.
    assert (options.xmin <= options.xmax)
    assert (options.ymin <= options.ymax)
    border = (options.xmin, options.xmax, options.ymin, options.ymax)
    # Uniform random placement inside the bounding box.
    xpos = np.random.uniform(options.xmin, options.xmax, numnodes)
    ypos = np.random.uniform(options.ymin, options.ymax, numnodes)
    layout = list(zip(xpos, ypos))
    assert (len(layout) >= numnodes)
    return {'border': border, 'layout': layout}
def set_routing(options, nodelist):
    """Pre-load DSR routing tables from ``options.useroute`` if given.

    Nodes without an entry in the routing file are left untouched.

    :return: the parsed routing data, or None when no file was specified.
    """
    if not options.useroute:
        return
    rdata = read_route(options, options.useroute)
    for node in nodelist:
        addr = node.net.address
        if addr not in rdata:
            continue
        for dst, info in rdata[addr].items():
            # Each entry of 'list' is a (cost, timestamp, nexthop) triple.
            for cost, _ts, nexthop in info['list']:
                node.net.addroute(dst, nexthop=nexthop, cost=cost)
    return rdata
def run_experiment(options):
    """Configure and execute one DSR-over-802.11n simulation run.

    Builds the channel and topology (random or file-loaded), instantiates
    ``options.numnodes`` Node stacks, connects ``options.nconnect``
    source/destination pairs, optionally pre-loads routing tables, runs
    the SimPy simulation until the stop time, and finally writes any
    requested trace/topology/routing output files.

    NOTE: this module is Python 2 code (``except Exception, e`` syntax,
    ``file()`` builtin below).

    :param options: `optparse.Values` produced by `main()`.
    :raises: re-raises any exception caught during simulation after
        logging it to the trace (see EXIT_WITH_TRACE).
    """
    # record start time
    starttime = time.time()
    # initialize RNG
    if RNG_INIT: RNG_init()
    # set SIMULATION parameters
    mon = Element(tracename="MON")
    verbose = options.verbose
    stoptime = 2.0
    if not (options.stop<0): stoptime = options.stop
    stoptime *= 1.05     # allow events around stoptime to finish
    simargs = {'verbose':verbose}
    # set EXPERIMENT parameters
    ntx, nrx = 1, 1
    numnodes = options.numnodes
    nconnect = options.nconnect
    assert (nconnect>0)
    assert (numnodes>=2*nconnect)
    # set CHANNEL parameters
    alpha = options.alpha
    modeltype = options.tgnmodel       # default -> LOS Channel
    usedoppler = options.usedoppler
    usefading = options.usefading
    envspeed = options.envspeed
    chargs = {'modeltype':modeltype, 'n':alpha, \
              'usedoppler':usedoppler, 'usefading':usefading, \
              'environmentspeed': envspeed}
    chargs.update(simargs)
    # set AGENT parameters
    mode = options.agent_mode
    plen = Agent.DefaultPacketLength
    rate = options.rate     # transmission rate in packets/second
    delay = None
    if mode is None: mode = "cbr"
    if options.plen>0: plen = options.plen
    if (rate>0): delay = 1.0/rate
    # set agent delay if not already specified
    if delay is None:
        # default the packet interval to twice the channel coherence time
        cm = Dot11NChannel(**chargs)
        chan = Dot11N_Channel(cm.modelnum, nrx, ntx, cm.flags)
        delay = 2*chan.coherencetime()
        if rate is None: rate = 1.0/delay
    agtargs = {'plen': plen, 'mode':mode, 'delay':delay}
    # set DSR parameters (rate indices are only used when in range)
    rreqrate, datarate = None, None
    if 0<=options.rreqrate<8*ntx: rreqrate=options.rreqrate
    if 0<=options.datarate<8*ntx: datarate=options.datarate
    netargs = {'rreqrate':rreqrate, 'datarate':datarate}
    # set other protocol parameters (MAC, ARP, etc.)
    useshared = True
    arpargs = {'useshared':useshared}
    usecsma = False
    macargs = {'usecsma':usecsma}
    # set phy parameters (class-level configuration shared by all radios)
    Dot11NPHY.usewaveform = options.usewaveform
    Dot11NRadio.Ntx, Dot11NRadio.Nrx = ntx, nrx
    Dot11NRadio.fomax = options.fomax
    cfocorrection = True
    if options.disable_cfo_correction: cfocorrection = False
    phyargs = {'cfocorrection':cfocorrection}
    # set node parameters (merge every layer's kwargs into one dict)
    nodeargs = {}
    nodeargs.update(agtargs)
    nodeargs.update(netargs)
    nodeargs.update(arpargs)
    nodeargs.update(macargs)
    nodeargs.update(phyargs)
    nodeargs.update(simargs)
    ############################
    # Set Up Simulation
    ############################
    initialize()
    # create channel
    bidirectional = options.bidirectional
    ch = Channel(model=Dot11NChannel, bidirectional=bidirectional, **simargs)
    # get topology
    topo = get_topology(options, numnodes)
    border = topo['border']
    layout = topo['layout']
    # create nodes
    nodelist = []
    for k in range(numnodes):
        pos = layout[k]
        n = Node(pos=pos, **nodeargs)
        nodelist.append(n)
        n.motion.log("pos", pos=["%.3f"%(p) for p in n.motion.position] )
    # connect source/destination pairs
    assert (nconnect<len(nodelist))
    for k in range(nconnect):
        src = nodelist[k]           # first N are sources
        dst = nodelist[-k-1]        # last N are destinations
        src.agent.dest = dst.net.address
    # set routing tables
    set_routing(options, nodelist)
    # connect all nodes via channel (fully-connected edge set, no self edges)
    for n in nodelist:
        for m in nodelist:
            if (n is not m):
                ch.add_edge(n.cif, m.cif, **chargs)
    # create monitor
    if options.monitor:
        mon = Monitor(period=stoptime/1e4)
        mon.start()
    ############################
    # Run Simulation
    ############################
    if options.usetopo:
        mon.log("topo", topofile=options.usetopo)
    mon.log("model", **chargs)
    mon.log("rate", rate="%.5g"%(rate) )
    simerror = None
    if EXIT_WITH_TRACE:
        # run under try/except so that a crash still yields a usable trace
        try:
            simulate(until=stoptime)
        except Exception, e:
            mon.log("SIMERR", error=str(e))
            simerror = e
    else:
        simulate(until=stoptime)
    # log remaining trace information
    mon.log("stoptime", stoptime="%.6f"%(stoptime))
    n = gc.collect()
    mon.log("GC", collected=n)
    totaltime = time.time() - starttime
    t = time.gmtime(totaltime)
    mon.log("runtime", runtime="%02d:%02d:%02d (h/m/s)"%(t.tm_hour, t.tm_min, t.tm_sec) )
    ############################
    # Teardown/Cleanup
    ############################
    # print output
    sys.stdout.flush()
    if options.trace: ch.trace.output()
    # write tracefile
    if options.output is not None: ch.trace.write(options.output)
    # write topofile
    if options.savetopo:
        f = file(options.savetopo, 'w')
        f.write("%s\n"%(topo) )
        f.close()
    # write routefile (one line per node, readable by read_route())
    if options.saveroute:
        # write data
        f = file(options.saveroute, 'w')
        for n in nodelist:
            addr = n.net.address
            rdata = {addr: n.net.table.data.copy()}
            f.write("%s\n"%(rdata))
        f.close()
    # if Exception occurred during simulation ...
    if simerror: raise simerror
def main():
    """Parse command-line options and run the DSR simulation experiment.

    Declares all simulation/experiment/agent/net/phy/channel/topology/
    routing options, rejects positional arguments, then delegates to
    `run_experiment()`.
    """
    usage = "%prog [OPTIONS]"
    parser = OptionParser(usage=usage)
    # simulation parameters
    parser.add_option("-v", "--verbose", dest="verbose", type="int", \
            default=ROUTING_VERBOSE+1, help="Set verbose level [default=%default].")
    parser.add_option("-t", "--trace", dest="trace", action="store_true",  \
            default=False, help="Output formatted trace to stdout")
    parser.add_option("-o", "--output", dest="output", \
            default=None, help="Name of output file for trace")
    parser.add_option("-s", "--stop", dest="stop", \
            type="float", default=2.0, \
            help="Run simulation until stop time [default=%default]")
    parser.add_option("-m", "--monitor", dest="monitor", action="store_true", \
            default=False, help="Enable simulation montior")
    # experiment parameters
    parser.add_option("-n", "--num-nodes", dest="numnodes", type="int", \
            default=50, help="Set number of nodes [default=%default]")
    parser.add_option("-c", "--num-connections", dest="nconnect", type="int", \
            default=1, help="Set number of active connections [default=%default]")
    # agent parameters
    parser.add_option("-r", "--rate", dest="rate", type="float", \
            default=None, help="Packets/second generated by a source [default=%default]")
    parser.add_option("-l", "--packet-length", dest="plen", type="int", \
            default=1024, help="Set packet size in bytes [default=%default]")
    parser.add_option("", "--agent-mode", dest="agent_mode", \
            default=None, help="Specify traffic mode [options=%s]."%(Agent.TrafficModes))
    # net parameters
    parser.add_option("", "--rreqrate", dest="rreqrate", type="int", \
            default=None, help="Set rate index for RREQ in DSR [default=%default]")
    parser.add_option("", "--datarate", dest="datarate", type="int", \
            default=None, help="Set rate index for non-RREQ packets in DSR [default=%default]")
    # mac parameters
    # phy parameters
    parser.add_option("", "--mcs", dest="mcs", type="int", \
            default=0, help="Set rate index for MCS [default=%default]")
    parser.add_option("", "--fomax", dest="fomax", \
            type="float", default=0.0, \
            help="Specify maximum frequency offset in ppm [default=%default]")
    parser.add_option("", "--use-waveform", dest="usewaveform", action="store_true", \
            default=False, help="Enable waveform-level simulation [default=%default]")
    parser.add_option("", "--disable-cfo-correction", \
            dest="disable_cfo_correction", action="store_true", \
            default=False, help="Disable CFO correction in waveform-level simulation [default=%default]")
    # channel parameters
    parser.add_option("", "--tgn-model", dest="tgnmodel", \
            default=None, help="Specify TGn model.")
    parser.add_option("", "--alpha", dest="alpha", type="float", \
            default=2.0, help="Specify pathloss exponent [default=%default]")
    parser.add_option("", "--use-doppler", dest="usedoppler", action="store_true",  \
            default=False, help="Enable doppler filter for fading in TGn channel model.")
    parser.add_option("", "--disable-fading", dest="usefading", action="store_false",  \
            default=True, help="Normalize channel and remove impact of fading on pathloss in TGn channel model.")
    parser.add_option("-E", "--environment-speed", dest="envspeed", type="float", \
            default=1.2, help="Environmental speed in (km/hr) [default=%default]")
    parser.add_option("", "--bidirectional-channel", dest="bidirectional", action="store_true", \
            default=False, help="Use bidirectional links in channel [default=%default]")
    # topology/layout parameters
    parser.add_option("", "--xmin", dest="xmin", type="float", \
            default=0.0, help="Set x-axis left boundary [default=%default]")
    parser.add_option("", "--xmax", dest="xmax", type="float", \
            default=500.0, help="Set x-axis right boundary [default=%default]")
    parser.add_option("", "--ymin", dest="ymin", type="float", \
            default=0.0, help="Set y-axis lower boundary [default=%default]")
    parser.add_option("", "--ymax", dest="ymax", type="float", \
            default=500.0, help="Set y-axis upper boundary [default=%default]")
    parser.add_option("", "--use-topo", dest="usetopo", \
            default=None, help="Specify topology file instead of generating random topology.")
    parser.add_option("", "--save-topo", dest="savetopo", \
            default=None, help="Save topology to file.")
    # routing parameters
    parser.add_option("", "--use-route", dest="useroute", \
            default=None, help="Specify routing file to initialize route tables.")
    parser.add_option("", "--save-route", dest="saveroute", \
            default=None, help="Save route tables to file.")
    (options, args) = parser.parse_args()
    # this script accepts options only; any positional argument is an error
    # (Python 2 print statement below)
    if len(args)>0:
        print "Invalid number of arguments."
        parser.print_help()
        raise SystemExit
    run_experiment(options)

if __name__ == '__main__':
    main()
|
class Customer:
    """Plain value object holding one customer's registration data."""

    def __init__(self, firstname, lastname, country, address, postcode, city, email, phone, password):
        # Assign every constructor argument to an attribute of the same name.
        attrs = ('firstname', 'lastname', 'country', 'address', 'postcode',
                 'city', 'email', 'phone', 'password')
        values = (firstname, lastname, country, address, postcode,
                  city, email, phone, password)
        for attr, value in zip(attrs, values):
            setattr(self, attr, value)

    def __repr__(self):
        # Identify a customer by e-mail plus name; other fields omitted.
        return "%s:%s:%s" % (self.email, self.firstname, self.lastname)
|
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto.sns as sns
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio import TwilioRestException
from twilio.rest import TwilioRestClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime-like objects via isoformat()."""

    def default(self, obj):
        if not hasattr(obj, 'isoformat'):
            # Defer to the stock encoder (which raises TypeError).
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
class BasicMatchString(object):
    """ Creates a string containing fields in match for the given rule.

    NOTE: Python 2 code (uses ``unicode`` and ``xrange``; ``items()``
    returns a sortable list).
    """

    def __init__(self, rule, match):
        # rule: the rule configuration dict; match: one ES match dict.
        self.rule = rule
        self.match = match

    def _ensure_new_line(self):
        """Pad self.text until it ends with exactly a blank line."""
        while self.text[-2:] != '\n\n':
            self.text += '\n'

    def _add_custom_alert_text(self):
        """Append the rule's 'alert_text', formatted with args or kwargs."""
        missing = '<MISSING VALUE>'
        alert_text = unicode(self.rule.get('alert_text', ''))
        if 'alert_text_args' in self.rule:
            # Positional formatting: each arg is looked up in the match.
            alert_text_args = self.rule.get('alert_text_args')
            alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]

            # Support referencing other top-level rule properties
            # This technically may not work if there is a top-level rule property with the same name
            # as an es result key, since it would have been matched in the lookup_es_key call above
            for i in xrange(len(alert_text_values)):
                if alert_text_values[i] is None:
                    alert_value = self.rule.get(alert_text_args[i])
                    if alert_value:
                        alert_text_values[i] = alert_value

            alert_text_values = [missing if val is None else val for val in alert_text_values]
            alert_text = alert_text.format(*alert_text_values)
        elif 'alert_text_kw' in self.rule:
            # Keyword formatting: maps es-key name -> format placeholder name.
            kw = {}
            for name, kw_name in self.rule.get('alert_text_kw').items():
                val = lookup_es_key(self.match, name)

                # Support referencing other top-level rule properties
                # This technically may not work if there is a top-level rule property with the same name
                # as an es result key, since it would have been matched in the lookup_es_key call above
                if val is None:
                    val = self.rule.get(name)

                kw[kw_name] = missing if val is None else val
            alert_text = alert_text.format(**kw)

        self.text += alert_text

    def _add_rule_text(self):
        """Append the rule type's own description of this match."""
        self.text += self.rule['type'].get_match_str(self.match)

    def _add_top_counts(self):
        """Append any 'top_events_*' counters, most frequent first."""
        for key, counts in self.match.items():
            if key.startswith('top_events_'):
                self.text += '%s:\n' % (key[11:])
                top_events = counts.items()

                if not top_events:
                    self.text += 'No events found.\n'
                else:
                    top_events.sort(key=lambda x: x[1], reverse=True)
                    for term, count in top_events:
                        self.text += '%s: %s\n' % (term, count)

                self.text += '\n'

    def _add_match_items(self):
        """Append all non-top_events match fields as 'key: value' lines."""
        match_items = self.match.items()
        match_items.sort(key=lambda x: x[0])
        for key, value in match_items:
            if key.startswith('top_events_'):
                continue
            value_str = unicode(value)
            if type(value) in [list, dict]:
                # Pretty-print containers as JSON when serializable.
                try:
                    value_str = self._pretty_print_as_json(value)
                except TypeError:
                    # Non serializable object, fallback to str
                    pass
            self.text += '%s: %s\n' % (key, value_str)

    def _pretty_print_as_json(self, blob):
        """Render `blob` as indented JSON, tolerating non-unicode bytes."""
        try:
            return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
        except UnicodeDecodeError:
            # This blob contains non-unicode, so lets pretend it's Latin-1 to show something
            return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)

    def __str__(self):
        # Build the full alert text: name/custom text, rule text,
        # top counts, then remaining match fields.
        self.text = ''
        if 'alert_text' not in self.rule:
            self.text += self.rule['name'] + '\n\n'

        self._add_custom_alert_text()
        self._ensure_new_line()
        if self.rule.get('alert_text_type') != 'alert_text_only':
            self._add_rule_text()
            self._ensure_new_line()
            if self.rule.get('top_count_keys'):
                self._add_top_counts()
            if self.rule.get('alert_text_type') != 'exclude_fields':
                self._add_match_items()
        return self.text
class JiraFormattedMatchString(BasicMatchString):
    """Match string variant that renders fields inside a JIRA {code:json} block."""

    def _add_match_items(self):
        # Drop the top_events_* aggregation keys; dump the rest as JSON.
        filtered = {key: val for key, val in self.match.items()
                    if not key.startswith('top_events_')}
        json_blob = self._pretty_print_as_json(filtered)
        self.text += u'{{code:json}}{0}{{code}}'.format(json_blob)
class Alerter(object):
    """ Base class for types of alerts.

    NOTE: Python 2 code (``unicode``, ``xrange``, ``iteritems``).

    :param rule: The rule configuration.
    """
    required_options = frozenset([])

    def __init__(self, rule):
        elastalert_logger.info("Starting up method:---alerts.__init__---")
        self.rule = rule
        # pipeline object is created by ElastAlerter.send_alert()
        # and attached to each alerters used by a rule before calling alert()
        self.pipeline = None
        self.resolve_rule_references(self.rule)

    def resolve_rule_references(self, root):
        """Recursively replace $name$ references inside `root` in place.

        :param root: a (possibly nested) list or dict from the rule config.
        """
        # Support referencing other top-level rule properties to avoid redundant copy/paste
        if type(root) == list:
            # Make a copy since we may be modifying the contents of the structure we're walking
            for i, item in enumerate(copy.copy(root)):
                if type(item) == dict or type(item) == list:
                    self.resolve_rule_references(root[i])
                else:
                    root[i] = self.resolve_rule_reference(item)
        elif type(root) == dict:
            # Make a copy since we may be modifying the contents of the structure we're walking
            for key, value in root.copy().iteritems():
                if type(value) == dict or type(value) == list:
                    self.resolve_rule_references(root[key])
                else:
                    root[key] = self.resolve_rule_reference(value)

    def resolve_rule_reference(self, value):
        """Return the referenced rule value for '$name$' strings, else `value`.

        Integer references are coerced back to int so types are preserved.
        """
        strValue = unicode(value)
        if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
            if type(value) == int:
                return int(self.rule[strValue[1:-1]])
            else:
                return self.rule[strValue[1:-1]]
        else:
            return value

    def alert(self, match):
        """ Send an alert. Match is a dictionary of information about the alert.

        :param match: A dictionary of relevant information to the alert.
        """
        raise NotImplementedError()

    def get_info(self):
        """ Returns a dictionary of data related to this alert. At minimum, this should contain
        a field type corresponding to the type of Alerter. """
        return {'type': 'Unknown'}

    def create_title(self, matches):
        """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.

        :param matches: A list of dictionaries of relevant information to the alert.
        """
        if 'alert_subject' in self.rule:
            return self.create_custom_title(matches)
        return self.create_default_title(matches)

    def create_custom_title(self, matches):
        """Format the rule's 'alert_subject' with values from the first match."""
        alert_subject = unicode(self.rule['alert_subject'])
        if 'alert_subject_args' in self.rule:
            alert_subject_args = self.rule['alert_subject_args']
            alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]

            # Support referencing other top-level rule properties
            # This technically may not work if there is a top-level rule property with the same name
            # as an es result key, since it would have been matched in the lookup_es_key call above
            for i in xrange(len(alert_subject_values)):
                if alert_subject_values[i] is None:
                    alert_value = self.rule.get(alert_subject_args[i])
                    if alert_value:
                        alert_subject_values[i] = alert_value

            alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values]
            return alert_subject.format(*alert_subject_values)
        return alert_subject

    def create_alert_body(self, matches):
        """Build the alert body: aggregation summary plus one section per match."""
        body = self.get_aggregation_summary_text(matches)
        for match in matches:
            body += unicode(BasicMatchString(self.rule, match))
            # Separate text of aggregated alerts with dashes
            if len(matches) > 1:
                body += '\n----------------------------------------\n'
        return body

    def get_aggregation_summary_text(self, matches):
        """Return a text table of per-key counts for aggregated alerts.

        Empty string unless both 'aggregation' and 'summary_table_fields'
        are configured on the rule.
        """
        text = ''
        if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
            summary_table_fields = self.rule['summary_table_fields']
            if not isinstance(summary_table_fields, list):
                summary_table_fields = [summary_table_fields]
            # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
            summary_table_fields_with_count = summary_table_fields + ['count']
            text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(summary_table_fields_with_count)
            text_table = Texttable()
            text_table.header(summary_table_fields_with_count)
            match_aggregation = {}

            # Maintain an aggregate count for each unique key encountered in the aggregation period
            for match in matches:
                key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])
                if key_tuple not in match_aggregation:
                    match_aggregation[key_tuple] = 1
                else:
                    match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
            for keys, count in match_aggregation.iteritems():
                text_table.add_row([key for key in keys] + [count])
            text += text_table.draw() + '\n\n'
        return unicode(text)

    def create_default_title(self, matches):
        """Default alert title: the rule name."""
        return self.rule['name']

    def get_account(self, account_file):
        """ Gets the username and password from an account file.

        Sets self.user and self.password as a side effect.

        :param account_file: Name of the file which contains user and password information.
        :raises EAException: if either field is missing from the file.
        """
        account_conf = yaml_loader(account_file)
        if 'user' not in account_conf or 'password' not in account_conf:
            raise EAException('Account file must have user and password fields')
        self.user = account_conf['user']
        self.password = account_conf['password']
class StompAlerter(Alerter):
    """ The stomp alerter publishes alerts via stomp to a broker. """
    required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])

    def alert(self, matches):
        """Publish a JSON summary of `matches` to the configured STOMP destination.

        :param matches: list of match dicts for this alert.
        """
        alerts = []
        qk = self.rule.get('query_key', None)
        fullmessage = {}
        for match in matches:
            if qk in match:
                elastalert_logger.info(
                    'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
                alerts.append('1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
                fullmessage['match'] = match[qk]
            else:
                elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
                alerts.append(
                    '2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))
                )
                fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field'])
            elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))

        fullmessage['alerts'] = alerts
        fullmessage['rule'] = self.rule['name']
        # NOTE(review): `match` is the loop variable, so 'matching' reflects
        # only the *last* match in `matches` — confirm this is intended.
        fullmessage['matching'] = unicode(BasicMatchString(self.rule, match))
        fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        fullmessage['body'] = self.create_alert_body(matches)

        # Connection settings fall back to a local broker with the
        # ActiveMQ default port and admin/admin credentials.
        self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
        self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
        self.stomp_login = self.rule.get('stomp_login', 'admin')
        self.stomp_password = self.rule.get('stomp_password', 'admin')
        self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT')

        # Connect, publish one JSON message, and disconnect per alert.
        conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)])

        conn.start()
        conn.connect(self.stomp_login, self.stomp_password)
        conn.send(self.stomp_destination, json.dumps(fullmessage))
        conn.disconnect()

    def get_info(self):
        return {'type': 'stomp'}
class DebugAlerter(Alerter):
    """ The debug alerter uses a Python logger (by default, alerting to terminal). """

    def alert(self, matches):
        """Log each match (and its full BasicMatchString) at INFO level."""
        qk = self.rule.get('query_key', None)
        for match in matches:
            # When a query_key is configured and present, include its value.
            if qk in match:
                elastalert_logger.info(
                    'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
            else:
                elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
            elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))

    def get_info(self):
        return {'type': 'debug'}
class EmailAlerter(Alerter):
    """ Sends an email alert over SMTP (plain, SMTP_SSL, or STARTTLS when offered). """
    required_options = frozenset(['email'])
    def __init__(self, *args):
        super(EmailAlerter, self).__init__(*args)
        self.smtp_host = self.rule.get('smtp_host', 'localhost')
        self.smtp_ssl = self.rule.get('smtp_ssl', False)
        self.from_addr = self.rule.get('from_addr', 'ElastAlert')
        self.smtp_port = self.rule.get('smtp_port')
        self.user = self.rule.get('user')
        self.password = self.rule.get('password')
        # Normalize 'email', 'cc' and 'bcc' to lists so they can be joined and
        # concatenated uniformly in alert().
        if isinstance(self.rule['email'], basestring):
            self.rule['email'] = [self.rule['email']]
        cc = self.rule.get('cc')
        if cc and isinstance(cc, basestring):
            self.rule['cc'] = [self.rule['cc']]
        bcc = self.rule.get('bcc')
        if bcc and isinstance(bcc, basestring):
            self.rule['bcc'] = [self.rule['bcc']]
        # email_add_domain is appended to recipients taken from email_from_field;
        # make sure it starts with '@' so the result is a valid address.
        add_suffix = self.rule.get('email_add_domain')
        if add_suffix and not add_suffix.startswith('@'):
            self.rule['email_add_domain'] = '@' + add_suffix
    def alert(self, matches):
        """ Build the message body and send it to the configured recipients.

        Raises EAException on connection or authentication failure. """
        body = self.create_alert_body(matches)
        # Add JIRA ticket if it exists (set by a JiraAlerter earlier in the pipeline)
        if self.pipeline is not None and 'jira_ticket' in self.pipeline:
            url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
            body += '\nJIRA ticket: %s' % (url)
        to_addr = self.rule['email']
        # Optionally derive the recipient from a field of the first match.
        if 'email_from_field' in self.rule:
            recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
            if isinstance(recipient, basestring):
                if '@' in recipient:
                    to_addr = [recipient]
                elif 'email_add_domain' in self.rule:
                    to_addr = [recipient + self.rule['email_add_domain']]
        email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')
        email_msg['Subject'] = self.create_title(matches)
        email_msg['To'] = ', '.join(to_addr)
        email_msg['From'] = self.from_addr
        email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
        email_msg['Date'] = formatdate()
        if self.rule.get('cc'):
            email_msg['CC'] = ','.join(self.rule['cc'])
            to_addr = to_addr + self.rule['cc']
        if self.rule.get('bcc'):
            # bcc recipients go in the envelope only, never in a header.
            to_addr = to_addr + self.rule['bcc']
        try:
            if self.smtp_ssl:
                if self.smtp_port:
                    self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port)
                else:
                    self.smtp = SMTP_SSL(self.smtp_host)
            else:
                if self.smtp_port:
                    self.smtp = SMTP(self.smtp_host, self.smtp_port)
                else:
                    self.smtp = SMTP(self.smtp_host)
                self.smtp.ehlo()
                if self.smtp.has_extn('STARTTLS'):
                    self.smtp.starttls()
            # Authenticate with the mailbox.
            # NOTE(review): login is attempted even when user/password are None —
            # confirm all deployments configure credentials.
            self.smtp.login(self.user, self.password)
        except SMTPAuthenticationError as e:
            # FIX: this handler must come before the SMTPException one below —
            # SMTPAuthenticationError is a subclass of SMTPException, so with the
            # previous ordering this branch was unreachable.
            raise EAException("SMTP username/password rejected: %s" % (e))
        except (SMTPException, error) as e:
            raise EAException("Error connecting to SMTP host: %s" % (e))
        self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
        self.smtp.close()
        elastalert_logger.info("Sent email to %s" % (to_addr))
    def create_default_title(self, matches):
        """ Default subject line: rule name, plus the query_key value when present. """
        subject = 'ElastAlert: %s' % (self.rule['name'])
        # If the rule has a query_key, add that value plus timestamp to subject
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                subject += ' - %s' % (qk)
        return subject
    def get_info(self):
        return {'type': 'email',
                'recipients': self.rule['email']}
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
'jira_account_file',
'jira_assignee',
'jira_bump_in_statuses',
'jira_bump_not_in_statuses',
'jira_bump_tickets',
'jira_component',
'jira_components',
'jira_description',
'jira_ignore_in_title',
'jira_issuetype',
'jira_label',
'jira_labels',
'jira_max_age',
'jira_priority',
'jira_project',
'jira_server',
'jira_watchers',
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
]
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
# We used to support only a single component. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
# We used to support only a single label. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))
self.description = self.rule.get('jira_description', '')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.priority = self.rule.get('jira_priority')
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
self.watchers = self.rule.get('jira_watchers')
if self.bump_in_statuses and self.bump_not_in_statuses:
msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses))
if intersection:
msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
msg, ','.join(intersection))
msg += ' This should be simplified to use only one or the other.'
logging.warning(msg)
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args['components'] = [{'name': self.components}]
else:
self.jira_args['components'] = [{'name': component} for component in self.components]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args['labels'] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
self.get_priorities()
self.get_arbitrary_fields()
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
try:
if self.priority is not None:
self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
except KeyError:
logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys()))
def get_arbitrary_fields(self):
# This API returns metadata about all the fields defined on the jira server (built-ins and custom ones)
fields = self.client.fields()
for jira_field, value in self.rule.iteritems():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if jira_field.startswith('jira_') and jira_field not in self.known_field_list:
# Remove the jira_ part. Convert underscores to spaces
normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
# All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
for identifier in ['name', 'id']:
field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
if field:
break
if not field:
# Log a warning to ElastAlert saying that we couldn't find that type?
# OR raise and fail to load the alert entirely? Probably the latter...
raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
arg_name = field['id']
# Check the schema information to decide how to set the value correctly
# If the schema information is not available, raise an exception since we don't know how to set it
# Note this is only the case for two built-in types, id: issuekey and id: thumbnail
if not ('schema' in field or 'type' in field['schema']):
raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
arg_type = field['schema']['type']
# Handle arrays of simple types like strings or numbers
if arg_type == 'array':
# As a convenience, support the scenario wherein the user only provides
# a single value for a multi-value field e.g. jira_labels: Only_One_Label
if type(value) != list:
value = [value]
array_items = field['schema']['items']
# Simple string types
if array_items in ['string', 'date', 'datetime']:
# Special case for multi-select custom types (the JIRA metadata says that these are strings, but
# in reality, they are required to be provided as an object.
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
self.jira_args[arg_name] = value
elif array_items == 'number':
self.jira_args[arg_name] = [int(v) for v in value]
# Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
elif array_items == 'option':
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
# Try setting it as an object, using 'name' as the key
# This may not work, as the key might actually be 'key', 'id', 'value', or something else
# If it works, great! If not, it will manifest itself as an API error that will bubble up
self.jira_args[arg_name] = [{'name': v} for v in value]
# Handle non-array types
else:
# Simple string types
if arg_type in ['string', 'date', 'datetime']:
# Special case for custom types (the JIRA metadata says that these are strings, but
# in reality, they are required to be provided as an object.
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = {'value': value}
else:
self.jira_args[arg_name] = value
# Number type
elif arg_type == 'number':
self.jira_args[arg_name] = int(value)
elif arg_type == 'option':
self.jira_args[arg_name] = {'value': value}
# Complex type
else:
self.jira_args[arg_name] = {'name': value}
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
for x in range(len(priorities)):
self.priority_ids[x] = priorities[x].id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if 'alert_subject' not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
if 'jira_ignore_in_title' in self.rule:
title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')
# This is necessary for search to work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(' - ', ' ')
title = title.replace('\\', '\\\\')
date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
if self.bump_in_statuses:
jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))
if self.bump_not_in_statuses:
jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = unicode(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def alert(self, matches):
elastalert_logger.info("Starting up method:---alerts.start---")
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
for match in matches:
try:
self.comment_on_ticket(ticket, match)
except JIRAError as e:
logging.exception("Error while commenting on ticket %s: %s" % (ticket, e))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = ticket
self.pipeline['jira_server'] = self.server
return None
self.jira_args['summary'] = title
self.jira_args['description'] = self.create_alert_body(matches)
try:
self.issue = self.client.create_issue(**self.jira_args)
# You can not add watchers on initial creation. Only as a follow-up action
if self.watchers:
for watcher in self.watchers:
try:
self.client.add_watcher(self.issue.key, watcher)
except Exception as ex:
# Re-raise the exception, preserve the stack-trace, and give some
# context as to which watcher failed to be added
raise Exception("Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(watcher, ex)), None, sys.exc_info()[2]
except JIRAError as e:
raise EAException("Error creating JIRA ticket: %s" % (e))
elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = self.issue
self.pipeline['jira_server'] = self.server
def create_alert_body(self, matches):
body = self.description + '\n'
body += self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(JiraFormattedMatchString(self.rule, match))
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
if text:
text = u'{{noformat}}{0}{{noformat}}'.format(text)
return text
def create_default_title(self, matches, for_search=False):
# If there is a query_key, use that in the title
if 'query_key' in self.rule and self.rule['query_key'] in matches[0]:
title = 'ElastAlert: %s matched %s' % (matches[0][self.rule['query_key']], self.rule['name'])
else:
title = 'ElastAlert: %s' % (self.rule['name'])
if for_search:
return title
title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
# Add count for spikes
count = matches[0].get('spike_count')
if count:
title += ' - %s+ events' % (count)
return title
def get_info(self):
return {'type': 'jira'}
class CommandAlerter(Alerter):
    """ Runs an arbitrary external command for each alert, optionally piping the
    matches to its stdin as JSON. """
    required_options = set(['command'])
    def __init__(self, *args):
        super(CommandAlerter, self).__init__(*args)
        self.last_command = []
        self.shell = False
        if isinstance(self.rule['command'], basestring):
            # A plain string command is executed through the shell.
            self.shell = True
            if '%' in self.rule['command']:
                # %-substitution of match fields combined with shell=True allows
                # shell metacharacters from match data to be executed.
                logging.warning('Warning! You could be vulnerable to shell injection!')
            self.rule['command'] = [self.rule['command']]
        self.new_style_string_format = False
        if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:
            self.new_style_string_format = True
    def alert(self, matches):
        elastalert_logger.info("Starting up method:---alerts.command.alert---")
        # Format the command and arguments
        try:
            if self.new_style_string_format:
                # str.format substitution, e.g. '{match[field]}'.
                command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']]
            else:
                # Old-style %-substitution against the first match's fields.
                command = [command_arg % matches[0] for command_arg in self.rule['command']]
            self.last_command = command
        except KeyError as e:
            raise EAException("Error formatting command: %s" % (e))
        # Run command and pipe data
        try:
            subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
            if self.rule.get('pipe_match_json'):
                match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
                stdout, stderr = subp.communicate(input=match_json)
            # NOTE(review): when neither pipe_match_json nor fail_on_non_zero_exit
            # is set, the child is never waited on (fire-and-forget) — confirm
            # this is intended.
            if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
                raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
        except OSError as e:
            raise EAException("Error while running command %s: %s" % (' '.join(command), e))
    def get_info(self):
        # 'command' reflects the last fully-substituted command line that was run.
        return {'type': 'command',
                'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
    """ Publishes the alert body to an AWS SNS topic. """
    required_options = frozenset(['sns_topic_arn'])
    def __init__(self, *args):
        super(SnsAlerter, self).__init__(*args)
        self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
        self.aws_access_key = self.rule.get('aws_access_key', '')
        self.aws_secret_key = self.rule.get('aws_secret_key', '')
        self.aws_region = self.rule.get('aws_region', 'us-east-1')
        self.boto_profile = self.rule.get('boto_profile', '')
    def create_default_title(self, matches):
        return 'ElastAlert: %s' % (self.rule['name'])
    def alert(self, matches):
        body = self.create_alert_body(matches)
        # Credential precedence: explicit keys > boto profile > instance role.
        if self.aws_access_key or self.aws_secret_key:
            sns_client = sns.connect_to_region(self.aws_region,
                                               aws_access_key_id=self.aws_access_key,
                                               aws_secret_access_key=self.aws_secret_key)
        elif self.boto_profile:
            sns_client = sns.connect_to_region(self.aws_region,
                                               profile_name=self.boto_profile)
        else:
            sns_client = sns.connect_to_region(self.aws_region)
        sns_client.publish(self.sns_topic_arn, body, subject=self.create_title(matches))
        elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class HipChatAlerter(Alerter):
    """ Sends a room notification to HipChat for each alert. """
    required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id'])
    def __init__(self, rule):
        super(HipChatAlerter, self).__init__(rule)
        self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red')
        self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html')
        self.hipchat_auth_token = self.rule['hipchat_auth_token']
        self.hipchat_room_id = self.rule['hipchat_room_id']
        self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com')
        self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False)
        self.hipchat_notify = self.rule.get('hipchat_notify', True)
        self.hipchat_from = self.rule.get('hipchat_from', '')
        self.hipchat_proxy = self.rule.get('hipchat_proxy', None)
        self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % (
            self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token)
    def alert(self, matches):
        body = self.create_alert_body(matches)
        # HipChat rejects messages over 10000 characters with HTTP 400.
        if len(body) > 9999:
            body = body[:9980] + '..(truncated)'
        if self.hipchat_message_format == 'html':
            # HTML-format messages need <br /> instead of raw newlines.
            body = body.replace('\n', '<br />')
        payload = {
            'color': self.hipchat_msg_color,
            'message': body,
            'message_format': self.hipchat_message_format,
            'notify': self.hipchat_notify,
            'from': self.hipchat_from
        }
        headers = {'content-type': 'application/json'}
        # Route through an https proxy when one is configured.
        proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None
        try:
            if self.hipchat_ignore_ssl_errors:
                requests.packages.urllib3.disable_warnings()
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,
                                     verify=not self.hipchat_ignore_ssl_errors,
                                     proxies=proxies)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to HipChat: %s" % e)
        elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)
    def get_info(self):
        return {'type': 'hipchat',
                'hipchat_room_id': self.hipchat_room_id}
class SlackAlerter(Alerter):
    """ Posts each alert to one or more Slack incoming webhooks. """
    required_options = frozenset(['slack_webhook_url'])
    def __init__(self, rule):
        super(SlackAlerter, self).__init__(rule)
        self.slack_webhook_url = self.rule['slack_webhook_url']
        # Accept either a single webhook URL or a list of them.
        if isinstance(self.slack_webhook_url, basestring):
            self.slack_webhook_url = [self.slack_webhook_url]
        self.slack_proxy = self.rule.get('slack_proxy', None)
        self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
        self.slack_channel_override = self.rule.get('slack_channel_override', '')
        self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
        self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
        self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
        self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
        self.slack_text_string = self.rule.get('slack_text_string', '')
    def format_body(self, body):
        # https://api.slack.com/docs/formatting
        # FIX: the previous replacements were identity no-ops ('&' -> '&');
        # Slack requires &, < and > to be escaped as HTML entities, and '&'
        # must be escaped first to avoid double-escaping.
        body = body.encode('UTF-8')
        body = body.replace('&', '&amp;')
        body = body.replace('<', '&lt;')
        body = body.replace('>', '&gt;')
        return body
    def alert(self, matches):
        body = self.create_alert_body(matches)
        body = self.format_body(body)
        # post to slack
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.slack_proxy} if self.slack_proxy else None
        payload = {
            'username': self.slack_username_override,
            'channel': self.slack_channel_override,
            'parse': self.slack_parse_override,
            'text': self.slack_text_string,
            'attachments': [
                {
                    'color': self.slack_msg_color,
                    'title': self.create_title(matches),
                    'text': body,
                    'fields': []
                }
            ]
        }
        # icon_url takes precedence over the emoji when both are configured.
        if self.slack_icon_url_override != '':
            payload['icon_url'] = self.slack_icon_url_override
        else:
            payload['icon_emoji'] = self.slack_emoji_override
        for url in self.slack_webhook_url:
            try:
                response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to slack: %s" % e)
        elastalert_logger.info("Alert sent to Slack")
    def get_info(self):
        return {'type': 'slack',
                'slack_username_override': self.slack_username_override,
                'slack_webhook_url': self.slack_webhook_url}
class PagerDutyAlerter(Alerter):
    """ Triggers a PagerDuty incident through the generic events API. """
    required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])
    def __init__(self, rule):
        super(PagerDutyAlerter, self).__init__(rule)
        self.pagerduty_service_key = self.rule['pagerduty_service_key']
        self.pagerduty_client_name = self.rule['pagerduty_client_name']
        self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
        self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
        self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
    def alert(self, matches):
        details = {
            "information": self.create_alert_body(matches).encode('UTF-8'),
        }
        payload = {
            'service_key': self.pagerduty_service_key,
            'description': self.rule['name'],
            'event_type': 'trigger',
            'incident_key': self.pagerduty_incident_key,
            'client': self.pagerduty_client_name,
            'details': details,
        }
        headers = {'content-type': 'application/json'}
        # Route through an https proxy when one is configured.
        proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
        try:
            # ensure_ascii=False keeps non-ASCII alert text intact in the posted JSON.
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to pagerduty: %s" % e)
        elastalert_logger.info("Trigger sent to PagerDuty")
    def get_info(self):
        return {'type': 'pagerduty',
                'pagerduty_client_name': self.pagerduty_client_name}
class ExotelAlerter(Alerter):
    """ Sends an SMS alert through the Exotel API. """
    required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])
    def __init__(self, rule):
        super(ExotelAlerter, self).__init__(rule)
        self.exotel_account_sid = self.rule['exotel_account_sid']
        self.exotel_auth_token = self.rule['exotel_auth_token']
        self.exotel_to_number = self.rule['exotel_to_number']
        self.exotel_from_number = self.rule['exotel_from_number']
        # Optional extra text appended after the rule name in the SMS body.
        self.sms_body = self.rule.get('exotel_message_body', '')
    def alert(self, matches):
        client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
        try:
            message_body = self.rule['name'] + self.sms_body
            response = client.sms(self.exotel_from_number, self.exotel_to_number, message_body)
            if response != 200:
                raise EAException("Error posting to Exotel, response code is %s" % response)
        except EAException:
            # FIX: the previous bare 'except' swallowed the specific
            # response-code message above and replaced it with a generic one;
            # re-raise it unchanged instead.
            raise
        except Exception:
            raise EAException("Error posting to Exotel")
        elastalert_logger.info("Trigger sent to Exotel")
    def get_info(self):
        return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
    """ Sends the rule name as an SMS through the Twilio REST API. """
    # NOTE: 'twilio_accout_sid' (sic) is the established option name; the
    # misspelling is kept for backward compatibility with existing rule files.
    required_options = frozenset(['twilio_accout_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number'])
    def __init__(self, rule):
        super(TwilioAlerter, self).__init__(rule)
        self.twilio_accout_sid = self.rule['twilio_accout_sid']
        self.twilio_auth_token = self.rule['twilio_auth_token']
        self.twilio_to_number = self.rule['twilio_to_number']
        self.twilio_from_number = self.rule['twilio_from_number']
    def alert(self, matches):
        client = TwilioRestClient(self.twilio_accout_sid, self.twilio_auth_token)
        try:
            # FIX: the sender was previously set to twilio_to_number, so the
            # configured twilio_from_number was never used and messages were
            # addressed from the recipient's own number.
            client.messages.create(body=self.rule['name'],
                                   to=self.twilio_to_number,
                                   from_=self.twilio_from_number)
        except TwilioRestException as e:
            raise EAException("Error posting to twilio: %s" % e)
        elastalert_logger.info("Trigger sent to Twilio")
    def get_info(self):
        return {'type': 'twilio',
                'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
    """ Creates a VictorOps incident for each alert via the generic REST endpoint. """
    required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])
    def __init__(self, rule):
        super(VictorOpsAlerter, self).__init__(rule)
        self.victorops_api_key = self.rule['victorops_api_key']
        self.victorops_routing_key = self.rule['victorops_routing_key']
        self.victorops_message_type = self.rule['victorops_message_type']
        self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
        self.victorops_proxy = self.rule.get('victorops_proxy', None)
        self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
            self.victorops_api_key, self.victorops_routing_key)
    def alert(self, matches):
        payload = {
            "message_type": self.victorops_message_type,
            "entity_display_name": self.victorops_entity_display_name,
            "monitoring_tool": "ElastAlert",
            "state_message": self.create_alert_body(matches)
        }
        headers = {'content-type': 'application/json'}
        # Route through an https proxy when one is configured.
        proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
        try:
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to VictorOps: %s" % e)
        elastalert_logger.info("Trigger sent to VictorOps")
    def get_info(self):
        return {'type': 'victorops',
                'victorops_routing_key': self.victorops_routing_key}
class TelegramAlerter(Alerter):
    """ Sends each alert to a Telegram chat through the bot sendMessage API. """
    required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])
    def __init__(self, rule):
        super(TelegramAlerter, self).__init__(rule)
        self.telegram_bot_token = self.rule['telegram_bot_token']
        self.telegram_room_id = self.rule['telegram_room_id']
        self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
        self.telegram_proxy = self.rule.get('telegram_proxy', None)
        self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
    def alert(self, matches):
        # Markdown body: bold title, match bodies inside a code fence.
        parts = [u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches))]
        for match in matches:
            parts.append(unicode(BasicMatchString(self.rule, match)))
            # Separate text of aggregated alerts with dashes
            if len(matches) > 1:
                parts.append('\n----------------------------------------\n')
        parts.append(u' ```')
        body = u''.join(parts)
        payload = {
            'chat_id': self.telegram_room_id,
            'text': body,
            'parse_mode': 'markdown',
            'disable_web_page_preview': True
        }
        headers = {'content-type': 'application/json'}
        # Route through an https proxy when one is configured.
        proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
        try:
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Telegram: %s" % e)
        elastalert_logger.info(
            "Alert sent to Telegram room %s" % self.telegram_room_id)
    def get_info(self):
        return {'type': 'telegram',
                'telegram_room_id': self.telegram_room_id}
class GitterAlerter(Alerter):
    """ Posts each alert to a Gitter activity webhook. """
    required_options = frozenset(['gitter_webhook_url'])
    def __init__(self, rule):
        super(GitterAlerter, self).__init__(rule)
        self.gitter_webhook_url = self.rule['gitter_webhook_url']
        self.gitter_proxy = self.rule.get('gitter_proxy', None)
        self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')
    def alert(self, matches):
        payload = {
            'message': self.create_alert_body(matches),
            'level': self.gitter_msg_level
        }
        headers = {'content-type': 'application/json'}
        # Route through an https proxy when one is configured.
        proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
        try:
            response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Gitter: %s" % e)
        elastalert_logger.info("Alert sent to Gitter")
    def get_info(self):
        return {'type': 'gitter',
                'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
    """ Creates a ServiceNow incident for each match via the REST API. """
    required_options = set(['username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id'])
    def __init__(self, rule):
        # FIX: the super() call previously named GitterAlerter, which raises
        # TypeError because GitterAlerter is not in this class's MRO.
        super(ServiceNowAlerter, self).__init__(rule)
        self.servicenow_rest_url = self.rule['servicenow_rest_url']
        self.servicenow_proxy = self.rule.get('servicenow_proxy', None)
    def alert(self, matches):
        # One incident per match.
        for match in matches:
            # Parse everything into description.
            description = str(BasicMatchString(self.rule, match))
            # Set proper headers
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json;charset=utf-8"
            }
            # Route through an https proxy when one is configured.
            proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
            payload = {
                "description": description,
                "short_description": self.rule['short_description'],
                "comments": self.rule['comments'],
                "assignment_group": self.rule['assignment_group'],
                "category": self.rule['category'],
                "subcategory": self.rule['subcategory'],
                "cmdb_ci": self.rule['cmdb_ci'],
                "caller_id": self.rule["caller_id"]
            }
            try:
                response = requests.post(self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies)
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to ServiceNow: %s" % e)
            elastalert_logger.info("Alert sent to ServiceNow")
    def get_info(self):
        # NOTE(review): the 'self.servicenow_rest_url' key looks like a typo but
        # is kept verbatim for backward compatibility with get_info() consumers.
        return {'type': 'ServiceNow',
                'self.servicenow_rest_url': self.servicenow_rest_url}
class SimplePostAlerter(Alerter):
    """POST the rule name and raw match documents as JSON to webhook URL(s)."""

    def __init__(self, rule):
        super(SimplePostAlerter, self).__init__(rule)
        url = self.rule.get('simple_webhook_url')
        # Normalise a single URL to a one-element list so alert() can
        # always iterate.
        self.simple_webhook_url = [url] if isinstance(url, basestring) else url
        self.simple_proxy = self.rule.get('simple_proxy')

    def alert(self, matches):
        """Send one POST per configured webhook URL; raise EAException on failure."""
        payload = {
            'rule': self.rule['name'],
            'matches': matches
        }
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json;charset=utf-8"
        }
        proxies = {'https': self.simple_proxy} if self.simple_proxy else None
        for url in self.simple_webhook_url:
            try:
                response = requests.post(
                    url,
                    data=json.dumps(payload, cls=DateTimeEncoder),
                    headers=headers,
                    proxies=proxies)
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting simple alert: %s" % e)
            elastalert_logger.info("Simple alert sent")

    def get_info(self):
        """Describe this alerter for ElastAlert status reporting."""
        info = {'type': 'simple'}
        info['simple_webhook_url'] = self.simple_webhook_url
        return info
|
# coding=utf-8
def get(ar, index):
    """Return ar[index], mapping a negative index to len(ar) + index.

    Note: for index < -len(ar) this (like the original) wraps again via
    Python's native negative indexing rather than raising immediately.
    """
    pos = len(ar) + index if index < 0 else index
    return ar[pos]
def find(ar, filter):
    """Return the first element of `ar` accepted by `filter`, or None."""
    # (parameter name kept for caller compatibility, though it shadows
    # the builtin `filter`)
    return next((item for item in ar if filter(item)), None)
def execute(ar, filter, action):
    """Apply `action` to every element of `ar` accepted by `filter`."""
    for item in (x for x in ar if filter(x)):
        action(item)
# Batch-configuration script for a crawler/ETL host environment targeting
# Anjuke (安居客), a Chinese real-estate site.  The Chinese identifiers
# (安居客核心流程 "core flow", 户型图存储 "floor-plan storage", 相册 "photo
# album", etc.) are task objects injected by the host tool — TODO confirm
# against the hosting application; none of them are defined in this file.

# Disable the first ETL step of each of these auxiliary tasks.
unabled=[户型图存储方案,户型图存储,安居客户型列表,安居客评价,安居客楼盘详情,相册存储方案,安居客相册];
for e in unabled:
    e.etls[0].Enabled=False
# Locate the page-range control step ('数量范围选择' = "range selection").
页数范围控制=find(安居客核心流程.etls,lambda x:x.TypeName=='数量范围选择')
# Retry count for crawler steps.  NOTE(review): this is the string '3',
# not an int — presumably the host coerces it; confirm.
重试次数='3'
# Take effectively everything (no paging limit).
页数范围控制.Skip=0
页数范围控制.Take=20000000
debug=False
not_repeat=True
def work2(x):
    # Toggle the de-duplication ('防重复') steps on or off.
    x.Enabled=not_repeat;
def work(x):
    # Set the retry limit on crawler-transform ('从爬虫转换') steps.
    x.MaxTryCount=重试次数;
execute(安居客核心流程.etls,lambda x:x.TypeName=='从爬虫转换',work)
execute(安居客核心流程.etls,lambda x:x.Name=='防重复',work2)
# NOTE(review): this repeats the first execute() call above — harmless
# (setting MaxTryCount twice is idempotent) but probably unintentional.
execute(安居客核心流程.etls,lambda x:x.TypeName=='从爬虫转换',work)
# Enable/disable the trailing steps of the core flow depending on debug.
get(安居客核心流程.etls,-2).Enabled=not debug;
get(安居客核心流程.etls,-3).Enabled=False
get(安居客相册.etls,-1).Enabled=True
get(户型图存储.etls,-1).Enabled=True
# Pipe-separated list of city names to crawl.
get(安居客城市.etls,-1).Script='锦州|景德镇|吉安|济宁|金华|揭阳|晋中|九江|焦作|晋城|荆州|佳木斯|酒泉|鸡西|济源|金昌|嘉峪关'
# Windows output path templates for floor-plan and album images.
get(户型图存储方案.etls,-4).Format='D:\安居客图片\{0}\户型图\{1}_{2}_{3}.jpg'
get(相册存储方案.etls,-4).Format='D:\安居客图片\{0}\相册\{1}_{2}_{3}.jpg'
|
import datetime
import hashlib
import json
import logging as std_logging
import os
import urllib
from eventlet import greenthread
from time import strftime
from time import time
from requests import HTTPError
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip.cluster_manager import \
ClusterManager
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as f5const
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdTagProcessor
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_builder import \
LBaaSBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_driver import \
LBaaSBaseDriver
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.network_service import \
NetworkServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.tenants import \
BigipTenantManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import serialized
from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \
VirtualAddress
# Module-level oslo logger.
LOG = logging.getLogger(__name__)
# Prefix for LBaaS-related namespaces ("qlbaas" = quantum LBaaS).
NS_PREFIX = 'qlbaas-'
__VERSION__ = '0.1.1'
# oslo.config options registered by the driver in __init__ (registerOpts).
OPTS = [  # XXX maybe we should make this a dictionary
    # --- BIG-IQ / OpenStack credentials ---
    cfg.StrOpt(
        'bigiq_hostname',
        help='The hostname (name or IP address) to use for the BIG-IQ host'
    ),
    cfg.StrOpt(
        'bigiq_admin_username',
        default='admin',
        help='The admin username to use for BIG-IQ authentication',
    ),
    cfg.StrOpt(
        'bigiq_admin_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password to use for BIG-IQ authentication'
    ),
    cfg.StrOpt(
        'openstack_keystone_uri',
        default='http://192.0.2.248:5000/',
        # NOTE(review): help text looks copy-pasted from the password
        # option above — this option is actually the Keystone endpoint URI.
        help='The admin password to use for BIG-IQ authentication'
    ),
    cfg.StrOpt(
        'openstack_admin_username',
        default='admin',
        help='The admin username to use for authentication '
             'with the Keystone service'
    ),
    cfg.StrOpt(
        'openstack_admin_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password to use for authentication'
             ' with the Keystone service'
    ),
    cfg.StrOpt(
        'bigip_management_username',
        default='admin',
        help='The admin username that the BIG-IQ will use to manage '
             'discovered BIG-IPs'
    ),
    cfg.StrOpt(
        'bigip_management_password',
        default='[Provide password in config file]',
        secret=True,
        help='The admin password that the BIG-IQ will use to manage '
             'discovered BIG-IPs'
    ),
    # --- device / HA topology ---
    cfg.StrOpt(
        'f5_device_type', default='external',
        help='What type of device onboarding'
    ),
    cfg.StrOpt(
        'f5_ha_type', default='pair',
        help='Are we standalone, pair(active/standby), or scalen'
    ),
    cfg.ListOpt(
        'f5_external_physical_mappings', default=['default:1.1:True'],
        help='Mapping between Neutron physical_network to interfaces'
    ),
    # --- L2/L3 networking and tunneling ---
    cfg.StrOpt(
        'f5_vtep_folder', default='Common',
        help='Folder for the VTEP SelfIP'
    ),
    cfg.StrOpt(
        'f5_vtep_selfip_name', default=None,
        help='Name of the VTEP SelfIP'
    ),
    cfg.ListOpt(
        'advertised_tunnel_types', default=['vxlan'],
        help='tunnel types which are advertised to other VTEPs'
    ),
    cfg.BoolOpt(
        'f5_populate_static_arp', default=False,
        help='create static arp entries based on service entries'
    ),
    cfg.StrOpt(
        'vlan_binding_driver',
        default=None,
        help='driver class for binding vlans to device ports'
    ),
    cfg.StrOpt(
        'interface_port_static_mappings',
        default=None,
        help='JSON encoded static mapping of'
             'devices to list of '
             'interface and port_id'
    ),
    cfg.StrOpt(
        'l3_binding_driver',
        default=None,
        help='driver class for binding l3 address to l2 ports'
    ),
    cfg.StrOpt(
        'l3_binding_static_mappings', default=None,
        help='JSON encoded static mapping of'
             'subnet_id to list of '
             'port_id, device_id list.'
    ),
    cfg.BoolOpt(
        'f5_route_domain_strictness', default=False,
        help='Strict route domain isolation'
    ),
    cfg.BoolOpt(
        'f5_common_networks', default=False,
        help='All networks defined under Common partition'
    ),
    cfg.BoolOpt(
        'f5_common_external_networks', default=True,
        help='Treat external networks as common'
    ),
    cfg.BoolOpt(
        'external_gateway_mode', default=False,
        help='All subnets have an external l3 route on gateway'
    ),
    # --- iControl connectivity ---
    cfg.StrOpt(
        'icontrol_vcmp_hostname',
        help='The hostname (name or IP address) to use for vCMP Host '
             'iControl access'
    ),
    cfg.StrOpt(
        'icontrol_hostname',
        default="10.190.5.7",
        help='The hostname (name or IP address) to use for iControl access'
    ),
    cfg.StrOpt(
        'icontrol_username', default='admin',
        help='The username to use for iControl access'
    ),
    cfg.StrOpt(
        'icontrol_password', default='admin', secret=True,
        help='The password to use for iControl access'
    ),
    cfg.IntOpt(
        'icontrol_connection_timeout', default=30,
        help='How many seconds to timeout a connection to BIG-IP'
    ),
    cfg.IntOpt(
        'icontrol_connection_retry_interval', default=10,
        help='How many seconds to wait between retry connection attempts'
    ),
    cfg.DictOpt(
        'common_network_ids', default={},
        help='network uuid to existing Common networks mapping'
    ),
    cfg.StrOpt(
        'icontrol_config_mode', default='objects',
        help='Whether to use iapp or objects for bigip configuration'
    ),
    cfg.IntOpt(
        'max_namespaces_per_tenant', default=1,
        help='How many routing tables the BIG-IP will allocate per tenant'
             ' in order to accommodate overlapping IP subnets'
    ),
    # --- certificate manager / Keystone (Barbican) ---
    cfg.StrOpt(
        'cert_manager',
        default=None,
        help='Class name of the certificate mangager used for retrieving '
             'certificates and keys.'
    ),
    cfg.StrOpt(
        'auth_version',
        default=None,
        help='Keystone authentication version (v2 or v3) for Barbican client.'
    ),
    cfg.StrOpt(
        'os_project_id',
        default='service',
        help='OpenStack project ID.'
    ),
    cfg.StrOpt(
        'os_auth_url',
        default=None,
        help='OpenStack authentication URL.'
    ),
    cfg.StrOpt(
        'os_username',
        default=None,
        help='OpenStack user name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_user_domain_name',
        default=None,
        help='OpenStack user domain name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_project_name',
        default=None,
        help='OpenStack project name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_project_domain_name',
        default=None,
        help='OpenStack domain name for Keystone authentication.'
    ),
    cfg.StrOpt(
        'os_password',
        default=None,
        help='OpenStack user password for Keystone authentication.'
    ),
    # --- hierarchical port binding / miscellaneous ---
    cfg.StrOpt(
        'f5_network_segment_physical_network', default=None,
        help='Name of physical network to use for discovery of segment ID'
    ),
    cfg.StrOpt(
        'unlegacy_setting_placeholder', default=None,
        help='use this setting to separate legacy with hw/etc on agent side'
    ),
    cfg.IntOpt(
        'f5_network_segment_polling_interval', default=10,
        help='Seconds between periodic scans for disconnected virtual servers'
    ),
    cfg.IntOpt(
        'f5_network_segment_gross_timeout', default=300,
        help='Seconds to wait for a virtual server to become connected'
    ),
    cfg.StrOpt(
        'f5_parent_ssl_profile',
        default='clientssl',
        help='Parent profile used when creating client SSL profiles '
             'for listeners with TERMINATED_HTTPS protocols.'
    ),
    cfg.StrOpt(
        'os_tenant_name',
        default=None,
        help='OpenStack tenant name for Keystone authentication (v2 only).'
    ),
    cfg.BoolOpt(
        'trace_service_requests',
        default=False,
        help='Log service object.'
    ),
    cfg.BoolOpt(
        'report_esd_names_in_agent',
        default=False,
        help='whether or not to add valid esd names during report.'
    )
]
def is_operational(method):
    """Decorator: run `method` only while the driver is operational.

    When the driver instance (first positional argument) is not
    operational, the call is skipped: an error is logged, a reconnect is
    attempted via ``_init_bigips()``, and the wrapper returns None.
    IOErrors raised by `method` are logged and re-raised.
    """
    from functools import wraps

    # preserve the wrapped method's __name__ for logging/introspection
    @wraps(method)
    def wrapper(*args, **kwargs):
        instance = args[0]
        if instance.operational:
            try:
                return method(*args, **kwargs)
            except IOError as ioe:
                LOG.error('IO Error detected: %s' % method.__name__)
                LOG.error(str(ioe))
                raise ioe
        else:
            LOG.error('Cannot execute %s. Not operational. Re-initializing.'
                      % method.__name__)
            instance._init_bigips()
    return wrapper
class iControlDriver(LBaaSBaseDriver):
    """Control service deployment."""

    # pzhang(NOTE) here: we only sync, CRUD objs in below status
    # Service objects in these plugin states are the ones the driver
    # will create/update on the BIG-IPs.
    positive_plugin_const_state = \
        tuple([f5const.F5_PENDING_CREATE,
               f5const.F5_PENDING_UPDATE])
    def __init__(self, conf, registerOpts=True):
        """Build the driver from an oslo.config object.

        :param conf: oslo.config object carrying iControl/agent options.
        :param registerOpts: when False, a test can skip option
            registration and set options on `conf` manually instead.
        """
        # The registerOpts parameter allows a test to
        # turn off config option handling so that it can
        # set the options manually instead.
        super(iControlDriver, self).__init__(conf)
        self.conf = conf
        if registerOpts:
            self.conf.register_opts(OPTS)
        self.initialized = False
        self.hostnames = None
        self.device_type = conf.f5_device_type
        self.plugin_rpc = None  # overrides base, same value
        self.agent_report_state = None  # overrides base, same value
        self.operational = False  # overrides base, same value
        self.driver_name = 'f5-lbaasv2-icontrol'

        #
        # BIG-IP containers
        #

        # BIG-IPs which currectly active
        self.__bigips = {}
        self.__last_connect_attempt = None

        # HA and traffic group validation (performed once per init run)
        self.ha_validated = False
        self.tg_initialized = False
        # traffic groups discovered from BIG-IPs for service placement
        self.__traffic_groups = []

        # base configurations to report to Neutron agent state reports
        self.agent_configurations = {}  # overrides base, same value
        self.agent_configurations['device_drivers'] = [self.driver_name]
        self.agent_configurations['icontrol_endpoints'] = {}

        # to store the verified esd names
        self.esd_names = []

        # service component managers (instantiated in _init_bigip_managers)
        self.tenant_manager = None
        self.cluster_manager = None
        self.system_helper = None
        self.lbaas_builder = None
        self.service_adapter = None
        self.vlan_binding = None
        self.l3_binding = None
        self.cert_manager = None  # overrides register_OPTS

        # server helpers
        self.stat_helper = stat_helper.StatHelper()
        self.network_helper = network_helper.NetworkHelper()

        # f5-sdk helpers
        self.vs_manager = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.virtual)
        self.pool_manager = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.pool)

        try:
            # debug logging of service requests recieved by driver
            if self.conf.trace_service_requests:
                path = '/var/log/neutron/service/'
                if not os.path.exists(path):
                    os.makedirs(path)
                self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
                with open(self.file_name, 'w') as fp:
                    fp.write('[{}] ')

            # driver mode settings - GRM vs L2 adjacent
            # NOTE(review): f5_global_routed_mode is not declared in OPTS
            # above — presumably registered elsewhere; confirm.
            if self.conf.f5_global_routed_mode:
                LOG.info('WARNING - f5_global_routed_mode enabled.'
                         ' There will be no L2 or L3 orchestration'
                         ' or tenant isolation provisioned. All vips'
                         ' and pool members must be routable through'
                         ' pre-provisioned SelfIPs.')
                self.conf.use_namespaces = False
                self.conf.f5_snat_mode = True
                self.conf.f5_snat_addresses_per_subnet = 0
                self.agent_configurations['tunnel_types'] = []
                self.agent_configurations['bridge_mappings'] = {}
            else:
                self.agent_configurations['tunnel_types'] = \
                    self.conf.advertised_tunnel_types
                for net_id in self.conf.common_network_ids:
                    LOG.debug('network %s will be mapped to /Common/%s'
                              % (net_id, self.conf.common_network_ids[net_id]))
                self.agent_configurations['common_networks'] = \
                    self.conf.common_network_ids
                LOG.debug('Setting static ARP population to %s'
                          % self.conf.f5_populate_static_arp)
                self.agent_configurations['f5_common_external_networks'] = \
                    self.conf.f5_common_external_networks
                f5const.FDB_POPULATE_STATIC_ARP = \
                    self.conf.f5_populate_static_arp

            # parse the icontrol_hostname setting
            self._init_bigip_hostnames()
            # instantiate the managers
            self._init_bigip_managers()

            self.initialized = True
            LOG.debug('iControlDriver loaded successfully')
        except Exception as exc:
            # any startup failure leaves the driver uninitialized and
            # marks the agent as not operational
            LOG.error("exception in intializing driver %s" % str(exc))
            self._set_agent_status(False)
def connect(self):
# initialize communications wiht BIG-IP via iControl
try:
self._init_bigips()
except Exception as exc:
LOG.error("exception in intializing communications to BIG-IPs %s"
% str(exc))
self._set_agent_status(False)
def get_valid_esd_names(self):
LOG.debug("verified esd names in get_valid_esd_names():")
LOG.debug(self.esd_names)
return self.esd_names
    def _init_bigip_managers(self):
        """Instantiate optional binding drivers and the service managers.

        Import failures for the optional VLAN/L3 binding drivers are
        logged and tolerated; a cert_manager import or initialization
        failure is fatal (re-raised as ImportError to force agent exit).
        """
        if self.conf.vlan_binding_driver:
            try:
                self.vlan_binding = importutils.import_object(
                    self.conf.vlan_binding_driver, self.conf, self)
            except ImportError:
                LOG.error('Failed to import VLAN binding driver: %s'
                          % self.conf.vlan_binding_driver)

        if self.conf.l3_binding_driver:
            try:
                self.l3_binding = importutils.import_object(
                    self.conf.l3_binding_driver, self.conf, self)
            except ImportError:
                LOG.error('Failed to import L3 binding driver: %s'
                          % self.conf.l3_binding_driver)
        else:
            LOG.debug('No L3 binding driver configured.'
                      ' No L3 binding will be done.')

        if self.conf.cert_manager:
            try:
                self.cert_manager = importutils.import_object(
                    self.conf.cert_manager, self.conf)
            except ImportError as import_err:
                LOG.error('Failed to import CertManager: %s.' %
                          import_err.message)
                raise
            except Exception as err:
                LOG.error('Failed to initialize CertManager. %s' % err.message)
                # re-raise as ImportError to cause agent exit
                raise ImportError(err.message)

        self.service_adapter = ServiceModelAdapter(self.conf)
        self.tenant_manager = BigipTenantManager(self.conf, self)
        self.cluster_manager = ClusterManager()
        self.system_helper = SystemHelper()
        self.lbaas_builder = LBaaSBuilder(self.conf, self)

        # GRM mode needs no L2/L3 orchestration, hence no network builder
        if self.conf.f5_global_routed_mode:
            self.network_builder = None
        else:
            self.network_builder = NetworkServiceBuilder(
                self.conf.f5_global_routed_mode,
                self.conf,
                self,
                self.l3_binding)
def _init_bigip_hostnames(self):
# Validate and parse bigip credentials
if not self.conf.icontrol_hostname:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_hostname',
opt_value='valid hostname or IP address'
)
if not self.conf.icontrol_username:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_username',
opt_value='valid username'
)
if not self.conf.icontrol_password:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_password',
opt_value='valid password'
)
self.hostnames = self.conf.icontrol_hostname.split(',')
self.hostnames = [item.strip() for item in self.hostnames]
self.hostnames = sorted(self.hostnames)
# initialize per host agent_configurations
for hostname in self.hostnames:
self.__bigips[hostname] = bigip = type('', (), {})()
bigip.hostname = hostname
bigip.status = 'creating'
bigip.status_message = 'creating BIG-IP from iControl hostnames'
bigip.device_interfaces = dict()
self.agent_configurations[
'icontrol_endpoints'][hostname] = {}
self.agent_configurations[
'icontrol_endpoints'][hostname]['failover_state'] = \
'undiscovered'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status'] = 'unknown'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status_message'] = ''
    def _init_bigips(self):
        """Connect to every configured BIG-IP and bring it to 'active'.

        No-op when the driver is already operational.  Per host: open a
        connection, validate HA topology and traffic groups (once per
        run), initialize the device and its agent config, then verify HA
        is operational.  Failures mark the device (and agent status)
        accordingly; a config-level exception is re-raised.
        """
        if self.operational:
            LOG.debug('iControl driver reports connection is operational')
            return
        LOG.debug('initializing communications to BIG-IPs')
        try:
            # setup logging options: quiet urllib3 unless agent debug is on
            if not self.conf.debug:
                requests_log = std_logging.getLogger(
                    "requests.packages.urllib3")
                requests_log.setLevel(std_logging.ERROR)
                requests_log.propagate = False
            else:
                requests_log = std_logging.getLogger(
                    "requests.packages.urllib3")
                requests_log.setLevel(std_logging.DEBUG)
                requests_log.propagate = True

            self.__last_connect_attempt = datetime.datetime.now()

            for hostname in self.hostnames:
                # connect to each BIG-IP and set it status
                bigip = self._open_bigip(hostname)
                if bigip.status == 'connected':
                    # set the status down until we assure initialized
                    bigip.status = 'initializing'
                    bigip.status_message = 'initializing HA viability'
                    LOG.debug('initializing HA viability %s' % hostname)
                    device_group_name = None
                    if not self.ha_validated:
                        device_group_name = self._validate_ha(bigip)
                        LOG.debug('HA validated from %s with DSG %s' %
                                  (hostname, device_group_name))
                        self.ha_validated = True
                    if not self.tg_initialized:
                        self._init_traffic_groups(bigip)
                        LOG.debug('learned traffic groups from %s as %s' %
                                  (hostname, self.__traffic_groups))
                        self.tg_initialized = True
                    LOG.debug('initializing bigip %s' % hostname)
                    self._init_bigip(bigip, hostname, device_group_name)
                    LOG.debug('initializing agent configurations %s'
                              % hostname)
                    self._init_agent_config(bigip)

                    # Assure basic BIG-IP HA is operational
                    LOG.debug('validating HA state for %s' % hostname)
                    bigip.status = 'validating_HA'
                    bigip.status_message = 'validating the current HA state'
                    if self._validate_ha_operational(bigip):
                        LOG.debug('setting status to active for %s' % hostname)
                        bigip.status = 'active'
                        bigip.status_message = 'BIG-IP ready for provisioning'
                        self._post_init()
                    else:
                        LOG.debug('setting status to error for %s' % hostname)
                        bigip.status = 'error'
                        bigip.status_message = 'BIG-IP is not operational'
                        self._set_agent_status(False)
                else:
                    LOG.error('error opening BIG-IP %s - %s:%s'
                              % (hostname, bigip.status,
                                 bigip.status_message))
                    self._set_agent_status(False)
        except Exception as exc:
            LOG.error('Invalid agent configuration: %s' % exc.message)
            raise
        self._set_agent_status(force_resync=True)
def _init_errored_bigips(self):
try:
errored_bigips = self.get_errored_bigips_hostnames()
if errored_bigips:
LOG.debug('attempting to recover %s BIG-IPs' %
len(errored_bigips))
for hostname in errored_bigips:
# try to connect and set status
bigip = self._open_bigip(hostname)
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
LOG.debug('proceeding to initialize %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('known traffic groups initialized',
' from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = \
'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s'
% hostname)
bigip.status = 'active'
bigip.status_message = \
'BIG-IP ready for provisioning'
self._post_init()
self._set_agent_status(True)
else:
LOG.debug('setting status to error for %s'
% hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.debug('there are no BIG-IPs with error status')
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
    def _open_bigip(self, hostname):
        """Open an iControl connection to `hostname`.

        Replaces the placeholder entry in self.__bigips with a connected
        ManagementRoot on success.  On failure a stub object carrying
        status='error' (with a truncated message) is stored instead, so
        callers can always read .status / .status_message.
        """
        try:
            bigip = self.__bigips[hostname]
            # only (re)connect devices still being created or in error
            if bigip.status not in ['creating', 'error']:
                LOG.debug('BIG-IP %s status invalid %s to open a connection'
                          % (hostname, bigip.status))
                return bigip
            bigip.status = 'connecting'
            bigip.status_message = 'requesting iControl endpoint'
            LOG.info('opening iControl connection to %s @ %s' %
                     (self.conf.icontrol_username, hostname))
            # rebind to the real SDK object; status attributes are then
            # carried on the ManagementRoot instance itself
            bigip = ManagementRoot(hostname,
                                   self.conf.icontrol_username,
                                   self.conf.icontrol_password,
                                   timeout=f5const.DEVICE_CONNECTION_TIMEOUT,
                                   debug=self.conf.debug)
            bigip.status = 'connected'
            bigip.status_message = 'connected to BIG-IP'
            self.__bigips[hostname] = bigip
            return bigip
        except Exception as exc:
            LOG.error('could not communicate with ' +
                      'iControl device: %s' % hostname)
            # since no bigip object was created, create a dummy object
            # so we can store the status and status_message attributes
            errbigip = type('', (), {})()
            errbigip.hostname = hostname
            errbigip.status = 'error'
            errbigip.status_message = str(exc)[:80]
            self.__bigips[hostname] = errbigip
            return errbigip
    def _init_bigip(self, bigip, hostname, check_group_name=None):
        """Prepare a connected BIG-IP for provisioning.

        Validates version, provisioned memory and HA/cluster state,
        discovers device identity (name, MACs, interfaces), disables
        auto-sync and tunnel sync where appropriate.  On any failure the
        device is marked 'error' and the exception re-raised.
        """
        try:
            major_version, minor_version = self._validate_bigip_version(
                bigip, hostname)

            device_group_name = None

            # require enough provisioned management memory for LBaaS
            extramb = self.system_helper.get_provision_extramb(bigip)
            if int(extramb) < f5const.MIN_EXTRA_MB:
                raise f5ex.ProvisioningExtraMBValidateFailed(
                    'Device %s BIG-IP not provisioned for '
                    'management LARGE.' % hostname)

            if self.conf.f5_ha_type == 'pair' and \
                    self.cluster_manager.get_sync_status(bigip) == \
                    'Standalone':
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is pair and bigip %s in standalone mode'
                    % hostname)

            if self.conf.f5_ha_type == 'scalen' and \
                    self.cluster_manager.get_sync_status(bigip) == \
                    'Standalone':
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is scalen and bigip %s in standalone mode'
                    % hostname)

            if self.conf.f5_ha_type != 'standalone':
                # clustered devices must belong to a sync-failover group,
                # and (when given) the same one as their peers
                device_group_name = \
                    self.cluster_manager.get_device_group(bigip)
                if not device_group_name:
                    raise f5ex.BigIPClusterInvalidHA(
                        'HA mode is %s and no sync failover '
                        'device group found for device %s.'
                        % (self.conf.f5_ha_type, hostname))
                if check_group_name and device_group_name != check_group_name:
                    raise f5ex.BigIPClusterInvalidHA(
                        'Invalid HA. Device %s is in device group'
                        ' %s but should be in %s.'
                        % (hostname, device_group_name, check_group_name))
                bigip.device_group_name = device_group_name

            if self.network_builder:
                for network in self.conf.common_network_ids.values():
                    if not self.network_builder.vlan_exists(bigip,
                                                            network,
                                                            folder='Common'):
                        raise f5ex.MissingNetwork(
                            'Common network %s on %s does not exist'
                            % (network, bigip.hostname))
            bigip.device_name = self.cluster_manager.get_device_name(bigip)
            bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip)
            LOG.debug("Initialized BIG-IP %s with MAC addresses %s" %
                      (bigip.device_name, ', '.join(bigip.mac_addresses)))
            bigip.device_interfaces = \
                self.system_helper.get_interface_macaddresses_dict(bigip)
            # per-device caches of what has already been assured
            bigip.assured_networks = {}
            bigip.assured_tenant_snat_subnets = {}
            bigip.assured_gateway_subnets = []

            if self.conf.f5_ha_type != 'standalone':
                self.cluster_manager.disable_auto_sync(
                    device_group_name, bigip)

            # validate VTEP SelfIPs
            if not self.conf.f5_global_routed_mode:
                self.network_builder.initialize_tunneling(bigip)

            # Turn off tunnel syncing between BIG-IP
            # as our VTEPs properly use only local SelfIPs
            if self.system_helper.get_tunnel_sync(bigip) == 'enable':
                self.system_helper.set_tunnel_sync(bigip, enabled=False)

            LOG.debug('connected to iControl %s @ %s ver %s.%s'
                      % (self.conf.icontrol_username, hostname,
                         major_version, minor_version))
        except Exception as exc:
            bigip.status = 'error'
            bigip.status_message = str(exc)[:80]
            raise
        return bigip
    def _post_init(self):
        """Finish agent wiring once at least one BIG-IP is active.

        Initializes vCMP, registers binding-driver interfaces/MACs,
        detects Nova-managed devices via Neutron ports, and loads
        Enhanced Service Definitions (ESDs) from the config directory.
        """
        # After we have a connection to the BIG-IPs, initialize vCMP
        # on all connected BIG-IPs
        if self.network_builder:
            self.network_builder.initialize_vcmp()

        self.agent_configurations['network_segment_physical_network'] = \
            self.conf.f5_network_segment_physical_network

        LOG.info('iControlDriver initialized to %d bigips with username:%s'
                 % (len(self.get_active_bigips()),
                    self.conf.icontrol_username))
        LOG.info('iControlDriver dynamic agent configurations:%s'
                 % self.agent_configurations)

        if self.vlan_binding:
            LOG.debug(
                'getting BIG-IP device interface for VLAN Binding')
            self.vlan_binding.register_bigip_interfaces()

        if self.l3_binding:
            LOG.debug('getting BIG-IP MAC Address for L3 Binding')
            self.l3_binding.register_bigip_mac_addresses()

        # endpoints = self.agent_configurations['icontrol_endpoints']
        # for ic_host in endpoints.keys():
        for hostbigip in self.get_all_bigips():
            # hostbigip = self.__bigips[ic_host]
            # a device whose non-mgmt MACs are known to Neutron as ports
            # is considered Nova managed
            mac_addrs = [mac_addr for interface, mac_addr in
                         hostbigip.device_interfaces.items()
                         if interface != "mgmt"]
            ports = self.plugin_rpc.get_ports_for_mac_addresses(
                mac_addresses=mac_addrs)
            if ports:
                self.agent_configurations['nova_managed'] = True
            else:
                self.agent_configurations['nova_managed'] = False

        if self.network_builder:
            self.network_builder.post_init()

        # read enhanced services definitions
        esd_dir = os.path.join(self.get_config_dir(), 'esd')
        esd = EsdTagProcessor(esd_dir)
        try:
            esd.process_esd(self.get_all_bigips())
            self.lbaas_builder.init_esd(esd)
            self.service_adapter.init_esd(esd)

            LOG.debug('esd details here after process_esd(): ')
            LOG.debug(esd)
            self.esd_names = esd.esd_dict.keys() or []
            LOG.debug('##### self.esd_names obtainded here:')
            LOG.debug(self.esd_names)
        except f5ex.esdJSONFileInvalidException as err:
            LOG.error("unable to initialize ESD. Error: %s.", err.message)
        self._set_agent_status(False)
def _validate_ha(self, bigip):
# if there was only one address supplied and
# this is not a standalone device, get the
# devices trusted by this device.
device_group_name = None
if self.conf.f5_ha_type == 'standalone':
if len(self.hostnames) != 1:
bigip.status = 'error'
bigip.status_message = \
'HA mode is standalone and %d hosts found.'\
% len(self.hostnames)
raise f5ex.BigIPClusterInvalidHA(
'HA mode is standalone and %d hosts found.'
% len(self.hostnames))
device_group_name = 'standalone'
elif self.conf.f5_ha_type == 'pair':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) != 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device))
self.hostnames = mgmt_addrs
if len(self.hostnames) != 2:
bigip.status = 'error'
bigip.status_message = 'HA mode is pair and %d hosts found.' \
% len(self.hostnames)
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and %d hosts found.'
% len(self.hostnames))
elif self.conf.f5_ha_type == 'scalen':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) < 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device)
)
self.hostnames = mgmt_addrs
if len(self.hostnames) < 2:
bigip.status = 'error'
bigip.status_message = 'HA mode is scale and 1 hosts found.'
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and 1 hosts found.')
return device_group_name
def _validate_ha_operational(self, bigip):
if self.conf.f5_ha_type == 'standalone':
return True
else:
# how many active BIG-IPs are there?
active_bigips = self.get_active_bigips()
if active_bigips:
sync_status = self.cluster_manager.get_sync_status(bigip)
if sync_status in ['Disconnected', 'Sync Failure']:
if len(active_bigips) > 1:
# the device should not be in the disconnected state
return False
if len(active_bigips) > 1:
# it should be in the same sync-failover group
# as the rest of the active bigips
device_group_name = \
self.cluster_manager.get_device_group(bigip)
for active_bigip in active_bigips:
adgn = self.cluster_manager.get_device_group(
active_bigip)
if not adgn == device_group_name:
return False
return True
else:
return True
def _init_agent_config(self, bigip):
# Init agent config
ic_host = {}
ic_host['version'] = self.system_helper.get_version(bigip)
ic_host['device_name'] = bigip.device_name
ic_host['platform'] = self.system_helper.get_platform(bigip)
ic_host['serial_number'] = self.system_helper.get_serial_number(bigip)
ic_host['status'] = bigip.status
ic_host['status_message'] = bigip.status_message
ic_host['failover_state'] = self.get_failover_state(bigip)
if hasattr(bigip, 'local_ip') and bigip.local_ip:
ic_host['local_ip'] = bigip.local_ip
else:
ic_host['local_ip'] = 'VTEP disabled'
self.agent_configurations['tunnel_types'] = list()
self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \
ic_host
if self.network_builder:
self.agent_configurations['bridge_mappings'] = \
self.network_builder.interface_mapping
def _set_agent_status(self, force_resync=False):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status'] = bigip.status
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status_message'] = bigip.status_message
if self.conf.report_esd_names_in_agent:
LOG.debug('adding names to report:')
self.agent_configurations['esd_name'] = \
self.get_valid_esd_names()
# Policy - if any BIG-IP are active we're operational
if self.get_active_bigips():
self.operational = True
else:
self.operational = False
if self.agent_report_state:
self.agent_report_state(force_resync=force_resync)
def get_failover_state(self, bigip):
try:
if hasattr(bigip, 'tm'):
fs = bigip.tm.sys.dbs.db.load(name='failover.state')
bigip.failover_state = fs.value
return bigip.failover_state
else:
return 'error'
except Exception as exc:
LOG.exception('Error getting %s failover state' % bigip.hostname)
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
self._set_agent_status(False)
return 'error'
def get_agent_configurations(self):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
if bigip.status == 'active':
failover_state = self.get_failover_state(bigip)
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = failover_state
else:
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = 'unknown'
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status'] = bigip.status
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status_message'] = bigip.status_message
self.agent_configurations['operational'] = \
self.operational
LOG.debug('agent configurations are: %s' % self.agent_configurations)
return dict(self.agent_configurations)
def recover_errored_devices(self):
# trigger a retry on errored BIG-IPs
try:
self._init_errored_bigips()
except Exception as exc:
LOG.error('Could not recover devices: %s' % exc.message)
def backend_integrity(self):
if self.operational:
return True
return False
    def generate_capacity_score(self, capacity_policy=None):
        """Generate the capacity score of connected devices.

        capacity_policy maps a metric name to its maximum capacity;
        for each metric a get_<metric> method must exist on this
        driver. The score is the highest utilization ratio
        (observed / max) over all metrics, taking the busiest active
        device for each metric. Returns 0 when no policy is given.
        """
        if capacity_policy:
            highest_metric = 0.0
            highest_metric_name = None
            my_methods = dir(self)
            bigips = self.get_all_bigips()
            for metric in capacity_policy:
                func_name = 'get_' + metric
                if func_name in my_methods:
                    max_capacity = int(capacity_policy[metric])
                    metric_func = getattr(self, func_name)
                    metric_value = 0
                    for bigip in bigips:
                        if bigip.status == 'active':
                            global_stats = \
                                self.stat_helper.get_global_statistics(bigip)
                            value = int(
                                metric_func(bigip=bigip,
                                            global_statistics=global_stats)
                            )
                            LOG.debug('calling capacity %s on %s returned: %s'
                                      % (func_name, bigip.hostname, value))
                        else:
                            # Inactive devices contribute nothing.
                            value = 0
                        # Score this metric against the busiest device.
                        if value > metric_value:
                            metric_value = value
                    metric_capacity = float(metric_value) / float(max_capacity)
                    if metric_capacity > highest_metric:
                        highest_metric = metric_capacity
                        highest_metric_name = metric
                else:
                    LOG.warn('capacity policy has method '
                             '%s which is not implemented in this driver'
                             % metric)
            LOG.debug('capacity score: %s based on %s'
                      % (highest_metric, highest_metric_name))
            return highest_metric
        return 0
def set_context(self, context):
# Context to keep for database access
if self.network_builder:
self.network_builder.set_context(context)
def set_plugin_rpc(self, plugin_rpc):
# Provide Plugin RPC access
self.plugin_rpc = plugin_rpc
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_l2pop_rpc(l2pop_rpc)
def set_agent_report_state(self, report_state_callback):
"""Set Agent Report State."""
self.agent_report_state = report_state_callback
def service_exists(self, service):
return self._service_exists(service)
def flush_cache(self):
# Remove cached objects so they can be created if necessary
for bigip in self.get_all_bigips():
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
    @serialized('get_all_deployed_loadbalancers')
    @is_operational
    def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False):
        """Scan every BIG-IP for deployed loadbalancers.

        Returns a dict keyed by loadbalancer id (virtual-address name
        minus the environment prefix) of
        {'id', 'tenant_id', 'hostnames'}. With
        purge_orphaned_folders=True, tenant folders that still hold no
        virtual address after a grace period are purged from the device.
        """
        LOG.debug('getting all deployed loadbalancers on BIG-IPs')
        deployed_lb_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual_address)
                    deployed_lbs = resource.get_resources(bigip, folder)
                    if deployed_lbs:
                        for lb in deployed_lbs:
                            lb_id = lb.name[len(self.service_adapter.prefix):]
                            if lb_id in deployed_lb_dict:
                                deployed_lb_dict[lb_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_lb_dict[lb_id] = {
                                    'id': lb_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname]
                                }
                    else:
                        # delay to assure we are not in the tenant creation
                        # process before a virtual address is created.
                        greenthread.sleep(10)
                        deployed_lbs = resource.get_resources(bigip, folder)
                        if deployed_lbs:
                            # NOTE(review): entries added on this re-scan
                            # omit the 'hostnames' key -- confirm callers
                            # tolerate that.
                            for lb in deployed_lbs:
                                lb_id = lb.name[
                                    len(self.service_adapter.prefix):]
                                deployed_lb_dict[lb_id] = \
                                    {'id': lb_id, 'tenant_id': tenant_id}
                        else:
                            # Orphaned folder!
                            if purge_orphaned_folders:
                                try:
                                    self.system_helper.purge_folder_contents(
                                        bigip, folder)
                                    self.system_helper.purge_folder(
                                        bigip, folder)
                                    LOG.error('orphaned folder %s on %s' %
                                              (folder, bigip.hostname))
                                except Exception as exc:
                                    LOG.error('error purging folder %s: %s' %
                                              (folder, str(exc)))
        return deployed_lb_dict
    @serialized('get_all_deployed_listeners')
    @is_operational
    def get_all_deployed_listeners(self, expand_subcollections=False):
        """Scan every BIG-IP for deployed virtual servers (listeners).

        Returns a dict keyed by listener id of
        {'id', 'tenant_id', 'hostnames', 'l7_policy'}, where
        'l7_policy' is the full path of the first attached policy or
        '' when none is attached.
        """
        LOG.debug('getting all deployed listeners on BIG-IPs')
        deployed_virtual_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual)
                    deployed_listeners = resource.get_resources(
                        bigip, folder, expand_subcollections)
                    if deployed_listeners:
                        for virtual in deployed_listeners:
                            virtual_id = \
                                virtual.name[len(self.service_adapter.prefix):]
                            l7_policy = ''
                            # Only the first attached policy is reported.
                            if hasattr(virtual, 'policiesReference') and \
                                    'items' in virtual.policiesReference:
                                l7_policy = \
                                    virtual.policiesReference['items'][0]
                                l7_policy = l7_policy['fullPath']
                            if virtual_id in deployed_virtual_dict:
                                deployed_virtual_dict[virtual_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_virtual_dict[virtual_id] = {
                                    'id': virtual_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname],
                                    'l7_policy': l7_policy
                                }
        return deployed_virtual_dict
@serialized('purge_orphaned_nodes')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_nodes(self, tenant_members):
node_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.node)
node_dict = dict()
for bigip in self.get_all_bigips():
for tenant_id, members in tenant_members.iteritems():
partition = self.service_adapter.prefix + tenant_id
nodes = node_helper.get_resources(bigip, partition=partition)
for n in nodes:
node_dict[n.name] = n
for member in members:
rd = self.network_builder.find_subnet_route_domain(
tenant_id, member.get('subnet_id', None))
node_name = "{}%{}".format(member['address'], rd)
node_dict.pop(node_name, None)
for node_name, node in node_dict.iteritems():
try:
node_helper.delete(bigip, name=urllib.quote(node_name),
partition=partition)
except HTTPError as error:
if error.response.status_code == 400:
LOG.error(error.response)
    @serialized('get_all_deployed_pools')
    @is_operational
    def get_all_deployed_pools(self):
        """Scan every BIG-IP for deployed pools.

        Returns a dict keyed by pool id of
        {'id', 'tenant_id', 'hostnames', 'monitors'}, where 'monitors'
        is the single attached monitor id or '' when none is attached.
        """
        LOG.debug('getting all deployed pools on BIG-IPs')
        deployed_pool_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.pool)
                    deployed_pools = resource.get_resources(bigip, folder)
                    if deployed_pools:
                        for pool in deployed_pools:
                            pool_id = \
                                pool.name[len(self.service_adapter.prefix):]
                            monitor_id = ''
                            if hasattr(pool, 'monitor'):
                                # monitor is a path like
                                # '/Partition/name'; take the name part.
                                monitor = pool.monitor.split('/')[2].strip()
                                monitor_id = \
                                    monitor[len(self.service_adapter.prefix):]
                                LOG.debug(
                                    'pool {} has monitor {}'.format(
                                        pool.name, monitor))
                            else:
                                LOG.debug(
                                    'pool {} has no healthmonitors'.format(
                                        pool.name))
                            if pool_id in deployed_pool_dict:
                                deployed_pool_dict[pool_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_pool_dict[pool_id] = {
                                    'id': pool_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname],
                                    'monitors': monitor_id
                                }
        return deployed_pool_dict
@serialized('purge_orphaned_pool')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_pool(self, tenant_id=None, pool_id=None,
hostnames=list()):
node_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.node)
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
pool_name = self.service_adapter.prefix + pool_id
partition = self.service_adapter.prefix + tenant_id
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, pool_name, partition)
members = pool.members_s.get_collection()
pool.delete()
for member in members:
node_name = member.address
try:
node_helper.delete(bigip,
name=urllib.quote(node_name),
partition=partition)
except HTTPError as e:
if e.response.status_code == 404:
pass
if e.response.status_code == 400:
LOG.warn("Failed to delete node -- in use")
else:
LOG.exception("Failed to delete node")
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('pool %s not on BIG-IP %s.'
% (pool_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging pool %s' % str(exc))
    @serialized('get_all_deployed_monitors')
    @is_operational
    def get_all_deployed_health_monitors(self):
        """Retrieve a list of all Health Monitors deployed.

        Probes the http/https/tcp/ping monitor collections of every
        tenant folder on every BIG-IP and returns a dict keyed by
        monitor id of {'id', 'tenant_id', 'hostnames'}.
        """
        LOG.debug('getting all deployed monitors on BIG-IP\'s')
        monitor_types = ['http_monitor', 'https_monitor', 'tcp_monitor',
                         'ping_monitor']
        deployed_monitor_dict = {}
        adapter_prefix = self.service_adapter.prefix
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                tenant_id = folder[len(adapter_prefix):]
                if str(folder).startswith(adapter_prefix):
                    # One resource helper per monitor type.
                    resources = map(
                        lambda x: resource_helper.BigIPResourceHelper(
                            getattr(resource_helper.ResourceType, x)),
                        monitor_types)
                    for resource in resources:
                        deployed_monitors = resource.get_resources(
                            bigip, folder)
                        if deployed_monitors:
                            for monitor in deployed_monitors:
                                monitor_id = monitor.name[len(adapter_prefix):]
                                if monitor_id in deployed_monitor_dict:
                                    deployed_monitor_dict[monitor_id][
                                        'hostnames'].append(bigip.hostname)
                                else:
                                    deployed_monitor_dict[monitor_id] = {
                                        'id': monitor_id,
                                        'tenant_id': tenant_id,
                                        'hostnames': [bigip.hostname]
                                    }
        return deployed_monitor_dict
@serialized('purge_orphaned_health_monitor')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None,
hostnames=list()):
"""Purge all monitors that exist on the BIG-IP but not in Neutron"""
resource_types = [
resource_helper.BigIPResourceHelper(x) for x in [
resource_helper.ResourceType.http_monitor,
resource_helper.ResourceType.https_monitor,
resource_helper.ResourceType.ping_monitor,
resource_helper.ResourceType.tcp_monitor]]
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
monitor_name = self.service_adapter.prefix + monitor_id
partition = self.service_adapter.prefix + tenant_id
monitor = None
for monitor_type in resource_types:
try:
monitor = monitor_type.load(bigip, monitor_name,
partition)
break
except HTTPError as err:
if err.response.status_code == 404:
continue
monitor.delete()
except TypeError as err:
if 'NoneType' in err:
LOG.exception("Could not find monitor {}".format(
monitor_name))
except Exception as exc:
LOG.exception('Exception purging monitor %s' % str(exc))
    @serialized('get_all_deployed_l7_policys')
    @is_operational
    def get_all_deployed_l7_policys(self):
        """Retrieve a dict of all l7policies deployed.

        The dict returned will have the following format:
        {policy_bigip_id_0: {'id': policy_id_0,
                             'tenant_id': tenant_id,
                             'hostnames': [hostnames_0]}
         ...
        }
        Where hostnames is the list of BIG-IP hostnames impacted, and the
        policy_id is the policy_bigip_id without 'wrapper_policy_'.
        """
        LOG.debug('getting all deployed l7_policys on BIG-IP\'s')
        deployed_l7_policys_dict = {}
        for bigip in self.get_all_bigips():
            folders = self.system_helper.get_folders(bigip)
            for folder in folders:
                tenant_id = folder[len(self.service_adapter.prefix):]
                if str(folder).startswith(self.service_adapter.prefix):
                    resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.l7policy)
                    deployed_l7_policys = resource.get_resources(
                        bigip, folder)
                    if deployed_l7_policys:
                        for l7_policy in deployed_l7_policys:
                            # Keyed by the on-device name, not the
                            # Neutron id.
                            l7_policy_id = l7_policy.name
                            if l7_policy_id in deployed_l7_policys_dict:
                                my_dict = \
                                    deployed_l7_policys_dict[l7_policy_id]
                                my_dict['hostnames'].append(bigip.hostname)
                            else:
                                po_id = l7_policy_id.replace(
                                    'wrapper_policy_', '')
                                deployed_l7_policys_dict[l7_policy_id] = {
                                    'id': po_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname]
                                }
        return deployed_l7_policys_dict
@serialized('purge_orphaned_l7_policy')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None,
hostnames=list(), listener_id=None):
"""Purge all l7_policys that exist on the BIG-IP but not in Neutron"""
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
error = None
try:
l7_policy_name = l7_policy_id
partition = self.service_adapter.prefix + tenant_id
if listener_id and partition:
if self.service_adapter.prefix not in listener_id:
listener_id = \
self.service_adapter.prefix + listener_id
li_resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).load(
bigip, listener_id, partition)
li_resource.update(policies=[])
l7_policy = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.l7policy).load(
bigip, l7_policy_name, partition)
l7_policy.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('l7_policy %s not on BIG-IP %s.'
% (l7_policy_id, bigip.hostname))
else:
error = err
except Exception as exc:
error = err
if error:
kwargs = dict(
tenant_id=tenant_id, l7_policy_id=l7_policy_id,
hostname=bigip.hostname, listener_id=listener_id)
LOG.exception('Exception: purge_orphaned_l7_policy({}) '
'"{}"'.format(kwargs, exc))
    @serialized('purge_orphaned_loadbalancer')
    @is_operational
    @log_helpers.log_method_call
    def purge_orphaned_loadbalancer(self, tenant_id=None,
                                    loadbalancer_id=None, hostnames=list()):
        """Remove a loadbalancer (virtual address) that exists on the
        BIG-IP but not in Neutron, including any virtual servers and
        their pools still referencing its address.
        """
        for bigip in self.get_all_bigips():
            if bigip.hostname in hostnames:
                try:
                    va_name = self.service_adapter.prefix + loadbalancer_id
                    partition = self.service_adapter.prefix + tenant_id
                    va = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual_address).load(
                            bigip, va_name, partition)
                    # get virtual services (listeners)
                    # referencing this virtual address
                    vses = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual).get_resources(
                            bigip, partition)
                    vs_dest_compare = '/' + partition + '/' + va.name
                    for vs in vses:
                        if str(vs.destination).startswith(vs_dest_compare):
                            if hasattr(vs, 'pool'):
                                pool = resource_helper.BigIPResourceHelper(
                                    resource_helper.ResourceType.pool).load(
                                        bigip, os.path.basename(vs.pool),
                                        partition)
                                vs.delete()
                                pool.delete()
                            else:
                                vs.delete()
                    # Finally remove the virtual address itself.
                    resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual_address).delete(
                            bigip, va_name, partition)
                except HTTPError as err:
                    if err.response.status_code == 404:
                        LOG.debug('loadbalancer %s not on BIG-IP %s.'
                                  % (loadbalancer_id, bigip.hostname))
                except Exception as exc:
                    LOG.exception('Exception purging loadbalancer %s'
                                  % str(exc))
    @serialized('purge_orphaned_listener')
    @is_operational
    @log_helpers.log_method_call
    def purge_orphaned_listener(
            self, tenant_id=None, listener_id=None, hostnames=[]):
        """Delete a virtual server that exists on the BIG-IP but not in
        Neutron; a 404 (already gone) is only logged at debug level.
        """
        for bigip in self.get_all_bigips():
            if bigip.hostname in hostnames:
                try:
                    listener_name = self.service_adapter.prefix + listener_id
                    partition = self.service_adapter.prefix + tenant_id
                    listener = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual).load(
                            bigip, listener_name, partition)
                    listener.delete()
                except HTTPError as err:
                    if err.response.status_code == 404:
                        LOG.debug('listener %s not on BIG-IP %s.'
                                  % (listener_id, bigip.hostname))
                except Exception as exc:
                    LOG.exception('Exception purging listener %s' % str(exc))
    @serialized('create_loadbalancer')
    @is_operational
    def create_loadbalancer(self, loadbalancer, service):
        """Create virtual server.

        Applies the whole service definition to the device(s) and
        returns True while the loadbalancer is still PENDING.
        """
        self._common_service_handler(service)
        return self._update_target(service)
    @serialized('update_loadbalancer')
    @is_operational
    def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
        """Update virtual server."""
        # NOTE: old_loadbalancer/loadbalancer args are unused; the
        # whole service definition is simply re-applied.
        self._common_service_handler(service)
        return self._update_target(service)
    @serialized('delete_loadbalancer')
    @is_operational
    def delete_loadbalancer(self, loadbalancer, service):
        """Delete loadbalancer.

        Tears down the service and removes the tenant partition when it
        becomes empty.
        """
        LOG.debug("Deleting loadbalancer")
        self._common_service_handler(
            service,
            delete_partition=True,
            delete_event=True)
        return self._update_target(service)
@serialized('create_listener')
@is_operational
@log_helpers.log_method_call
def create_listener(self, listener, service):
"""Create virtual server."""
LOG.debug("Creating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('update_listener')
@is_operational
def update_listener(self, old_listener, listener, service):
"""Update virtual server."""
LOG.debug("Updating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('delete_listener')
@is_operational
def delete_listener(self, listener, service):
"""Delete virtual server."""
LOG.debug("Deleting listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('create_pool')
@is_operational
def create_pool(self, pool, service):
"""Create lb pool."""
LOG.debug("Creating pool")
# pzhang(NOTE): pool may not bound with a listener
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('update_pool')
@is_operational
def update_pool(self, old_pool, pool, service):
"""Update lb pool."""
LOG.debug("Updating pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('delete_pool')
@is_operational
def delete_pool(self, pool, service):
"""Delete lb pool."""
LOG.debug("Deleting pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('create_l7policy')
@is_operational
def create_l7policy(self, l7policy, service):
"""Create lb l7policy."""
LOG.debug("Creating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
@serialized('update_l7policy')
@is_operational
def update_l7policy(self, old_l7policy, l7policy, service):
"""Update lb l7policy."""
LOG.debug("Updating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
@serialized('delete_l7policy')
@is_operational
def delete_l7policy(self, l7policy, service):
"""Delete lb l7policy."""
LOG.debug("Deleting l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
# TODO(pzhang): test this
@serialized('create_l7rule')
@is_operational
def create_l7rule(self, l7rule, service):
"""Create lb l7rule."""
LOG.debug("Creating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
# TODO(pzhang): test this
@serialized('update_l7rule')
@is_operational
def update_l7rule(self, old_l7rule, l7rule, service):
"""Update lb l7rule."""
LOG.debug("Updating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
# TODO(pzhang): test this
@serialized('delete_l7rule')
@is_operational
def delete_l7rule(self, l7rule, service):
"""Delete lb l7rule."""
LOG.debug("Deleting l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
@serialized('create_member')
@is_operational
def create_member(self, member, service):
"""Create pool member."""
LOG.debug("Creating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('update_member')
@is_operational
def update_member(self, old_member, member, service):
"""Update pool member."""
LOG.debug("Updating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('delete_member')
@is_operational
def delete_member(self, member, service):
"""Delete pool member."""
LOG.debug("Deleting member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('create_health_monitor')
@is_operational
def create_health_monitor(self, health_monitor, service):
"""Create pool health monitor."""
LOG.debug("Creating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
@serialized('update_health_monitor')
@is_operational
def update_health_monitor(self, old_health_monitor,
health_monitor, service):
"""Update pool health monitor."""
LOG.debug("Updating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
@serialized('delete_health_monitor')
@is_operational
def delete_health_monitor(self, health_monitor, service):
"""Delete pool health monitor."""
LOG.debug("Deleting health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
def _update_target(self, service,
update_method=None, target=None):
if self.do_service_update:
if target is not None and update_method is not None:
update_method(target)
self._update_loadbalancer_status(service, timed_out=False)
loadbalancer = service.get('loadbalancer', {})
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
lb_pending = \
(lb_provisioning_status == f5const.F5_PENDING_CREATE or
lb_provisioning_status == f5const.F5_PENDING_UPDATE)
return lb_pending
    @is_operational
    def get_stats(self, service):
        """Gather loadbalancer traffic stats and report them to Neutron.

        Sums virtual-server statistics across all BIG-IPs, converts the
        bit counters to bytes and pushes the result via plugin_rpc.
        Returns the stats dict (possibly empty on error).
        """
        lb_stats = {}
        stats = ['clientside.bitsIn',
                 'clientside.bitsOut',
                 'clientside.curConns',
                 'clientside.totConns']
        loadbalancer = service['loadbalancer']
        try:
            # sum virtual server stats for all BIG-IPs
            vs_stats = self.lbaas_builder.get_listener_stats(service, stats)
            # convert to bytes
            lb_stats[f5const.F5_STATS_IN_BYTES] = \
                vs_stats['clientside.bitsIn']/8
            lb_stats[f5const.F5_STATS_OUT_BYTES] = \
                vs_stats['clientside.bitsOut']/8
            lb_stats[f5const.F5_STATS_ACTIVE_CONNECTIONS] = \
                vs_stats['clientside.curConns']
            lb_stats[f5const.F5_STATS_TOTAL_CONNECTIONS] = \
                vs_stats['clientside.totConns']
            # update Neutron
            self.plugin_rpc.update_loadbalancer_stats(
                loadbalancer['id'], lb_stats)
        except Exception as e:
            # NOTE: e.message is Python 2 only.
            LOG.error("Error getting loadbalancer stats: %s", e.message)
        finally:
            # Deliberate best-effort: returning from finally swallows
            # any in-flight exception and always hands back lb_stats.
            return lb_stats
def fdb_add(self, fdb):
# Add (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.add_bigip_fdb(bigip, fdb)
def fdb_remove(self, fdb):
# Remove (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.remove_bigip_fdb(bigip, fdb)
def fdb_update(self, fdb):
# Update (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.update_bigip_fdb(bigip, fdb)
    def tunnel_update(self, **kwargs):
        """Handle a tunnel update from Neutron core RPC (no-op here)."""
        # Tunnel Update from Neutron Core RPC
        pass
def tunnel_sync(self):
# Only sync when supported types are present
if not [i for i in self.agent_configurations['tunnel_types']
if i in ['gre', 'vxlan']]:
return False
tunnel_ips = []
for bigip in self.get_all_bigips():
if bigip.local_ip:
tunnel_ips.append(bigip.local_ip)
self.network_builder.tunnel_sync(tunnel_ips)
# Tunnel sync sent.
return False
    @serialized('sync')
    @is_operational
    def sync(self, service):
        """Sync service definition to device.

        Re-reads the latest service from Neutron (it may have changed)
        and force-reapplies it. Returns True while the loadbalancer is
        still PENDING; implicitly returns None when plugin_rpc or the
        loadbalancer is unavailable.
        """
        # loadbalancer and plugin_rpc may not be set
        lb_id = service.get('loadbalancer', dict()).get('id', '')
        if hasattr(self, 'plugin_rpc') and self.plugin_rpc and lb_id:
            # Get the latest service. It may have changed.
            service = self.plugin_rpc.get_service_by_loadbalancer_id(lb_id)
            if service.get('loadbalancer', None):
                # to_sync forces the builder to reapply configuration.
                self.lbaas_builder.to_sync = True
                self._common_service_handler(service)
                self.lbaas_builder.to_sync = False
                # pzhang(NOTE): move udpate neutron db out here for the lb tree
                if self.do_service_update:
                    self.update_service_status(service)
                loadbalancer = service.get('loadbalancer', {})
                lb_provisioning_status = loadbalancer.get("provisioning_status",
                                                          f5const.F5_ERROR)
                lb_pending = \
                    (lb_provisioning_status == f5const.F5_PENDING_CREATE or
                     lb_provisioning_status == f5const.F5_PENDING_UPDATE)
                return lb_pending
            else:
                LOG.debug("Attempted sync of deleted pool")
@serialized('backup_configuration')
@is_operational
def backup_configuration(self):
# Save Configuration on Devices
for bigip in self.get_all_bigips():
LOG.debug('_backup_configuration: saving device %s.'
% bigip.hostname)
self.cluster_manager.save_config(bigip)
    def _get_monitor_endpoint(self, bigip, service):
        """Return the SDK monitor endpoint matching the service's
        monitor type; defaults to the plain HTTP monitor endpoint.

        NOTE: in the default branch, 'https' is the SDK's plural form
        of 'http' ('http' + 's'); it is NOT the HTTPS monitor, which
        lives under 'https_s'.
        """
        monitor_type = self.service_adapter.get_monitor_type(service)
        if not monitor_type:
            monitor_type = ""
        if monitor_type == "HTTPS":
            hm = bigip.tm.ltm.monitor.https_s.https
        elif monitor_type == "TCP":
            hm = bigip.tm.ltm.monitor.tcps.tcp
        elif monitor_type == "PING":
            hm = bigip.tm.ltm.monitor.gateway_icmps.gateway_icmp
        else:
            hm = bigip.tm.ltm.monitor.https.http
        return hm
def service_rename_required(self, service):
rename_required = False
# Returns whether the bigip has a pool for the service
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
# Does the correctly named virtual address exist?
for bigip in bigips:
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
rename_required = True
break
return rename_required
    def service_object_teardown(self, service):
        """Forcibly delete the service's virtuals, pools and monitors
        from the configured BIG-IPs (used before re-creating objects,
        e.g. during renames). Returns False when the service has no
        loadbalancer; otherwise returns None after attempting deletion.
        """
        if not service['loadbalancer']:
            return False
        bigips = self.get_config_bigips()
        loadbalancer = service['loadbalancer']
        folder_name = self.service_adapter.get_folder_name(
            loadbalancer['tenant_id']
        )
        # Change to bigips
        for bigip in bigips:
            # Delete all virtuals
            v = bigip.tm.ltm.virtuals.virtual
            for listener in service['listeners']:
                l_name = listener.get("name", "")
                if not l_name:
                    # Fall back to the adapter-generated name.
                    svc = {"loadbalancer": loadbalancer,
                           "listener": listener}
                    vip = self.service_adapter.get_virtual(svc)
                    l_name = vip['name']
                if v.exists(name=l_name, partition=folder_name):
                    # Found a virtual that is named by the OS object,
                    # delete it.
                    l_obj = v.load(name=l_name, partition=folder_name)
                    LOG.warn("Deleting listener: /%s/%s" %
                             (folder_name, l_name))
                    l_obj.delete(name=l_name, partition=folder_name)
            # Delete all pools
            p = bigip.tm.ltm.pools.pool
            for os_pool in service['pools']:
                p_name = os_pool.get('name', "")
                if not p_name:
                    # Fall back to the adapter-generated name.
                    svc = {"loadbalancer": loadbalancer,
                           "pool": os_pool}
                    pool = self.service_adapter.get_pool(svc)
                    p_name = pool['name']
                if p.exists(name=p_name, partition=folder_name):
                    p_obj = p.load(name=p_name, partition=folder_name)
                    LOG.warn("Deleting pool: /%s/%s" % (folder_name, p_name))
                    p_obj.delete(name=p_name, partition=folder_name)
            # Delete all healthmonitors
            for healthmonitor in service['healthmonitors']:
                svc = {'loadbalancer': loadbalancer,
                       'healthmonitor': healthmonitor}
                monitor_ep = self._get_monitor_endpoint(bigip, svc)
                m_name = healthmonitor.get('name', "")
                if not m_name:
                    # Fall back to the adapter-generated name.
                    hm = self.service_adapter.get_healthmonitor(svc)
                    m_name = hm['name']
                if monitor_ep.exists(name=m_name, partition=folder_name):
                    m_obj = monitor_ep.load(name=m_name, partition=folder_name)
                    LOG.warn("Deleting monitor: /%s/%s" % (
                        folder_name, m_name))
                    m_obj.delete()
    def _service_exists(self, service):
        """Verify every object of the service exists on the BIG-IP(s).

        Checks tenant folder, virtual address, every listener, every
        pool (member count and membership) and every health monitor on
        each configured device. Returns False on the first missing
        object, True when everything is present.
        """
        # Returns whether the bigip has the service defined
        if not service['loadbalancer']:
            return False
        loadbalancer = service['loadbalancer']
        folder_name = self.service_adapter.get_folder_name(
            loadbalancer['tenant_id']
        )
        if self.network_builder:
            # append route domain to member address
            self.network_builder._annotate_service_route_domains(service)
        # Foreach bigip in the cluster:
        for bigip in self.get_config_bigips():
            # Does the tenant folder exist?
            if not self.system_helper.folder_exists(bigip, folder_name):
                LOG.error("Folder %s does not exists on bigip: %s" %
                          (folder_name, bigip.hostname))
                return False
            # Get the virtual address
            virtual_address = VirtualAddress(self.service_adapter,
                                             loadbalancer)
            if not virtual_address.exists(bigip):
                LOG.error("Virtual address %s(%s) does not "
                          "exists on bigip: %s" % (virtual_address.name,
                                                   virtual_address.address,
                                                   bigip.hostname))
                return False
            # Ensure that each virtual service exists.
            for listener in service['listeners']:
                svc = {"loadbalancer": loadbalancer,
                       "listener": listener}
                virtual_server = self.service_adapter.get_virtual_name(svc)
                if not self.vs_manager.exists(bigip,
                                              name=virtual_server['name'],
                                              partition=folder_name):
                    LOG.error("Virtual /%s/%s not found on bigip: %s" %
                              (virtual_server['name'], folder_name,
                               bigip.hostname))
                    return False
            # Ensure that each pool exists.
            for pool in service['pools']:
                svc = {"loadbalancer": loadbalancer,
                       "pool": pool}
                bigip_pool = self.service_adapter.get_pool(svc)
                if not self.pool_manager.exists(
                        bigip,
                        name=bigip_pool['name'],
                        partition=folder_name):
                    LOG.error("Pool /%s/%s not found on bigip: %s" %
                              (folder_name, bigip_pool['name'],
                               bigip.hostname))
                    return False
                else:
                    deployed_pool = self.pool_manager.load(
                        bigip,
                        name=bigip_pool['name'],
                        partition=folder_name)
                    deployed_members = \
                        deployed_pool.members_s.get_collection()
                    # First check that number of members deployed
                    # is equal to the number in the service.
                    if len(deployed_members) != len(pool['members']):
                        LOG.error("Pool %s members member count mismatch "
                                  "match: deployed %d != service %d" %
                                  (bigip_pool['name'], len(deployed_members),
                                   len(pool['members'])))
                        return False
                    # Ensure each pool member exists
                    for member in service['members']:
                        if member['pool_id'] == pool['id']:
                            lb = self.lbaas_builder
                            # NOTE(review): this rebinds the outer loop
                            # variable `pool` -- presumably intentional
                            # since both refer to the same pool id, but
                            # worth confirming.
                            pool = lb.get_pool_by_id(
                                service, member["pool_id"])
                            svc = {"loadbalancer": loadbalancer,
                                   "member": member,
                                   "pool": pool}
                            if not lb.pool_builder.member_exists(svc, bigip):
                                LOG.error("Pool member not found: %s" %
                                          svc['member'])
                                return False
            # Ensure that each health monitor exists.
            for healthmonitor in service['healthmonitors']:
                svc = {"loadbalancer": loadbalancer,
                       "healthmonitor": healthmonitor}
                monitor = self.service_adapter.get_healthmonitor(svc)
                monitor_ep = self._get_monitor_endpoint(bigip, svc)
                if not monitor_ep.exists(name=monitor['name'],
                                         partition=folder_name):
                    LOG.error("Monitor /%s/%s not found on bigip: %s" %
                              (monitor['name'], folder_name, bigip.hostname))
                    return False
        return True
def get_loadbalancers_in_tenant(self, tenant_id):
loadbalancers = self.plugin_rpc.get_all_loadbalancers()
return [lb['lb_id'] for lb in loadbalancers
if lb['tenant_id'] == tenant_id]
def _common_service_handler(self, service,
                            delete_partition=False,
                            delete_event=False):
    """Assure that the service is configured on the bigip(s).

    Creates the tenant partition, prepares service networking, applies
    the LBaaS objects on every configured BIG-IP, then finalizes
    networking.  When the loadbalancer is pending delete, the tenant
    partition is cleaned up in the finally clause.

    :param service: service definition dict from the plugin.
    :param delete_partition: retained for interface compatibility.
    :param delete_event: when True, networking errors are logged but
        not re-raised so a delete can still make progress.
    :returns: True when handling stopped early (no loadbalancer in the
        service); otherwise None.
    """
    start_time = time()
    lb_pending = True
    self.do_service_update = True

    if self.conf.trace_service_requests:
        self.trace_service_requests(service)

    loadbalancer = service.get("loadbalancer", None)
    if not loadbalancer:
        LOG.error("_common_service_handler: Service loadbalancer is None")
        return lb_pending

    lb_provisioning_status = loadbalancer.get("provisioning_status",
                                              f5const.F5_ERROR)

    # BUGFIX: initialize before the try block so the finally clause can
    # always reference it.  Previously, an exception raised before the
    # per-bigip loop caused a NameError during tenant cleanup.
    all_subnet_hints = {}

    try:
        try:
            self.tenant_manager.assure_tenant_created(service)
        except Exception as e:
            LOG.error("Tenant folder creation exception: %s",
                      e.message)
            if lb_provisioning_status != f5const.F5_PENDING_DELETE:
                loadbalancer['provisioning_status'] = \
                    f5const.F5_ERROR
            raise e

        LOG.debug(" _assure_tenant_created took %.5f secs" %
                  (time() - start_time))

        traffic_group = self.service_to_traffic_group(service)
        loadbalancer['traffic_group'] = traffic_group

        if self.network_builder:
            start_time = time()
            try:
                self.network_builder.prep_service_networking(
                    service, traffic_group)
            except f5ex.NetworkNotReady as error:
                # Network definition is incomplete; defer and retry later.
                LOG.debug("Network creation deferred until network "
                          "definition is completed: %s",
                          error.message)
                if not delete_event:
                    self.do_service_update = False
                    raise error
            except Exception as error:
                LOG.error("Prep-network exception: icontrol_driver: %s",
                          error.message)
                if lb_provisioning_status != f5const.F5_PENDING_DELETE:
                    loadbalancer['provisioning_status'] = \
                        f5const.F5_ERROR
                if not delete_event:
                    raise error
            finally:
                if time() - start_time > .001:
                    LOG.debug(" _prep_service_networking "
                              "took %.5f secs" % (time() - start_time))

        for bigip in self.get_config_bigips():
            # check_for_delete_subnets:
            #     keep track of which subnets we should check to delete
            #     for a deleted vip or member
            # do_not_delete_subnets:
            #     If we add an IP to a subnet we must not delete the subnet
            all_subnet_hints[bigip.device_name] = \
                {'check_for_delete_subnets': {},
                 'do_not_delete_subnets': []}

        LOG.debug("XXXXXXXXX: Pre assure service")
        self.lbaas_builder.assure_service(service,
                                          traffic_group,
                                          all_subnet_hints)
        LOG.debug("XXXXXXXXX: Post assure service")

        if self.network_builder:
            start_time = time()
            try:
                self.network_builder.post_service_networking(
                    service, all_subnet_hints)
            except Exception as error:
                LOG.error("Post-network exception: icontrol_driver: %s",
                          error.message)
                if lb_provisioning_status != f5const.F5_PENDING_DELETE:
                    loadbalancer['provisioning_status'] = \
                        f5const.F5_ERROR
                raise error

            if time() - start_time > .001:
                LOG.debug(" _post_service_networking "
                          "took %.5f secs" % (time() - start_time))

    except f5ex.NetworkNotReady as error:
        # Expected while neutron finishes the network definition; the
        # service will be retried on a later event.
        pass
    except Exception as err:
        LOG.exception(err)
    finally:
        # only delete partition if loadbalancer is being deleted
        if lb_provisioning_status == f5const.F5_PENDING_DELETE:
            self.tenant_manager.assure_tenant_cleanup(service,
                                                      all_subnet_hints)
def update_service_status(self, service, timed_out=False):
    """Propagate object statuses from this service back to Neutron."""
    LOG.debug("_update_service_status")

    if not self.plugin_rpc:
        LOG.error("Cannot update status in Neutron without "
                  "RPC handler.")
        return

    # Update each object type that is present; order matters, with the
    # loadbalancer updated last.
    if 'members' in service:
        self._update_member_status(service['members'], timed_out)

    if 'healthmonitors' in service:
        self._update_health_monitor_status(service['healthmonitors'])

    if 'pools' in service:
        self._update_pool_status(service['pools'])

    if 'listeners' in service:
        self._update_listener_status(service)

    if 'l7policy_rules' in service:
        self._update_l7rule_status(service['l7policy_rules'])

    if 'l7policies' in service:
        self._update_l7policy_status(service['l7policies'])

    self._update_loadbalancer_status(service, timed_out)
def _update_member_status(self, members, timed_out=False):
    """Update member status in OpenStack."""
    for member in members:
        if 'provisioning_status' not in member:
            continue
        provisioning_status = member['provisioning_status']
        if provisioning_status in self.positive_plugin_const_state:
            if timed_out and \
                    provisioning_status != f5const.F5_ACTIVE:
                member['provisioning_status'] = f5const.F5_ERROR
                operating_status = f5const.F5_OFFLINE
            else:
                member['provisioning_status'] = f5const.F5_ACTIVE
                operating_status = f5const.F5_ONLINE
            self.plugin_rpc.update_member_status(
                member['id'],
                member['provisioning_status'],
                operating_status
            )
        elif provisioning_status == f5const.F5_PENDING_DELETE:
            # Members deleted with their parent pool are already gone.
            if not member.get('parent_pool_deleted', False):
                self.plugin_rpc.member_destroyed(member['id'])
        elif provisioning_status == f5const.F5_ERROR:
            self.plugin_rpc.update_member_status(
                member['id'], f5const.F5_ERROR, f5const.F5_OFFLINE)
def _update_health_monitor_status(self, health_monitors):
    """Update pool monitor status in OpenStack."""
    for health_monitor in health_monitors:
        if 'provisioning_status' not in health_monitor:
            continue
        provisioning_status = health_monitor['provisioning_status']
        if provisioning_status in self.positive_plugin_const_state:
            self.plugin_rpc.update_health_monitor_status(
                health_monitor['id'],
                f5const.F5_ACTIVE,
                f5const.F5_ONLINE
            )
            health_monitor['provisioning_status'] = f5const.F5_ACTIVE
        elif provisioning_status == f5const.F5_PENDING_DELETE:
            self.plugin_rpc.health_monitor_destroyed(health_monitor['id'])
        elif provisioning_status == f5const.F5_ERROR:
            # NOTE(review): only the id is passed here; confirm the RPC
            # defaults supply the error/offline statuses.
            self.plugin_rpc.update_health_monitor_status(
                health_monitor['id'])
@log_helpers.log_method_call
def _update_pool_status(self, pools):
    """Update pool status in OpenStack."""
    for pool in pools:
        if 'provisioning_status' not in pool:
            continue
        provisioning_status = pool['provisioning_status']
        if provisioning_status in self.positive_plugin_const_state:
            self.plugin_rpc.update_pool_status(
                pool['id'],
                f5const.F5_ACTIVE,
                f5const.F5_ONLINE
            )
            pool['provisioning_status'] = f5const.F5_ACTIVE
        elif provisioning_status == f5const.F5_PENDING_DELETE:
            self.plugin_rpc.pool_destroyed(pool['id'])
        elif provisioning_status == f5const.F5_ERROR:
            self.plugin_rpc.update_pool_status(pool['id'])
@log_helpers.log_method_call
def _update_listener_status(self, service):
    """Update listener status in OpenStack."""
    for listener in service['listeners']:
        if 'provisioning_status' not in listener:
            continue
        provisioning_status = listener['provisioning_status']
        if provisioning_status in self.positive_plugin_const_state:
            self.plugin_rpc.update_listener_status(
                listener['id'],
                f5const.F5_ACTIVE,
                listener['operating_status']
            )
            listener['provisioning_status'] = f5const.F5_ACTIVE
        elif provisioning_status == f5const.F5_PENDING_DELETE:
            self.plugin_rpc.listener_destroyed(listener['id'])
        elif provisioning_status == f5const.F5_ERROR:
            self.plugin_rpc.update_listener_status(
                listener['id'],
                provisioning_status,
                f5const.F5_OFFLINE)
@log_helpers.log_method_call
def _update_l7rule_status(self, l7rules):
    """Update l7rule status in OpenStack."""
    for l7rule in l7rules:
        if 'provisioning_status' not in l7rule:
            continue
        provisioning_status = l7rule['provisioning_status']
        if provisioning_status in self.positive_plugin_const_state:
            self.plugin_rpc.update_l7rule_status(
                l7rule['id'],
                l7rule['policy_id'],
                f5const.F5_ACTIVE,
                f5const.F5_ONLINE
            )
        elif provisioning_status == f5const.F5_PENDING_DELETE:
            self.plugin_rpc.l7rule_destroyed(l7rule['id'])
        elif provisioning_status == f5const.F5_ERROR:
            self.plugin_rpc.update_l7rule_status(
                l7rule['id'], l7rule['policy_id'])
@log_helpers.log_method_call
def _update_l7policy_status(self, l7policies):
    """Update l7policy status in OpenStack."""
    # FIX: the docstring was previously placed *after* the LOG.debug
    # call, making it a no-op string expression instead of a docstring.
    LOG.debug("_update_l7policy_status")
    for l7policy in l7policies:
        if 'provisioning_status' in l7policy:
            provisioning_status = l7policy['provisioning_status']
            if provisioning_status in self.positive_plugin_const_state:
                self.plugin_rpc.update_l7policy_status(
                    l7policy['id'],
                    f5const.F5_ACTIVE,
                    f5const.F5_ONLINE
                )
            elif provisioning_status == f5const.F5_PENDING_DELETE:
                LOG.debug("calling l7policy_destroyed")
                self.plugin_rpc.l7policy_destroyed(
                    l7policy['id'])
            elif provisioning_status == f5const.F5_ERROR:
                self.plugin_rpc.update_l7policy_status(l7policy['id'])
@log_helpers.log_method_call
def _update_loadbalancer_status(self, service, timed_out=False):
    """Update loadbalancer status in OpenStack."""
    loadbalancer = service.get('loadbalancer', {})
    provisioning_status = loadbalancer.get('provisioning_status',
                                           f5const.F5_ERROR)

    if provisioning_status in self.positive_plugin_const_state:
        if timed_out:
            operating_status = f5const.F5_OFFLINE
            # A timed-out create ends in error; other pending states
            # settle to active.
            if provisioning_status == f5const.F5_PENDING_CREATE:
                loadbalancer['provisioning_status'] = f5const.F5_ERROR
            else:
                loadbalancer['provisioning_status'] = f5const.F5_ACTIVE
        else:
            operating_status = f5const.F5_ONLINE
            loadbalancer['provisioning_status'] = f5const.F5_ACTIVE
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'],
            loadbalancer['provisioning_status'],
            operating_status)
    elif provisioning_status == f5const.F5_PENDING_DELETE:
        self.plugin_rpc.loadbalancer_destroyed(loadbalancer['id'])
    elif provisioning_status == f5const.F5_ERROR:
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'],
            provisioning_status,
            f5const.F5_OFFLINE)
    elif provisioning_status == f5const.F5_ACTIVE:
        LOG.debug('Loadbalancer provisioning status is active')
    else:
        LOG.error('Loadbalancer provisioning status is invalid')
@is_operational
def update_operating_status(self, service):
    """Refresh member operating status and report it to Neutron."""
    if 'members' not in service:
        return

    if self.network_builder:
        # append route domain to member address
        try:
            self.network_builder._annotate_service_route_domains(service)
        except f5ex.InvalidNetworkType as exc:
            LOG.warning(exc.msg)
            return

    # Collect the current member status from the device.
    self.lbaas_builder.update_operating_status(service)

    # Push the refreshed operating status to Neutron.
    for member in service['members']:
        if member['provisioning_status'] != f5const.F5_ACTIVE:
            continue
        self.plugin_rpc.update_member_status(
            member['id'],
            provisioning_status=None,
            operating_status=member.get('operating_status', None))
def get_active_bigip(self):
    """Return the active BIG-IP, falling back to the first known one."""
    bigips = self.get_all_bigips()
    if len(bigips) == 1:
        return bigips[0]
    for candidate in bigips:
        if getattr(candidate, 'failover_state', None) == 'active':
            return candidate
    # if can't determine active, default to first one
    return bigips[0]
def service_to_traffic_group(self, service):
    """Map a service to a traffic group via its loadbalancer tenant id."""
    tenant_id = service['loadbalancer']['tenant_id']
    return self.tenant_to_traffic_group(tenant_id)
def tenant_to_traffic_group(self, tenant_id):
    """Deterministically map a tenant id onto one of the traffic groups."""
    # md5 is used only as a stable distribution hash, not for security.
    digest = hashlib.md5(tenant_id).hexdigest()
    group_index = int(digest, 16) % len(self.__traffic_groups)
    return self.__traffic_groups[group_index]
# these functions should return only active BIG-IP
# not errored BIG-IPs.
def get_bigip(self):
    """Return the first active BIG-IP, ordered by hostname."""
    for host in sorted(self.__bigips):
        bigip = self.__bigips[host]
        if getattr(bigip, 'status', None) == 'active':
            return bigip
def get_bigip_hosts(self):
    """Return sorted hostnames of all BIG-IPs in the 'active' state."""
    active_hosts = [
        host for host in list(self.__bigips)
        if getattr(self.__bigips[host], 'status', None) == 'active'
    ]
    return sorted(active_hosts)
def get_all_bigips(self):
    """Return all BIG-IP connections currently in the 'active' state."""
    return [
        self.__bigips[host] for host in list(self.__bigips)
        if getattr(self.__bigips[host], 'status', None) == 'active'
    ]
def get_config_bigips(self):
    # Configuration is applied to every active (non-errored) BIG-IP.
    return self.get_all_bigips()
# these are the refactored methods
def get_active_bigips(self):
    # Refactored alias: "active" BIG-IPs are exactly the non-errored ones.
    return self.get_all_bigips()
def get_errored_bigips_hostnames(self):
    """Return hostnames of BIG-IPs currently in the 'error' state."""
    errored_hosts = []
    for host in list(self.__bigips):
        if getattr(self.__bigips[host], 'status', None) == 'error':
            errored_hosts.append(host)
    return errored_hosts
# Statistics collection wrappers used by the agent's stats reporting.
# Each delegates to a helper object or queries the BIG-IP directly.
def get_inbound_throughput(self, bigip, global_statistics=None):
    """Return inbound throughput as reported by the stat helper."""
    return self.stat_helper.get_inbound_throughput(
        bigip, global_stats=global_statistics)
def get_outbound_throughput(self, bigip, global_statistics=None):
    """Return outbound throughput as reported by the stat helper."""
    return self.stat_helper.get_outbound_throughput(
        bigip, global_stats=global_statistics)
def get_throughput(self, bigip=None, global_statistics=None):
    """Return total throughput as reported by the stat helper."""
    return self.stat_helper.get_throughput(
        bigip, global_stats=global_statistics)
def get_active_connections(self, bigip=None, global_statistics=None):
    """Return the active connection count."""
    return self.stat_helper.get_active_connection_count(
        bigip, global_stats=global_statistics)
def get_ssltps(self, bigip=None, global_statistics=None):
    """Return the active SSL transactions-per-second figure."""
    return self.stat_helper.get_active_SSL_TPS(
        bigip, global_stats=global_statistics)
def get_node_count(self, bigip=None, global_statistics=None):
    """Return the number of LTM nodes on the BIG-IP."""
    return len(bigip.tm.ltm.nodes.get_collection())
def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
    """Return the number of client SSL profiles on the BIG-IP."""
    return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
def get_tenant_count(self, bigip=None, global_statistics=None):
    """Return the number of tenant folders on the BIG-IP."""
    return self.system_helper.get_tenant_folder_count(bigip)
def get_tunnel_count(self, bigip=None, global_statistics=None):
    """Return the number of tunnels on the BIG-IP."""
    return self.network_helper.get_tunnel_count(bigip)
def get_vlan_count(self, bigip=None, global_statistics=None):
    """Return the number of VLANs on the BIG-IP."""
    return self.network_helper.get_vlan_count(bigip)
def get_route_domain_count(self, bigip=None, global_statistics=None):
    """Return the number of route domains on the BIG-IP."""
    return self.network_helper.get_route_domain_count(bigip)
def _init_traffic_groups(self, bigip):
    """Load the floating traffic groups used for service placement."""
    try:
        LOG.debug('retrieving traffic groups from %s' % bigip.hostname)
        self.__traffic_groups = \
            self.cluster_manager.get_traffic_groups(bigip)
        groups = self.__traffic_groups
        # The local-only group is non-floating and cannot host services.
        if 'traffic-group-local-only' in groups:
            LOG.debug('removing reference to non-floating traffic group')
            groups.remove('traffic-group-local-only')
        groups.sort()
        LOG.debug('service placement will done on traffic group(s): %s'
                  % self.__traffic_groups)
    except Exception:
        # Mark this device unusable; placement needs the group list.
        bigip.status = 'error'
        bigip.status_message = \
            'could not determine traffic groups for service placement'
        raise
def _validate_bigip_version(self, bigip, hostname):
    """Verify the BIG-IP meets the minimum supported TMOS version."""
    version_error = (
        'Device %s must be at least TMOS %s.%s'
        % (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
           f5const.MIN_TMOS_MINOR_VERSION))
    major_version = self.system_helper.get_major_version(bigip)
    if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
        raise f5ex.MajorVersionValidateFailed(version_error)
    minor_version = self.system_helper.get_minor_version(bigip)
    # NOTE(review): the minor check applies even when the major version
    # exceeds the minimum — confirm that is intended.
    if minor_version < f5const.MIN_TMOS_MINOR_VERSION:
        raise f5ex.MinorVersionValidateFailed(version_error)
    return major_version, minor_version
def trace_service_requests(self, service):
    """Dump services to a file for debugging."""
    # The trace file holds a JSON array. Overwrite the trailing ']'
    # with a comma, append this service, then close the array again.
    with open(self.file_name, 'r+') as fp:
        # Seek to the last byte of the file (the closing bracket).
        fp.seek(-1, 2)
        fp.write(',')
        json.dump(service, fp, sort_keys=True, indent=2)
        fp.write(']')
def get_config_dir(self):
    """Determine F5 agent configuration directory.

    Checks, in order: the oslo ``config_dir`` option (when it is a
    plain string), the directory of the agent INI among the configured
    config files, the directory of a single string config file, and
    finally the default ``/etc/neutron/services/f5``.

    :return: str defining configuration directory.
    """
    conf_dir = self.conf.config_dir
    if conf_dir and isinstance(conf_dir, str):
        # use config_dir parameter if defined, and is a string
        return conf_dir

    config_file = self.conf.config_file
    if config_file:
        if isinstance(config_file, list):
            # multiple config files (neutron and agent) are usually
            # defined; find the agent config (f5-openstack-agent.ini)
            for file_name in config_file:
                if 'f5-openstack-agent.ini' in file_name:
                    return os.path.dirname(file_name)
        elif isinstance(config_file, str):
            # not a list, just a single string
            return os.path.dirname(config_file)

    # if all else fails
    return '/etc/neutron/services/f5'
|
"""
Predict labels using trained ML models. Use average probability ensemble.
"""
__author__ = 'bshang'
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.externals import joblib
def convert_label_to_array(str_label):
    """Parse a space-separated label string into a list of ints."""
    return [int(token) for token in str_label.split(' ') if token]
# Model/feature identifiers used to locate the feature file on disk.
MODEL = 'inception-v3'
LAYER = 'global_pool_output'
NUM_EPOCH = 30
# NOTE(review): despite the .h5 extension this path is read with
# pd.read_csv below — confirm the file is actually CSV-formatted.
BIZ_FEATURES_PATH = '/data/test_biz_features_{0}_{1}_{2}.h5'.format(MODEL, LAYER, NUM_EPOCH)
df = pd.read_csv(BIZ_FEATURES_PATH, header=0)
# Feature columns are named F1..F2048.
cols = ["F" + str(i+1) for i in range(0, 2048)]
X = df[cols].values
# Load the three previously trained classifiers.
model_svc = joblib.load('/data/skmodels/svc_inception-v3.pkl')
model_lrc = joblib.load('/data/skmodels/lrc_inception-v3.pkl')
model_rfc = joblib.load('/data/skmodels/rfc_inception-v3.pkl')
print('predict svc')
y_predict_proba_svc = model_svc.predict_proba(X)
print('predict lrc')
y_predict_proba_lrc = model_lrc.predict_proba(X)
print('predict rfc')
y_predict_proba_rfc = model_rfc.predict_proba(X)
# Average-probability ensemble over the three models.
y_predict_proba = np.mean(
    np.array([y_predict_proba_svc, y_predict_proba_lrc, y_predict_proba_rfc]), axis=0)
THRESHOLD = 0.46  # estimated from cross-validation
y_predict = preprocessing.binarize(y_predict_proba, threshold=THRESHOLD)
# Fit the binarizer on training labels so inverse_transform can map
# binarized predictions back to label ids.
df_biz2lab = pd.read_csv('/data/train.csv').dropna()
y = np.array([convert_label_to_array(y) for y in df_biz2lab['labels']])
mlb = preprocessing.MultiLabelBinarizer()
mlb.fit_transform(y)
y_ = mlb.inverse_transform(y_predict)  # y_ contain the numbered labels
y_ = [' '.join(str(x) for x in ls) for ls in y_]
df['labels'] = pd.Series(y_, index=df.index)
df = df.sort_values('business_id')
with open('/data/submission/inception_v3_svc_rfc_lrc_epoch3.csv', 'w') as f:
    df[['business_id', 'labels']].to_csv(f, index=False)
|
import re
from models.contact import Contact
def test_all_contacts_on_homepage(app, db):
    """Verify every contact shown on the homepage against the database."""
    # Ensure there is at least one contact to compare.
    if app.contact.count() == 0:
        app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="test@test.com"))
    contacts_from_homepage = sorted(app.contact.get_contact_list(), key = Contact.contact_id_or_max)
    contacts_from_db = sorted(db.get_contact_list(), key = Contact.contact_id_or_max)
    # NOTE(review): the loop assumes both lists have the same length; a
    # count mismatch with extra DB rows would pass silently — confirm.
    for i in range(len(contacts_from_homepage)):
        hp_contact=contacts_from_homepage[i]
        db_contact=contacts_from_db[i]
        assert hp_contact.first_name == db_contact.first_name
        assert hp_contact.last_name == db_contact.last_name
        # UI strips/reformats whitespace and phone formatting, so both
        # sides are normalized before comparing.
        assert clear_address(hp_contact.address) == clear_address(db_contact.address)
        assert clear_phone(hp_contact.all_phones_homepage) == clear_phone(merge_phones_homepage(db_contact))
        assert hp_contact.all_emails_homepage == merge_emails_homepage(db_contact)
    print("Successfully verified %s contacts vs Database" % str(len(contacts_from_homepage)))
"""def test_contact_on_homepage(app):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="test@test.com"))
index = randrange(len(app.contact.get_contact_list()))
contact_from_homepage = app.contact.get_contact_list()[index]
contact_from_editpage = app.contact.get_contact_data_editpage(index)
assert contact_from_homepage.first_name == contact_from_editpage.first_name
assert contact_from_homepage.last_name == contact_from_editpage.last_name
assert contact_from_homepage.address == contact_from_editpage.address
assert contact_from_homepage.all_phones_homepage == merge_phones_homepage(contact_from_editpage)
assert contact_from_homepage.all_emails_homepage == merge_emails_homepage(contact_from_editpage)"""
"""def test_phones_on_viewpage(app):
contact_from_viewpage = app.contact.get_contact_data_viewpage(0)
contact_from_editpage = app.contact.get_contact_data_editpage(0)
assert contact_from_viewpage.home_phone == contact_from_editpage.home_phone
assert contact_from_viewpage.work_phone == contact_from_editpage.work_phone
assert contact_from_viewpage.mobile_phone == contact_from_editpage.mobile_phone
assert contact_from_viewpage.fax == contact_from_editpage.fax"""
def clear(s):
    """Drop phone-formatting characters (dash, space, parens) from *s*."""
    return "".join(ch for ch in s if ch not in "- ()")
def clear_phone(number):
    """Remove every zero digit from a phone number string."""
    return number.replace("0", "")
def clear_address(address):
    """Normalize an address by removing all whitespace.

    Fixed: the previous pattern ``"[\\n\\r\\s+]"`` placed a literal
    ``+`` inside the character class, so plus signs were stripped from
    addresses too; ``\\s`` already covers ``\\n`` and ``\\r``.
    """
    return re.sub(r"\s+", "", address)
def merge_phones_homepage(contact):
    """Render a contact's phone numbers the way the homepage lists them."""
    merged = []
    for phone in (contact.home_phone, contact.mobile_phone, contact.work_phone):
        if phone is None:
            continue
        # Inline the `clear` normalization: drop dash, space, parens.
        stripped = re.sub("[- ()]", "", phone)
        if stripped:
            merged.append(stripped)
    return "\n".join(merged)
def merge_emails_homepage(contact):
    """Render a contact's emails the way the homepage lists them."""
    emails = (contact.email_1, contact.email_2, contact.email_3)
    return "\n".join(e for e in emails if e is not None and e != "")
|
"""Shared class to maintain Plex server instances."""
import logging
import ssl
import time
from urllib.parse import urlparse
from plexapi.client import PlexClient
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_USE_EPISODE_ART,
DEBOUNCE_TIMEOUT,
DEFAULT_VERIFY_SSL,
DOMAIN,
GDM_DEBOUNCER,
GDM_SCANNER,
PLAYER_SOURCE,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEXTV_THROTTLE,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import (
MediaNotFound,
NoServersFound,
ServerNotSpecified,
ShouldUpdateConfigEntry,
)
from .media_search import lookup_movie, lookup_music, lookup_tv
from .models import PlexSession
_LOGGER = logging.getLogger(__name__)
# Identify this integration to Plex by overriding plexapi's default
# client identification values.
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(
    self, hass, server_config, known_server_id=None, options=None, entry_id=None
):
    """Initialize a Plex server instance."""
    self.hass = hass
    self.entry_id = entry_id
    # Maps unique_id -> PlexSession for currently playing sessions.
    self.active_sessions = {}
    # Lazily-created plex.tv account and server connections.
    self._plex_account = None
    self._plex_server = None
    # Track client machine identifiers seen/created by this instance.
    self._created_clients = set()
    self._known_clients = set()
    self._known_idle = set()
    # Connection settings from the config entry.
    self._url = server_config.get(CONF_URL)
    self._token = server_config.get(CONF_TOKEN)
    self._server_name = server_config.get(CONF_SERVER)
    self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
    self._server_id = known_server_id
    self.options = options
    self.server_choice = None
    self._accounts = []
    self._owner_username = None
    # Cached plex.tv client list plus the time it was last refreshed.
    self._plextv_clients = None
    self._plextv_client_timestamp = 0
    self._client_device_cache = {}
    # plex.tv features are only available when a token was provided.
    self._use_plex_tv = self._token is not None
    self._version = None
    # Debounce platform updates so bursts of events trigger one refresh.
    self.async_update_platforms = Debouncer(
        hass,
        _LOGGER,
        cooldown=DEBOUNCE_TIMEOUT,
        immediate=True,
        function=self._async_update_platforms,
    ).async_call
    self.thumbnail_cache = {}

    # Header conditionally added as it is not available in config entry v1
    if CONF_CLIENT_ID in server_config:
        plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_ID]
    plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
    plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
@property
def account(self):
    """Return a cached MyPlexAccount, creating it on first access."""
    if self._plex_account or not self._use_plex_tv:
        return self._plex_account
    try:
        self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
    except (BadRequest, Unauthorized):
        # Token rejected: stop attempting plex.tv access.
        self._use_plex_tv = False
        _LOGGER.error("Not authorized to access plex.tv with provided token")
        raise
    return self._plex_account
def plextv_clients(self):
    """Return available clients linked to the Plex account (throttled)."""
    if self.account is None:
        return []

    now = time.time()
    if now - self._plextv_client_timestamp > PLEXTV_THROTTLE:
        self._plextv_client_timestamp = now
        resources = self.account.resources()
        self._plextv_clients = [
            resource
            for resource in resources
            if "player" in resource.provides
            and resource.presence
            and resource.publicAddressMatches
        ]
        _LOGGER.debug(
            "Current available clients from plex.tv: %s", self._plextv_clients
        )
    return self._plextv_clients
def connect(self):
    """Connect to a Plex server directly, obtaining direct URL if necessary."""
    config_entry_update_needed = False

    def _connect_with_token():
        # Discover servers shared with this plex.tv account, preferring
        # those currently reporting presence.
        all_servers = [
            x for x in self.account.resources() if "server" in x.provides
        ]
        servers = [x for x in all_servers if x.presence] or all_servers
        available_servers = [(x.name, x.clientIdentifier) for x in servers]
        if not available_servers:
            raise NoServersFound
        if not self._server_name and len(available_servers) > 1:
            # Ambiguous choice: let the config flow ask the user.
            raise ServerNotSpecified(available_servers)
        self.server_choice = (
            self._server_name if self._server_name else available_servers[0][0]
        )
        self._plex_server = self.account.resource(self.server_choice).connect(
            timeout=10
        )

    def _connect_with_url():
        session = None
        if self._url.startswith("https") and not self._verify_ssl:
            # User opted out of certificate verification.
            session = Session()
            session.verify = False
        self._plex_server = plexapi.server.PlexServer(
            self._url, self._token, session
        )

    def _update_plexdirect_hostname():
        # Reconnect via plex.tv to obtain a fresh plex.direct URL for
        # the known server id.
        matching_servers = [
            x.name
            for x in self.account.resources()
            if x.clientIdentifier == self._server_id
        ]
        if matching_servers:
            self._plex_server = self.account.resource(matching_servers[0]).connect(
                timeout=10
            )
            return True
        _LOGGER.error("Attempt to update plex.direct hostname failed")
        return False

    if self._url:
        try:
            _connect_with_url()
        except requests.exceptions.SSLError as error:
            # Walk the exception chain looking for the underlying
            # certificate-verification failure.
            while error and not isinstance(error, ssl.SSLCertVerificationError):
                error = error.__context__
            if isinstance(error, ssl.SSLCertVerificationError):
                domain = urlparse(self._url).netloc.split(":")[0]
                if domain.endswith("plex.direct") and error.args[0].startswith(
                    f"hostname '{domain}' doesn't match"
                ):
                    # plex.direct certificates rotate; refresh the URL.
                    _LOGGER.warning(
                        "Plex SSL certificate's hostname changed, updating"
                    )
                    if _update_plexdirect_hostname():
                        config_entry_update_needed = True
                    else:
                        raise Unauthorized(  # pylint: disable=raise-missing-from
                            "New certificate cannot be validated with provided token"
                        )
                else:
                    raise
            else:
                raise
    else:
        _connect_with_token()
    try:
        system_accounts = self._plex_server.systemAccounts()
        shared_users = self.account.users() if self.account else []
    except Unauthorized:
        _LOGGER.warning(
            "Plex account has limited permissions, shared account filtering will not be available"
        )
    else:
        # Record the titles of accounts shared with this server, plus
        # the server owner (system account id 1).
        self._accounts = []
        for user in shared_users:
            for shared_server in user.servers:
                if shared_server.machineIdentifier == self.machine_identifier:
                    self._accounts.append(user.title)
        _LOGGER.debug("Linked accounts: %s", self.accounts)
        owner_account = next(
            (account.name for account in system_accounts if account.accountID == 1),
            None,
        )
        if owner_account:
            self._owner_username = owner_account
            self._accounts.append(owner_account)
            _LOGGER.debug("Server owner found: '%s'", self._owner_username)
    self._version = self._plex_server.version
    if config_entry_update_needed:
        raise ShouldUpdateConfigEntry
@callback
def async_refresh_entity(self, machine_identifier, device, session, source):
    """Forward refresh dispatch to media_player."""
    unique_id = f"{self.machine_identifier}:{machine_identifier}"
    _LOGGER.debug("Refreshing %s", unique_id)
    signal = PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(unique_id)
    async_dispatcher_send(self.hass, signal, device, session, source)
async def async_update_session(self, payload):
    """Process a session payload received from a websocket callback."""
    session_payload = payload["PlaySessionStateNotification"][0]

    # Buffering is transient; wait for a meaningful state change.
    if (state := session_payload["state"]) == "buffering":
        return

    session_key = int(session_payload["sessionKey"])
    offset = int(session_payload["viewOffset"])
    rating_key = int(session_payload["ratingKey"])

    # Locate the tracked session matching this notification, if any.
    unique_id, active_session = next(
        (
            (unique_id, session)
            for unique_id, session in self.active_sessions.items()
            if session.session_key == session_key
        ),
        (None, None),
    )

    if not active_session:
        # Unknown session: trigger a full platform refresh to pick it up.
        await self.async_update_platforms()
        return

    if state == "stopped":
        self.active_sessions.pop(unique_id, None)
    else:
        active_session.state = state
        active_session.media_position = offset

    def update_with_new_media():
        """Update an existing session with new media details."""
        media = self.fetch_item(rating_key)
        active_session.update_media(media)

    # The same player switched media; fetch the new item off the event
    # loop since plexapi performs blocking I/O.
    if active_session.media_content_id != rating_key and state in (
        "playing",
        "paused",
    ):
        await self.hass.async_add_executor_job(update_with_new_media)

    async_dispatcher_send(
        self.hass,
        PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(unique_id),
        state,
    )

    async_dispatcher_send(
        self.hass,
        PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
    )
def _fetch_platform_data(self):
    """Fetch all data from the Plex server in a single method."""
    clients = self._plex_server.clients()
    sessions = self._plex_server.sessions()
    plextv_clients = self.plextv_clients()
    return (clients, sessions, plextv_clients)
async def _async_update_platforms(self): # noqa: C901
"""Update the platform entities."""
_LOGGER.debug("Updating devices")
await self.hass.data[DOMAIN][GDM_DEBOUNCER]()
available_clients = {}
ignored_clients = set()
new_clients = set()
monitored_users = self.accounts
known_accounts = set(self.option_monitored_users)
if known_accounts:
monitored_users = {
user
for user in self.option_monitored_users
if self.option_monitored_users[user]["enabled"]
}
if not self.option_ignore_new_shared_users:
for new_user in self.accounts - known_accounts:
monitored_users.add(new_user)
try:
devices, sessions, plextv_clients = await self.hass.async_add_executor_job(
self._fetch_platform_data
)
except plexapi.exceptions.Unauthorized:
_LOGGER.debug(
"Token has expired for '%s', reloading integration", self.friendly_name
)
await self.hass.config_entries.async_reload(self.entry_id)
return
except (
plexapi.exceptions.BadRequest,
requests.exceptions.RequestException,
) as ex:
_LOGGER.error(
"Could not connect to Plex server: %s (%s)", self.friendly_name, ex
)
return
def process_device(source, device):
self._known_idle.discard(device.machineIdentifier)
available_clients.setdefault(device.machineIdentifier, {"device": device})
available_clients[device.machineIdentifier].setdefault(
PLAYER_SOURCE, source
)
if (
device.machineIdentifier not in ignored_clients
and self.option_ignore_plexweb_clients
and device.product == "Plex Web"
):
ignored_clients.add(device.machineIdentifier)
if device.machineIdentifier not in self._known_clients:
_LOGGER.debug(
"Ignoring %s %s: %s",
"Plex Web",
source,
device.machineIdentifier,
)
return
if device.machineIdentifier not in (
self._created_clients | ignored_clients | new_clients
):
new_clients.add(device.machineIdentifier)
_LOGGER.debug(
"New %s from %s: %s",
device.product,
source,
device.machineIdentifier,
)
def connect_to_client(source, baseurl, machine_identifier, name="Unknown"):
"""Connect to a Plex client and return a PlexClient instance."""
try:
client = PlexClient(
server=self._plex_server,
baseurl=baseurl,
identifier=machine_identifier,
token=self._plex_server.createToken(),
)
except (NotFound, requests.exceptions.ConnectionError):
_LOGGER.error(
"Direct client connection failed, will try again: %s (%s)",
name,
baseurl,
)
except Unauthorized:
_LOGGER.error(
"Direct client connection unauthorized, ignoring: %s (%s)",
name,
baseurl,
)
self._client_device_cache[machine_identifier] = None
else:
self._client_device_cache[client.machineIdentifier] = client
process_device(source, client)
def connect_to_resource(resource):
"""Connect to a plex.tv resource and return a Plex client."""
try:
client = resource.connect(timeout=3)
_LOGGER.debug("Resource connection successful to plex.tv: %s", client)
except NotFound:
_LOGGER.error(
"Resource connection failed to plex.tv: %s", resource.name
)
else:
client.proxyThroughServer(value=False, server=self._plex_server)
self._client_device_cache[client.machineIdentifier] = client
process_device("plex.tv", client)
def connect_new_clients():
"""Create connections to newly discovered clients."""
for gdm_entry in self.hass.data[DOMAIN][GDM_SCANNER].entries:
machine_identifier = gdm_entry["data"]["Resource-Identifier"]
if machine_identifier in self._client_device_cache:
client = self._client_device_cache[machine_identifier]
if client is not None:
process_device("GDM", client)
elif machine_identifier not in available_clients:
baseurl = (
f"http://{gdm_entry['from'][0]}:{gdm_entry['data']['Port']}"
)
name = gdm_entry["data"]["Name"]
connect_to_client("GDM", baseurl, machine_identifier, name)
for plextv_client in plextv_clients:
if plextv_client.clientIdentifier in self._client_device_cache:
client = self._client_device_cache[plextv_client.clientIdentifier]
if client is not None:
process_device("plex.tv", client)
elif plextv_client.clientIdentifier not in available_clients:
connect_to_resource(plextv_client)
def process_sessions():
live_session_keys = {x.sessionKey for x in sessions}
for unique_id, session in list(self.active_sessions.items()):
if session.session_key not in live_session_keys:
_LOGGER.debug("Purging unknown session: %s", session.session_key)
self.active_sessions.pop(unique_id)
for session in sessions:
if session.TYPE == "photo":
_LOGGER.debug("Photo session detected, skipping: %s", session)
continue
session_username = session.usernames[0]
for player in session.players:
unique_id = f"{self.machine_identifier}:{player.machineIdentifier}"
if unique_id not in self.active_sessions:
_LOGGER.debug("Creating new Plex session: %s", session)
self.active_sessions[unique_id] = PlexSession(self, session)
if session_username and session_username not in monitored_users:
ignored_clients.add(player.machineIdentifier)
_LOGGER.debug(
"Ignoring %s client owned by '%s'",
player.product,
session_username,
)
continue
process_device("session", player)
available_clients[player.machineIdentifier][
"session"
] = self.active_sessions[unique_id]
for device in devices:
process_device("PMS", device)
def sync_tasks():
connect_new_clients()
process_sessions()
await self.hass.async_add_executor_job(sync_tasks)
new_entity_configs = []
for client_id, client_data in available_clients.items():
if client_id in ignored_clients:
continue
if client_id in new_clients:
new_entity_configs.append(client_data)
self._created_clients.add(client_id)
else:
self.async_refresh_entity(
client_id,
client_data["device"],
client_data.get("session"),
client_data.get(PLAYER_SOURCE),
)
self._known_clients.update(new_clients | ignored_clients)
idle_clients = (
self._known_clients - self._known_idle - ignored_clients
).difference(available_clients)
for client_id in idle_clients:
self.async_refresh_entity(client_id, None, None, None)
self._known_idle.add(client_id)
self._client_device_cache.pop(client_id, None)
if new_entity_configs:
async_dispatcher_send(
self.hass,
PLEX_NEW_MP_SIGNAL.format(self.machine_identifier),
new_entity_configs,
)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
)
    @property
    def plex_server(self):
        """Return the underlying plexapi.server.PlexServer instance."""
        return self._plex_server
    @property
    def has_token(self) -> bool:
        """Return True if a token is used to connect to this Plex server."""
        return self._token is not None
    @property
    def accounts(self) -> set:
        """Return a copy of the set of accounts associated with the Plex server."""
        return set(self._accounts)
    @property
    def owner(self):
        """Return the username of the Plex server owner."""
        return self._owner_username
    @property
    def version(self):
        """Return the software version of the Plex server."""
        return self._version
    @property
    def friendly_name(self):
        """Return the friendly name of the connected Plex server."""
        return self._plex_server.friendlyName
    @property
    def machine_identifier(self):
        """Return the unique machine identifier of the connected Plex server."""
        return self._plex_server.machineIdentifier
    @property
    def url_in_use(self):
        """Return the base URL used to reach the connected Plex server."""
        # plexapi does not expose the base URL publicly, so reach into the
        # private attribute directly.
        return self._plex_server._baseurl  # pylint: disable=protected-access
    @property
    def option_ignore_new_shared_users(self):
        """Return the ignore_new_shared_users config option (default False)."""
        return self.options[MP_DOMAIN].get(CONF_IGNORE_NEW_SHARED_USERS, False)
    @property
    def option_use_episode_art(self):
        """Return the use_episode_art config option (default False)."""
        return self.options[MP_DOMAIN].get(CONF_USE_EPISODE_ART, False)
    @property
    def option_monitored_users(self) -> dict:
        """Return the dict of monitored users from config (default empty)."""
        return self.options[MP_DOMAIN].get(CONF_MONITORED_USERS, {})
    @property
    def option_ignore_plexweb_clients(self):
        """Return the ignore_plex_web_clients config option (default False)."""
        return self.options[MP_DOMAIN].get(CONF_IGNORE_PLEX_WEB_CLIENTS, False)
    @property
    def library(self):
        """Return the library attribute from the plexapi server object."""
        return self._plex_server.library
    def playlist(self, title):
        """Return the playlist with the given title from the Plex server."""
        return self._plex_server.playlist(title)
    def playlists(self):
        """Return all available playlists from the Plex server."""
        return self._plex_server.playlists()
    def create_playqueue(self, media, **kwargs):
        """Create a playqueue for `media` on the Plex server.

        Extra kwargs are forwarded to plexapi's PlayQueue.create.
        """
        return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
    def get_playqueue(self, playqueue_id):
        """Retrieve an existing playqueue from the Plex server by ID."""
        return plexapi.playqueue.PlayQueue.get(self._plex_server, playqueue_id)
    def fetch_item(self, item):
        """Fetch a single item from the Plex server by key."""
        return self._plex_server.fetchItem(item)
    def lookup_media(self, media_type, **kwargs):
        """Look up a piece of media on the Plex server.

        Returns the matching plexapi media object, or None (after logging an
        error) when a required kwarg is missing or nothing matches. An
        unrecognized media_type also falls through and returns None.

        Recognized kwargs:
            plex_key (int): direct item key; when present all other kwargs
                are ignored.
            playlist_name: required when media_type is a playlist.
            library_name: required for episode/movie/music/video lookups;
                popped here, remaining kwargs are forwarded to the
                lookup_tv/lookup_movie/lookup_music helpers.
            video_name: required for the legacy 'video' lookup.
        """
        media_type = media_type.lower()
        # Direct lookup by key short-circuits everything else.
        if isinstance(kwargs.get("plex_key"), int):
            key = kwargs["plex_key"]
            try:
                return self.fetch_item(key)
            except NotFound:
                _LOGGER.error("Media for key %s not found", key)
                return None
        if media_type == MEDIA_TYPE_PLAYLIST:
            try:
                playlist_name = kwargs["playlist_name"]
                return self.playlist(playlist_name)
            except KeyError:
                _LOGGER.error("Must specify 'playlist_name' for this search")
                return None
            except NotFound:
                _LOGGER.error(
                    "Playlist '%s' not found",
                    playlist_name,
                )
                return None
        # All remaining media types are scoped to a library section.
        try:
            library_name = kwargs.pop("library_name")
            library_section = self.library.section(library_name)
        except KeyError:
            _LOGGER.error("Must specify 'library_name' for this search")
            return None
        except NotFound:
            _LOGGER.error("Library '%s' not found", library_name)
            return None
        try:
            if media_type == MEDIA_TYPE_EPISODE:
                return lookup_tv(library_section, **kwargs)
            if media_type == MEDIA_TYPE_MOVIE:
                return lookup_movie(library_section, **kwargs)
            if media_type == MEDIA_TYPE_MUSIC:
                return lookup_music(library_section, **kwargs)
            if media_type == MEDIA_TYPE_VIDEO:
                # Legacy method for compatibility
                try:
                    video_name = kwargs["video_name"]
                    return library_section.get(video_name)
                except KeyError:
                    _LOGGER.error("Must specify 'video_name' for this search")
                    return None
                except NotFound as err:
                    # Re-raised so the outer handler logs it uniformly.
                    raise MediaNotFound(f"Video {video_name}") from err
        except MediaNotFound as failed_item:
            _LOGGER.error("%s not found in %s", failed_item, library_name)
            return None
    @property
    def sensor_attributes(self) -> dict:
        """Return a user -> title mapping of active sessions for the activity sensor."""
        return {x.sensor_user: x.sensor_title for x in self.active_sessions.values()}
|
"""Dense Prediction Cell class that can be evolved in semantic segmentation.
DensePredictionCell is used as a `layer` in semantic segmentation whose
architecture is determined by the `config`, a dictionary specifying
the architecture.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from third_party.deeplab.core import utils
slim = contrib_slim
# Variable-scope names used when building the cell graph.
_META_ARCHITECTURE_SCOPE = 'meta_architecture'
_CONCAT_PROJECTION_SCOPE = 'concat_projection'
# Keys expected in each per-operation config dictionary.
_OP = 'op'
_CONV = 'conv'
_PYRAMID_POOLING = 'pyramid_pooling'
_KERNEL = 'kernel'
_RATE = 'rate'
_GRID_SIZE = 'grid_size'
_TARGET_SIZE = 'target_size'
_INPUT = 'input'
def dense_prediction_cell_hparams():
  """Default hyper-parameters for the dense prediction cell.

  Returns:
    A dictionary of hyper-parameters used for dense prediction cell with keys:
    - reduction_size: Integer, the number of output filters for each operation
        inside the cell.
    - dropout_on_concat_features: Boolean, apply dropout on the concatenated
        features or not.
    - dropout_on_projection_features: Boolean, apply dropout on the projection
        features or not.
    - dropout_keep_prob: Float, the `keep_prob` value used by the dropout
        operations enabled above.
    - concat_channels: Integer, the concatenated features will be
        channel-reduced to `concat_channels` channels.
    - conv_rate_multiplier: Integer, used to multiply the convolution rates,
        e.g. 2 when the output_stride changes from 16 to 8.
  """
  return dict(
      reduction_size=256,
      dropout_on_concat_features=True,
      dropout_on_projection_features=False,
      dropout_keep_prob=0.9,
      concat_channels=256,
      conv_rate_multiplier=1,
  )
class DensePredictionCell(object):
  """DensePredictionCell class used as a 'layer' in semantic segmentation."""
  def __init__(self, config, hparams=None):
    """Initializes the dense prediction cell.
    Args:
      config: A dictionary storing the architecture of a dense prediction cell.
      hparams: A dictionary of hyper-parameters, provided by users. This
        dictionary will be used to update the default dictionary returned by
        dense_prediction_cell_hparams().
    Raises:
      ValueError: If `conv_rate_multiplier` has value < 1.
    """
    # Start from the defaults and let user-provided values override them.
    self.hparams = dense_prediction_cell_hparams()
    if hparams is not None:
      self.hparams.update(hparams)
    self.config = config
    # Check values in hparams are valid or not.
    if self.hparams['conv_rate_multiplier'] < 1:
      raise ValueError('conv_rate_multiplier cannot have value < 1.')
  def _get_pyramid_pooling_arguments(
      self, crop_size, output_stride, image_grid, image_pooling_crop_size=None):
    """Gets arguments for pyramid pooling.
    Args:
      crop_size: A list of two integers, [crop_height, crop_width] specifying
        whole patch crop size.
      output_stride: Integer, output stride value for extracted features.
      image_grid: A list of two integers, [image_grid_height, image_grid_width],
        specifying the grid size of how the pyramid pooling will be performed.
      image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
        specifying the crop size for image pooling operations. Note that we
        decouple whole patch crop_size and image_pooling_crop_size as one could
        perform the image_pooling with different crop sizes.
    Returns:
      A list of (resize_value, pooled_kernel)
    """
    resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride)
    resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride)
    # If image_pooling_crop_size is not specified, use crop_size.
    if image_pooling_crop_size is None:
      image_pooling_crop_size = crop_size
    # Pool kernel = feature-map extent divided by the pooling grid, so the
    # grid cells tile the (strided) feature map.
    pooled_height = utils.scale_dimension(
        image_pooling_crop_size[0], 1. / (output_stride * image_grid[0]))
    pooled_width = utils.scale_dimension(
        image_pooling_crop_size[1], 1. / (output_stride * image_grid[1]))
    return ([resize_height, resize_width], [pooled_height, pooled_width])
  def _parse_operation(self, config, crop_size, output_stride,
                       image_pooling_crop_size=None):
    """Parses one operation.
    When 'operation' is 'pyramid_pooling', we compute the required
    hyper-parameters and save in config.
    Args:
      config: A dictionary storing required hyper-parameters for one
        operation.
      crop_size: A list of two integers, [crop_height, crop_width] specifying
        whole patch crop size.
      output_stride: Integer, output stride value for extracted features.
      image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
        specifying the crop size for image pooling operations. Note that we
        decouple whole patch crop_size and image_pooling_crop_size as one could
        perform the image_pooling with different crop sizes.
    Returns:
      A dictionary stores the related information for the operation.
    """
    # config is mutated in place; only pyramid-pooling ops need extra args.
    if config[_OP] == _PYRAMID_POOLING:
      (config[_TARGET_SIZE],
       config[_KERNEL]) = self._get_pyramid_pooling_arguments(
           crop_size=crop_size,
           output_stride=output_stride,
           image_grid=config[_GRID_SIZE],
           image_pooling_crop_size=image_pooling_crop_size)
    return config
  def build_cell(self,
                 features,
                 output_stride=16,
                 crop_size=None,
                 image_pooling_crop_size=None,
                 weight_decay=0.00004,
                 reuse=None,
                 is_training=False,
                 fine_tune_batch_norm=False,
                 scope=None):
    """Builds the dense prediction cell based on the config.
    Args:
      features: Input feature map of size [batch, height, width, channels].
      output_stride: Int, output stride at which the features were extracted.
      crop_size: A list [crop_height, crop_width], determining the input
        features resolution.
      image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
        specifying the crop size for image pooling operations. Note that we
        decouple whole patch crop_size and image_pooling_crop_size as one could
        perform the image_pooling with different crop sizes.
      weight_decay: Float, the weight decay for model variables.
      reuse: Reuse the model variables or not.
      is_training: Boolean, is training or not.
      fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not.
      scope: Optional string, specifying the variable scope.
    Returns:
      Features after passing through the constructed dense prediction cell with
        shape = [batch, height, width, channels] where channels are determined
        by `reduction_size` returned by dense_prediction_cell_hparams().
    Raises:
      ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or
        the operation is not recognized.
    """
    # Batch norm statistics are only updated when training AND explicitly
    # fine-tuning batch norm.
    batch_norm_params = {
        'is_training': is_training and fine_tune_batch_norm,
        'decay': 0.9997,
        'epsilon': 1e-5,
        'scale': True,
    }
    hparams = self.hparams
    with slim.arg_scope(
        [slim.conv2d, slim.separable_conv2d],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu,
        normalizer_fn=slim.batch_norm,
        padding='SAME',
        stride=1,
        reuse=reuse):
      with slim.arg_scope([slim.batch_norm], **batch_norm_params):
        with tf.variable_scope(scope, _META_ARCHITECTURE_SCOPE, [features]):
          depth = hparams['reduction_size']
          branch_logits = []
          for i, current_config in enumerate(self.config):
            scope = 'branch%d' % i
            current_config = self._parse_operation(
                config=current_config,
                crop_size=crop_size,
                output_stride=output_stride,
                image_pooling_crop_size=image_pooling_crop_size)
            tf.logging.info(current_config)
            # A negative input index selects the cell input; a non-negative
            # index reuses the output of an earlier branch.
            if current_config[_INPUT] < 0:
              operation_input = features
            else:
              operation_input = branch_logits[current_config[_INPUT]]
            if current_config[_OP] == _CONV:
              if current_config[_KERNEL] == [1, 1] or current_config[
                  _KERNEL] == 1:
                branch_logits.append(
                    slim.conv2d(operation_input, depth, 1, scope=scope))
              else:
                # Larger kernels use an atrous depthwise-separable conv.
                conv_rate = [r * hparams['conv_rate_multiplier']
                             for r in current_config[_RATE]]
                branch_logits.append(
                    utils.split_separable_conv2d(
                        operation_input,
                        filters=depth,
                        kernel_size=current_config[_KERNEL],
                        rate=conv_rate,
                        weight_decay=weight_decay,
                        scope=scope))
            elif current_config[_OP] == _PYRAMID_POOLING:
              # Average-pool on the computed grid, project to `depth`
              # channels, then resize back to the feature-map resolution.
              pooled_features = slim.avg_pool2d(
                  operation_input,
                  kernel_size=current_config[_KERNEL],
                  stride=[1, 1],
                  padding='VALID')
              pooled_features = slim.conv2d(
                  pooled_features,
                  depth,
                  1,
                  scope=scope)
              pooled_features = tf.image.resize_bilinear(
                  pooled_features,
                  current_config[_TARGET_SIZE],
                  align_corners=True)
              # Set shape for resize_height/resize_width if they are not Tensor.
              resize_height = current_config[_TARGET_SIZE][0]
              resize_width = current_config[_TARGET_SIZE][1]
              if isinstance(resize_height, tf.Tensor):
                resize_height = None
              if isinstance(resize_width, tf.Tensor):
                resize_width = None
              pooled_features.set_shape(
                  [None, resize_height, resize_width, depth])
              branch_logits.append(pooled_features)
            else:
              raise ValueError('Unrecognized operation.')
          # Merge branch logits.
          concat_logits = tf.concat(branch_logits, 3)
          if self.hparams['dropout_on_concat_features']:
            concat_logits = slim.dropout(
                concat_logits,
                keep_prob=self.hparams['dropout_keep_prob'],
                is_training=is_training,
                scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
          concat_logits = slim.conv2d(concat_logits,
                                      self.hparams['concat_channels'],
                                      1,
                                      scope=_CONCAT_PROJECTION_SCOPE)
          if self.hparams['dropout_on_projection_features']:
            concat_logits = slim.dropout(
                concat_logits,
                keep_prob=self.hparams['dropout_keep_prob'],
                is_training=is_training,
                scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
          return concat_logits
|
import asyncore, socket, logging, time, asynchat, os
from hdfs_space_common import get_tree_from_cache, get_child_node, TreeNode
# Log line format; WARNING level keeps routine metric chatter out of the logs.
FORMAT = '%(asctime)-15s: %(levelname)s %(module)s - %(funcName)s: %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
class ChatHandler(asynchat.async_chat):
def __init__(self, sock):
asynchat.async_chat.__init__(self, sock = sock)
self.ibuffer = []
self.obuffer = ''
self.set_terminator("\n")
def collect_incoming_data(self, data):
self.ibuffer.append(data)
logging.info('Received data "%s"' % data)
def found_terminator(self):
self.handle_request()
def handle_request(self):
data = self.ibuffer.pop(0)
#Data should be like:
#metric:path|user|size
# OR
#db:new_path
command = data.split(":")[0]
if command == 'metric':
metric_args = data.split(":")[1].split('|')
hdfs_path = metric_args[0] if len(metric_args) > 0 else "/"
user_name = metric_args[1] if len(metric_args) > 1 else "ALL"
metric = metric_args[2] if len(metric_args) > 2 else "size"
logging.debug('metric_args: %s' % metric_args)
logging.debug('hdfs_path: %s' % hdfs_path)
logging.debug('user_name: %s' % user_name)
logging.debug('metric: %s' % metric)
result = 0
if user_name == "ALL" and metric == 'size':
logging.warning('Rather using this script try command "hdfs dfs -du /"')
elif user_name == "ALL" and metric == 'amount':
logging.info('Calculating the metric')
result = get_child_node(file_tree, hdfs_path).get_amount_for_all()
else:
if metric == "size":
logging.info('Calculating the metric')
result = get_child_node(file_tree, hdfs_path).get_size_by_user(user_name)
elif metric == "amount":
logging.info('Calculating the metric')
result = get_child_node(file_tree, hdfs_path).get_amount_by_user(user_name)
else:
logging.warning("The metric %s not implemented yet" % metric)
logging.info('The result is ready: %s. Pushing it to back' % result)
self.push(str(result))
return
elif command == 'db':
file_path = data.split(":")[1]
if os.path.exists(file_path):
global file_tree
file_tree = get_tree_from_cache(file_path)
os.rename(file_path,MetricServer.db_path)
logging.info('File %s remaned to %s' % (file_path, MetricServer.db_path))
self.push('OK')
else:
logging.warning('File %s could not be found. Doing nothing' % file_path)
self.push('FAIL')
else:
logging.warning("The command %s not implemented yet")
self.push('FAIL')
class MetricServer(asyncore.dispatcher):
    """Unix-socket server that dispatches metric queries to ChatHandler."""
    # Fixed locations for the listening socket and the cached tree file.
    sock_path = '/tmp/hdfs_space.sock'
    db_path = '/tmp/hdfs_space.data'
    def __init__(self):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind(self.sock_path)
        logging.info('Starting metric-server')
        self.listen(5)
        # Populate the module-global tree shared with ChatHandler.
        global file_tree
        try:
            file_tree = get_tree_from_cache(self.db_path)
        # NOTE(review): only KeyError is handled here; a missing or corrupt
        # cache file raising IOError/OSError would propagate — confirm what
        # get_tree_from_cache actually raises.
        except KeyError as e:
            file_tree = TreeNode('')
    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            sock, addr = pair
            logging.info('Incoming connection')
            # The handler registers itself with the asyncore loop; the local
            # reference is not otherwise used.
            handler = ChatHandler(sock)
    def handle_close(self):
        self.close()
        logging.info('The socket is closed')
    def handle_expt(self):
        # Out-of-band data / exceptional socket condition.
        logging.info("OOB detected for %s" % self)
if __name__ == '__main__':
    # Module-global tree shared with ChatHandler; loaded by MetricServer.
    file_tree = None
    server = MetricServer()
    try:
        asyncore.loop()
    finally:
        # Remove the stale unix socket so the next start can bind again.
        if os.path.exists(server.sock_path):
            os.unlink(server.sock_path)
|
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
from string import punctuation
from heapq import nlargest
import re
"""
Modified from http://glowingpython.blogspot.co.uk/2014/09/text-summarization-with-nltk.html
"""
class FrequencySummarizer:
    def __init__(self, low_thresh=0.1, high_thresh=0.9):
        """
        Initialize the text summarizer.
        Words that have a normalized frequency lower than low_thresh
        or higher than high_thresh will be ignored.
        """
        ignore = ['fig','figure','ibid', 'et al','cf','NB','N.B.']
        self._low_thresh = low_thresh
        self._high_thresh = high_thresh
        self._stopwords = set(stopwords.words('english') + list(punctuation) + list(ignore))
    def _compute_frequencies(self, word_tk):
        """Return a dict of word -> normalized frequency for the tokenized
        sentences in word_tk, dropping stopwords and words whose normalized
        frequency falls outside (low_thresh, high_thresh)."""
        freq = defaultdict(int)
        for s in word_tk:
            for word in s:
                if word not in self._stopwords:
                    freq[word] += 1
        # Bug fix: guard the empty case (empty input, or all stopwords);
        # max() on an empty sequence raises ValueError.
        if not freq:
            return freq
        # frequencies normalization and filtering
        m = float(max(freq.values()))
        # Bug fix: iterate over a snapshot of the keys; deleting from a dict
        # while iterating its live keys view raises RuntimeError on Python 3.
        for w in list(freq.keys()):
            freq[w] = freq[w]/m
            if freq[w] >= self._high_thresh or freq[w] <= self._low_thresh:
                del freq[w]
        return freq
    def summarize(self, text, n):
        """
        Return a list of n sentences
        which represent the summary of text.
        """
        # NOTE(review): `unicode` is Python 2 only; this module predates
        # Python 3 and would need a compat shim to run there.
        text = "".join([unicode(x) for x in text])
        sents = sent_tokenize(text)
        if n > len(sents):
            n = len(sents)
        word_tk = [word_tokenize(s.lower()) for s in sents]
        self._freq = self._compute_frequencies(word_tk)
        ranking = defaultdict(int)
        for i,sent in enumerate(word_tk):
            for w in sent:
                if w in self._freq and len(w)>4: #Only count words of length>4 as significant
                    ranking[i] += self._freq[w]
        sentsindx = self._rank(ranking, n)
        return [sents[j].encode('ascii', errors='backslashreplace') for j in sentsindx]
    def _rank(self, ranking, n):
        """ return the first n sentences with highest ranking """
        return nlargest(n, ranking, key=ranking.get)
|
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
# Version marker for this vendored copy of the ipaddr library.
__version__ = 'trunk'
import struct
class AddressValueError(ValueError):
    """A Value Error related to the address (malformed address string/int)."""
class NetmaskValueError(ValueError):
    """A Value Error related to the netmask (bad prefix length or mask)."""
def IPAddress(address, version=None):
    """Take an IP string/int and return an address object of the right type.

    Args:
        address: A string or integer, the IP address. Either IPv4 or
            IPv6 addresses may be supplied; integers less than 2**32 will
            be considered to be IPv4 by default.
        version: An Integer, 4 or 6. If set, skip auto-detection. This
            matters for ambiguous inputs like IPAddress(1), which could be
            IPv4 ('0.0.0.1') or IPv6 ('::1').

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
            address.
    """
    # An explicit version bypasses detection; any other version value
    # (including 0/None) falls through to auto-detection below.
    if version == 4:
        return IPv4Address(address)
    if version == 6:
        return IPv6Address(address)
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def IPNetwork(address, version=None, strict=False):
    """Take an IP string/int and return a network object of the right type.

    Args:
        address: A string or integer, the IP address. Either IPv4 or
            IPv6 addresses may be supplied; integers less than 2**32 will
            be considered to be IPv4 by default.
        version: An Integer; if set, skip auto-detection. This matters for
            ambiguous inputs like IPNetwork(1), which could be IPv4
            ('0.0.0.1/32') or IPv6 ('::1/128').
        strict: passed through to the network constructor; when true a
            host address with bits set right of the mask is rejected.

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
            address. Or if a strict network was requested and a strict
            network wasn't given.
    """
    # An explicit version bypasses detection; any other version value
    # (including 0/None) falls through to auto-detection below.
    if version == 4:
        return IPv4Network(address, strict)
    if version == 6:
        return IPv6Network(address, strict)
    for network_class in (IPv4Network, IPv6Network):
        try:
            return network_class(address, strict)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.

    Example:
        >>> summarize_address_range(IPv4Address('1.1.1.0'),
            IPv4Address('1.1.1.130'))
        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
        IPv4Network('1.1.1.130/32')]

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        The address range collapsed to a list of IPv4Network's or
        IPv6Network's.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version is not 4 or 6.
    """
    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        # Bug fix: this message referenced the undefined names `self` and
        # `other` (copied from a method), so it raised NameError instead of
        # the documented TypeError.
        raise TypeError("%s and %s are not of the same version" % (
            str(first), str(last)))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    networks = []
    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')
    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    while first_int <= last_int:
        # Largest power-of-two block aligned at first_int that still fits
        # within [first_int, last_int].
        nbits = _count_righthand_zero_bits(first_int, ip_bits)
        current = None
        while nbits >= 0:
            addend = 2**nbits - 1
            current = first_int + addend
            nbits -= 1
            if current <= last_int:
                break
        prefix = _get_prefix_length(first_int, current, ip_bits)
        net = ip('%s/%d' % (str(first), prefix))
        networks.append(net)
        if current == ip._ALL_ONES:
            # Avoid integer wraparound at the top of the address space.
            break
        first_int = current + 1
        first = IPAddress(first_int, version=first._version)
    return networks
def _collapse_address_list_recursive(addresses):
    """Loop through the addresses, collapsing concurrent netblocks.

    Example:
        ip1 = IPv4Network('1.1.0.0/24')
        ip2 = IPv4Network('1.1.1.0/24')
        ip3 = IPv4Network('1.1.2.0/24')
        ip4 = IPv4Network('1.1.3.0/24')
        ip5 = IPv4Network('1.1.4.0/24')
        ip6 = IPv4Network('1.1.0.1/22')
        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
        [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]

    This shouldn't be called directly; it is called via
    collapse_address_list([]).

    Args:
        addresses: A list of IPv4Network's or IPv6Network's

    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.
    """
    collapsed = []
    merged_any = False
    for addr in addresses:
        if not collapsed:
            collapsed.append(addr)
        elif addr in collapsed[-1]:
            # Fully contained in the previous network; drop it.
            merged_any = True
        elif addr == collapsed[-1].supernet().subnet()[1]:
            # addr is the upper half of the previous network's supernet;
            # replace both halves with the supernet itself.
            collapsed[-1] = collapsed[-1].supernet()
            merged_any = True
        else:
            collapsed.append(addr)
    # Keep collapsing until a full pass makes no change.
    if merged_any:
        return _collapse_address_list_recursive(collapsed)
    return collapsed
def collapse_address_list(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
          [IPv4('1.1.0.0/23')]

    Args:
        addresses: A list of IPv4Network or IPv6Network objects.

    Returns:
        A list of IPv4Network or IPv6Network objects depending on what we
        were passed.

    Raises:
        TypeError: If passed a list of mixed version objects.
    """
    i = 0
    addrs = []
    ips = []
    nets = []
    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseIP):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            # A single-host network (/32 or /128) is treated as its address.
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip.ip)
        else:
            # Bug fix: this branch compared against nets[-1] but reported
            # ips[-1] in the message; report the conflicting network.
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(nets[-1])))
            nets.append(ip)
    # sort and dedup
    ips = sorted(set(ips))
    nets = sorted(set(nets))
    # Summarize each run of consecutive addresses into networks.
    while i < len(ips):
        (first, last) = _find_address_range(ips[i:])
        i = ips.index(last) + 1
        addrs.extend(summarize_address_range(first, last))
    return _collapse_address_list_recursive(sorted(
        addrs + nets, key=_BaseNet._get_networks_key))
# Backwards-compatible alias for the original camel-case API name.
CollapseAddrList = collapse_address_list
# True only when `bytes` is a real type distinct from `str` (Python 3);
# on Python 2.6+ `bytes is str`, and before 2.6 the name does not exist.
try:
    _compat_has_real_bytes = bytes is not str
except NameError: # <Python2.6
    _compat_has_real_bytes = False
def get_mixed_type_key(obj):
    """Return a key suitable for sorting networks and addresses together.

    Addresses and networks are deliberately not comparable with each other —
    IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24') is meaningless — but
    sorted(..., key=get_mixed_type_key) lets callers order a mixed list
    anyway.

    Args:
        obj: either a Network or Address object.

    Returns:
        An appropriate sort key, or NotImplemented for any other type.
    """
    if isinstance(obj, _BaseNet):
        return obj._get_networks_key()
    if isinstance(obj, _BaseIP):
        return obj._get_address_key()
    return NotImplemented
class _IPAddrBase(object):
    """The mother class."""
    def __index__(self):
        # Lets instances be used wherever an integer is expected
        # (hex(), bin(), slicing) on Python 3.
        return self._ip
    def __int__(self):
        return self._ip
    def __hex__(self):
        # Python 2 hook for hex(); Python 3 uses __index__ instead.
        return hex(self._ip)
    @property
    def exploded(self):
        """Return the longhand version of the IP address as a string."""
        return self._explode_shorthand_ip_string()
    @property
    def compressed(self):
        """Return the shorthand version of the IP address as a string."""
        return str(self)
class _BaseIP(_IPAddrBase):
    """A generic IP object.

    Version-independent behavior shared by single IP addresses:
    comparisons, hashing and string conversion.
    """
    def __init__(self, address):
        # A '/' means the caller passed a network ('1.2.3.0/24'),
        # which is not a valid single address.
        if '/' in str(address):
            raise AddressValueError(address)
    def _check_comparable(self, other):
        # Same checks, in the same order, as the comparison operators have
        # always performed: version first (so an object without _version
        # raises AttributeError), then type.
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
    def __eq__(self, other):
        try:
            return (self._ip == other._ip
                    and self._version == other._version)
        except AttributeError:
            return NotImplemented
    def __ne__(self, other):
        eq = self.__eq__(other)
        return NotImplemented if eq is NotImplemented else not eq
    def __lt__(self, other):
        self._check_comparable(other)
        # For equal integers '<' is already False, so no explicit
        # equality branch is needed.
        return self._ip < other._ip
    def __gt__(self, other):
        self._check_comparable(other)
        return self._ip > other._ip
    def __le__(self, other):
        gt = self.__gt__(other)
        return NotImplemented if gt is NotImplemented else not gt
    def __ge__(self, other):
        lt = self.__lt__(other)
        return NotImplemented if lt is NotImplemented else not lt
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))
    def __str__(self):
        return '%s' % self._string_from_ip_int(self._ip)
    def __hash__(self):
        return hash(hex(self._ip))
    def _get_address_key(self):
        return (self._version, self)
    @property
    def version(self):
        raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):

    """A generic IP network object.

    This IP class contains the version independent methods which are
    used by networks.

    """

    def __init__(self, address):
        # Per-instance cache for derived properties (network, broadcast,
        # hostmask), which are comparatively expensive to compute.
        self._cache = {}

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))

    def iterhosts(self):
        """Generate Iterator over usable hosts in a network.

        This is like __iter__ except it doesn't return the network
        or broadcast addresses.

        """
        cur = int(self.network) + 1
        bcast = int(self.broadcast) - 1
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)

    def __iter__(self):
        cur = int(self.network)
        bcast = int(self.broadcast)
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)

    def __getitem__(self, n):
        # Index n (negative allowed) into the address range.
        network = int(self.network)
        broadcast = int(self.broadcast)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError
            return IPAddress(network + n, version=self._version)
        else:
            n += 1
            if broadcast + n < network:
                raise IndexError
            return IPAddress(broadcast + n, version=self._version)

    def __lt__(self, other):
        # Type check first so a non-network operand raises the intended
        # TypeError rather than AttributeError on other._version.
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if self.network != other.network:
            return self.network < other.network
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False

    def __gt__(self, other):
        # See __lt__ for why the isinstance check comes first.
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if self.network != other.network:
            return self.network > other.network
        if self.netmask != other.netmask:
            return self.netmask > other.netmask
        return False

    def __le__(self, other):
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt

    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt

    def __eq__(self, other):
        try:
            return (self._version == other._version
                    and self.network == other.network
                    and int(self.netmask) == int(other.netmask))
        except AttributeError:
            return NotImplemented

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq

    def __str__(self):
        return '%s/%s' % (str(self.ip),
                          str(self._prefixlen))

    def __hash__(self):
        return hash(int(self.network) ^ int(self.netmask))

    def __contains__(self, other):
        # An IPv4 net can never contain an IPv6 one (and vice versa);
        # without this guard the raw integer comparison below would give
        # meaningless results for mixed versions.
        if self._version != other._version:
            return False
        # dealing with another network: compare its *network* address,
        # not its host address (other._ip), so that e.g.
        # '10.0.0.5/8' is handled consistently.
        if isinstance(other, _BaseNet):
            return (int(self.network) <= int(other.network) and
                    int(self.broadcast) >= int(other.broadcast))
        # dealing with another address
        else:
            return (int(self.network) <= int(other._ip) <=
                    int(self.broadcast))

    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return self.network in other or self.broadcast in other or (
            other.network in self or other.broadcast in self)

    @property
    def network(self):
        # Network address: host bits masked off.  Cached.
        x = self._cache.get('network')
        if x is None:
            x = IPAddress(self._ip & int(self.netmask), version=self._version)
            self._cache['network'] = x
        return x

    @property
    def broadcast(self):
        # Broadcast address: host bits all set.  Cached.
        x = self._cache.get('broadcast')
        if x is None:
            x = IPAddress(self._ip | int(self.hostmask), version=self._version)
            self._cache['broadcast'] = x
        return x

    @property
    def hostmask(self):
        # Bitwise complement of the netmask.  Cached.
        x = self._cache.get('hostmask')
        if x is None:
            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
                          version=self._version)
            self._cache['hostmask'] = x
        return x

    @property
    def with_prefixlen(self):
        return '%s/%d' % (str(self.ip), self._prefixlen)

    @property
    def with_netmask(self):
        return '%s/%s' % (str(self.ip), str(self.netmask))

    @property
    def with_hostmask(self):
        return '%s/%s' % (str(self.ip), str(self.hostmask))

    @property
    def numhosts(self):
        """Number of hosts in the current subnet."""
        return int(self.broadcast) - int(self.network) + 1

    @property
    def version(self):
        raise NotImplementedError('BaseNet has no version')

    @property
    def prefixlen(self):
        return self._prefixlen

    def address_exclude(self, other):
        """Remove an address from a larger block.

        For example:

            addr1 = IP('10.1.1.0/24')
            addr2 = IP('10.1.1.0/26')
            addr1.address_exclude(addr2) =
                [IP('10.1.1.64/26'), IP('10.1.1.128/25')]

        or IPv6:

            addr1 = IP('::1/32')
            addr2 = IP('::1/128')
            addr1.address_exclude(addr2) = [IP('::0/128'),
                IP('::2/127'),
                IP('::4/126'),
                IP('::8/125'),
                ...
                IP('0:0:8000::/33')]

        Args:
            other: An IP object of the same type.

        Returns:
            A sorted list of IP objects addresses which is self minus
            other.

        Raises:
            TypeError: If self and other are of differing address
              versions.
            ValueError: If other is not completely contained by self.

        """
        if not self._version == other._version:
            raise TypeError("%s and %s are not of the same version" % (
                str(self), str(other)))

        if other not in self:
            raise ValueError('%s not contained in %s' % (str(other),
                                                         str(self)))

        ret_addrs = []

        # Make sure we're comparing the network of other.
        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
                          version=other._version)

        # Repeatedly split self in half, keeping the half that does not
        # contain other and descending into the half that does.
        s1, s2 = self.subnet()
        while s1 != other and s2 != other:
            if other in s1:
                ret_addrs.append(s2)
                s1, s2 = s1.subnet()
            elif other in s2:
                ret_addrs.append(s1)
                s1, s2 = s2.subnet()
            else:
                # If we got here, there's a bug somewhere.  Raise
                # explicitly rather than via `assert`, which vanishes
                # under python -O.
                raise AssertionError('Error performing exclusion: '
                                     's1: %s s2: %s other: %s' %
                                     (str(s1), str(s2), str(other)))
        if s1 == other:
            ret_addrs.append(s2)
        elif s2 == other:
            ret_addrs.append(s1)
        else:
            # If we got here, there's a bug somewhere.
            raise AssertionError('Error performing exclusion: '
                                 's1: %s s2: %s other: %s' %
                                 (str(s1), str(s2), str(other)))

        return sorted(ret_addrs, key=_BaseNet._get_networks_key)

    def compare_networks(self, other):
        """Compare two IP objects.

        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA._ip < HostB._ip'

        Args:
            other: An IP object.

        Returns:
            If the IP versions of self and other are the same, returns:

            -1 if self < other:
              eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
              IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
            0 if self == other
              eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
              IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
            1 if self > other
              eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
              IPv6('1080::1:200C:417A/112') >
              IPv6('1080::0:200C:417A/112')

            If the IP versions of self and other are different, returns:

            -1 if self._version < other._version
              eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
            1 if self._version > other._version
              eg: IPv6('::1/128') > IPv4('255.255.255.0/24')

        """
        if self._version < other._version:
            return -1
        if self._version > other._version:
            return 1
        # self._version == other._version below here:
        if self.network < other.network:
            return -1
        if self.network > other.network:
            return 1
        # self.network == other.network below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        # self.network == other.network and self.netmask == other.netmask
        return 0

    def _get_networks_key(self):
        """Network-only key function.

        Returns an object that identifies this address' network and
        netmask. This function is a suitable "key" argument for sorted()
        and list.sort().

        """
        return (self._version, self.network, self.netmask)

    def _ip_int_from_prefix(self, prefixlen=None):
        """Turn the prefix length netmask into a int for comparison.

        Args:
            prefixlen: An integer, the prefix length.

        Returns:
            An integer.

        """
        # A prefixlen of 0 is valid and must not fall through to the
        # instance default.
        if not prefixlen and prefixlen != 0:
            prefixlen = self._prefixlen
        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)

    def _prefix_from_ip_int(self, ip_int, mask=32):
        """Return prefix length from the decimal netmask.

        Counts the trailing zero bits of ip_int; assumes a contiguous
        netmask (callers validate that separately).

        Args:
            ip_int: An integer, the IP address.
            mask: The netmask.  Defaults to 32.

        Returns:
            An integer, the prefix length.

        """
        while mask:
            if ip_int & 1 == 1:
                break
            ip_int >>= 1
            mask -= 1

        return mask

    def _ip_string_from_prefix(self, prefixlen=None):
        """Turn a prefix length into a dotted decimal string.

        Args:
            prefixlen: An integer, the netmask prefix length.

        Returns:
            A string, the dotted decimal netmask string.

        """
        # Treat 0 as an explicit prefix length, consistent with
        # _ip_int_from_prefix (previously 0 silently fell back to the
        # instance default).
        if not prefixlen and prefixlen != 0:
            prefixlen = self._prefixlen
        return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))

    def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
        """The subnets which join to make the current subnet.

        In the case that self contains only one IP
        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), return a list with just ourself.

        Args:
            prefixlen_diff: An integer, the amount the prefix length
              should be increased by. This should not be set if
              new_prefix is also set.
            new_prefix: The desired new prefix length. This must be a
              larger number (smaller prefix) than the existing prefix.
              This should not be set if prefixlen_diff is also set.

        Returns:
            An iterator of IPv(4|6) objects.

        Raises:
            ValueError: The prefixlen_diff is too small or too large.
                OR
            prefixlen_diff and new_prefix are both set or new_prefix
              is a smaller number than the current prefix (smaller
              number means a larger network)

        """
        if self._prefixlen == self._max_prefixlen:
            yield self
            return

        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen

        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff

        if not self._is_valid_netmask(str(new_prefixlen)):
            raise ValueError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, str(self)))

        first = IPNetwork('%s/%s' % (str(self.network),
                                     str(self._prefixlen + prefixlen_diff)),
                          version=self._version)
        yield first

        # Walk sibling subnets until we reach self's broadcast address.
        current = first
        while True:
            broadcast = current.broadcast
            if broadcast == self.broadcast:
                return
            new_addr = IPAddress(int(broadcast) + 1, version=self._version)
            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
                                version=self._version)
            yield current

    def subnet(self, prefixlen_diff=1, new_prefix=None):
        """Return a list of subnets, rather than an iterator."""
        return list(self.iter_subnets(prefixlen_diff, new_prefix))

    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """The supernet containing the current network.

        Args:
            prefixlen_diff: An integer, the amount the prefix length of
              the network should be decreased by.  For example, given a
              /24 network and a prefixlen_diff of 3, a supernet with a
              /21 netmask is returned.

        Returns:
            An IPv4 network object.

        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
              a negative prefix length.
                OR
            If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)

        """
        if self._prefixlen == 0:
            return self

        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix

        if self.prefixlen - prefixlen_diff < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        return IPNetwork('%s/%s' % (str(self.network),
                                    str(self.prefixlen - prefixlen_diff)),
                         version=self._version)

    # backwards compatibility
    Subnet = subnet
    Supernet = supernet
    AddressExclude = address_exclude
    CompareNetworks = compare_networks
    Contains = __contains__
class _BaseV4(object):

    """Version-independent helpers shared by IPv4 addresses and networks."""

    # All 32 bits set -- i.e. 255.255.255.255.
    _ALL_ONES = (2**32) - 1

    def __init__(self, address):
        self._version = 4
        self._max_prefixlen = 32

    def _explode_shorthand_ip_string(self, ip_str=None):
        """IPv4 has no shorthand notation, so this is the identity."""
        return ip_str if ip_str else str(self)

    def _ip_int_from_string(self, ip_str):
        """Convert a dotted-quad string to its packed integer value.

        Args:
            ip_str: A string, the IP address.

        Returns:
            The address packed into a single integer.

        Raises:
            AddressValueError: if the string isn't a valid IP string.

        """
        pieces = ip_str.split('.')
        if len(pieces) != 4:
            raise AddressValueError(ip_str)
        result = 0
        for piece in pieces:
            try:
                result = (result << 8) | int(piece)
            except ValueError:
                raise AddressValueError(ip_str)
        return result

    def _string_from_ip_int(self, ip_int):
        """Convert a 32-bit integer to dotted decimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            The IP address as a string in dotted decimal notation.

        """
        return '.'.join(str((ip_int >> shift) & 0xFF)
                        for shift in (24, 16, 8, 0))

    def _is_valid_ip(self, address):
        """Validate a dotted-quad (or bare integer) IPv4 string.

        Args:
            address: A string, either a quad-dotted IP or an integer
              in string form.

        Returns:
            A boolean, True if the string represents a valid IPv4
            address.

        """
        pieces = address.split('.')
        if len(pieces) == 1:
            # A bare integer rather than dotted-quad notation.
            try:
                return 0 <= int(address) <= self._ALL_ONES
            except ValueError:
                return False
        if len(pieces) != 4:
            return False
        try:
            return all(0 <= int(piece) <= 255 for piece in pieces)
        except ValueError:
            return False

    @property
    def max_prefixlen(self):
        return self._max_prefixlen

    @property
    def packed(self):
        """The address as four big-endian bytes."""
        return struct.pack('!I', self._ip)

    @property
    def version(self):
        return self._version

    @property
    def is_reserved(self):
        """True if the address lies in the IETF-reserved 240.0.0.0/4 block."""
        return self in IPv4Network('240.0.0.0/4')

    @property
    def is_private(self):
        """True for the RFC 1918 private-use ranges."""
        return any(self in IPv4Network(net)
                   for net in ('10.0.0.0/8',
                               '172.16.0.0/12',
                               '192.168.0.0/16'))

    @property
    def is_multicast(self):
        """True for 224.0.0.0/4 multicast addresses (RFC 3171)."""
        return self in IPv4Network('224.0.0.0/4')

    @property
    def is_unspecified(self):
        """True for the unspecified address 0.0.0.0 (RFC 5735 3)."""
        return self in IPv4Network('0.0.0.0')

    @property
    def is_loopback(self):
        """True for 127.0.0.0/8 loopback addresses (RFC 3330)."""
        return self in IPv4Network('127.0.0.0/8')

    @property
    def is_link_local(self):
        """True for 169.254.0.0/16 link-local addresses (RFC 3927)."""
        return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):

    """Represent and manipulate single IPv4 Addresses."""

    def __init__(self, address):
        """Build an address from an int, packed bytes, or a string.

        Args:
            address: A string or integer representing the IP
              '192.168.1.1'

              Additionally, an integer can be passed, so
              IPv4Address('192.168.1.1') == IPv4Address(3232235777).
              or, more generally
              IPv4Address(int(IPv4Address('192.168.1.1'))) ==
                IPv4Address('192.168.1.1')

        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.

        """
        _BaseIP.__init__(self, address)
        _BaseV4.__init__(self, address)

        # Fast path: a plain (possibly long) integer.
        if isinstance(address, (int, long)):
            if not 0 <= address <= self._ALL_ONES:
                raise AddressValueError(address)
            self._ip = address
            return

        # Packed 4-byte string (Python 2.6+ bytes).
        if _compat_has_real_bytes and isinstance(address, bytes) \
                and len(address) == 4:
            self._ip = struct.unpack('!I', address)[0]
            return

        # Anything else: stringify and parse as dotted-quad.
        ip_str = str(address)
        if not self._is_valid_ip(ip_str):
            raise AddressValueError(ip_str)
        self._ip = self._ip_int_from_string(ip_str)
class IPv4Network(_BaseV4, _BaseNet):

    """This class represents and manipulates 32-bit IPv4 networks.

    Attributes: [examples for IPv4Network('1.2.3.4/27')]
        ._ip: 16909060
        .ip: IPv4Address('1.2.3.4')
        .network: IPv4Address('1.2.3.0')
        .hostmask: IPv4Address('0.0.0.31')
        .broadcast: IPv4Address('1.2.3.31')
        .netmask: IPv4Address('255.255.255.224')
        .prefixlen: 27

    """

    # the valid octets for host and netmasks. only useful for IPv4.
    _valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))

    def __init__(self, address, strict=False):
        """Instantiate a new IPv4 network object.

        Args:
            address: A string or integer representing the IP [& network].
              '192.168.1.1/24'
              '192.168.1.1/255.255.255.0'
              '192.168.1.1/0.0.0.255'
              are all functionally the same in IPv4. Similarly,
              '192.168.1.1'
              '192.168.1.1/255.255.255.255'
              '192.168.1.1/32'
              are also functionally equivalent. That is to say, failing to
              provide a subnetmask will create an object with a mask of /32.

              If the mask (portion after the / in the argument) is given in
              dotted quad form, it is treated as a netmask if it starts with a
              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
              starts with a zero field (e.g. 0.255.255.255 == /8), with the
              single exception of an all-zero mask which is treated as a
              netmask == /0. If no mask is given, a default of /32 is used.

              Additionally, an integer can be passed, so
              IPv4Network('192.168.1.1') == IPv4Network(3232235777).
              or, more generally
              IPv4Network(int(IPv4Network('192.168.1.1'))) ==
                IPv4Network('192.168.1.1')

            strict: A boolean. If true, ensure that we have been passed
              A true network address, eg, 192.168.1.0/24 and not an
              IP address on a network, eg, 192.168.1.1/24.

        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv4 address.
            ValueError: If strict was True and a network address was not
              supplied.

        """
        _BaseNet.__init__(self, address)
        _BaseV4.__init__(self, address)

        # Efficient constructor from integer.
        if isinstance(address, (int, long)):
            self._ip = address
            self.ip = IPv4Address(self._ip)
            self._prefixlen = 32
            self.netmask = IPv4Address(self._ALL_ONES)
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            return

        # Constructing from a packed address
        if _compat_has_real_bytes:
            if isinstance(address, bytes) and len(address) == 4:
                self._ip = struct.unpack('!I', address)[0]
                self.ip = IPv4Address(self._ip)
                self._prefixlen = 32
                self.netmask = IPv4Address(self._ALL_ONES)
                return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = str(address).split('/')

        if len(addr) > 2:
            raise AddressValueError(address)

        if not self._is_valid_ip(addr[0]):
            raise AddressValueError(addr[0])

        self._ip = self._ip_int_from_string(addr[0])
        self.ip = IPv4Address(self._ip)

        if len(addr) == 2:
            mask = addr[1].split('.')
            if len(mask) == 4:
                # We have dotted decimal netmask.
                if self._is_valid_netmask(addr[1]):
                    self.netmask = IPv4Address(self._ip_int_from_string(
                        addr[1]))
                elif self._is_hostmask(addr[1]):
                    self.netmask = IPv4Address(
                        self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
                else:
                    raise NetmaskValueError('%s is not a valid netmask'
                                            % addr[1])

                self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
            else:
                # We have a netmask in prefix length form.
                if not self._is_valid_netmask(addr[1]):
                    raise NetmaskValueError(addr[1])
                self._prefixlen = int(addr[1])
                self.netmask = IPv4Address(self._ip_int_from_prefix(
                    self._prefixlen))
        else:
            self._prefixlen = 32
            self.netmask = IPv4Address(self._ip_int_from_prefix(
                self._prefixlen))
        if strict:
            if self.ip != self.network:
                raise ValueError('%s has host bits set' %
                                 self.ip)

    def _is_hostmask(self, ip_str):
        """Test if the IP string is a hostmask (rather than a netmask).

        Args:
            ip_str: A string, the potential hostmask.

        Returns:
            A boolean, True if the IP string is a hostmask.

        """
        bits = ip_str.split('.')
        try:
            parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
        except ValueError:
            return False
        if len(parts) != len(bits):
            return False
        # A hostmask's octets increase left-to-right (e.g. 0.0.0.255);
        # a netmask's decrease.
        if parts[0] < parts[-1]:
            return True
        return False

    def _is_valid_netmask(self, netmask):
        """Verify that the netmask is valid.

        Args:
            netmask: A string, either a prefix or dotted decimal
              netmask.

        Returns:
            A boolean, True if the prefix represents a valid IPv4
            netmask.

        """
        mask = netmask.split('.')
        if len(mask) == 4:
            # Convert up front so a non-numeric octet (e.g. '1.2.3.x')
            # yields False instead of an uncaught ValueError.
            try:
                octets = [int(x) for x in mask]
            except ValueError:
                return False
            if [x for x in octets if x not in self._valid_mask_octets]:
                return False
            # Octets must be non-increasing left to right.  Compare as
            # integers: the previous string comparison only worked by
            # coincidence of the valid octets' spellings.
            if [y for idx, y in enumerate(octets) if idx > 0 and
                    y > octets[idx - 1]]:
                return False
            return True
        try:
            netmask = int(netmask)
        except ValueError:
            return False
        return 0 <= netmask <= 32

    # backwards compatibility
    IsRFC1918 = lambda self: self.is_private
    IsMulticast = lambda self: self.is_multicast
    IsLoopback = lambda self: self.is_loopback
    IsLinkLocal = lambda self: self.is_link_local
class _BaseV6(object):

    """Base IPv6 object.

    The following methods are used by IPv6 objects in both single IP
    addresses and networks.

    """

    # All 128 bits set.
    _ALL_ONES = (2**128) - 1

    def __init__(self, address):
        self._version = 6
        self._max_prefixlen = 128

    def _ip_int_from_string(self, ip_str=None):
        """Turn an IPv6 ip_str into an integer.

        Args:
            ip_str: A string, the IPv6 ip_str.  Defaults to str(self.ip)
              (only meaningful on network objects, which have .ip).

        Returns:
            A long, the IPv6 ip_str.

        Raises:
            AddressValueError: if ip_str isn't a valid IP Address.

        """
        if not ip_str:
            ip_str = str(self.ip)

        ip_int = 0

        fields = self._explode_shorthand_ip_string(ip_str).split(':')

        # Do we have an IPv4 mapped (::ffff:a.b.c.d) or compact (::a.b.c.d)
        # ip_str?  If so, fold the dotted quad into two 16-bit hex fields.
        if fields[-1].count('.') == 3:
            ipv4_string = fields.pop()
            ipv4_int = IPv4Network(ipv4_string)._ip
            octets = []
            for _ in xrange(2):
                octets.append(hex(ipv4_int & 0xFFFF).lstrip('0x').rstrip('L'))
                ipv4_int >>= 16
            fields.extend(reversed(octets))

        for field in fields:
            try:
                # An empty field ('' left over from '::') counts as 0.
                ip_int = (ip_int << 16) + int(field or '0', 16)
            except ValueError:
                raise AddressValueError(ip_str)

        return ip_int

    def _compress_hextets(self, hextets):
        """Compresses a list of hextets.

        Compresses a list of strings, replacing the longest continuous
        sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the string such that subsequently
        calling ":".join(hextets) will produce the compressed version of
        the IPv6 address.

        Args:
            hextets: A list of strings, the hextets to compress.

        Returns:
            A list of strings.

        """
        best_doublecolon_start = -1
        best_doublecolon_len = 0
        doublecolon_start = -1
        doublecolon_len = 0
        for index in range(len(hextets)):
            if hextets[index] == '0':
                doublecolon_len += 1
                if doublecolon_start == -1:
                    # Start of a sequence of zeros.
                    doublecolon_start = index
                if doublecolon_len > best_doublecolon_len:
                    # This is the longest sequence of zeros so far.
                    best_doublecolon_len = doublecolon_len
                    best_doublecolon_start = doublecolon_start
            else:
                doublecolon_len = 0
                doublecolon_start = -1

        if best_doublecolon_len > 1:
            best_doublecolon_end = (best_doublecolon_start +
                                    best_doublecolon_len)
            # For zeros at the end of the address.
            if best_doublecolon_end == len(hextets):
                hextets += ['']
            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
            # For zeros at the beginning of the address.
            if best_doublecolon_start == 0:
                hextets = [''] + hextets

        return hextets

    def _string_from_ip_int(self, ip_int=None):
        """Turns a 128-bit integer into hexadecimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            A string, the hexadecimal representation of the address.

        Raises:
            ValueError: The address is bigger than 128 bits of all ones.

        """
        if not ip_int and ip_int != 0:
            ip_int = int(self._ip)

        if ip_int > self._ALL_ONES:
            raise ValueError('IPv6 address is too large')

        hex_str = '%032x' % ip_int
        hextets = []
        for x in range(0, 32, 4):
            hextets.append('%x' % int(hex_str[x:x+4], 16))

        hextets = self._compress_hextets(hextets)
        return ':'.join(hextets)

    def _explode_shorthand_ip_string(self, ip_str=None):
        """Expand a shortened IPv6 address.

        Args:
            ip_str: A string, the IPv6 address.

        Returns:
            A string, the expanded IPv6 address.

        """
        if not ip_str:
            ip_str = str(self)
            if isinstance(self, _BaseNet):
                ip_str = str(self.ip)

        if self._is_shorthand_ip(ip_str):
            new_ip = []
            hextet = ip_str.split('::')
            sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
            new_ip = hextet[0].split(':')

            # Fill in the '::' gap with as many zero hextets as needed
            # to reach 8 fields.
            for _ in xrange(8 - sep):
                new_ip.append('0000')
            new_ip += hextet[1].split(':')

            # Now need to make sure every hextet is 4 lower case characters.
            # If a hextet is < 4 characters, we've got missing leading 0's.
            ret_ip = []
            for hextet in new_ip:
                ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
            return ':'.join(ret_ip)
        # We've already got a longhand ip_str.
        return ip_str

    def _is_valid_ip(self, ip_str):
        """Ensure we have a valid IPv6 address.

        Probably not as exhaustive as it should be.

        Args:
            ip_str: A string, the IPv6 address.

        Returns:
            A boolean, True if this is a valid IPv6 address.

        """
        # We need to have at least one ':'.
        if ':' not in ip_str:
            return False

        # We can only have one '::' shortener.
        if ip_str.count('::') > 1:
            return False

        # '::' should be encompassed by start, digits or end.
        if ':::' in ip_str:
            return False

        # A single colon can neither start nor end an address.
        if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
                (ip_str.endswith(':') and not ip_str.endswith('::'))):
            return False

        # If we have no concatenation, we need to have 8 fields with 7 ':'.
        if '::' not in ip_str and ip_str.count(':') != 7:
            # We might have an IPv4 mapped address.
            if ip_str.count('.') != 3:
                return False

        ip_str = self._explode_shorthand_ip_string(ip_str)

        # Now that we have that all squared away, let's check that each of the
        # hextets are between 0x0 and 0xFFFF.
        for hextet in ip_str.split(':'):
            if hextet.count('.') == 3:
                # If we have an IPv4 mapped address, the IPv4 portion has to
                # be at the end of the IPv6 portion.
                if not ip_str.split(':')[-1] == hextet:
                    return False
                try:
                    IPv4Network(hextet)
                except AddressValueError:
                    return False
            else:
                try:
                    # a value error here means that we got a bad hextet,
                    # something like 0xzzzz
                    if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                        return False
                except ValueError:
                    return False
        return True

    def _is_shorthand_ip(self, ip_str=None):
        """Determine if the address is shortened.

        Args:
            ip_str: A string, the IPv6 address.

        Returns:
            A boolean, True if the address is shortened.

        """
        if ip_str.count('::') == 1:
            return True
        return False

    @property
    def max_prefixlen(self):
        return self._max_prefixlen

    @property
    def packed(self):
        """The binary representation of this address."""
        return struct.pack('!QQ', self._ip >> 64, self._ip & (2**64 - 1))

    @property
    def version(self):
        return self._version

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is a multicast address.

        See RFC 2373 2.7 for details.

        """
        return self in IPv6Network('ff00::/8')

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.

        """
        return (self in IPv6Network('::/8') or
                self in IPv6Network('100::/8') or
                self in IPv6Network('200::/7') or
                self in IPv6Network('400::/6') or
                self in IPv6Network('800::/5') or
                self in IPv6Network('1000::/4') or
                self in IPv6Network('4000::/3') or
                self in IPv6Network('6000::/3') or
                self in IPv6Network('8000::/3') or
                self in IPv6Network('A000::/3') or
                self in IPv6Network('C000::/3') or
                self in IPv6Network('E000::/4') or
                self in IPv6Network('F000::/5') or
                self in IPv6Network('F800::/6') or
                self in IPv6Network('FE00::/9'))

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.

        """
        return self == IPv6Network('::')

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.

        """
        return self == IPv6Network('::1')

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is reserved per RFC 4291.

        """
        return self in IPv6Network('fe80::/10')

    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.

        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.

        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.

        """
        return self in IPv6Network('fec0::/10')

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per RFC 4193.

        """
        return self in IPv6Network('fc00::/7')

    @property
    def ipv4_mapped(self):
        """Return the IPv4 mapped address.

        Returns:
            If the IPv6 address is a v4 mapped address, return the
            IPv4 mapped address. Return None otherwise.

        """
        hextets = self._explode_shorthand_ip_string().split(':')
        if hextets[-3] != 'ffff':
            return None
        try:
            return IPv4Address(int('%s%s' % (hextets[-2], hextets[-1]), 16))
        except AddressValueError:
            # The trailing 32 bits weren't a valid IPv4 value.  (The
            # original caught the undefined name 'IPv4IpvalidationError',
            # which turned this path into a NameError.)
            return None
class IPv6Address(_BaseV6, _BaseIP):

    """Represent and manipulate single IPv6 Addresses."""

    def __init__(self, address):
        """Instantiate a new IPv6 address object.

        Args:
            address: A string or integer representing the IP.

              Additionally, an integer can be passed, so
              IPv6Address('2001:4860::') ==
                IPv6Address(42541956101370907050197289607612071936L).
              or, more generally
              IPv6Address(IPv6Address('2001:4860::')._ip) ==
                IPv6Address('2001:4860::')

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.

        """
        _BaseIP.__init__(self, address)
        _BaseV6.__init__(self, address)

        # Fast path: a plain (possibly long) integer.
        if isinstance(address, (int, long)):
            if not 0 <= address <= self._ALL_ONES:
                raise AddressValueError(address)
            self._ip = address
            return

        # Packed 16-byte string (Python 2.6+ bytes).
        if _compat_has_real_bytes and isinstance(address, bytes) \
                and len(address) == 16:
            high, low = struct.unpack('!QQ', address)
            self._ip = (high << 64) | low
            return

        # Anything else: stringify and parse.
        ip_str = str(address)
        if not ip_str:
            raise AddressValueError('')
        self._ip = self._ip_int_from_string(ip_str)
class IPv6Network(_BaseV6, _BaseNet):

    """Represents and manipulates 128-bit IPv6 networks.

    Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
        .ip: IPv6Address('2001:658:22a:cafe:200::1')
        .network: IPv6Address('2001:658:22a:cafe::')
        .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
        .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
        .prefixlen: 64

    """

    def __init__(self, address, strict=False):
        """Instantiate a new IPv6 Network object.

        Args:
            address: A string or integer representing the IPv6 network
              or the IP and prefix/netmask.
              '2001:4860::/128', '2001:4860:0000:0000:0000:0000:0000:0000/128'
              and '2001:4860::' are all functionally the same; omitting
              the mask yields a /128.  An integer (or a 16-byte packed
              string on Python 2.6+) is also accepted.
            strict: A boolean.  If true, require a true network address
              (no host bits set), e.g. 2001:4860::/112 rather than
              2001:4860::1/112.

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.

        """
        _BaseNet.__init__(self, address)
        _BaseV6.__init__(self, address)

        # Fast path: a plain (possibly long) integer.
        if isinstance(address, (int, long)):
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            self._ip = address
            self.ip = IPv6Address(self._ip)
            self._prefixlen = 128
            self.netmask = IPv6Address(self._ALL_ONES)
            return

        # Packed 16-byte string (Python 2.6+ bytes).
        if _compat_has_real_bytes and isinstance(address, bytes) \
                and len(address) == 16:
            high, low = struct.unpack('!QQ', address)
            self._ip = (high << 64) | low
            self.ip = IPv6Address(self._ip)
            self._prefixlen = 128
            self.netmask = IPv6Address(self._ALL_ONES)
            return

        # Otherwise treat it as an 'address[/prefix]' string.
        addr = str(address).split('/')
        if len(addr) > 2:
            raise AddressValueError(address)
        if not self._is_valid_ip(addr[0]):
            raise AddressValueError(addr[0])

        if len(addr) == 2:
            if not self._is_valid_netmask(addr[1]):
                raise NetmaskValueError(addr[1])
            self._prefixlen = int(addr[1])
        else:
            self._prefixlen = 128

        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
        self._ip = self._ip_int_from_string(addr[0])
        self.ip = IPv6Address(self._ip)

        if strict and self.ip != self.network:
            raise ValueError('%s has host bits set' % self.ip)

    def _is_valid_netmask(self, prefixlen):
        """Return True if prefixlen is a valid IPv6 prefix length (0-128)."""
        try:
            return 0 <= int(prefixlen) <= 128
        except ValueError:
            return False
|
# Input/output locations for the congress-bills preprocessing run.
# `directory` is a prefix for the input files; empty means the current directory.
directory = ''
text_file = directory + 'billtext_org.json'
labels_file = directory + 'train.json'
output_dir = '/Users/katya/datasets/congress_bills_2/'

import sys
def skip_ahead_n_quotes(line, char_counter, maximum):
    """Return the index one past the `maximum`-th double-quote character
    found in `line` at or after position `char_counter`."""
    quotes_seen = 0
    while quotes_seen < maximum:
        # Slicing (rather than indexing) avoids IndexError when the counter
        # runs past the end of the line.
        if line[char_counter:char_counter + 1] == '"':
            quotes_seen += 1
        char_counter += 1
    return char_counter
def parse_inside_char(line, char_counter, char):
    """Collect characters of `line` starting at `char_counter` up to (but not
    including) the first occurrence of `char`.

    Returns (collected_string, index_of_terminator)."""
    collected = []
    while line[char_counter] != char:
        collected.append(line[char_counter])
        char_counter += 1
    return ''.join(collected), char_counter
def rm_newlines(string):
    """Replace literal backslash-n escape sequences with spaces and collapse
    runs of spaces down to single spaces."""
    # string.replace('\\\n', ' ')
    # The input contains the two-character sequence backslash + 'n' (an escaped
    # newline from the JSON dump), not an actual newline character.
    string = string.replace('\\' + 'n', ' ')
    # Collapse repeated spaces.  A while loop handles arbitrarily long runs;
    # the previous fixed 9-pass replace loop could leave doubled spaces behind
    # for extremely long runs.
    while '  ' in string:
        string = string.replace('  ', ' ')
    return string
def tokenize(line):
    """Split `line` on single spaces, stripping whitespace from the final
    token, and return the tokens as a tuple.

    Consecutive spaces produce empty tokens, matching a character-by-character
    split."""
    tokens = line.split(' ')
    # Only the last token carries trailing whitespace (e.g. the newline).
    tokens[-1] = tokens[-1].strip()
    return tuple(tokens)
# Build a mapping from bill name -> concatenated (title + body) text, parsed
# out of the hand-rolled JSON-ish lines of `text_file`.
d = {}
for line in open(text_file):
    if "\"\"" in line:
        # NOTE(review): `name` here is left over from the previous loop
        # iteration; if the very first line contained "" this would raise
        # NameError -- confirm the input never starts with such a record.
        d[name] = ''
    else:
        # d = json.load(json_data)
        # print d
        char_counter = 0
        # print "success"
        # The record name is everything up to the first tab.
        name, char_counter = parse_inside_char(line, char_counter, '\t')
        # print 'parse'
        if '\"body\"' in line:
            # Skip past the two quotes around the "body" key, then the
            # `": "` separator, and capture the quoted body text.
            char_counter = skip_ahead_n_quotes(line, char_counter, 2)
            # print 'skip ahead'
            char_counter += 3
            body, char_counter = parse_inside_char(line, char_counter, '\"')
            # print 'parsed'
        else:
            body = ''
        # Skip ahead to the quoted title field and capture it.
        char_counter = skip_ahead_n_quotes(line, char_counter, 3)
        char_counter += 3
        # print 'skip 2'
        title, char_counter = parse_inside_char(line, char_counter, '\"')
        # print 'parsed2'
        d[name] = rm_newlines(title) + ' ' + rm_newlines(body)
print 'quit'

# Emit aligned data/label files: one line of bill text and one label per
# labeled example, skipping examples whose text was not found above.
with open(labels_file, 'r') as labels, open(output_dir + 'train.data', 'w') as data_out, open(output_dir + 'train.labels', 'w') as labels_out:
    for line in labels:
        line = line.replace('\t', ' ')
        example_name, label = tokenize(line)
        try:
            data_out.write(d[example_name] + '\n')
        except KeyError:
            # No text for this example; report it and skip its label so the
            # two output files stay line-aligned.
            print example_name
        else:
            labels_out.write(label + '\n')
        sys.stdout.flush()
print 'done'
|
"""Create a Python package of the Linux guest environment."""
import glob
import sys
import setuptools
install_requires = ['setuptools']
if sys.version_info < (3, 0):
install_requires += ['boto']
if sys.version_info >= (3, 7):
install_requires += ['distro']
setuptools.setup(
author='Google Compute Engine Team',
author_email='gc-team@google.com',
description='Google Compute Engine',
include_package_data=True,
install_requires=install_requires,
license='Apache Software License',
long_description='Google Compute Engine guest environment.',
name='google-compute-engine',
packages=setuptools.find_packages(),
url='https://github.com/GoogleCloudPlatform/compute-image-packages',
version='20191112.0',
# Entry points create scripts in /usr/bin that call a function.
entry_points={
'console_scripts': [
'google_accounts_daemon=google_compute_engine.accounts.accounts_daemon:main',
'google_clock_skew_daemon=google_compute_engine.clock_skew.clock_skew_daemon:main',
'google_instance_setup=google_compute_engine.instance_setup.instance_setup:main',
'google_network_daemon=google_compute_engine.networking.network_daemon:main',
'google_metadata_script_runner=google_compute_engine.metadata_scripts.script_manager:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
],
)
|
from webapp.web import Application
from handlers.index import IndexHandler
from handlers.register import RegisterHandler
from handlers.user import UserHandler
from handlers.signin import SigninHandler
from handlers.signout import SignoutHandler
from handlers.upload import UploadHandler
from handlers.avatar import AvatarHandler
from handlers.error import ErrorHandler
from handlers.password import PasswordHandler
from handlers.ftypeerror import FiletypeErrorHandler
# Route table: (URL pattern, handler class name).  The names are resolved
# against this module's globals() when the Application is constructed.
URLS = (
    ("/", "IndexHandler"),
    ("/register?", "RegisterHandler"),
    ("/user", "UserHandler"),
    ("/signin", "SigninHandler"),
    ("/signout", "SignoutHandler"),
    ("/upload", "UploadHandler"),
    ("/avatar/(.*)", "AvatarHandler"),
    ("/error", "ErrorHandler"),
    ("/pwdchange", "PasswordHandler"),
    ("/ftypeerror", "FiletypeErrorHandler")
)

if __name__ == '__main__':
    # Build the application from this module's namespace and start serving.
    app = Application(globals(), URLS)
    app.run()
|
from __future__ import print_function
import argparse
from os import path, environ
from subprocess import check_output, CalledProcessError
from sys import stderr
parser = argparse.ArgumentParser()
parser.add_argument('--repository', help='maven repository id')
parser.add_argument('--url', help='maven repository url')
parser.add_argument('-o')
parser.add_argument('-a', help='action (valid actions are: install,deploy)')
parser.add_argument('-v', help='gerrit version')
parser.add_argument('-s', action='append', help='triplet of artifactId:type:path')
args = parser.parse_args()

if not args.v:
    print('version is empty', file=stderr)
    exit(1)

# Find the repository root by walking upwards from this file until the Bazel
# WORKSPACE marker file appears.
root = path.abspath(__file__)
while not path.exists(path.join(root, 'WORKSPACE')):
    root = path.dirname(root)

# Build the base mvn command line for the requested action.
if 'install' == args.a:
    cmd = [
        'mvn',
        'install:install-file',
        '-Dversion=%s' % args.v,
    ]
elif 'deploy' == args.a:
    cmd = [
        'mvn',
        'gpg:sign-and-deploy-file',
        '-Dversion=%s' % args.v,
        '-DrepositoryId=%s' % args.repository,
        '-Durl=%s' % args.url,
    ]
else:
    print("unknown action -a %s" % args.a, file=stderr)
    exit(1)

# Run mvn once per artifact triplet given with -s (artifactId:type:path).
for spec in args.s:
    artifact, packaging_type, src = spec.split(':')
    exe = cmd + [
        '-DpomFile=%s' % path.join(root, 'tools', 'maven',
                                   '%s_pom.xml' % artifact),
        '-Dpackaging=%s' % packaging_type,
        '-Dfile=%s' % src,
    ]
    try:
        if environ.get('VERBOSE'):
            print(' '.join(exe), file=stderr)
        check_output(exe)
    except Exception as e:
        print('%s command failed: %s\n%s' % (args.a, ' '.join(exe), e),
              file=stderr)
        if environ.get('VERBOSE') and isinstance(e, CalledProcessError):
            print('Command output\n%s' % e.output, file=stderr)
        exit(1)
def _write_summary(fd):
    """Write the repository/url/version summary for this run to *fd*."""
    if args.repository:
        print('Repository: %s' % args.repository, file=fd)
    if args.url:
        print('URL: %s' % args.url, file=fd)
    print('Version: %s' % args.v, file=fd)


if args.o:
    # We opened this file ourselves, so close it deterministically when done.
    with open(args.o, 'w') as fd:
        _write_summary(fd)
else:
    # BUG FIX: the previous code did `with stderr as fd:` even when no -o file
    # was given, which closed the process-wide sys.stderr stream on exiting
    # the block.  Write to stderr without entering it as a context manager.
    _write_summary(stderr)
|
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable, Iterator, Optional
import setproctitle
from pants.base.exiter import Exiter
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
    """A specification for how to handle a fixed set of nonfatal signals.

    This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
    handling behavior is modified for different pants processes, for example in the remote client when
    pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
    signal was received, then exiting with failure.

    Note that the terminal will convert a ctrl-c from the user into a SIGINT.
    """

    @property
    def signal_handler_mapping(self):
        """A dict mapping (signal number) -> (a method handling the signal)."""
        # Could use an enum here, but we never end up doing any matching on the specific signal value,
        # instead just iterating over the registered signals to set handlers, so a dict is probably
        # better.
        return {
            signal.SIGINT: self._handle_sigint_if_enabled,
            signal.SIGQUIT: self.handle_sigquit,
            signal.SIGTERM: self.handle_sigterm,
        }

    def __init__(self):
        # Guards the two SIGINT-suppression fields below, which may be toggled
        # from threads other than the one receiving the signal.
        self._ignore_sigint_lock = threading.Lock()
        # Count of threads currently inside the _ignoring_sigint() context.
        self._threads_ignoring_sigint = 0
        # Separate suppression flag toggled by the v2 engine.
        self._ignoring_sigint_v2_engine = False

    def _check_sigint_gate_is_correct(self):
        assert (
            self._threads_ignoring_sigint >= 0
        ), "This should never happen, someone must have modified the counter outside of SignalHandler."

    def _handle_sigint_if_enabled(self, signum, _frame):
        # Read both suppression flags under the lock, but dispatch to
        # handle_sigint() outside it (handle_sigint raises).
        with self._ignore_sigint_lock:
            self._check_sigint_gate_is_correct()
            threads_ignoring_sigint = self._threads_ignoring_sigint
            ignoring_sigint_v2_engine = self._ignoring_sigint_v2_engine
        if threads_ignoring_sigint == 0 and not ignoring_sigint_v2_engine:
            self.handle_sigint(signum, _frame)

    def _toggle_ignoring_sigint_v2_engine(self, toggle: bool):
        with self._ignore_sigint_lock:
            self._ignoring_sigint_v2_engine = toggle

    @contextmanager
    def _ignoring_sigint(self):
        # Increment/decrement the suppression counter around the wrapped block;
        # the counter is re-validated on the way out.
        with self._ignore_sigint_lock:
            self._check_sigint_gate_is_correct()
            self._threads_ignoring_sigint += 1
        try:
            yield
        finally:
            with self._ignore_sigint_lock:
                self._threads_ignoring_sigint -= 1
                self._check_sigint_gate_is_correct()

    def handle_sigint(self, signum, _frame):
        raise KeyboardInterrupt("User interrupted execution with control-c!")

    # TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
    # this exception!
    class SignalHandledNonLocalExit(Exception):
        """Raised in handlers for non-fatal signals to overcome Python limitations.

        When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and
        causes the signal handler to return. We want to (eventually) exit after these signals, not
        ignore them, so we raise this exception instead and check it in our sys.excepthook override.
        """

        def __init__(self, signum, signame):
            self.signum = signum
            self.signame = signame
            # Capture the stack at signal time so it can be written to the exception log.
            self.traceback_lines = traceback.format_stack()
            super(SignalHandler.SignalHandledNonLocalExit, self).__init__()

    def handle_sigquit(self, signum, _frame):
        raise self.SignalHandledNonLocalExit(signum, "SIGQUIT")

    def handle_sigterm(self, signum, _frame):
        raise self.SignalHandledNonLocalExit(signum, "SIGTERM")
class ExceptionSink:
    """A mutable singleton object representing where exceptions should be logged to."""

    # NB: see the bottom of this file where we call reset_log_location() and other mutators in order
    # to properly setup global state.
    _log_dir = None
    # We need an exiter in order to know what to do after we log a fatal exception or handle a
    # catchable signal.
    _exiter: Optional[Exiter] = None
    # Where to log stacktraces to in a SIGUSR2 handler.
    _interactive_output_stream = None
    # Whether to print a stacktrace in any fatal error message printed to the terminal.
    _should_print_backtrace_to_terminal = True
    # An instance of `SignalHandler` which is invoked to handle a static set of specific
    # nonfatal signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they
    # don't need to exit immediately).
    _signal_handler: Optional[SignalHandler] = None
    # These persistent open file descriptors are kept so the signal handler can do almost no work
    # (and lets faulthandler figure out signal safety).
    _pid_specific_error_fileobj = None
    _shared_error_fileobj = None

    def __new__(cls, *args, **kwargs):
        # This class is purely a namespace for mutable class-level state; forbid instances.
        raise TypeError("Instances of {} are not allowed to be constructed!".format(cls.__name__))

    class ExceptionSinkError(Exception):
        pass

    @classmethod
    def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
        """Set whether a backtrace gets printed to the terminal error stream on a fatal error.

        Class state:
        - Overwrites `cls._should_print_backtrace_to_terminal`.
        """
        cls._should_print_backtrace_to_terminal = should_print_backtrace

    # All reset_* methods are ~idempotent!
    @classmethod
    def reset_log_location(cls, new_log_location: str) -> None:
        """Re-acquire file handles to error logs based in the new location.

        Class state:
        - Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
          `cls._shared_error_fileobj`.
        OS state:
        - May create a new directory.
        - Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).

        :raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
                 writable.
        """
        # We could no-op here if the log locations are the same, but there's no reason not to have the
        # additional safety of re-acquiring file descriptors each time (and erroring out early if the
        # location is no longer writable).
        try:
            safe_mkdir(new_log_location)
        except Exception as e:
            raise cls.ExceptionSinkError(
                "The provided log location path at '{}' is not writable or could not be created: {}.".format(
                    new_log_location, str(e)
                ),
                e,
            )

        pid = os.getpid()
        pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
        shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
        assert pid_specific_log_path != shared_log_path
        try:
            # The pid-specific log is truncated ("w"); the shared log is appended to ("a").
            pid_specific_error_stream = safe_open(pid_specific_log_path, mode="w")
            shared_error_stream = safe_open(shared_log_path, mode="a")
        except Exception as e:
            raise cls.ExceptionSinkError(
                "Error opening fatal error log streams for log location '{}': {}".format(
                    new_log_location, str(e)
                )
            )

        # NB: mutate process-global state!
        if faulthandler.is_enabled():
            logger.debug("re-enabling faulthandler")
            # Call Py_CLEAR() on the previous error stream:
            # https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
            faulthandler.disable()
        # Send a stacktrace to this file if interrupted by a fatal error.
        faulthandler.enable(file=pid_specific_error_stream, all_threads=True)

        # NB: mutate the class variables!
        cls._log_dir = new_log_location
        cls._pid_specific_error_fileobj = pid_specific_error_stream
        cls._shared_error_fileobj = shared_error_stream

    class AccessGlobalExiterMixin:
        # Mixin granting subclasses read-only access to the globally-registered Exiter.
        @property
        def _exiter(self) -> Optional[Exiter]:
            return ExceptionSink.get_global_exiter()

    @classmethod
    def get_global_exiter(cls) -> Optional[Exiter]:
        return cls._exiter

    @classmethod
    @contextmanager
    def exiter_as(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
        """Temporarily override the global exiter.

        NB: We don't want to try/finally here, because we want exceptions to propagate
        with the most recent exiter installed in sys.excepthook.
        If we wrap this in a try:finally, exceptions will be caught and exiters unset.
        """
        previous_exiter = cls._exiter
        new_exiter = new_exiter_fun(previous_exiter)
        cls._reset_exiter(new_exiter)
        yield
        cls._reset_exiter(previous_exiter)

    @classmethod
    @contextmanager
    def exiter_as_until_exception(
        cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]
    ) -> Iterator[None]:
        """Temporarily override the global exiter, except this will unset it when an exception
        happens."""
        previous_exiter = cls._exiter
        new_exiter = new_exiter_fun(previous_exiter)
        try:
            cls._reset_exiter(new_exiter)
            yield
        finally:
            cls._reset_exiter(previous_exiter)

    @classmethod
    def _reset_exiter(cls, exiter: Optional[Exiter]) -> None:
        """Class state:
        - Overwrites `cls._exiter`.
        Python state:
        - Overwrites sys.excepthook.
        """
        logger.debug(f"overriding the global exiter with {exiter} (from {cls._exiter})")
        # NB: mutate the class variables! This is done before mutating the exception hook, because the
        # uncaught exception handler uses cls._exiter to exit.
        cls._exiter = exiter
        # NB: mutate process-global state!
        sys.excepthook = cls._log_unhandled_exception_and_exit

    @classmethod
    def reset_interactive_output_stream(
        cls, interactive_output_stream, override_faulthandler_destination=True
    ):
        """Class state:
        - Overwrites `cls._interactive_output_stream`.
        OS state:
        - Overwrites the SIGUSR2 handler.

        This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
        stacktrace retrieval. This is also where the error message on fatal exit will be printed to.
        """
        try:
            # NB: mutate process-global state!
            # This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
            if override_faulthandler_destination:
                faulthandler.register(
                    signal.SIGUSR2, interactive_output_stream, all_threads=True, chain=False
                )
            # NB: mutate the class variables!
            cls._interactive_output_stream = interactive_output_stream
        except ValueError:
            # Warn about "ValueError: IO on closed file" when the stream is closed.
            cls.log_exception(
                "Cannot reset interactive_output_stream -- stream (probably stderr) is closed"
            )

    @classmethod
    def exceptions_log_path(cls, for_pid=None, in_dir=None):
        """Get the path to either the shared or pid-specific fatal errors log file."""
        if for_pid is None:
            intermediate_filename_component = ""
        else:
            assert isinstance(for_pid, Pid)
            intermediate_filename_component = ".{}".format(for_pid)
        in_dir = in_dir or cls._log_dir
        return os.path.join(
            in_dir, ".pids", "exceptions{}.log".format(intermediate_filename_component)
        )

    @classmethod
    def log_exception(cls, msg):
        """Try to log an error message to this process's error log and the shared error log.

        NB: Doesn't raise (logs an error instead).
        """
        pid = os.getpid()
        fatal_error_log_entry = cls._format_exception_message(msg, pid)

        # We care more about this log than the shared log, so write to it first.
        try:
            cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
        except Exception as e:
            logger.error(
                "Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}".format(
                    msg, cls._log_dir, pid, e
                )
            )

        # Write to the shared log.
        try:
            # TODO: we should probably guard this against concurrent modification by other pants
            # subprocesses somehow.
            cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
        except Exception as e:
            logger.error(
                "Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}".format(
                    msg, cls._log_dir, pid, e
                )
            )

    @classmethod
    def _try_write_with_flush(cls, fileobj, payload):
        """This method is here so that it can be patched to simulate write errors.

        This is because mock can't patch primitive objects like file objects.
        """
        fileobj.write(payload)
        fileobj.flush()

    @classmethod
    def reset_signal_handler(cls, signal_handler):
        """Class state:
        - Overwrites `cls._signal_handler`.
        OS state:
        - Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.

        NB: This method calls signal.signal(), which will crash if not called from the main thread!

        :returns: The :class:`SignalHandler` that was previously registered, or None if this is
                  the first time this method was called.
        """
        assert isinstance(signal_handler, SignalHandler)
        # NB: Modify process-global state!
        for signum, handler in signal_handler.signal_handler_mapping.items():
            signal.signal(signum, handler)
            # Retry any system calls interrupted by any of the signals we just installed handlers for
            # (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
            # Linux and OSX.
            signal.siginterrupt(signum, False)

        previous_signal_handler = cls._signal_handler
        # NB: Mutate the class variables!
        cls._signal_handler = signal_handler
        return previous_signal_handler

    @classmethod
    @contextmanager
    def trapped_signals(cls, new_signal_handler):
        """A contextmanager which temporarily overrides signal handling.

        NB: This method calls signal.signal(), which will crash if not called from the main thread!
        """
        previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
        try:
            yield
        finally:
            cls.reset_signal_handler(previous_signal_handler)

    @classmethod
    @contextmanager
    def ignoring_sigint(cls):
        """A contextmanager which disables handling sigint in the current signal handler. This
        allows threads that are not the main thread to ignore sigint.

        NB: Only use this if you can't use ExceptionSink.trapped_signals().

        Class state:
        - Toggles `self._ignore_sigint` in `cls._signal_handler`.
        """
        with cls._signal_handler._ignoring_sigint():
            yield

    @classmethod
    def toggle_ignoring_sigint_v2_engine(cls, toggle: bool) -> None:
        assert cls._signal_handler is not None
        cls._signal_handler._toggle_ignoring_sigint_v2_engine(toggle)

    @classmethod
    def _iso_timestamp_for_now(cls):
        return datetime.datetime.now().isoformat()

    # NB: This includes a trailing newline, but no leading newline.
    _EXCEPTION_LOG_FORMAT = """\
timestamp: {timestamp}
process title: {process_title}
sys.argv: {args}
pid: {pid}
{message}
"""

    @classmethod
    def _format_exception_message(cls, msg, pid):
        return cls._EXCEPTION_LOG_FORMAT.format(
            timestamp=cls._iso_timestamp_for_now(),
            process_title=setproctitle.getproctitle(),
            args=sys.argv,
            pid=pid,
            message=msg,
        )

    _traceback_omitted_default_text = "(backtrace omitted)"

    @classmethod
    def _format_traceback(cls, traceback_lines, should_print_backtrace):
        if should_print_backtrace:
            traceback_string = "\n{}".format("".join(traceback_lines))
        else:
            traceback_string = " {}".format(cls._traceback_omitted_default_text)
        return traceback_string

    _UNHANDLED_EXCEPTION_LOG_FORMAT = """\
Exception caught: ({exception_type}){backtrace}
Exception message: {exception_message}{maybe_newline}
"""

    @classmethod
    def _format_unhandled_exception_log(cls, exc, tb, add_newline, should_print_backtrace):
        exc_type = type(exc)
        exception_full_name = "{}.{}".format(exc_type.__module__, exc_type.__name__)
        exception_message = str(exc) if exc else "(no message)"
        maybe_newline = "\n" if add_newline else ""
        return cls._UNHANDLED_EXCEPTION_LOG_FORMAT.format(
            exception_type=exception_full_name,
            backtrace=cls._format_traceback(
                traceback_lines=traceback.format_tb(tb),
                should_print_backtrace=should_print_backtrace,
            ),
            exception_message=exception_message,
            maybe_newline=maybe_newline,
        )

    _EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT = """\
{timestamp_msg}{terminal_msg}{details_msg}
"""

    @classmethod
    def _exit_with_failure(cls, terminal_msg):
        # Only include the timestamp (and omit the "see more details" hint) when
        # a full backtrace is being printed to the terminal.
        timestamp_msg = (
            f"timestamp: {cls._iso_timestamp_for_now()}\n"
            if cls._should_print_backtrace_to_terminal
            else ""
        )
        details_msg = (
            ""
            if cls._should_print_backtrace_to_terminal
            else "\n\n(Use --print-exception-stacktrace to see more error details.)"
        )
        terminal_msg = terminal_msg or "<no exit reason provided>"
        formatted_terminal_msg = cls._EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT.format(
            timestamp_msg=timestamp_msg, terminal_msg=terminal_msg, details_msg=details_msg
        )
        # Exit with failure, printing a message to the terminal (or whatever the interactive stream is).
        cls._exiter.exit_and_fail(msg=formatted_terminal_msg, out=cls._interactive_output_stream)

    @classmethod
    def _log_unhandled_exception_and_exit(
        cls, exc_class=None, exc=None, tb=None, add_newline=False
    ):
        """A sys.excepthook implementation which logs the error and exits with failure."""
        exc_class = exc_class or sys.exc_info()[0]
        exc = exc or sys.exc_info()[1]
        tb = tb or sys.exc_info()[2]

        # This exception was raised by a signal handler with the intent to exit the program.
        if exc_class == SignalHandler.SignalHandledNonLocalExit:
            return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)

        extra_err_msg = None
        try:
            # Always output the unhandled exception details into a log file, including the traceback.
            exception_log_entry = cls._format_unhandled_exception_log(
                exc, tb, add_newline, should_print_backtrace=True
            )
            cls.log_exception(exception_log_entry)
        except Exception as e:
            extra_err_msg = "Additional error logging unhandled exception {}: {}".format(exc, e)
            logger.error(extra_err_msg)

        # Generate an unhandled exception report fit to be printed to the terminal (respecting the
        # Exiter's should_print_backtrace field).
        if cls._should_print_backtrace_to_terminal:
            stderr_printed_error = cls._format_unhandled_exception_log(
                exc, tb, add_newline, should_print_backtrace=cls._should_print_backtrace_to_terminal
            )
            if extra_err_msg:
                stderr_printed_error = "{}\n{}".format(stderr_printed_error, extra_err_msg)
        else:
            # If the user didn't ask for a backtrace, show a succinct error message without
            # all the exception-related preamble. A power-user/pants developer can still
            # get all the preamble info along with the backtrace, but the end user shouldn't
            # see that boilerplate by default.
            error_msgs = getattr(exc, "end_user_messages", lambda: [str(exc)])()
            stderr_printed_error = "\n" + "\n".join(f"ERROR: {msg}" for msg in error_msgs)

        cls._exit_with_failure(stderr_printed_error)

    _CATCHABLE_SIGNAL_ERROR_LOG_FORMAT = """\
Signal {signum} ({signame}) was raised. Exiting with failure.{formatted_traceback}
"""

    @classmethod
    def _handle_signal_gracefully(cls, signum, signame, traceback_lines):
        """Signal handler for non-fatal signals which raises or logs an error and exits with
        failure."""
        # Extract the stack, and format an entry to be written to the exception log.
        formatted_traceback = cls._format_traceback(
            traceback_lines=traceback_lines, should_print_backtrace=True
        )
        signal_error_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
            signum=signum, signame=signame, formatted_traceback=formatted_traceback
        )
        # TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file
        # descriptors re-entrantly, which raises an IOError).
        # This method catches any exceptions raised within it.
        cls.log_exception(signal_error_log_entry)

        # Create a potentially-abbreviated traceback for the terminal or other interactive stream.
        formatted_traceback_for_terminal = cls._format_traceback(
            traceback_lines=traceback_lines,
            should_print_backtrace=cls._should_print_backtrace_to_terminal,
        )
        terminal_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
            signum=signum, signame=signame, formatted_traceback=formatted_traceback_for_terminal
        )
        # Exit, printing the output to the terminal.
        cls._exit_with_failure(terminal_log_entry)
# Set up global state (log files, sys.excepthook, SIGUSR2/backtrace behavior,
# and the nonfatal signal handlers) at module import time; callers may later
# re-invoke any of the reset_* classmethods to change it.
ExceptionSink.reset_log_location(os.getcwd())
ExceptionSink._reset_exiter(Exiter(exiter=sys.exit))
ExceptionSink.reset_interactive_output_stream(sys.stderr.buffer)
ExceptionSink.reset_signal_handler(SignalHandler())
ExceptionSink.reset_should_print_backtrace_to_terminal(
    should_print_backtrace=os.environ.get("PANTS_PRINT_EXCEPTION_STACKTRACE", "True") == "True"
)
|
"""
Add compatibility for gevent and multiprocessing.
Source based on project GIPC 0.6.0
https://bitbucket.org/jgehrcke/gipc/
"""
import os, sys, signal, multiprocessing, multiprocessing.process, multiprocessing.reduction
# Lazily-imported gevent modules; populated by _tryGevent().
gevent = None
geventEvent = None
def _tryGevent():
    """Import gevent and gevent.event into the module globals.

    Returns False when both are already loaded, True on a fresh successful
    import, and raises ValueError when gevent is not installed.
    """
    global gevent, geventEvent
    if gevent and geventEvent:
        # Both modules already loaded; nothing to do.
        return False
    try:
        import gevent
        from gevent import event as geventEvent
    except ImportError:
        raise ValueError('gevent not found')
    return True
def Process(target, args=(), kwargs={}, name=None):  # daemon=None
    """Create a gevent-cooperative child process running `target`.

    Falls back to a plain multiprocessing.Process when gevent is unavailable.
    NOTE: the `kwargs={}` default is only ever read, never mutated, so the
    shared-mutable-default pitfall does not apply here.
    """
    # check if gevent availible
    try:
        _tryGevent()
    except ValueError:
        # BUG FIX: this was a Python 2 `print` statement, which is a
        # SyntaxError on Python 3 even though this module otherwise supports
        # PY3 (see the PY3 branch at the bottom of the file); the function
        # form works on both interpreters.
        print('Gevent not founded, switching to native')
        return multiprocessing.Process(target=target, args=args, kwargs=kwargs, name=name)
    if int(gevent.__version__[0]) < 1:
        raise NotImplementedError('Gmultiprocessing supports only gevent>=1.0, your version %s' % gevent.__version__)
    if not isinstance(args, tuple):
        raise TypeError('<args> must be a tuple')
    if not isinstance(kwargs, dict):
        raise TypeError('<kwargs> must be a dict')
    # Wrap the user target in _child, which resets gevent/libev state in the
    # forked process before calling it.
    p = _GProcess(
        target=_child,
        name=name,
        kwargs={"target": target, "args": args, "kwargs": kwargs}
    )
    # if daemon is not None: p.daemon = daemon
    return p
def _child(target, args, kwargs):
    """Wrapper function that runs in child process. Resets gevent/libev state
    and executes user-given function.
    """
    _tryGevent()
    # Restore default OS handlers for all resettable signals inherited from the parent.
    _reset_signal_handlers()
    gevent.reinit()
    hub = gevent.get_hub()
    # Drop the threadpool inherited from the parent before destroying the hub.
    del hub.threadpool
    hub._threadpool = None
    # Destroy the inherited hub and its loop, then create a fresh default
    # libev event loop belonging to this child process.
    hub.destroy(destroy_loop=True)
    h = gevent.get_hub(default=True)
    assert h.loop.default, 'Could not create libev default event loop.'
    target(*args, **kwargs)
class _GProcess(multiprocessing.Process):
    """
    Compatible with the ``multiprocessing.Process`` API.

    Child exit status is collected via a libev SIGCHLD watcher (see start())
    instead of multiprocessing's blocking waitpid-based poll, which is
    neutralized below.
    """
    try:
        from multiprocessing.forking import Popen as mp_Popen
    except ImportError:
        # multiprocessing's internal structure has changed from 3.3 to 3.4.
        from multiprocessing.popen_fork import Popen as mp_Popen
    # Monkey-patch and forget about the name.
    mp_Popen.poll = lambda *a, **b: None
    del mp_Popen

    def start(self):
        _tryGevent()
        # Start grabbing SIGCHLD within libev event loop.
        gevent.get_hub().loop.install_sigchld()
        # Run new process (based on `fork()` on POSIX-compliant systems).
        super(_GProcess, self).start()
        # The occurrence of SIGCHLD is recorded asynchronously in libev.
        # This guarantees proper behavior even if the child watcher is
        # started after the child exits. Start child watcher now.
        self._sigchld_watcher = gevent.get_hub().loop.child(self.pid)
        self._returnevent = gevent.event.Event()
        self._sigchld_watcher.start(self._on_sigchld, self._sigchld_watcher)

    def _on_sigchld(self, watcher):
        """Callback of libev child watcher. Called when libev event loop
        catches corresponding SIGCHLD signal.
        """
        watcher.stop()
        # Status evaluation copied from `multiprocessing.forking` in Py2.7.
        if os.WIFSIGNALED(watcher.rstatus):
            self._popen.returncode = -os.WTERMSIG(watcher.rstatus)
        else:
            assert os.WIFEXITED(watcher.rstatus)
            self._popen.returncode = os.WEXITSTATUS(watcher.rstatus)
        # Wake up any greenlet blocked in join().
        self._returnevent.set()

    def is_alive(self):
        assert self._popen is not None, "Process not yet started."
        # returncode is populated by _on_sigchld() once the child has exited.
        if self._popen.returncode is None:
            return True
        return False

    @property
    def exitcode(self):
        if self._popen is None:
            return None
        return self._popen.returncode

    def __repr__(self):
        exitcodedict = multiprocessing.process._exitcode_to_name
        status = 'started'
        if self._parent_pid != os.getpid():
            status = 'unknown'
        elif self.exitcode is not None:
            status = self.exitcode
        if status == 0:
            status = 'stopped'
        elif isinstance(status, int):
            status = 'stopped[%s]' % exitcodedict.get(status, status)
        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self.daemon and ' daemon' or '')

    def join(self, timeout=None):
        """
        Wait cooperatively until child process terminates or timeout occurs.

        :arg timeout: ``None`` (default) or a time in seconds. The method
            simply returns upon timeout expiration. The state of the process
            has to be identified via ``is_alive()``.
        """
        assert self._parent_pid == os.getpid(), "I'm not parent of this child."
        assert self._popen is not None, 'Can only join a started process.'
        # Resemble multiprocessing's join() method while replacing
        # `self._popen.wait(timeout)` with
        # `self._returnevent.wait(timeout)`
        self._returnevent.wait(timeout)
        if self._popen.returncode is not None:
            if hasattr(multiprocessing.process, '_children'):  # This is for Python 3.4.
                kids = multiprocessing.process._children
            else:  # For Python 2.6, 2.7, 3.3.
                kids = multiprocessing.process._current_process._children
            kids.discard(self)
_signals_to_reset = [getattr(signal, s) for s in
set([s for s in dir(signal) if s.startswith("SIG")]) -
# Exclude constants that are not signals such as SIG_DFL and SIG_BLOCK.
set([s for s in dir(signal) if s.startswith("SIG_")]) -
# Leave handlers for SIG(STOP/KILL/PIPE) untouched.
set(['SIGSTOP', 'SIGKILL', 'SIGPIPE'])]
def _reset_signal_handlers():
for s in _signals_to_reset:
if s < signal.NSIG:
signal.signal(s, signal.SIG_DFL)
PY3 = sys.version_info[0] == 3

if PY3:
    def _reraise(tp, value, tb=None):
        # Re-raise `value` (constructing it from the type when None) with the
        # given traceback attached.
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    # On Python 2 the three-argument raise is a syntax error under Python 3,
    # so it must be compiled via exec (same trick as the `six` library).
    def __exec(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals, mirroring bare `exec`.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    __exec("""def _reraise(tp, value, tb=None): raise tp, value, tb""")
|
import pytest
import math
import io
import time
import base64
import hashlib
from http import client
from unittest import mock
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.azureblobstorage import AzureBlobStorageProvider
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFileMetadata
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFolderMetadata
from waterbutler.providers.azureblobstorage.provider import (
MAX_UPLOAD_BLOCK_SIZE,
)
@pytest.fixture
def auth():
    """Auth payload identifying the acting user."""
    return dict(name='cat', email='cat@cat.com')
@pytest.fixture
def credentials():
    """Azure account credentials; the key is base64-encoded as the API expects."""
    return dict(account_name='dontdead',
                account_key=base64.b64encode(b'open inside'))
@pytest.fixture
def settings():
    """Provider settings naming the target blob container."""
    return dict(container='thatkerning')
@pytest.fixture
def mock_time(monkeypatch):
    """Pin time.time() to a fixed instant so generated signatures are stable."""
    frozen = mock.Mock(return_value=1454684930.0)
    monkeypatch.setattr(time, 'time', frozen)
@pytest.fixture
def provider(auth, credentials, settings):
    """An AzureBlobStorageProvider wired up from the fixtures above."""
    return AzureBlobStorageProvider(auth, credentials, settings)
@pytest.fixture
def file_content():
    """Raw bytes used as the small upload payload."""
    return b'sleepy'
@pytest.fixture
def file_like(file_content):
    """file_content wrapped in an in-memory binary file object."""
    return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
    """file_like wrapped in WaterButler's streaming reader."""
    return streams.FileStreamReader(file_like)
@pytest.fixture
def large_file_content():
    """71 MB payload: 17 full 4 MB upload blocks plus a trailing 3 MB block."""
    # 71MB (4MB * 17 + 3MB)
    return b'a' * (71 * (2 ** 20))
@pytest.fixture
def large_file_like(large_file_content):
    """large_file_content wrapped in an in-memory binary file object."""
    return io.BytesIO(large_file_content)
@pytest.fixture
def large_file_stream(large_file_like):
    """large_file_like wrapped in WaterButler's streaming reader."""
    return streams.FileStreamReader(large_file_like)
@pytest.fixture
def folder_metadata():
    """Container-listing XML: two blobs under Photos/ plus one top-level blob."""
    return b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://vrosf.blob.core.windows.net/" ContainerName="sample-container1">
<Blobs>
<Blob>
<Name>Photos/test-text.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
<Blob>
<Name>Photos/a/test.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
<Blob>
<Name>top.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
</Blobs>
<NextMarker />
</EnumerationResults>'''
@pytest.fixture
def file_metadata():
    """HEAD-response headers for a zero-length block blob."""
    return {
        'CONTENT-LENGTH': '0',
        'CONTENT-TYPE': 'text/plain',
        'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
        'ACCEPT-RANGES': 'bytes',
        'ETAG': '"0x8D40959613D32F6"',
        'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
        'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
        'X-MS-VERSION': '2015-07-08',
        'X-MS-LEASE-STATUS': 'unlocked',
        'X-MS-LEASE-STATE': 'available',
        'X-MS-BLOB-TYPE': 'BlockBlob',
        'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
    }
@pytest.fixture
def large_file_metadata(large_file_content):
    """HEAD-response headers whose length matches the 71 MB payload."""
    return {
        'CONTENT-LENGTH': str(len(large_file_content)),
        'CONTENT-TYPE': 'text/plain',
        'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
        'ACCEPT-RANGES': 'bytes',
        'ETAG': '"0x8D40959613D32F6"',
        'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
        'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
        'X-MS-VERSION': '2015-07-08',
        'X-MS-LEASE-STATUS': 'unlocked',
        'X-MS-LEASE-STATE': 'available',
        'X-MS-BLOB-TYPE': 'BlockBlob',
        'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
    }
class TestValidatePath:
    """validate_v1_path / validate_path behaviour for files, folders and root."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_file(self, provider, file_metadata,
                                         mock_time):
        """A blob answering HEAD validates as a file; the folder form 404s."""
        file_path = 'foobah'
        # HEAD on the blob itself succeeds -> the path is a file.
        for good_metadata_url in provider.generate_urls(file_path, secondary=True):
            aiohttpretty.register_uri('HEAD', good_metadata_url, headers=file_metadata)
        # The container listing (used for the folder check) is registered 404.
        for bad_metadata_url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri('GET', bad_metadata_url,
                                      params={'restype': 'container', 'comp': 'list'}, status=404)
        try:
            wb_path_v1 = await provider.validate_v1_path('/' + file_path)
        except Exception as exc:
            pytest.fail(str(exc))
        # The same name with a trailing slash must be rejected as not found.
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path('/' + file_path + '/')
        assert exc.value.code == client.NOT_FOUND
        # v0 validation must agree with v1.
        wb_path_v0 = await provider.validate_path('/' + file_path)
        assert wb_path_v1 == wb_path_v0

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_folder(self, provider, folder_metadata, mock_time):
        """A name present in the container listing validates as a folder."""
        folder_path = 'Photos'
        for good_metadata_url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri(
                'GET', good_metadata_url, params={'restype': 'container', 'comp': 'list'},
                body=folder_metadata, headers={'Content-Type': 'application/xml'}
            )
        # HEAD on the bare name 404s, so the file form must be rejected.
        for bad_metadata_url in provider.generate_urls(folder_path, secondary=True):
            aiohttpretty.register_uri('HEAD', bad_metadata_url, status=404)
        try:
            wb_path_v1 = await provider.validate_v1_path('/' + folder_path + '/')
        except Exception as exc:
            pytest.fail(str(exc))
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path('/' + folder_path)
        assert exc.value.code == client.NOT_FOUND
        wb_path_v0 = await provider.validate_path('/' + folder_path + '/')
        assert wb_path_v1 == wb_path_v0

    @pytest.mark.asyncio
    async def test_normal_name(self, provider, mock_time):
        """A plain file path parses into name/parent/file flags."""
        path = await provider.validate_path('/this/is/a/path.txt')
        assert path.name == 'path.txt'
        assert path.parent.name == 'a'
        assert path.is_file
        assert not path.is_dir
        assert not path.is_root

    @pytest.mark.asyncio
    async def test_folder(self, provider, mock_time):
        """A trailing-slash path parses as a directory."""
        path = await provider.validate_path('/this/is/a/folder/')
        assert path.name == 'folder'
        assert path.parent.name == 'a'
        assert not path.is_file
        assert path.is_dir
        assert not path.is_root

    @pytest.mark.asyncio
    async def test_root(self, provider, mock_time):
        # NOTE(review): despite the name this duplicates test_folder and never
        # exercises the root path '/' -- confirm intent.
        path = await provider.validate_path('/this/is/a/folder/')
        assert path.name == 'folder'
        assert path.parent.name == 'a'
        assert not path.is_file
        assert path.is_dir
        assert not path.is_root
class TestCRUD:
    """Download and delete behaviour."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download(self, provider, mock_time):
        """Downloading a blob streams back the registered body."""
        path = WaterButlerPath('/muhtriangle')
        for url in provider.generate_urls(path.path, secondary=True):
            aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
        result = await provider.download(path)
        content = await result.read()
        assert content == b'delicious'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_folder_400s(self, provider, mock_time):
        """Downloading a directory path is rejected outright with a 400."""
        with pytest.raises(exceptions.DownloadError) as e:
            await provider.download(WaterButlerPath('/cool/folder/mom/'))
        assert e.value.code == 400

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_delete(self, provider, mock_time):
        """Deleting a file issues a DELETE for the blob."""
        path = WaterButlerPath('/some-file')
        for url in provider.generate_urls(path.path):
            aiohttpretty.register_uri('DELETE', url, status=200)
        await provider.delete(path)
        # `url` is the last value produced by the loop above.
        assert aiohttpretty.has_call(method='DELETE', uri=url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_folder_delete(self, provider, folder_metadata, mock_time):
        """Deleting a folder lists its blobs and issues a DELETE per blob."""
        path = WaterButlerPath('/Photos/')
        for url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri(
                'GET', url, params={'restype': 'container', 'comp': 'list'},
                body=folder_metadata, headers={'Content-Type': 'application/xml'}
            )
        delete_urls = []
        for url in provider.generate_urls(path.path + "test-text.txt"):
            aiohttpretty.register_uri('DELETE', url, status=200)
            delete_urls.append(url)
        for url in provider.generate_urls(path.path + "a/test.txt"):
            aiohttpretty.register_uri('DELETE', url, status=200)
            delete_urls.append(url)
        await provider.delete(path)
        assert aiohttpretty.has_call(method='DELETE', uri=delete_urls[0])
        assert aiohttpretty.has_call(method='DELETE', uri=delete_urls[1])
class TestMetadata:
    """Metadata listing/lookup and upload behaviour.

    Changes from the original: ``== None`` comparisons replaced with
    ``is None`` (PEP 8 / E711), and the local ``metadata`` result variable in
    the upload tests renamed so it no longer shadows the imported
    ``waterbutler.core.metadata`` module used by ``test_metadata_file``.
    """

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_root(self, provider, folder_metadata, mock_time):
        """Listing the root yields one folder and one file entry."""
        path = WaterButlerPath('/')
        assert path.is_root
        for url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri('GET', url,
                                      params={'restype': 'container', 'comp': 'list'},
                                      body=folder_metadata,
                                      headers={'Content-Type': 'application/xml'})
        result = await provider.metadata(path)
        assert isinstance(result, list)
        assert len(result) == 2
        assert result[0].path == '/Photos/'
        assert result[0].name == 'Photos'
        assert result[0].is_folder
        assert result[1].path == '/top.txt'
        assert result[1].name == 'top.txt'
        assert not result[1].is_folder
        assert result[1].extra['md5'] is None

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder(self, provider, folder_metadata, mock_time):
        """Listing a sub-folder yields its immediate children only."""
        path = WaterButlerPath('/Photos/')
        for url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri('GET', url,
                                      params={'restype': 'container', 'comp': 'list'},
                                      body=folder_metadata,
                                      headers={'Content-Type': 'application/xml'})
        result = await provider.metadata(path)
        assert isinstance(result, list)
        assert len(result) == 2
        assert result[0].path == '/Photos/a/'
        assert result[0].name == 'a'
        assert result[0].is_folder
        assert result[1].path == '/Photos/test-text.txt'
        assert result[1].name == 'test-text.txt'
        assert not result[1].is_folder
        assert result[1].extra['md5'] is None

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file(self, provider, file_metadata, mock_time):
        """A HEAD on a blob produces BaseFileMetadata for that file."""
        path = WaterButlerPath('/Foo/Bar/my-image.jpg')
        provider.url = 'http://test_url'
        provider.token = 'test'
        for url in provider.generate_urls(path.path, secondary=True):
            aiohttpretty.register_uri('HEAD', url, headers=file_metadata)
        result = await provider.metadata(path)
        assert isinstance(result, metadata.BaseFileMetadata)
        assert result.path == str(path)
        assert result.name == 'my-image.jpg'
        assert result.modified is not None
        assert result.extra['md5'] is None

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_missing(self, provider, mock_time):
        """A 404 from HEAD surfaces as a MetadataError."""
        path = WaterButlerPath('/notfound.txt')
        provider.url = 'http://test_url'
        provider.token = 'test'
        for url in provider.generate_urls(path.path, secondary=True):
            aiohttpretty.register_uri('HEAD', url, status=404)
        with pytest.raises(exceptions.MetadataError):
            await provider.metadata(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload(self, provider, file_content, file_stream, file_metadata, mock_time):
        """A small upload PUTs the blob then HEADs it for its metadata."""
        path = WaterButlerPath('/foobah')
        for url in provider.generate_urls(path.path):
            aiohttpretty.register_uri('PUT', url, status=200)
        for metadata_url in provider.generate_urls(path.path):
            aiohttpretty.register_uri(
                'HEAD',
                metadata_url,
                responses=[
                    # First HEAD (pre-upload existence check) misses ...
                    {'status': 404},
                    # ... second HEAD (post-upload) returns the new metadata.
                    {'headers': file_metadata},
                ],
            )
        item_metadata, created = await provider.upload(file_stream, path)
        assert item_metadata.kind == 'file'
        assert created
        assert aiohttpretty.has_call(method='PUT', uri=url)
        assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload_large(self, provider, large_file_content, large_file_stream, large_file_metadata, mock_time):
        """A large upload sends each 4 MB block, then commits the block list."""
        # upload 4MB data 17 times and 3MB once, and request block_list
        upload_times = math.floor(len(large_file_content) / MAX_UPLOAD_BLOCK_SIZE)
        block_id_prefix = 'hogefuga'
        block_id_list = [AzureBlobStorageProvider._format_block_id(block_id_prefix, i) for i in range(upload_times)]
        block_req_params_list = [{'comp': 'block', 'blockid': block_id} for block_id in block_id_list]
        block_list_req_params = {'comp': 'blocklist'}
        path = WaterButlerPath('/large_foobah')
        for url in provider.generate_urls(path.path):
            for block_req_params in block_req_params_list:
                aiohttpretty.register_uri('PUT', url, status=200, params=block_req_params)
            aiohttpretty.register_uri('PUT', url, status=200, params=block_list_req_params)
        for metadata_url in provider.generate_urls(path.path):
            aiohttpretty.register_uri(
                'HEAD',
                metadata_url,
                responses=[
                    {'status': 404},
                    {'headers': large_file_metadata},
                ],
            )
        item_metadata, created = await provider.upload(large_file_stream, path, block_id_prefix=block_id_prefix)
        assert item_metadata.kind == 'file'
        assert created
        # Every individual block PUT plus the final blocklist commit happened.
        for block_req_params in block_req_params_list:
            assert aiohttpretty.has_call(method='PUT', uri=url, params=block_req_params)
        assert aiohttpretty.has_call(method='PUT', uri=url, params=block_list_req_params)
        assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
class TestCreateFolder:
    """create_folder validation, conflict and success paths."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_must_start_with_slash(self, provider, mock_time):
        # NOTE(review): the name is misleading -- this actually checks that a
        # non-directory path (no trailing slash) is rejected.
        path = WaterButlerPath('/alreadyexists')
        with pytest.raises(exceptions.CreateFolderError) as e:
            await provider.create_folder(path)
        assert e.value.code == 400
        assert e.value.message == 'Path must be a directory'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_errors_conflict(self, provider, folder_metadata, mock_time):
        """Creating a folder that already exists raises FolderNamingConflict."""
        path = WaterButlerPath('/alreadyexists/')
        for url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri('GET', url,
                                      params={'restype': 'container', 'comp': 'list'},
                                      body=folder_metadata,
                                      headers={'Content-Type': 'application/xml'})
        # HEAD 200 signals the folder marker already exists.
        for url in provider.generate_urls('alreadyexists', secondary=True):
            aiohttpretty.register_uri('HEAD', url, status=200)
        for url in provider.generate_urls('alreadyexists/.osfkeep'):
            aiohttpretty.register_uri('PUT', url, status=200)
        with pytest.raises(exceptions.FolderNamingConflict) as e:
            await provider.create_folder(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_creates(self, provider, folder_metadata, mock_time):
        """A new folder is created by PUTting its .osfkeep marker blob."""
        path = WaterButlerPath('/doesntalreadyexists/')
        for url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri('GET', url,
                                      params={'restype': 'container', 'comp': 'list'},
                                      body=folder_metadata,
                                      headers={'Content-Type': 'application/xml'})
        # HEAD 404 confirms the folder does not exist yet.
        for url in provider.generate_urls('doesntalreadyexists', secondary=True):
            aiohttpretty.register_uri('HEAD', url, status=404)
        for url in provider.generate_urls('doesntalreadyexists/.osfkeep'):
            aiohttpretty.register_uri('PUT', url, status=200)
        resp = await provider.create_folder(path)
        assert resp.kind == 'folder'
        assert resp.name == 'doesntalreadyexists'
        assert resp.path == '/doesntalreadyexists/'
class TestOperations:
    """Provider capability checks."""

    # Fix: the original async test lacked the asyncio marker, so pytest-asyncio
    # (strict mode) never actually ran the coroutine.
    @pytest.mark.asyncio
    async def test_equality(self, provider, mock_time):
        """Intra-copy/move must be supported between identical providers."""
        assert provider.can_intra_copy(provider)
        assert provider.can_intra_move(provider)
|
__author__ = 'mark'
"""
User Profile Extension based on One-to-One fields code in Django Docs here:
https://docs.djangoproject.com/en/1.7/topics/auth/customizing/
"""
from django.db import models
from django.contrib.auth.models import User
from uuid import uuid4
class Member(models.Model):
    """Per-user profile data linked one-to-one to Django's built-in User."""

    # on_delete=CASCADE matches the pre-Django-2.0 implicit default and is a
    # required argument from Django 2.0 onwards.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # External identifiers, optional because they are populated lazily.
    member_guid = models.CharField(max_length=100, null=True, blank=True)
    ext_uid = models.CharField(max_length=100, null=True, blank=True)
    user_token = models.CharField(max_length=100, null=True, blank=True)
|
from threading import Timer
from oslo_log import log as logging
from networking_vsphere._i18n import _LI
from networking_vsphere.utils.rpc_translator import update_rules
from neutron.agent import securitygroups_rpc
LOG = logging.getLogger(__name__)
class DVSSecurityGroupRpc(securitygroups_rpc.SecurityGroupAgentRpc):
    """Security-group RPC agent for DVS that batches firewall refreshes."""

    def __init__(self, context, plugin_rpc,
                 defer_refresh_firewall=False):
        self.context = context
        self.plugin_rpc = plugin_rpc
        # Device ids queued for a delayed refresh (see refresh_firewall).
        self._devices_to_update = set()
        self.init_firewall(defer_refresh_firewall)

    def prepare_devices_filter(self, device_ids):
        """Fetch rules for newly wired devices and install their port filters."""
        if not device_ids:
            return
        LOG.info(_LI("Preparing filters for devices %s"), device_ids)
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, list(device_ids))
            devices = update_rules(devices_info)
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, list(device_ids))
        self.firewall.prepare_port_filter(devices.values())

    def remove_devices_filter(self, device_ids):
        """Drop the port filters of removed devices."""
        if not device_ids:
            return
        LOG.info(_LI("Remove device filter for %r"), device_ids)
        self.firewall.remove_port_filter(device_ids)

    def _refresh_ports(self):
        """Timer callback: apply rule updates for all currently queued devices."""
        device_ids = self._devices_to_update
        # device_ids aliases the old set; the subtraction yields a fresh
        # (empty) set, so ids queued from now on land in the next batch
        # without mutating the one being processed here.
        self._devices_to_update = self._devices_to_update - device_ids
        if not device_ids:
            return
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, device_ids)
            devices = update_rules(devices_info)
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, device_ids)
        self.firewall.update_port_filter(devices.values())

    def refresh_firewall(self, device_ids=None):
        """Queue devices for a firewall refresh applied after a short delay.

        Bug fix: the original ran ``self._devices_to_update |= device_ids``
        before checking the argument, raising TypeError when called with the
        default ``device_ids=None`` (and also for any non-set iterable).
        NOTE(review): upstream SecurityGroupAgentRpc treats None as "refresh
        all devices"; here None/empty is a no-op instead of a crash -- confirm
        which semantics are wanted.
        """
        LOG.info(_LI("Refresh firewall rules"))
        if not device_ids:
            return
        self._devices_to_update |= set(device_ids)
        Timer(2, self._refresh_ports).start()
|
"""
main.py
The entry point for the book reader application.
"""
__version_info__ = (0, 0, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "c.guenther@mac.com"
import time
import sqlite3
import pdb
import signal
import sys, os
import rfid
import config
import RPi.GPIO as GPIO
from player import Player
from status_light import StatusLight
from threading import Thread
class BookReader(object):
    """The main class that controls the player, the GPIO pins and the RFID reader."""

    def __init__(self):
        """Initialize the reader, signal handlers, status light, db and player."""
        self.rfid_reader = rfid.Reader(**config.serial)
        # setup signal handlers. SIGINT for KeyboardInterrupt
        # and SIGTERM for when running from supervisord
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        self.status_light = StatusLight(config.status_light_pin)
        thread = Thread(target=self.status_light.start)
        thread.start()
        self.setup_db()
        self.player = Player(config.mpd_conn, self.status_light)
        self.setup_gpio()

    def setup_db(self):
        """Setup a connection to the SQLite db."""
        self.db_conn = sqlite3.connect(config.db_file)
        self.db_cursor = self.db_conn.cursor()

    def setup_gpio(self):
        """Setup all GPIO pins."""
        GPIO.setmode(GPIO.BCM)
        # input pins for buttons
        for pin in config.gpio_pins:
            GPIO.setup(pin['pin_id'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
            GPIO.add_event_detect(pin['pin_id'], GPIO.FALLING,
                                  callback=getattr(self.player, pin['callback']),
                                  bouncetime=pin['bounce_time'])

    def signal_handler(self, signal, frame):
        """When quitting, stop playback, close the player and release GPIO pins."""
        self.player.close()
        self.status_light.exit()
        GPIO.cleanup()
        sys.exit(0)

    def loop(self):
        """The main event loop: poll the RFID reader for a card.

        When a card is present and names a different book than the one
        currently playing:
        1. Stop playback of the current book if one is playing
        2. Start playing the new book (resuming saved progress if any)
        """
        while True:
            if self.player.is_playing():
                self.on_playing()
            elif self.player.finished_book():
                # when at the end of a book, delete its progress from the db
                # so we can listen to it again.
                # Parameterized query: avoids the injection/format bugs of the
                # original %-interpolated SQL.
                self.db_cursor.execute(
                    'DELETE FROM progress WHERE book_id = ?',
                    (self.player.book.book_id,))
                self.db_conn.commit()
                self.player.book.reset()
            rfid_card = self.rfid_reader.read()
            if not rfid_card:
                continue
            book_id = rfid_card.get_id()
            if book_id and book_id != self.player.book.book_id:  # a change in book id
                progress = self.db_cursor.execute(
                    'SELECT * FROM progress WHERE book_id = ?',
                    (book_id,)).fetchone()
                self.player.play(book_id, progress)

    def on_playing(self):
        """Executed on each loop iteration while playing: record the latest
        position in self.player.book and persist the progress to the db."""
        status = self.player.get_status()
        self.player.book.elapsed = float(status['elapsed'])
        self.player.book.part = int(status['song']) + 1
        self.db_cursor.execute(
            'INSERT OR REPLACE INTO progress (book_id, part, elapsed) VALUES (?, ?, ?)',
            (self.player.book.book_id, self.player.book.part, self.player.book.elapsed))
        self.db_conn.commit()
if __name__ == '__main__':
    # Entry point: build the reader and run its event loop forever.
    BookReader().loop()
|
from ..user_namespaces import UserNamespaces
from ...parsers.cmdline import CmdLine
from ...parsers.grub_conf import Grub2Config
from ...tests import context_wrap
# Kernel command-line token enabling user namespaces (underscore spelling).
ENABLE_TOK_A = '''
user_namespaces.enable=1
'''.strip()  # noqa

# Same token using the equivalent dash spelling.
ENABLE_TOK_B = '''
user-namespaces.enable=1
'''.strip()  # noqa

# /proc/cmdline template; {0} receives an enable token (or '').
CMDLINE = '''
BOOT_IMAGE=/vmlinuz-3.10.0-514.6.1.el7.x86_64 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap {0}
'''.strip()  # noqa

# grub2.cfg template with two menu entries; {0}/{1} receive per-entry enable
# tokens. Literal braces are doubled because the string goes through .format().
GRUB2_CONF = '''
menuentry 'Red Hat Enterprise Linux Server (3.10.0-514.16.1.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586' {{
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod xfs
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d80fa96c-ffa1-4894-9282-aeda37f0befe
else
search --no-floppy --fs-uuid --set=root d80fa96c-ffa1-4894-9282-aeda37f0befe
fi
linuxefi /vmlinuz-3.10.0-514.16.1.el7.x86_64 root=/dev/mapper/rhel-root ro rd.luks.uuid=luks-a40b320e-0711-4cd6-8f9e-ce32810e2a79 rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 {0}
initrdefi /initramfs-3.10.0-514.16.1.el7.x86_64.img
}}
menuentry 'Red Hat Enterprise Linux Server (3.10.0-514.10.2.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586' {{
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod xfs
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d80fa96c-ffa1-4894-9282-aeda37f0befe
else
search --no-floppy --fs-uuid --set=root d80fa96c-ffa1-4894-9282-aeda37f0befe
fi
linuxefi /vmlinuz-3.10.0-514.10.2.el7.x86_64 root=/dev/mapper/rhel-root ro rd.luks.uuid=luks-a40b320e-0711-4cd6-8f9e-ce32810e2a79 rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 {1}
initrdefi /initramfs-3.10.0-514.10.2.el7.x86_64.img
}}
'''  # noqa

# Expected menuentry text for the first grub entry above.
MENUENTRY_0 = '''
'Red Hat Enterprise Linux Server (3.10.0-514.16.1.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586'
'''.strip()  # noqa

# Expected menuentry text for the second grub entry above.
MENUENTRY_1 = '''
'Red Hat Enterprise Linux Server (3.10.0-514.10.2.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586'
'''.strip()  # noqa

CASES = [
    # noqa
    # |-- provided --|  |---- expected results ---|
    # ((cmdline, grub), (enabled, enabled_configs))
    # Not enabled, no grub data
    ((CMDLINE.format(''), None), (False, [])),
    # Not enabled, not enabled in grub
    ((CMDLINE.format(''), GRUB2_CONF.format('', '')), (False, [])),
    # Not enabled, but enabled in menuentry 1
    ((CMDLINE.format(''), GRUB2_CONF.format('', ENABLE_TOK_A)),
     (False, [MENUENTRY_1])),
    # Enabled, no grub data
    ((CMDLINE.format(ENABLE_TOK_A), None), (True, [])),
    # Enabled, but not enabled in grub
    ((CMDLINE.format(ENABLE_TOK_A), GRUB2_CONF.format('', '')),
     (True, [])),
    # Enabled, enabled in menuentry 0
    ((CMDLINE.format(ENABLE_TOK_A), GRUB2_CONF.format(ENABLE_TOK_A, '')),
     (True, [MENUENTRY_0])),
    # Dash syntax, rather than underscore
    ((CMDLINE.format(ENABLE_TOK_B), GRUB2_CONF.format(ENABLE_TOK_B, '')),
     (True, [MENUENTRY_0]))
]
def test_integration():
    """Run every (cmdline, grub) case through UserNamespaces and verify both
    the overall enabled flag and the per-menuentry enabled configs."""
    for (cmdline_src, grub_src), (want_enabled, want_configs) in CASES:
        parsers = {CmdLine: CmdLine(context_wrap(cmdline_src))}
        if grub_src is not None:
            parsers[Grub2Config] = Grub2Config(context_wrap(grub_src))
        un = UserNamespaces(parsers.get(CmdLine), parsers.get(Grub2Config))
        assert un.enabled() == want_enabled
        assert un.enabled_configs() == want_configs
|
import mock
from solum.api import auth
from solum.api.handlers import assembly_handler
from solum.common import exception
from solum.common import repo_utils
from solum.objects import assembly
from solum.openstack.common.fixture import config
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
# Convenience alias for the assembly state constants.
STATES = assembly.States


@mock.patch('solum.objects.registry')
class TestAssemblyHandler(base.BaseTestCase):
    """AssemblyHandler unit tests.

    The ``mock_registry`` argument received by every test comes from the
    class-level patch of ``solum.objects.registry`` above.
    """

    def setUp(self):
        super(TestAssemblyHandler, self).setUp()
        self.ctx = utils.dummy_context()
        self.CONF = self.useFixture(config.Config())
        self.CONF.config(auth_uri='http://fakeidentity.com',
                         group=auth.OPT_GROUP_NAME)
        self.CONF.config(keystone_version='3')

    def test_assembly_get(self, mock_registry):
        mock_registry.return_value.Assembly.get_by_uuid.return_value = {
            'plan_id': '1234'
        }
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.get('test_id')
        self.assertIsNotNone(res)
        get_by_uuid = mock_registry.Assembly.get_by_uuid
        get_by_uuid.assert_called_once_with(self.ctx, 'test_id')

    def test_assembly_get_all(self, mock_registry):
        mock_registry.AssemblyList.get_all.return_value = {}
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.get_all()
        self.assertIsNotNone(res)
        mock_registry.AssemblyList.get_all.assert_called_once_with(self.ctx)

    def test_update(self, mock_registry):
        data = {'user_id': 'new_user_id',
                'plan_uuid': 'input_plan_uuid'}
        handler = assembly_handler.AssemblyHandler(self.ctx)
        handler.update('test_id', data)
        mock_registry.Assembly.update_and_save.assert_called_once_with(
            self.ctx, 'test_id', data)

    @mock.patch('solum.worker.api.API.build_app')
    @mock.patch('solum.common.clients.OpenStackClients.keystone')
    def test_create(self, mock_kc, mock_pa, mock_registry):
        """Creating an assembly persists it and launches the build workflow."""
        data = {'user_id': 'new_user_id',
                'uuid': 'input_uuid',
                'plan_uuid': 'input_plan_uuid'}
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.return_value = db_obj
        fp = fakes.FakePlan()
        mock_registry.Plan.get_by_id.return_value = fp
        fp.raw_content = {
            'name': 'theplan',
            'artifacts': [{'name': 'nodeus',
                           'artifact_type': 'heroku',
                           'content': {'private': False,
                                       'href': 'https://example.com/ex.git'},
                           'language_pack': 'auto'}]}
        mock_registry.Image.return_value = fakes.FakeImage()
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.create(data)
        db_obj.update.assert_called_once_with(data)
        db_obj.create.assert_called_once_with(self.ctx)
        self.assertEqual(db_obj, res)
        git_info = {
            'source_url': "https://example.com/ex.git",
            'commit_sha': '',
            'repo_token': None,
            'status_url': None,
        }
        mock_pa.assert_called_once_with(
            verb='launch_workflow', workflow=['unittest', 'build', 'deploy'],
            build_id=8, name='nodeus', assembly_id=8,
            git_info=git_info, test_cmd=None, ports=[80],
            base_image_id='auto', source_format='heroku',
            image_format='qcow2', run_cmd=None)

    @mock.patch('solum.common.clients.OpenStackClients.keystone')
    def test_create_with_username_in_ctx(self, mock_kc, mock_registry):
        data = {'plan_uuid': 'input_plan_uuid'}
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.return_value = db_obj
        fp = fakes.FakePlan()
        mock_registry.Plan.get_by_id.return_value = fp
        fp.raw_content = {'name': 'theplan'}
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.create(data)
        self.assertEqual(res.username, self.ctx.user_name)

    @mock.patch('solum.common.clients.OpenStackClients.keystone')
    def test_create_without_username_in_ctx(self, mock_kc, mock_registry):
        data = {'plan_uuid': 'input_plan_uuid'}
        ctx = utils.dummy_context()
        ctx.user_name = ''
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.return_value = db_obj
        fp = fakes.FakePlan()
        mock_registry.Plan.get_by_id.return_value = fp
        fp.raw_content = {'name': 'theplan'}
        handler = assembly_handler.AssemblyHandler(ctx)
        res = handler.create(data)
        self.assertEqual(res.username, '')

    @mock.patch('solum.worker.api.API.build_app')
    @mock.patch('solum.common.clients.OpenStackClients.keystone')
    def test_create_with_private_github_repo(self, mock_kc, mock_pa,
                                             mock_registry):
        """Private repos carry a public key and deploy-keys URI on the plan."""
        data = {'user_id': 'new_user_id',
                'uuid': 'input_uuid',
                'plan_uuid': 'input_plan_uuid'}
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.return_value = db_obj
        fp = fakes.FakePlan()
        mock_registry.Plan.get_by_id.return_value = fp
        fp.raw_content = {
            'name': 'theplan',
            'artifacts': [{'name': 'nodeus',
                           'artifact_type': 'heroku',
                           'content': {'private': True,
                                       'href': 'https://example.com/ex.git',
                                       'public_key': 'ssh-rsa abc'},
                           'language_pack': 'auto'}]}
        fp.deploy_keys_uri = 'secret_ref_uri'
        mock_registry.Image.return_value = fakes.FakeImage()
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.create(data)
        db_obj.update.assert_called_once_with(data)
        db_obj.create.assert_called_once_with(self.ctx)
        self.assertEqual(db_obj, res)
        git_info = {
            'source_url': "https://example.com/ex.git",
            'commit_sha': '',
            'repo_token': None,
            'status_url': None,
        }
        mock_pa.assert_called_once_with(
            verb='launch_workflow', workflow=['unittest', 'build', 'deploy'],
            build_id=8, name='nodeus', assembly_id=8,
            git_info=git_info, ports=[80],
            test_cmd=None, base_image_id='auto', source_format='heroku',
            image_format='qcow2', run_cmd=None)

    @mock.patch('solum.common.clients.OpenStackClients.keystone')
    @mock.patch('solum.deployer.api.API.destroy_assembly')
    @mock.patch('solum.conductor.api.API.update_assembly')
    def test_delete(self, mock_cond, mock_deploy, mock_kc, mock_registry):
        """Deletion flags the assembly DELETING then asks the deployer to destroy it."""
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.get_by_uuid.return_value = db_obj
        handler = assembly_handler.AssemblyHandler(self.ctx)
        handler.delete('test_id')
        mock_registry.Assembly.get_by_uuid.assert_called_once_with(self.ctx,
                                                                   'test_id')
        mock_cond.assert_called_once_with(db_obj.id, {'status': 'DELETING'})
        mock_deploy.assert_called_once_with(assem_id=db_obj.id)

    @mock.patch('httplib2.Http.request')
    def test_verify_artifact_raise_exp(self, http_mock, mock_registry):
        """A non-collaborator (404 from GitHub) is rejected with RequestForbidden."""
        artifact = {"name": "Test",
                    "artifact_type": "heroku",
                    "content": {"href": "https://github.com/some/project"},
                    "language_pack": "auto",
                    "repo_token": "abcd"}
        http_mock.return_value = ({'status': '404'}, '')  # Not a collaborator
        collab_url = 'https://api.github.com/repos/u/r/collaborators/foo'
        self.assertRaises(exception.RequestForbidden,
                          repo_utils.verify_artifact,
                          artifact, collab_url)
|
"""
Copyright 2013 OpERA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.insert(0, path)
import unittest
import random
from feedbackAlgorithm import FeedbackAlgorithm, ExponentialTimeFeedback, KunstTimeFeedback
from device import radioDevice
from abstractAlgorithm import AbstractAlgorithm
class QaAlgorithm(unittest.TestCase):
    """
    Test algorithm module.
    """

    def test_feedback_001(self):
        """
        Test the ExponentialTimeFeedback (base-3) algorithm.
        """
        # Bug fix: the original read ``mi = 1,`` -- the trailing comma made
        # min_time the tuple (1,) instead of the int 1.
        mi = 1
        ma = 256
        base = 3
        obj = ExponentialTimeFeedback(min_time=mi,
                                      max_time=ma,
                                      base=base
                                      )
        # Initial state.
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 1
        # 3 ^ 0 == 1 (wait is 1)
        self.assertEqual(True, obj.feedback())
        # Check that the state reset correctly.
        self.assertEqual(False, obj.feedback())
        # Increase the sensing time: 3^1 = 3.
        obj.increase_time()
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 1
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 2
        obj.wait()  # wait = 3
        self.assertEqual(True, obj.feedback())  # wait goes back to 0
        self.assertEqual(False, obj.feedback())
        obj.decrease_time()  # reset time 3^0 = 1
        obj.wait()  # wait = 1
        self.assertEqual(True, obj.feedback())  # wait goes back to 0

    def test_feedback_002(self):
        """
        Test the KunstTimeFeedback (base-2) algorithm.
        """
        obj = KunstTimeFeedback()
        # Initial state.
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 1
        # 2 ^ 0 == 1
        # wait = 0
        self.assertEqual(True, obj.feedback())
        # Increase the sensing time: 2^1 = 2.
        obj.increase_time()
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 1
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 2
        self.assertEqual(True, obj.feedback())  # wait goes back to 0
        self.assertEqual(False, obj.feedback())
        obj.wait()  # wait = 1
        obj.wait()  # wait = 2
        obj.wait()  # wait = 3
        obj.wait()  # wait = 4
        obj.increase_time()  # 2^2 = 4
        self.assertEqual(True, obj.feedback())  # wait goes back to 0
        self.assertEqual(False, obj.feedback())
        obj.decrease_time()  # Should be 2^1 = 2
        obj.wait()
        obj.wait()
        self.assertEqual(True, obj.feedback())  # wait goes back to 0
        self.assertEqual(False, obj.feedback())
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
'''
Created on 2015年3月12日
@author: wanhao01
'''
import os
class SpiderFileUtils(object):
    '''
    Deal with file related operations for the spider.
    '''

    def __save_page(self, data, url, outputdir):
        '''
        Save page content fetched from `url` under `outputdir`, using a
        sanitized version of the URL as the file name.
        '''
        if not os.path.exists(outputdir):
            os.makedirs(outputdir)
        # NOTE(review): __validate_name is not defined in this class --
        # presumably provided elsewhere; verify before shipping.
        filename = self.__validate_name(url)
        # BUG FIX: use a context manager so the file handle is closed even
        # if writelines() raises (the original leaked the handle on error).
        with open(outputdir + os.sep + filename, 'w') as page_file:
            page_file.writelines(data)
# Library-only module; nothing to do when executed directly.
if __name__ == '__main__':
    pass
|
"""Configment interface
>>> class TestCfg(Configment):
... CONFIGSPEC_SOURCE = '''
... [abc]
... x = integer(default=3)
... '''
>>> cfg = TestCfg()
>>> cfg["abc"]["x"]
3
>>>
"""
import os
import validate
import six
from .configobj_wrap import ConfigObjWrap
from .meta_configment import MetaConfigment
from .configment_validator import ConfigmentValidator
from .pathname import Pathname
from .environment import load_configspec
__author__ = "Simone Campagna"
__all__ = [
'create_configment_class',
'Configment',
'ConfigmentValidateError',
]
class ConfigmentValidateError(validate.ValidateError):
    """Raised when configment validation fails; wraps the filtered result."""

    def __str__(self):
        return "validation failed: %s" % (self.args[0],)
class BaseConfigment(ConfigObjWrap):
    """Base class for validated config objects backed by a configspec.

    ``CONFIGSPEC`` is expected to be filled in by a subclass/metaclass.
    ``default_mode`` controls whether values equal to their defaults are
    written out ("show") or omitted ("hide").
    """
    CONFIGSPEC = None
    DEFAULT_MODE_HIDE = "hide"
    DEFAULT_MODE_SHOW = "show"
    DEFAULT_MODES = [DEFAULT_MODE_HIDE, DEFAULT_MODE_SHOW]
    DEFAULT_MODE = DEFAULT_MODE_HIDE
    def __init__(self, filename=None, default_mode=None):
        # Build the underlying ConfigObj with unrepr'd (Python-literal)
        # values and interpolation disabled.
        super(BaseConfigment, self).__init__(
            infile=None,
            configspec=self.__class__.CONFIGSPEC,
            unrepr=True,
            interpolation=False,
            indent_type=" ",
            stringify=True,
        )
        if default_mode is None:
            default_mode = self.DEFAULT_MODE
        self.default_mode = default_mode
        self.set_filename(filename)
        if self.filename is not None:
            # Existing file: load and validate strictly.
            self.load_file(filename, throw_on_errors=True)
        else:
            # No file yet: populate defaults, tolerating validation errors.
            self.initialize(throw_on_errors=False)
    def set_filename(self, filename=None):
        """Record the filename and remember its directory as the base for
        resolving relative paths (cwd when no filename is set)."""
        super(BaseConfigment, self).set_filename(filename)
        if self.filename is None:
            self._base_dir = os.getcwd()
        else:
            self._base_dir = os.path.dirname(os.path.abspath(filename))
    def do_validation(self, base_dir=None, reset=False, throw_on_errors=False):
        """Validate against the configspec and return the filtered result
        wrapped in a ConfigObjWrap.

        Raises ConfigmentValidateError when ``throw_on_errors`` is set and
        the filtered result is non-empty (i.e. validation failed).
        """
        if base_dir is None:
            base_dir = self._base_dir
        validator = ConfigmentValidator()
        # In "show" mode, defaults are copied into the output.
        copy = self.default_mode == self.DEFAULT_MODE_SHOW
        result = super(BaseConfigment, self).validate(validator, preserve_errors=True, copy=copy)
        result = self.filter_validation_result(result)
        self.set_paths(base_dir, reset=reset)
        if throw_on_errors and result:
            raise ConfigmentValidateError(result)
        c_result = ConfigObjWrap(
            infile=result,
            stringify=True,
            unrepr=True,
            indent_type=' ',
        )
        return c_result
@six.add_metaclass(MetaConfigment)
class Configment(BaseConfigment):
    """Concrete configment class.

    The MetaConfigment metaclass compiles the subclass-provided
    ``CONFIGSPEC_SOURCE`` into the ``CONFIGSPEC`` used by BaseConfigment.
    """

    def __init__(self, filename=None, default_mode=None):
        super(Configment, self).__init__(
            filename=filename,
            default_mode=default_mode,
        )

    def impl_initialize(self, throw_on_errors=False):
        """Populate defaults via validation.

        Returns the validation result, or False if validation raised.
        """
        try:
            return self.do_validation(reset=False, throw_on_errors=throw_on_errors)
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt; Exception is the widest class wanted here.
        except Exception:  # pylint: disable=broad-except
            return False

    def impl_load_file(self, filename, throw_on_errors=False):
        """Load ``filename`` and validate, resolving relative paths against
        the file's directory.  Returns the validation result."""
        default_base_dir = Pathname.get_default_base_dir()
        Pathname.set_default_base_dir(self._base_dir)
        self.set_filename(filename)
        self.reload()
        try:
            result = self.do_validation(base_dir=self._base_dir, reset=True, throw_on_errors=throw_on_errors)
        finally:
            # Always restore the global default base dir.
            Pathname.set_default_base_dir(default_base_dir)
        return result

    def impl_dump_s(self, stream=None, filename=None, throw_on_errors=False):
        """Validate and write the config to ``stream``; relative paths are
        resolved against ``filename``'s directory when one is given."""
        default_base_dir = Pathname.get_default_base_dir()
        try:
            if filename is not None:
                base_dir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))
            else:
                base_dir = self._base_dir
            Pathname.set_default_base_dir(base_dir)
            self.do_validation(base_dir=base_dir, reset=False, throw_on_errors=throw_on_errors)
            self.write(stream)
        finally:
            Pathname.set_default_base_dir(default_base_dir)
def create_configment_class(configspec_filename, class_name=None, dir_list=None):
    """Build a new Configment subclass from a configspec file.

    When ``class_name`` is omitted, the configspec file's basename (without
    extension) is used as the class name.
    """
    if class_name is None:
        basename = os.path.basename(configspec_filename)
        class_name = os.path.splitext(basename)[0]
    configspec_source = load_configspec(configspec_filename, dir_list=dir_list)
    return MetaConfigment(class_name, (Configment, ),
                          {'CONFIGSPEC_SOURCE': configspec_source})
|
"""Example training a memory neural net on the bAbI dataset.
References Keras and is based off of https://keras.io/examples/babi_memnn/.
"""
from __future__ import print_function
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import (Input, Activation, Dense, Permute,
Dropout)
from tensorflow.keras.layers import add, dot, concatenate
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import get_file
from tensorflow.keras.preprocessing.sequence import pad_sequences
from filelock import FileLock
import os
import argparse
import tarfile
import numpy as np
import re
from ray import tune
def tokenize(sent):
    """Return the tokens of a sentence including punctuation.

    >>> tokenize("Bob dropped the apple. Where is the apple?")
    ["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
    """
    # BUG FIX: the original pattern r"(\W+)?" can match the empty string;
    # since Python 3.7, re.split() splits on empty matches, which shatters
    # the sentence into single characters. r"(\W+)" gives the intended
    # word/punctuation tokenization.
    return [x.strip() for x in re.split(r"(\W+)", sent) if x and x.strip()]
def parse_stories(lines, only_supporting=False):
    """Parse stories provided in the bAbi tasks format.

    If only_supporting is true, only the sentences that support the answer
    are kept.
    """
    data = []
    story = []
    for raw in lines:
        text = raw.decode("utf-8").strip()
        nid, text = text.split(" ", 1)
        if int(nid) == 1:
            # A line id of 1 starts a new story.
            story = []
        if "\t" not in text:
            # Ordinary narrative sentence: accumulate it.
            story.append(tokenize(text))
            continue
        # Question line: "question \t answer \t supporting-ids".
        q, a, supporting = text.split("\t")
        q = tokenize(q)
        if only_supporting:
            # Keep only the sentences referenced by the supporting ids.
            substory = [story[int(i) - 1] for i in supporting.split()]
        else:
            # Keep every non-empty sentence seen so far.
            substory = [s for s in story if s]
        data.append((substory, q, a))
        # Placeholder keeps sentence numbering aligned with line ids.
        story.append("")
    return data
def get_stories(f, only_supporting=False, max_length=None):
    """Read a bAbI file and flatten each story into a single token list.

    If max_length is supplied, any stories longer than max_length tokens
    are discarded.
    """
    def flatten(data):
        return sum(data, [])

    parsed = parse_stories(f.readlines(), only_supporting=only_supporting)
    kept = []
    for story, q, answer in parsed:
        flat = flatten(story)
        if max_length and len(flat) >= max_length:
            continue
        kept.append((flat, q, answer))
    return kept
def vectorize_stories(word_idx, story_maxlen, query_maxlen, data):
    """Convert tokenized (story, query, answer) triples into padded index
    arrays suitable for the Keras model."""
    stories_ix = [[word_idx[w] for w in story] for story, _, _ in data]
    queries_ix = [[word_idx[w] for w in query] for _, query, _ in data]
    answers_ix = [word_idx[answer] for _, _, answer in data]
    return (pad_sequences(stories_ix, maxlen=story_maxlen),
            pad_sequences(queries_ix, maxlen=query_maxlen),
            np.array(answers_ix))
def read_data(finish_fast=False):
    """Download (if needed) and parse the bAbI QA1 train/test stories.

    When `finish_fast` is set, each split is truncated to 64 stories.
    """
    # Fetch the archive into the Keras cache, with a manual-download hint.
    try:
        path = get_file(
            "babi-tasks-v1-2.tar.gz",
            origin="https://s3.amazonaws.com/text-datasets/"
            "babi_tasks_1-20_v1-2.tar.gz")
    except Exception:
        print(
            "Error downloading dataset, please download it manually:\n"
            "$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2"  # noqa: E501
            ".tar.gz\n"
            "$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz"  # noqa: E501
        )
        raise
    # Paths of the QA tasks inside the archive, keyed by challenge name.
    challenges = {
        # QA1 with 10,000 samples
        "single_supporting_fact_10k": "tasks_1-20_v1-2/en-10k/qa1_"
        "single-supporting-fact_{}.txt",
        # QA2 with 10,000 samples
        "two_supporting_facts_10k": "tasks_1-20_v1-2/en-10k/qa2_"
        "two-supporting-facts_{}.txt",
    }
    challenge = challenges["single_supporting_fact_10k"]
    with tarfile.open(path) as tar:
        train_stories = get_stories(tar.extractfile(challenge.format("train")))
        test_stories = get_stories(tar.extractfile(challenge.format("test")))
    if finish_fast:
        return train_stories[:64], test_stories[:64]
    return train_stories, test_stories
class MemNNModel(tune.Trainable):
    """Tune Trainable that trains an end-to-end memory network on bAbI QA."""
    def build_model(self):
        """Helper method for creating the model"""
        vocab = set()
        for story, q, answer in self.train_stories + self.test_stories:
            vocab |= set(story + q + [answer])
        vocab = sorted(vocab)
        # Reserve 0 for masking via pad_sequences
        vocab_size = len(vocab) + 1
        story_maxlen = max(
            len(x) for x, _, _ in self.train_stories + self.test_stories)
        query_maxlen = max(
            len(x) for _, x, _ in self.train_stories + self.test_stories)
        # 1-based word index; index 0 is the padding value.
        word_idx = {c: i + 1 for i, c in enumerate(vocab)}
        self.inputs_train, self.queries_train, self.answers_train = (
            vectorize_stories(word_idx, story_maxlen, query_maxlen,
                              self.train_stories))
        self.inputs_test, self.queries_test, self.answers_test = (
            vectorize_stories(word_idx, story_maxlen, query_maxlen,
                              self.test_stories))
        # placeholders
        input_sequence = Input((story_maxlen, ))
        question = Input((query_maxlen, ))
        # encoders
        # embed the input sequence into a sequence of vectors
        input_encoder_m = Sequential()
        input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
        input_encoder_m.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, embedding_dim)
        # embed the input into a sequence of vectors of size query_maxlen
        input_encoder_c = Sequential()
        input_encoder_c.add(
            Embedding(input_dim=vocab_size, output_dim=query_maxlen))
        input_encoder_c.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, query_maxlen)
        # embed the question into a sequence of vectors
        question_encoder = Sequential()
        question_encoder.add(
            Embedding(
                input_dim=vocab_size, output_dim=64,
                input_length=query_maxlen))
        question_encoder.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, query_maxlen, embedding_dim)
        # encode input sequence and questions (which are indices)
        # to sequences of dense vectors
        input_encoded_m = input_encoder_m(input_sequence)
        input_encoded_c = input_encoder_c(input_sequence)
        question_encoded = question_encoder(question)
        # compute a "match" between the first input vector sequence
        # and the question vector sequence
        # shape: `(samples, story_maxlen, query_maxlen)`
        match = dot([input_encoded_m, question_encoded], axes=(2, 2))
        match = Activation("softmax")(match)
        # add the match matrix with the second input vector sequence
        response = add(
            [match, input_encoded_c])  # (samples, story_maxlen, query_maxlen)
        response = Permute(
            (2, 1))(response)  # (samples, query_maxlen, story_maxlen)
        # concatenate the match matrix with the question vector sequence
        answer = concatenate([response, question_encoded])
        # the original paper uses a matrix multiplication.
        # we choose to use a RNN instead.
        answer = LSTM(32)(answer)  # (samples, 32)
        # one regularization layer -- more would probably be needed.
        answer = Dropout(self.config.get("dropout", 0.3))(answer)
        answer = Dense(vocab_size)(answer)  # (samples, vocab_size)
        # we output a probability distribution over the vocabulary
        answer = Activation("softmax")(answer)
        # build the final model
        model = Model([input_sequence, question], answer)
        return model
    def setup(self, config):
        """Load the dataset and build/compile the model for this trial."""
        # The file lock prevents concurrent trials on one machine from
        # racing on the dataset download/extraction.
        with FileLock(os.path.expanduser("~/.tune.lock")):
            self.train_stories, self.test_stories = read_data(
                config["finish_fast"])
        model = self.build_model()
        rmsprop = RMSprop(
            lr=self.config.get("lr", 1e-3), rho=self.config.get("rho", 0.9))
        model.compile(
            optimizer=rmsprop,
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"])
        self.model = model
    def step(self):
        """Run one round of training and report accuracy."""
        # train
        self.model.fit(
            [self.inputs_train, self.queries_train],
            self.answers_train,
            batch_size=self.config.get("batch_size", 32),
            epochs=self.config.get("epochs", 1),
            validation_data=([self.inputs_test, self.queries_test],
                             self.answers_test),
            verbose=0)
        # NOTE(review): accuracy is evaluated on the *training* split here,
        # not the test split -- confirm this is intended.
        _, accuracy = self.model.evaluate(
            [self.inputs_train, self.queries_train],
            self.answers_train,
            verbose=0)
        return {"mean_accuracy": accuracy}
    def save_checkpoint(self, checkpoint_dir):
        """Persist the Keras model under `checkpoint_dir`; return its path."""
        file_path = checkpoint_dir + "/model"
        self.model.save(file_path)
        return file_path
    def load_checkpoint(self, path):
        """Restore the model written by `save_checkpoint`."""
        # See https://stackoverflow.com/a/42763323
        del self.model
        self.model = load_model(path)
# Entry point: run PBT over the memory-network Trainable.
if __name__ == "__main__":
    import ray
    from ray.tune.schedulers import PopulationBasedTraining
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    parser.add_argument(
        "--server-address",
        type=str,
        default=None,
        required=False,
        help="The address of server to connect to if using "
        "Ray Client.")
    args, _ = parser.parse_known_args()
    if args.smoke_test:
        ray.init(num_cpus=2)
    elif args.server_address:
        ray.util.connect(args.server_address)
    # Perturb dropout/lr/rho every 2 training iterations.
    pbt = PopulationBasedTraining(
        perturbation_interval=2,
        hyperparam_mutations={
            "dropout": lambda: np.random.uniform(0, 1),
            "lr": lambda: 10**np.random.randint(-10, 0),
            "rho": lambda: np.random.uniform(0, 1)
        })
    results = tune.run(
        MemNNModel,
        name="pbt_babi_memnn",
        scheduler=pbt,
        metric="mean_accuracy",
        mode="max",
        stop={"training_iteration": 4 if args.smoke_test else 100},
        num_samples=2,
        config={
            "finish_fast": args.smoke_test,
            "batch_size": 32,
            "epochs": 1,
            "dropout": 0.3,
            "lr": 0.01,
            "rho": 0.9
        })
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 Model Implementation
A mapping between variable/parameter names found in the TR-55 document
and variables used in this program are as follows:
* `precip` is referred to as P in the report
* `runoff` is Q
* `evaptrans` maps to ET, the evapotranspiration
* `inf` is the amount of water that infiltrates into the soil (in inches)
* `init_abs` is Ia, the initial abstraction, another form of infiltration
"""
import copy
import numpy as np
from tr55.tablelookup import lookup_cn, lookup_bmp_storage, \
lookup_ki, is_bmp, is_built_type, make_precolumbian, \
get_pollutants, get_bmps, lookup_pitt_runoff, lookup_bmp_drainage_ratio
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.operations import dict_plus
def runoff_pitt(precip, evaptrans, soil_type, land_use):
    """
    The Pitt Small Storm Hydrology method.  The output is a runoff value
    in inches.

    A linear interpolation between tabular runoff ratios gives the exact
    ratio for the given precipitation.

    `precip` is the amount of precipitation in inches.
    """
    table = lookup_pitt_runoff(soil_type, land_use)
    ratio = np.interp(precip, table['precip'], table['Rv'])
    # Runoff can never exceed what precipitation leaves after ET.
    return min(precip * ratio, precip - evaptrans)
def nrcs_cutoff(precip, curve_number):
    """
    Decide whether a precipitation/curve-number pair has zero runoff by
    definition (True) or not (False).
    """
    threshold = -1 * (2 * (curve_number - 100.0) / curve_number)
    return precip <= threshold
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
    """
    The runoff equation from the TR-55 document.  The output is a runoff
    value in inches.

    `precip` is the amount of precipitation in inches.
    """
    curve_number = lookup_cn(soil_type, land_use)
    # Below the cutoff the TR-55 equation yields zero runoff by definition.
    if nrcs_cutoff(precip, curve_number):
        return 0.0
    potential_retention = (1000.0 / curve_number) - 10
    initial_abs = 0.2 * potential_retention
    effective_precip = precip - initial_abs
    runoff = pow(effective_precip, 2) / (effective_precip + potential_retention)
    # Runoff can never exceed what precipitation leaves after ET.
    return min(runoff, precip - evaptrans)
def simulate_cell_day(precip, evaptrans, cell, cell_count):
    """
    Simulate a bunch of cells of the same type during a one-day event.

    `precip` is the amount of precipitation in inches.

    `evaptrans` is evapotranspiration in inches per day - this is the
    ET for the cell after taking the crop/landscape factor into account
    this is NOT the ETmax.

    `cell` is a string which contains a soil type and land use
    separated by a colon.

    `cell_count` is the number of cells to simulate.

    The return value is a dictionary of runoff, evapotranspiration, and
    infiltration as a volume (inches * #cells).
    """
    def clamp(runoff, et, inf, precip):
        """
        This function ensures that runoff + et + inf <= precip.

        NOTE: Infiltration is normally independent of the
        precipitation level, but this function introduces a slight
        dependency (that is, at very low levels of precipitation, this
        function can cause infiltration to be smaller than it
        ordinarily would be.
        """
        total = runoff + et + inf
        if (total > precip):
            scale = precip / total
            runoff *= scale
            et *= scale
            inf *= scale
        return (runoff, et, inf)
    precip = max(0.0, precip)
    soil_type, land_use, bmp = cell.lower().split(':')
    # If there is no precipitation, then there is no runoff or
    # infiltration; however, there is evapotranspiration.  (It is
    # understood that over a period of time, this can lead to the sum
    # of the three values exceeding the total precipitation.)
    # NOTE(review): despite the comment above, 'et-vol' is returned as 0.0
    # here rather than cell_count * evaptrans -- confirm this is intended.
    if precip == 0.0:
        return {
            'runoff-vol': 0.0,
            'et-vol': 0.0,
            'inf-vol': 0.0,
        }
    # If the BMP is cluster_housing or no_till, then make it the
    # land-use.  This is done because those two types of BMPs behave
    # more like land-uses than they do BMPs.
    if bmp and not is_bmp(bmp):
        land_use = bmp or land_use
    # When the land-use is a built-type, use the Pitt Small Storm Hydrology
    # Model until the runoff predicted by the NRCS model is greater than
    # that predicted by the Pitt model (i.e. take the max of the two).
    if is_built_type(land_use):
        pitt_runoff = runoff_pitt(precip, evaptrans, soil_type, land_use)
        nrcs_runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
        runoff = max(pitt_runoff, nrcs_runoff)
    else:
        runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
    # Whatever precipitation is not lost to ET or runoff infiltrates.
    inf = max(0.0, precip - (evaptrans + runoff))
    # The clamp step is currently disabled; see `clamp` above.
    # (runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
    return {
        'runoff-vol': cell_count * runoff,
        'et-vol': cell_count * evaptrans,
        'inf-vol': cell_count * inf,
    }
def create_unmodified_census(census):
    """
    Create a cell census with any modifications stripped out.  The output
    is suitable for use as input to `simulate_water_quality`.
    """
    pristine = copy.deepcopy(census)
    if 'modifications' in pristine:
        del pristine['modifications']
    return pristine
def create_modified_census(census):
    """
    This creates a cell census, with modifications, that is suitable
    for use as input to `simulate_water_quality`.

    For every type of cell that undergoes modification, the
    modifications are indicated with a sub-distribution under that
    cell type.
    """
    mod = copy.deepcopy(census)
    mod.pop('modifications', None)
    # Wrap every original cell type in a self-distribution so that
    # modifications below can subtract cells from it.
    for (cell, subcensus) in mod['distribution'].items():
        n = subcensus['cell_count']
        changes = {
            'distribution': {
                cell: {
                    'distribution': {
                        cell: {'cell_count': n}
                    }
                }
            }
        }
        mod = dict_plus(mod, changes)
    # Apply each modification: move n cells from the original cell type to
    # the changed type.  Blank soil/land fields in 'change' inherit the
    # original cell's values.
    for modification in (census.get('modifications') or []):
        for (orig_cell, subcensus) in modification['distribution'].items():
            n = subcensus['cell_count']
            soil1, land1 = orig_cell.split(':')
            soil2, land2, bmp = modification['change'].split(':')
            changed_cell = '%s:%s:%s' % (soil2 or soil1, land2 or land1, bmp)
            changes = {
                'distribution': {
                    orig_cell: {
                        'distribution': {
                            orig_cell: {'cell_count': -n},
                            changed_cell: {'cell_count': n}
                        }
                    }
                }
            }
            mod = dict_plus(mod, changes)
    return mod
def simulate_water_quality(tree, cell_res, fn,
                           pct=1.0, current_cell=None, precolumbian=False):
    """
    Perform a water quality simulation by doing simulations on each of
    the cell types (leaves), then adding them together by summing the
    values of a node's subtrees and storing them at that node.

    `tree` is the (sub)tree of cell distributions that is currently
    under consideration.

    `pct` is the percentage of calculated water volume to retain.

    `cell_res` is the size of each cell/pixel in meters squared
    (used for turning inches of water into volumes of water).

    `fn` is a function that takes a cell type and a number of cells
    and returns a dictionary containing runoff, et, and inf as
    volumes.

    `current_cell` is the cell type for the present node.
    """
    # Internal node.
    if 'cell_count' in tree and 'distribution' in tree:
        n = tree['cell_count']
        # simulate subtrees
        if n != 0:
            tally = {}
            for cell, subtree in tree['distribution'].items():
                simulate_water_quality(subtree, cell_res, fn,
                                       pct, cell, precolumbian)
                # Sum the subtree's results (minus its own distribution)
                # into this node's running tally.
                subtree_ex_dist = subtree.copy()
                subtree_ex_dist.pop('distribution', None)
                tally = dict_plus(tally, subtree_ex_dist)
            tree.update(tally)  # update this node
        # effectively a leaf
        elif n == 0:
            for pol in get_pollutants():
                tree[pol] = 0.0
    # Leaf node.
    elif 'cell_count' in tree and 'distribution' not in tree:
        # the number of cells covered by this leaf
        n = tree['cell_count']
        # canonicalize the current_cell string
        split = current_cell.split(':')
        if (len(split) == 2):
            split.append('')
        if precolumbian:
            split[1] = make_precolumbian(split[1])
        current_cell = '%s:%s:%s' % tuple(split)
        # run the runoff model on this leaf
        result = fn(current_cell, n)  # runoff, et, inf
        # Retain only `pct` of the runoff; the trapped remainder counts as
        # infiltration instead.
        runoff_adjustment = result['runoff-vol'] - (result['runoff-vol'] * pct)
        result['runoff-vol'] -= runoff_adjustment
        result['inf-vol'] += runoff_adjustment
        tree.update(result)
        # perform water quality calculation
        if n != 0:
            soil_type, land_use, bmp = split
            runoff_per_cell = result['runoff-vol'] / n
            liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
            for pol in get_pollutants():
                tree[pol] = get_pollutant_load(land_use, pol, liters)
def postpass(tree):
    """
    Replace per-node volume totals ('*-vol', inches * #cells) with per-cell
    depths in inches, recursing through the whole distribution tree.
    """
    if 'cell_count' in tree:
        count = tree['cell_count']
        for vol_key, depth_key in (('runoff-vol', 'runoff'),
                                   ('et-vol', 'et'),
                                   ('inf-vol', 'inf')):
            # Zero-cell nodes get a flat 0 (avoids dividing by zero).
            tree[depth_key] = tree[vol_key] / count if count > 0 else 0
            tree.pop(vol_key, None)
    for subtree in tree.get('distribution', {}).values():
        postpass(subtree)
def compute_bmp_effect(census, m2_per_pixel, precip):
    """
    Compute the overall amount of water retained by infiltration/retention
    type BMP's.

    The result is the fraction of runoff remaining after water is trapped
    in infiltration/retention BMP's.
    """
    meters_per_inch = 0.0254
    # 'runoff-vol' in census is in inches * #cells; convert to cubic meters.
    runoff_m3 = census['runoff-vol'] * meters_per_inch * m2_per_pixel
    bmp_dict = census.get('BMPs', {})
    reduction = 0.0
    for bmp in set(get_bmps()) & set(bmp_dict.keys()):
        bmp_area = bmp_dict[bmp]
        # A BMP can trap no more than its storage, and no more than the
        # precipitation its drainage area delivers.
        storage_space = lookup_bmp_storage(bmp) * bmp_area
        max_reduction = (lookup_bmp_drainage_ratio(bmp) * bmp_area *
                         precip * meters_per_inch)
        reduction += min(max_reduction, storage_space)
    if not runoff_m3:
        return 0
    return max(0.0, runoff_m3 - reduction) / runoff_m3
def simulate_modifications(census, fn, cell_res, precip, pc=False):
    """
    Simulate the area of interest both with and without its modifications.

    `census` contains a distribution of cell-types in the area of interest.

    `fn` and `cell_res` are as described in `simulate_water_quality`.

    `precip` is the day's precipitation in inches; `pc` enables
    Pre-Columbian land-use substitution.
    """
    modified = create_modified_census(census)
    # First pass computes raw volumes; the BMP retention effect is derived
    # from them, then a second pass applies the retained-runoff fraction.
    simulate_water_quality(modified, cell_res, fn, precolumbian=pc)
    pct = compute_bmp_effect(modified, cell_res, precip)
    simulate_water_quality(modified, cell_res, fn, pct=pct, precolumbian=pc)
    postpass(modified)
    unmodified = create_unmodified_census(census)
    simulate_water_quality(unmodified, cell_res, fn, precolumbian=pc)
    postpass(unmodified)
    return {
        'unmodified': unmodified,
        'modified': modified
    }
def simulate_day(census, precip, cell_res=10, precolumbian=False):
    """
    Simulate a day, including water quality effects of modifications.

    `census` contains a distribution of cell-types in the area of interest.

    `precip` is the day's precipitation in inches.

    `cell_res` is as described in `simulate_water_quality`.

    `precolumbian` indicates that artificial types should be turned
    into forest.
    """
    et_max = 0.207
    # From the EPA WaterSense data finder for the Philadelphia airport (19153)
    # Converted to daily number in inches per day.
    # http://www3.epa.gov/watersense/new_homes/wb_data_finder.html
    # TODO: include Potential Max ET as a data layer from CGIAR
    # http://csi.cgiar.org/aridity/Global_Aridity_PET_Methodolgy.asp
    if 'modifications' in census:
        verify_census(census)
    def fn(cell, cell_count):
        # Compute et for cell type: scale ETmax by the crop/landscape
        # factor of the BMP (preferred) or the land use.
        split = cell.split(':')
        if (len(split) == 2):
            (land_use, bmp) = split
        else:
            (_, land_use, bmp) = split
        et = et_max * lookup_ki(bmp or land_use)
        # Simulate the cell for one day
        return simulate_cell_day(precip, et, cell, cell_count)
    return simulate_modifications(census, fn, cell_res, precip, precolumbian)
def verify_census(census):
    """
    Raise ValueError if any modification references a soil type/land cover
    pair that is absent from the AoI census.
    """
    aoi_cells = census['distribution']
    for change in census['modifications']:
        for land_cover in change['distribution']:
            if land_cover not in aoi_cells:
                raise ValueError("Invalid modification census")
|
'''
Kurgan AI Web Application Security Analyzer.
http://www.kurgan.com.br/
Author: Glaudson Ocampos - <glaudson@vortexai.com.br>
Created in May, 11th 2016.
'''
import db.db as db
import config as cf
class WebServer(object):
    """Fingerprint of a remote web server derived from HTTP responses.

    `banner` is the raw Server header/banner string; `options` is the raw
    allowed-methods string; the check_* methods fill in the derived fields.
    """

    banner = None
    os = None
    server = None
    framework = None
    version = None
    options = None

    def set_banner(self, val):
        self.banner = val

    def get_banner(self):
        return self.banner

    def set_os(self, val):
        self.os = val

    def get_os(self):
        return self.os

    def set_server(self, val):
        self.server = val

    def get_server(self):
        return self.server

    def set_version(self, val):
        self.version = val

    def get_version(self):
        return self.version

    def set_options(self, val):
        self.options = val

    def get_options(self):
        return self.options

    def check_os(self):
        """Guess the operating system from the banner string."""
        # BUG FIX: was a set literal, so the iteration (and therefore which
        # candidate wins when several match) was nondeterministic.
        os_possibles = ("Debian", "Fedora", "Windows", "SuSE",
                        "marrakesh", "RedHat", "Unix")
        for candidate in os_possibles:
            if candidate in self.banner:
                self.os = candidate
                break

    def check_server(self):
        """Match the banner against server names from the reference DB."""
        mydb = db.DB()
        query = "SELECT DISTINCT name FROM server"
        database = cf.DB_WEBSERVERS
        for row in mydb.getData(query, database):
            for name in row:
                if name in self.banner:
                    self.server = name
                    # BUG FIX: the original ``break`` only exited the inner
                    # loop, so a later row could overwrite the match.
                    return

    def check_version(self):
        """Match the banner against known versions of the detected server."""
        if self.server is None:
            return None
        mydb = db.DB()
        # SECURITY NOTE: query is built by string concatenation.  self.server
        # only ever holds a name read from our own reference DB, but this
        # should still be switched to a parameterized query if the DB API
        # supports it.
        query = "SELECT DISTINCT version FROM server WHERE name='" + self.server + "'"
        database = cf.DB_WEBSERVERS
        for row in mydb.getData(query, database):
            for candidate in row:
                if candidate in self.banner:
                    self.version = candidate
                    # BUG FIX: exit both loops once a version matches.
                    return

    def check_options(self):
        """Return the known HTTP methods present in the options string."""
        # Tuple (not set) keeps the returned order deterministic.
        op_possibles = ('GET', 'POST', 'PUT', 'HEAD', 'OPTIONS',
                        'DELETE', 'TRACE', 'PATCH', 'CONNECT')
        return [method for method in op_possibles if method in self.options]
class Framework(object):
    """Server-side technology guessed from the X-Powered-By header."""

    framework = None
    X_Powered_By = None

    def set_X_Powered_By(self, val):
        self.X_Powered_By = val

    def get_X_Powered_By(self):
        return self.X_Powered_By

    def set_framework(self, val):
        self.framework = val

    def get_framework(self):
        return self.framework

    # TODO: also check the resource extension.
    def check_framework(self):
        """Match a known technology name inside the X-Powered-By value."""
        for candidate in {"PHP", "ASP.NET", "JSP", "Perl", "CGI"}:
            if candidate in self.X_Powered_By:
                self.framework = candidate
                break
class Application(object):
    """Characteristics of the analyzed web application."""

    extension = None
    cookie = None
    has_javascript = None

    def set_extension(self, val):
        self.extension = val

    def get_extension(self):
        return self.extension

    def set_cookie(self, val):
        self.cookie = val

    def get_cookie(self):
        return self.cookie

    def set_has_javascript(self, val):
        self.has_javascript = val

    def get_has_javascript(self):
        return self.has_javascript

    def check_extension(self):
        """Score the resource extension; returns the accumulated weight.

        BUG FIX: the original compared strings with ``is`` (identity, not
        equality) and incremented an undefined local, raising NameError
        whenever the branch was taken.
        """
        weight_html_framework = 0
        if self.extension == 'html':
            weight_html_framework += 10
        # TODO(review): the weight was never stored or propagated upstream;
        # it is returned here so callers can use the score.
        return weight_html_framework
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class MeetingType(object):
    """Lightweight code/description pair describing a meeting category."""

    def __init__(self, code, descr):
        self.code = code
        self.description = descr

    def __str__(self):
        return str(self.description)

    def __unicode__(self):
        return self.description
class MeetingReport(models.Model):
    """Record of a plenary/interim/special session and its report links."""
    Plenary = MeetingType('P', 'Plenary')
    Interim = MeetingType('I', 'Interim')
    Special = MeetingType('S', 'Special')
    # BUG FIX: the unparenthesized tuple after ``in`` is Python-2-only
    # comprehension syntax (a SyntaxError on Python 3); parentheses are
    # valid on both.
    _MEETING_TYPES = [(b.code, b.description) for b in (Plenary, Interim, Special)]
    id = models.AutoField(primary_key=True)
    session = models.DecimalField(unique=True, db_index=True, decimal_places=1, max_digits=5, help_text=_('Session number'))
    start = models.DateField(help_text=_('Session start date'))
    end = models.DateField(help_text=_('Session end date'))
    cancelled = models.BooleanField(default=False, help_text=_(u'Session was cancelled'))
    pending = models.BooleanField(default=True, help_text=_(u'Reports are in-progress and will be provided later'))
    report = models.URLField(null=True, blank=True, help_text=_('URL pointing to meeting report'))
    minutes_doc = models.URLField(null=True, blank=True,
                                  help_text=_('URL pointing to meeting minutes in Word format'))
    minutes_pdf = models.URLField(null=True, blank=True,
                                  help_text=_('URL pointing to meeting minutes in PDF format'))
    venue = models.CharField(max_length=100, help_text=_('Name of meeting venue'))
    location = models.CharField(max_length=100, help_text=_('Location of meeting venue'))
    meeting_type = models.CharField(max_length=2, choices=_MEETING_TYPES, help_text=_('Plenary or Interim'))

    @property
    def session_num(self):
        """Session number as int when whole (97), Decimal when fractional (97.5)."""
        s = int(self.session)
        return s if s == self.session else self.session

    def __unicode__(self):
        try:
            # BUG FIX: formatting int(self.session) dropped the fractional
            # part of half-sessions (97.5 rendered as "097.0"); use float().
            return '%03.1f: %s' % (float(self.session), self.location)
        except (ValueError, TypeError):
            return self.location
|
import mock
from oslo_serialization import jsonutils
from magnum.conductor import k8s_monitor
from magnum.conductor import mesos_monitor
from magnum.conductor import monitors
from magnum.conductor import swarm_monitor
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.db import utils
class MonitorsTestCase(base.TestCase):
test_metrics_spec = {
'metric1': {
'unit': 'metric1_unit',
'func': 'metric1_func',
},
'metric2': {
'unit': 'metric2_unit',
'func': 'metric2_func',
},
}
    def setUp(self):
        """Build a test bay plus one monitor per COE, and stub the swarm
        monitor's metrics_spec property with the fixed test spec."""
        super(MonitorsTestCase, self).setUp()
        bay = utils.get_test_bay(node_addresses=['1.2.3.4'],
                                 api_address='https://5.6.7.8:2376')
        # NOTE(review): self.context is provided by base.TestCase --
        # presumably a request context; verify in magnum.tests.base.
        self.bay = objects.Bay(self.context, **bay)
        self.monitor = swarm_monitor.SwarmMonitor(self.context, self.bay)
        self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.bay)
        self.mesos_monitor = mesos_monitor.MesosMonitor(self.context,
                                                        self.bay)
        # Replace the SwarmMonitor.metrics_spec property so every test sees
        # the deterministic test_metrics_spec defined on this class.
        p = mock.patch('magnum.conductor.swarm_monitor.SwarmMonitor.'
                       'metrics_spec', new_callable=mock.PropertyMock)
        self.mock_metrics_spec = p.start()
        self.mock_metrics_spec.return_value = self.test_metrics_spec
        self.addCleanup(p.stop)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_success(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'swarm'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsInstance(monitor, swarm_monitor.SwarmMonitor)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_k8s_bay(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'kubernetes'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsInstance(monitor, k8s_monitor.K8sMonitor)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_mesos_bay(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'mesos'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsInstance(monitor, mesos_monitor.MesosMonitor)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_unsupported_coe(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'unsupported'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsNone(monitor)
@mock.patch('magnum.common.docker_utils.docker_for_bay')
def test_swarm_monitor_pull_data_success(self, mock_docker_for_bay):
mock_docker = mock.MagicMock()
mock_docker.info.return_value = {'DriverStatus': [[
u' \u2514 Reserved Memory', u'0 B / 1 GiB']]}
mock_docker.containers.return_value = [mock.MagicMock()]
mock_docker.inspect_container.return_value = 'test_container'
mock_docker_for_bay.return_value.__enter__.return_value = mock_docker
self.monitor.pull_data()
self.assertEqual([{'MemTotal': 1073741824.0}],
self.monitor.data['nodes'])
self.assertEqual(['test_container'], self.monitor.data['containers'])
@mock.patch('magnum.common.docker_utils.docker_for_bay')
def test_swarm_monitor_pull_data_raise(self, mock_docker_for_bay):
mock_container = mock.MagicMock()
mock_docker = mock.MagicMock()
mock_docker.info.return_value = {'DriverStatus': [[
u' \u2514 Reserved Memory', u'0 B / 1 GiB']]}
mock_docker.containers.return_value = [mock_container]
mock_docker.inspect_container.side_effect = Exception("inspect error")
mock_docker_for_bay.return_value.__enter__.return_value = mock_docker
self.monitor.pull_data()
self.assertEqual([{'MemTotal': 1073741824.0}],
self.monitor.data['nodes'])
self.assertEqual([mock_container], self.monitor.data['containers'])
def test_swarm_monitor_get_metric_names(self):
names = self.monitor.get_metric_names()
self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))
def test_swarm_monitor_get_metric_unit(self):
unit = self.monitor.get_metric_unit('metric1')
self.assertEqual('metric1_unit', unit)
def test_swarm_monitor_compute_metric_value(self):
mock_func = mock.MagicMock()
mock_func.return_value = 'metric1_value'
self.monitor.metric1_func = mock_func
value = self.monitor.compute_metric_value('metric1')
self.assertEqual('metric1_value', value)
def test_swarm_monitor_compute_memory_util(self):
test_data = {
'nodes': [
{
'Name': 'node',
'MemTotal': 20,
},
],
'containers': [
{
'Name': 'container',
'HostConfig': {
'Memory': 10,
},
},
],
}
self.monitor.data = test_data
mem_util = self.monitor.compute_memory_util()
self.assertEqual(50, mem_util)
test_data = {
'nodes': [],
'containers': [],
}
self.monitor.data = test_data
mem_util = self.monitor.compute_memory_util()
self.assertEqual(0, mem_util)
@mock.patch('magnum.conductor.k8s_api.create_k8s_api')
def test_k8s_monitor_pull_data_success(self, mock_k8s_api):
mock_nodes = mock.MagicMock()
mock_node = mock.MagicMock()
mock_node.status = mock.MagicMock()
mock_node.status.capacity = "{'memory': '2000Ki'}"
mock_nodes.items = [mock_node]
mock_k8s_api.return_value.list_namespaced_node.return_value = (
mock_nodes)
mock_pods = mock.MagicMock()
mock_pod = mock.MagicMock()
mock_pod.spec = mock.MagicMock()
mock_container = mock.MagicMock()
mock_container.resources = mock.MagicMock()
mock_container.resources.limits = "{'memory':'100Mi'}"
mock_pod.spec.containers = [mock_container]
mock_pods.items = [mock_pod]
mock_k8s_api.return_value.list_namespaced_pod.return_value = mock_pods
self.k8s_monitor.pull_data()
self.assertEqual(self.k8s_monitor.data['nodes'],
[{'Memory': 2048000.0}])
self.assertEqual(self.k8s_monitor.data['pods'],
[{'Memory': 104857600.0}])
def test_k8s_monitor_get_metric_names(self):
k8s_metric_spec = 'magnum.conductor.k8s_monitor.K8sMonitor.'\
'metrics_spec'
with mock.patch(k8s_metric_spec,
new_callable=mock.PropertyMock) as mock_k8s_metric:
mock_k8s_metric.return_value = self.test_metrics_spec
names = self.k8s_monitor.get_metric_names()
self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))
def test_k8s_monitor_get_metric_unit(self):
k8s_metric_spec = 'magnum.conductor.k8s_monitor.K8sMonitor.' \
'metrics_spec'
with mock.patch(k8s_metric_spec,
new_callable=mock.PropertyMock) as mock_k8s_metric:
mock_k8s_metric.return_value = self.test_metrics_spec
unit = self.k8s_monitor.get_metric_unit('metric1')
self.assertEqual('metric1_unit', unit)
def test_k8s_monitor_compute_memory_util(self):
test_data = {
'nodes': [
{
'Memory': 20,
},
],
'pods': [
{
'Memory': 10,
},
],
}
self.k8s_monitor.data = test_data
mem_util = self.k8s_monitor.compute_memory_util()
self.assertEqual(50, mem_util)
test_data = {
'nodes': [],
'pods': [],
}
self.k8s_monitor.data = test_data
mem_util = self.k8s_monitor.compute_memory_util()
self.assertEqual(0, mem_util)
@mock.patch('magnum.common.urlfetch.get')
def test_mesos_monitor_pull_data_success(self, mock_url_get):
state_json = {
'slaves': [{
'resources': {
'mem': 100
},
'used_resources': {
'mem': 50
}
}]
}
state_json = jsonutils.dumps(state_json)
mock_url_get.return_value = state_json
self.mesos_monitor.pull_data()
self.assertEqual(self.mesos_monitor.data['mem_total'],
100)
self.assertEqual(self.mesos_monitor.data['mem_used'],
50)
def test_mesos_monitor_get_metric_names(self):
mesos_metric_spec = 'magnum.conductor.mesos_monitor.MesosMonitor.'\
'metrics_spec'
with mock.patch(mesos_metric_spec,
new_callable=mock.PropertyMock) as mock_mesos_metric:
mock_mesos_metric.return_value = self.test_metrics_spec
names = self.mesos_monitor.get_metric_names()
self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))
def test_mesos_monitor_get_metric_unit(self):
mesos_metric_spec = 'magnum.conductor.mesos_monitor.MesosMonitor.' \
'metrics_spec'
with mock.patch(mesos_metric_spec,
new_callable=mock.PropertyMock) as mock_mesos_metric:
mock_mesos_metric.return_value = self.test_metrics_spec
unit = self.mesos_monitor.get_metric_unit('metric1')
self.assertEqual('metric1_unit', unit)
def test_mesos_monitor_compute_memory_util(self):
test_data = {
'mem_total': 100,
'mem_used': 50
}
self.mesos_monitor.data = test_data
mem_util = self.mesos_monitor.compute_memory_util()
self.assertEqual(50, mem_util)
test_data = {
'mem_total': 0,
'pods': 0,
}
self.mesos_monitor.data = test_data
mem_util = self.mesos_monitor.compute_memory_util()
self.assertEqual(0, mem_util)
|
import copy
from deckhand.db.sqlalchemy import api as db_api
from deckhand.tests import test_utils
from deckhand.tests.unit.db import base
class TestRevisionDiffing(base.TestDbBase):
    """DB-level tests for ``db_api.revision_diff``.

    Each test creates documents in one or more buckets (every creation
    producing a new revision), then checks the per-bucket status the diff
    reports between two revisions: 'created', 'modified', 'deleted' or
    'unmodified'.
    """

    def _verify_buckets_status(self, revision_id, comparison_revision_id,
                               expected):
        """Assert the diff equals ``expected`` for both argument orders."""
        # Verify that actual and expected results match, despite the order of
        # `comparison_revision_id` and `revision_id` args.
        revision_ids = [revision_id, comparison_revision_id]
        for rev_ids in (revision_ids, reversed(revision_ids)):
            actual = db_api.revision_diff(*rev_ids)
            self.assertEqual(expected, actual)

    def test_revision_diff_null(self):
        """Diffing the empty revision 0 against itself is an empty diff."""
        self._verify_buckets_status(0, 0, {})

    def test_revision_diff_created(self):
        """A bucket first populated after revision 0 shows as 'created'."""
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        bucket_name = test_utils.rand_name('bucket')
        documents = self.create_documents(bucket_name, payload)
        revision_id = documents[0]['revision_id']
        self._verify_buckets_status(
            0, revision_id, {bucket_name: 'created'})

    def test_revision_diff_multi_bucket_created(self):
        """Each successive revision adds one more 'created' bucket."""
        revision_ids = []
        bucket_names = []
        for _ in range(3):
            payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
            bucket_name = test_utils.rand_name('bucket')
            bucket_names.append(bucket_name)
            documents = self.create_documents(bucket_name, payload)
            revision_id = documents[0]['revision_id']
            revision_ids.append(revision_id)
        # Between revision 1 and 0, 1 bucket is created.
        self._verify_buckets_status(
            0, revision_ids[0], {b: 'created' for b in bucket_names[:1]})
        # Between revision 2 and 0, 2 buckets are created.
        self._verify_buckets_status(
            0, revision_ids[1], {b: 'created' for b in bucket_names[:2]})
        # Between revision 3 and 0, 3 buckets are created.
        self._verify_buckets_status(
            0, revision_ids[2], {b: 'created' for b in bucket_names})

    def test_revision_diff_self(self):
        """A revision diffed against itself reports 'unmodified'."""
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        bucket_name = test_utils.rand_name('bucket')
        documents = self.create_documents(bucket_name, payload)
        revision_id = documents[0]['revision_id']
        self._verify_buckets_status(
            revision_id, revision_id, {bucket_name: 'unmodified'})

    def test_revision_diff_multi_bucket_self(self):
        """Self-diff covers every bucket known at that revision."""
        bucket_names = []
        revision_ids = []
        for _ in range(3):
            payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
            bucket_name = test_utils.rand_name('bucket')
            # Store each bucket that was created.
            bucket_names.append(bucket_name)
            documents = self.create_documents(bucket_name, payload)
            # Store each revision that was created.
            revision_id = documents[0]['revision_id']
            revision_ids.append(revision_id)
        # The last revision should contain history for the previous 2 revisions
        # such that its diff history will show history for 3 buckets. Similarly
        # the 2nd revision will have history for 2 buckets and the 1st revision
        # for 1 bucket.
        # 1st revision has revision history for 1 bucket.
        self._verify_buckets_status(
            revision_ids[0], revision_ids[0], {bucket_names[0]: 'unmodified'})
        # 2nd revision has revision history for 2 buckets.
        self._verify_buckets_status(
            revision_ids[1], revision_ids[1],
            {b: 'unmodified' for b in bucket_names[:2]})
        # 3rd revision has revision history for 3 buckets.
        self._verify_buckets_status(
            revision_ids[2], revision_ids[2],
            {b: 'unmodified' for b in bucket_names})

    def test_revision_diff_modified(self):
        """Changing a document's data marks its bucket 'modified'."""
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        bucket_name = test_utils.rand_name('bucket')
        documents = self.create_documents(bucket_name, payload)
        revision_id = documents[0]['revision_id']

        payload[0]['data'] = {'modified': 'modified'}
        comparison_documents = self.create_documents(bucket_name, payload)
        comparison_revision_id = comparison_documents[0]['revision_id']

        self._verify_buckets_status(
            revision_id, comparison_revision_id, {bucket_name: 'modified'})

    def test_revision_diff_multi_revision_modified(self):
        """Any pair of distinct modifying revisions diffs as 'modified'."""
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        bucket_name = test_utils.rand_name('bucket')
        revision_ids = []

        for _ in range(3):
            payload[0]['data'] = {'modified': test_utils.rand_name('modified')}
            documents = self.create_documents(bucket_name, payload)
            revision_id = documents[0]['revision_id']
            revision_ids.append(revision_id)

        for pair in [(0, 1), (0, 2), (1, 2)]:
            self._verify_buckets_status(
                revision_ids[pair[0]], revision_ids[pair[1]],
                {bucket_name: 'modified'})

    def test_revision_diff_multi_revision_multi_bucket_modified(self):
        """Statuses are tracked independently per bucket across revisions."""
        revision_ids = []

        bucket_name = test_utils.rand_name('bucket')
        alt_bucket_name = test_utils.rand_name('bucket')
        bucket_names = [bucket_name, alt_bucket_name] * 2

        # Create revisions by modifying documents in `bucket_name` and
        # `alt_bucket_name`.
        for bucket_idx in range(4):
            payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
            documents = self.create_documents(
                bucket_names[bucket_idx], payload)
            revision_id = documents[0]['revision_id']
            revision_ids.append(revision_id)

        # Between revision_ids[0] and [1], bucket_name is unmodified and
        # alt_bucket_name is created.
        self._verify_buckets_status(
            revision_ids[0], revision_ids[1],
            {bucket_name: 'unmodified', alt_bucket_name: 'created'})

        # Between revision_ids[0] and [2], bucket_name is modified (by 2) and
        # alt_bucket_name is created (by 1).
        self._verify_buckets_status(
            revision_ids[0], revision_ids[2],
            {bucket_name: 'modified', alt_bucket_name: 'created'})

        # Between revision_ids[0] and [3], bucket_name is modified (by [2]) and
        # alt_bucket_name is created (by [1]) (as well as modified by [3]).
        self._verify_buckets_status(
            revision_ids[0], revision_ids[3],
            {bucket_name: 'modified', alt_bucket_name: 'created'})

        # Between revision_ids[1] and [2], bucket_name is modified but
        # alt_bucket_name remains unmodified.
        self._verify_buckets_status(
            revision_ids[1], revision_ids[2],
            {bucket_name: 'modified', alt_bucket_name: 'unmodified'})

        # Between revision_ids[1] and [3], bucket_name is modified (by [2]) and
        # alt_bucket_name is modified by [3].
        self._verify_buckets_status(
            revision_ids[1], revision_ids[3],
            {bucket_name: 'modified', alt_bucket_name: 'modified'})

        # Between revision_ids[2] and [3], alt_bucket_name is modified but
        # bucket_name remains unmodified.
        self._verify_buckets_status(
            revision_ids[2], revision_ids[3],
            {bucket_name: 'unmodified', alt_bucket_name: 'modified'})

    def test_revision_diff_ignore_bucket_with_unrelated_documents(self):
        """An unrelated bucket does not affect another bucket's status."""
        payload = base.DocumentFixture.get_minimal_fixture()
        alt_payload = base.DocumentFixture.get_minimal_fixture()
        bucket_name = test_utils.rand_name('bucket')
        alt_bucket_name = test_utils.rand_name('bucket')

        # Create a bucket with a single document.
        documents = self.create_documents(bucket_name, payload)
        revision_id = documents[0]['revision_id']

        # Create another bucket with an entirely different document (different
        # schema and metadata.name).
        self.create_documents(alt_bucket_name, alt_payload)

        # Modify the document from the 1st bucket.
        payload['data'] = {'modified': 'modified'}
        documents = self.create_documents(bucket_name, payload)
        comparison_revision_id = documents[0]['revision_id']

        # The `alt_bucket_name` should be created.
        self._verify_buckets_status(
            revision_id, comparison_revision_id,
            {bucket_name: 'modified', alt_bucket_name: 'created'})

    def test_revision_diff_ignore_bucket_with_all_unrelated_documents(self):
        """A bucket of entirely renamed documents still diffs as 'created'."""
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        alt_payload = copy.deepcopy(payload)
        bucket_name = test_utils.rand_name('bucket')
        alt_bucket_name = test_utils.rand_name('bucket')

        # Create a bucket with 3 documents.
        documents = self.create_documents(bucket_name, payload)
        revision_id = documents[0]['revision_id']

        # Modify all 3 documents from first bucket.
        for idx in range(3):
            alt_payload[idx]['name'] = test_utils.rand_name('name')
            alt_payload[idx]['schema'] = test_utils.rand_name('schema')
        self.create_documents(
            alt_bucket_name, alt_payload)

        # Modify the document from the 1st bucket.
        payload[0]['data'] = {'modified': 'modified'}
        documents = self.create_documents(bucket_name, payload)
        comparison_revision_id = documents[0]['revision_id']

        # The alt_bucket_name should be created.
        self._verify_buckets_status(
            revision_id, comparison_revision_id,
            {bucket_name: 'modified', alt_bucket_name: 'created'})

    def test_revision_diff_deleted(self):
        """Emptying a bucket marks it 'deleted' against its prior revision."""
        payload = base.DocumentFixture.get_minimal_fixture()
        bucket_name = test_utils.rand_name('bucket')
        created_documents = self.create_documents(bucket_name, payload)
        revision_id = created_documents[0]['revision_id']

        # Delete the previously created document.
        deleted_documents = self.create_documents(bucket_name, [])
        comparison_revision_id = deleted_documents[0]['revision_id']

        self._verify_buckets_status(
            revision_id, comparison_revision_id, {bucket_name: 'deleted'})

    def test_revision_diff_delete_then_recreate(self):
        """Recreated content: 'created' vs. deletion, 'unmodified' vs. origin."""
        payload = base.DocumentFixture.get_minimal_fixture()
        bucket_name = test_utils.rand_name('bucket')
        created_documents = self.create_documents(bucket_name, payload)
        revision_id_1 = created_documents[0]['revision_id']

        # Delete the previously created document.
        deleted_documents = self.create_documents(bucket_name, [])
        revision_id_2 = deleted_documents[0]['revision_id']

        # Recreate the previously deleted document.
        recreated_documents = self.create_documents(bucket_name, payload)
        revision_id_3 = recreated_documents[0]['revision_id']

        # Verify that the revision for recreated document compared to revision
        # for deleted document is created, ignoring order.
        self._verify_buckets_status(
            revision_id_2, revision_id_3, {bucket_name: 'created'})

        # Verify that the revision for recreated document compared to revision
        # for created document is unmodified, ignoring order.
        self._verify_buckets_status(
            revision_id_1, revision_id_3, {bucket_name: 'unmodified'})

    def test_revision_diff_ignore_mistake_document(self):
        """A bucket created then deleted between two revisions is omitted."""
        payload = base.DocumentFixture.get_minimal_fixture()
        bucket_name = test_utils.rand_name('first_bucket')
        created_documents = self.create_documents(bucket_name, payload)
        revision_id_1 = created_documents[0]['revision_id']

        # Create then delete an "accidental" document create request.
        alt_payload = base.DocumentFixture.get_minimal_fixture()
        alt_bucket_name = test_utils.rand_name('mistake_bucket')
        created_documents = self.create_documents(alt_bucket_name, alt_payload)
        revision_id_2 = created_documents[0]['revision_id']
        deleted_documents = self.create_documents(alt_bucket_name, [])
        revision_id_3 = deleted_documents[0]['revision_id']

        alt_payload_2 = base.DocumentFixture.get_minimal_fixture()
        alt_bucket_name_2 = test_utils.rand_name('second_bucket')
        created_documents = self.create_documents(
            alt_bucket_name_2, alt_payload_2)
        revision_id_4 = created_documents[0]['revision_id']

        self._verify_buckets_status(
            revision_id_1, revision_id_2, {bucket_name: 'unmodified',
                                           alt_bucket_name: 'created'})
        self._verify_buckets_status(
            revision_id_2, revision_id_3, {bucket_name: 'unmodified',
                                           alt_bucket_name: 'deleted'})
        self._verify_buckets_status(
            revision_id_1, revision_id_3, {bucket_name: 'unmodified'})
        # Should not contain information about `alt_bucket_name` as it was a
        # "mistake": created then deleted between the revisions in question.
        self._verify_buckets_status(
            revision_id_1, revision_id_4,
            {bucket_name: 'unmodified', alt_bucket_name_2: 'created'})
|
"""Unit tests to cover CampaignTargetService."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201109
from tests.adspygoogle.adwords import TEST_VERSION_V201109
from tests.adspygoogle.adwords import VERSION_V201109
from tests.adspygoogle.adwords import client
class CampaignTargetServiceTestV201109(unittest.TestCase):
  """Unittest suite for CampaignTargetService using v201109."""

  # Shared across all tests in this class: the service stub and the test
  # campaign are created lazily in setUp and then reused, so the AdWords
  # sandbox is only hit once for fixture creation.
  SERVER = SERVER_V201109
  VERSION = VERSION_V201109
  client.debug = False
  service = None
  campaign_id = '0'

  def setUp(self):
    """Prepare unittest."""
    print self.id()
    if not self.__class__.service:
      self.__class__.service = client.GetCampaignTargetService(
          self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)

    # Create a paused campaign once; '0' is the "not yet created" sentinel.
    if self.__class__.campaign_id == '0':
      campaign_service = client.GetCampaignService(
          self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
      operations = [{
          'operator': 'ADD',
          'operand': {
              'name': 'Campaign #%s' % Utils.GetUniqueName(),
              'status': 'PAUSED',
              'biddingStrategy': {
                  'xsi_type': 'ManualCPC'
              },
              'budget': {
                  'period': 'DAILY',
                  'amount': {
                      'microAmount': '1000000'
                  },
                  'deliveryMethod': 'STANDARD'
              }
          }
      }]
      self.__class__.campaign_id = campaign_service.Mutate(
          operations)[0]['value'][0]['id']

  def testGetAllTargets(self):
    """Test whether we can fetch all existing targets for given campaign."""
    selector = {
        'campaignIds': [self.__class__.campaign_id]
    }
    self.assert_(isinstance(self.__class__.service.Get(selector), tuple))

  def testAddAdScheduleTarget(self):
    """Test whether we can add an ad schedule target to campaign."""
    operations = [{
        'operator': 'SET',
        'operand': {
            'xsi_type': 'AdScheduleTargetList',
            'campaignId': self.__class__.campaign_id,
            'targets': [{
                'xsi_type': 'AdScheduleTarget',
                'dayOfWeek': 'MONDAY',
                'startHour': '8',
                'startMinute': 'ZERO',
                'endHour': '17',
                'endMinute': 'ZERO',
                'bidMultiplier': '1.0',
            }]
        }
    }]
    self.assert_(isinstance(self.__class__.service.Mutate(operations), tuple))
def makeTestSuiteV201109():
  """Build the v201109 test suite.

  Returns:
    TestSuite holding every CampaignTargetService test for v201109.
  """
  v201109_suite = unittest.TestSuite()
  v201109_suite.addTests(
      unittest.makeSuite(CampaignTargetServiceTestV201109))
  return v201109_suite
if __name__ == '__main__':
  # Collect a suite per enabled API version; only invoke the runner when
  # at least one version is configured for testing.
  suites = []
  if TEST_VERSION_V201109:
    suites.append(makeTestSuiteV201109())
  if suites:
    alltests = unittest.TestSuite(suites)
    unittest.main(defaultTest='alltests')
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BadgeByCourse.title_en'
db.add_column('badges_badgebycourse', 'title_en',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_es'
db.add_column('badges_badgebycourse', 'title_es',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_it'
db.add_column('badges_badgebycourse', 'title_it',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_pt'
db.add_column('badges_badgebycourse', 'title_pt',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_fr'
db.add_column('badges_badgebycourse', 'title_fr',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_de'
db.add_column('badges_badgebycourse', 'title_de',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_en'
db.add_column('badges_badgebycourse', 'description_en',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_es'
db.add_column('badges_badgebycourse', 'description_es',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_it'
db.add_column('badges_badgebycourse', 'description_it',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_pt'
db.add_column('badges_badgebycourse', 'description_pt',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_fr'
db.add_column('badges_badgebycourse', 'description_fr',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_de'
db.add_column('badges_badgebycourse', 'description_de',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'BadgeByCourse.title_en'
db.delete_column('badges_badgebycourse', 'title_en')
# Deleting field 'BadgeByCourse.title_es'
db.delete_column('badges_badgebycourse', 'title_es')
# Deleting field 'BadgeByCourse.title_it'
db.delete_column('badges_badgebycourse', 'title_it')
# Deleting field 'BadgeByCourse.title_pt'
db.delete_column('badges_badgebycourse', 'title_pt')
# Deleting field 'BadgeByCourse.title_fr'
db.delete_column('badges_badgebycourse', 'title_fr')
# Deleting field 'BadgeByCourse.title_de'
db.delete_column('badges_badgebycourse', 'title_de')
# Deleting field 'BadgeByCourse.description_en'
db.delete_column('badges_badgebycourse', 'description_en')
# Deleting field 'BadgeByCourse.description_es'
db.delete_column('badges_badgebycourse', 'description_es')
# Deleting field 'BadgeByCourse.description_it'
db.delete_column('badges_badgebycourse', 'description_it')
# Deleting field 'BadgeByCourse.description_pt'
db.delete_column('badges_badgebycourse', 'description_pt')
# Deleting field 'BadgeByCourse.description_fr'
db.delete_column('badges_badgebycourse', 'description_fr')
# Deleting field 'BadgeByCourse.description_de'
db.delete_column('badges_badgebycourse', 'description_de')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254'})
},
'badges.alignment': {
'Meta': {'object_name': 'Alignment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.award': {
'Meta': {'ordering': "['-modified', '-awarded']", 'unique_together': "(('user', 'badge'),)", 'object_name': 'Award'},
'awarded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'awards_set'", 'to': "orm['badges.Badge']"}),
'evidence': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'identity_hashed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'identity_salt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'identity_type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '255', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_awards'", 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'4d5a6e5e-c0cb-11e4-a589-08002759738a'", 'max_length': '255', 'db_index': 'True'})
},
'badges.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'badges.badgebycourse': {
'Meta': {'object_name': 'BadgeByCourse'},
'color': ('django.db.models.fields.TextField', [], {}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'criteria': ('django.db.models.fields.TextField', [], {}),
'criteria_type': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'description_de': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_es': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_pt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title_de': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_es': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_it': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_pt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'badges.identity': {
'Meta': {'object_name': 'Identity'},
'hashed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'identity'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'badges.revocation': {
'Meta': {'object_name': 'Revocation'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revocations'", 'to': "orm['badges.Award']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.course': {
'Meta': {'ordering': "['order']", 'object_name': 'Course'},
'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'certification_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'to': "orm['badges.Badge']"}),
'created_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'courses_created_of'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['courses.Course']"}),
'description': ('tinymce.models.HTMLField', [], {}),
'description_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'ects': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
'estimated_effort': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_de': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_en': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_es': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_it': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_pt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'external_certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '350', 'null': 'True', 'blank': 'True'}),
'group_max_size': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '50'}),
'has_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hashtag': ('django.db.models.fields.CharField', [], {'default': "'Hashtag'", 'max_length': '128'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'is_activity_clonable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['courses.Language']", 'symmetrical': 'False'}),
'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'learning_goals_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'max_mass_emails_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_pt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'official_course': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'requirements_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'static_page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['courses.StaticPage']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'through': "orm['courses.CourseStudent']", 'to': "orm['auth.User']"}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_alt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'courses.coursestudent': {
'Meta': {'object_name': 'CourseStudent'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'old_course_status': ('django.db.models.fields.CharField', [], {'default': "'f'", 'max_length': '1'}),
'pos_lat': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'pos_lon': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rate': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'timestamp': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'})
},
'courses.courseteacher': {
'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courses.language': {
'Meta': {'object_name': 'Language'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'courses.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'body': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_de': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_it': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_pt': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['badges']
|
import numpy as np
import matplotlib.pyplot as plt
def readmesh(fname):
    """Load a 2D triangular mesh from a gmsh ``.msh`` file.

    Parameters
    ----------
    fname : str
        Path to the gmsh mesh file.

    Returns
    -------
    V : array
        (n, 2) vertex coordinates (the z column is dropped).
    E : array
        Triangle connectivity (element ids for element type 2).
    """
    # NOTE(review): this is the standalone ``gmsh.py`` mesh-reader helper,
    # not the official gmsh SDK API -- confirm which module is on the path.
    import gmsh

    msh = gmsh.Mesh()
    msh.read_msh(fname)
    return msh.Verts[:, :2], msh.Elmts[2][1]
def identify_boundary(V):
    """Classify boundary vertices of the channel-with-cylinder mesh.

    Parameters
    ----------
    V : array
        (n, 2) vertex coordinates.

    Returns
    -------
    d : dictionary
        Maps 'inflow', 'outflow', 'wall', 'cylinder' to the (unsorted)
        indices of the vertices on each boundary segment.
    """
    tol = 1e-13
    d = {}
    # Inflow: left edge, x == 0.
    II = np.where(np.abs(V[:, 0]) < tol)
    d["inflow"] = II
    # Outflow: right edge, x == 22.
    II = np.where(np.abs(V[:, 0] - 22.0) < tol)
    d["outflow"] = II
    # Walls: bottom (y == 0) and top (y == 4.1) edges.
    II = np.where(np.abs(V[:, 1]) < tol)[0]
    J = np.where(np.abs(V[:, 1] - 4.1) < tol)[0]
    # BUG FIX: np.vstack((II, J)).ravel() requires both walls to contain the
    # same number of vertices; np.concatenate yields the same flat index
    # array and also works when the counts differ.
    d["wall"] = np.concatenate((II, J))
    # Cylinder: circle of radius 0.5 centered at (2, 2), i.e. 2*r == 1.
    II = np.where(
        np.abs(2 * np.sqrt((V[:, 0] - 2.0) ** 2 + (V[:, 1] - 2.0) ** 2) - 1.0) < tol
    )
    d["cylinder"] = II
    return d
if __name__ == "__main__":
    # Visual sanity check: draw the mesh and highlight each boundary set.
    V, E = readmesh("ns.msh")
    bdry = identify_boundary(V)
    plt.ion()
    plt.triplot(V[:, 0], V[:, 1], E)
    plt.axis("scaled")
    # (boundary name, matplotlib format string), drawn in this order.
    for name, fmt in (("inflow", "ro"), ("outflow", "bo"),
                      ("wall", "gs"), ("cylinder", "m*")):
        idx = bdry[name]
        plt.plot(V[idx, 0], V[idx, 1], fmt, markersize=10)
    plt.show()
|
# Integer ids for the camera app's capture modes.
# NOTE(review): PHOTO2 (100) sits far outside the 0-21 range of the other
# modes and has no entry in get_mode_name() below -- presumably a vendor
# extension; confirm against the camera app source before relying on it.
CAMERA_MODE_PHOTO2 = 100
CAMERA_MODE_PHOTO = 0
CAMERA_MODE_FACE_BEAUTY = 1
CAMERA_MODE_PANORAMA = 2
CAMERA_MODE_SELF_WIDEVIEW = 3
CAMERA_MODE_SCENE_FRAME = 4
CAMERA_MODE_GESTURE_SHOT = 5
CAMERA_MODE_LIVE_PHOTO = 6
CAMERA_MODE_VIDEO = 7
CAMERA_MODE_PROFESSIONAL = 8
CAMERA_MODE_NIGHTSHOT = 9
CAMERA_MODE_PIP = 10
CAMERA_MODE_SPORTS = 11
CAMERA_MODE_VIV = 12
CAMERA_MODE_ZBAR_CODE = 13
CAMERA_MODE_REFOCUS = 14
CAMERA_MODE_CHROMAFLASH = 15
CAMERA_MODE_SUPERPIXEL = 16
CAMERA_MODE_CLEARSIGHT = 17
CAMERA_MODE_VIDEOBEAUTY = 18
CAMERA_MODE_VIDEOTIMELAPSE = 19
CAMERA_MODE_MONOCHROME = 20
CAMERA_MODE_PORTRAIT = 21
# String names reported for each capture mode.  The misspellings in some
# constant names (VEDOBEAUTY, VEDOTIMELAPSE) do not affect the string
# values; the values themselves are the wire/reporting format and must not
# be changed.
VALUE_CAPTURE_MODE_VIDEO = "video"
VALUE_CAPTURE_MODE_AUTO = "normal"
VALUE_CAPTURE_MODE_BEAUTYSHOT = "beautyshot"
VALUE_CAPTURE_MODE_NIGHTSHOT = "nightshot"
VALUE_CAPTURE_MODE_PANORAMA = "panorama"
VALUE_CAPTURE_MODE_WIDESELF = "wideself"
VALUE_CAPTURE_MODE_PROFESSIONAL = "professional"
VALUE_CAPTURE_MODE_SCENE_FRAME = "sceneframe"
VALUE_CAPTURE_MODE_SPORT = "sports"
VALUE_CAPTURE_MODE_PIP = "pip"
VALUE_CAPTURE_MODE_VIV = "viv"
VALUE_CAPTURE_MODE_ZBAR = "zbarcode"
VALUE_CAPTURE_MODE_REFOCUS = "refocus"
VALUE_CAPTURE_MODE_CHROMAFLASH = "chromaflash"
VALUE_CAPTURE_MODE_SUPERPIXEL = "superphoto"
VALUE_CAPTURE_MODE_VEDOBEAUTY = "videobeauty"
VALUE_CAPTURE_MODE_CLEARSIGHT = "clearsight"
VALUE_CAPTURE_MODE_VEDOTIMELAPSE = "videotimelapse"
VALUE_CAPTURE_MODE_MONOCHROME = "monochrome"
VALUE_CAPTURE_MODE_PORTRAIT = "picselfie"
VALUE_CAPTURE_MODE_VIDEOAUTOZOOM = "videoautozoom"
VALUE_CAPTURE_MODE_UNKNOWN = "unknown"
def get_mode_name(mode):
    """Translate an integer CAMERA_MODE_* id into its capture-mode string.

    Returns None for ids that have no mapping (e.g. CAMERA_MODE_PHOTO2,
    and the gesture-shot / live-photo modes map to the 'unknown' string).
    """
    mode_names = {
        CAMERA_MODE_PHOTO: VALUE_CAPTURE_MODE_AUTO,
        CAMERA_MODE_FACE_BEAUTY: VALUE_CAPTURE_MODE_BEAUTYSHOT,
        CAMERA_MODE_PANORAMA: VALUE_CAPTURE_MODE_PANORAMA,
        CAMERA_MODE_SELF_WIDEVIEW: VALUE_CAPTURE_MODE_WIDESELF,
        CAMERA_MODE_SCENE_FRAME: VALUE_CAPTURE_MODE_SCENE_FRAME,
        CAMERA_MODE_GESTURE_SHOT: VALUE_CAPTURE_MODE_UNKNOWN,
        CAMERA_MODE_LIVE_PHOTO: VALUE_CAPTURE_MODE_UNKNOWN,
        CAMERA_MODE_VIDEO: VALUE_CAPTURE_MODE_VIDEO,
        CAMERA_MODE_PROFESSIONAL: VALUE_CAPTURE_MODE_PROFESSIONAL,
        CAMERA_MODE_NIGHTSHOT: VALUE_CAPTURE_MODE_NIGHTSHOT,
        CAMERA_MODE_PIP: VALUE_CAPTURE_MODE_PIP,
        CAMERA_MODE_SPORTS: VALUE_CAPTURE_MODE_SPORT,
        CAMERA_MODE_VIV: VALUE_CAPTURE_MODE_VIV,
        CAMERA_MODE_ZBAR_CODE: VALUE_CAPTURE_MODE_ZBAR,
        CAMERA_MODE_REFOCUS: VALUE_CAPTURE_MODE_REFOCUS,
        CAMERA_MODE_CHROMAFLASH: VALUE_CAPTURE_MODE_CHROMAFLASH,
        CAMERA_MODE_SUPERPIXEL: VALUE_CAPTURE_MODE_SUPERPIXEL,
        CAMERA_MODE_CLEARSIGHT: VALUE_CAPTURE_MODE_CLEARSIGHT,
        CAMERA_MODE_VIDEOBEAUTY: VALUE_CAPTURE_MODE_VEDOBEAUTY,
        CAMERA_MODE_VIDEOTIMELAPSE: VALUE_CAPTURE_MODE_VEDOTIMELAPSE,
        CAMERA_MODE_MONOCHROME: VALUE_CAPTURE_MODE_MONOCHROME,
        CAMERA_MODE_PORTRAIT: VALUE_CAPTURE_MODE_PORTRAIT,
    }
    return mode_names.get(mode)
# Template for the platform-private intent action strings.
action = "android.myos.action.%s"
# Action-name suffixes.  NOTE(review): "NOMARL" and "BEATY" look like typos
# for NORMAL/BEAUTY, but they are part of the shipped action strings and
# must not be corrected here.
ACTION_NOMARL_CAMERA = "NOMARLCAMERA"  # normal
ACTION_NS_CAMERA = "NSCAMERA"  # night
ACTION_BEATY_CAMERA = "BEATYCAMERA"  # beauty
ACTION_SUPERPIXEL_CAMERA = "SUPERPIXELCAMERA"  # super photo
def get_actions():
    """Return the fully-qualified camera intent action strings.

    Order matters to callers and is preserved: night, beauty, normal,
    super-photo.
    """
    suffixes = (ACTION_NS_CAMERA, ACTION_BEATY_CAMERA,
                ACTION_NOMARL_CAMERA, ACTION_SUPERPIXEL_CAMERA)
    return [action % suffix for suffix in suffixes]
|
"""
This module, problem_019.py, solves the nineteenth project euler problem.
"""
from project_euler_problems.problem import Problem
from datetime import date
'''
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
class ImplementedProblem(Problem):
    """This specific problem's implementation."""

    def get_solution(self):
        """Solves problem 019: count how many Sundays fell on the first
        of a month during the twentieth century (1 Jan 1901 - 31 Dec 2000).

        :return: The solution for problem 019.
        """
        # date.weekday() numbers Monday as 0, so Sunday is 6.
        return sum(
            1
            for year in range(1901, 2001)
            for month in range(1, 13)
            if date(year, month, 1).weekday() == 6
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.