repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
freyes/percona-xtradb-cluster-5.5 | python-for-subunit2junitxml/testtools/monkey.py | 64 | 3301 | # Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Helpers for monkey-patching Python code."""
__all__ = [
'MonkeyPatcher',
'patch',
]
class MonkeyPatcher(object):
    """A collection of monkey-patches that are applied and reverted together.

    Use this to temporarily replace attributes on objects, which is
    particularly handy when testing code that is otherwise hard to exercise.
    """

    # Sentinel recording that an attribute was absent before patching, so
    # restore() knows to delete it rather than reassign a saved value.
    _NO_SUCH_ATTRIBUTE = object()

    def __init__(self, *patches):
        """Construct a `MonkeyPatcher`.

        :param patches: The patches to apply, each should be (obj, name,
            new_value). Providing patches here is equivalent to calling
            `add_patch`.
        """
        # Pending patches, stored as (obj, name, value) tuples.
        self._patches_to_apply = []
        # Saved pre-patch values, also as (obj, name, value) tuples.
        self._originals = []
        for patch_spec in patches:
            self.add_patch(*patch_spec)

    def add_patch(self, obj, name, value):
        """Schedule attribute 'name' on 'obj' to be overwritten with 'value'.

        The assignment happens when `patch` is called (or during
        `run_with_patches`); `restore` undoes it.
        """
        self._patches_to_apply.append((obj, name, value))

    def patch(self):
        """Apply every patch that was registered with `add_patch`.

        Reverse this operation using `restore`.
        """
        for target, attr_name, new_value in self._patches_to_apply:
            previous = getattr(target, attr_name, self._NO_SUCH_ATTRIBUTE)
            self._originals.append((target, attr_name, previous))
            setattr(target, attr_name, new_value)

    def restore(self):
        """Return every patched object to its original state.

        Attributes that did not exist before patching are deleted again,
        so the object ends up exactly as it started.
        """
        while self._originals:
            target, attr_name, previous = self._originals.pop()
            if previous is self._NO_SUCH_ATTRIBUTE:
                delattr(target, attr_name)
            else:
                setattr(target, attr_name, previous)

    def run_with_patches(self, f, *args, **kw):
        """Call 'f(*args, **kw)' with all patches applied, restoring after.

        :return: Whatever 'f' returns.
        """
        self.patch()
        try:
            return f(*args, **kw)
        finally:
            self.restore()
def patch(obj, attribute, value):
    """Set 'obj.attribute' to 'value' and return a callable that undoes it.

    If 'attribute' was not set on 'obj' already, then the returned callable
    deletes the attribute instead of restoring a previous value.

    :param obj: An object to monkey-patch.
    :param attribute: The name of the attribute to patch.
    :param value: The value to set 'obj.attribute' to.
    :return: A nullary callable that, when run, will restore 'obj' to its
        original state.
    """
    monkey = MonkeyPatcher()
    monkey.add_patch(obj, attribute, value)
    monkey.patch()
    return monkey.restore
| gpl-2.0 |
jsoref/django | tests/gis_tests/test_geoip2.py | 75 | 6007 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip2 import HAS_GEOIP2
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.test import mock
from django.utils import six
if HAS_GEOIP2:
from django.contrib.gis.geoip2 import GeoIP2, GeoIP2Exception
# Note: Requires both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoLite2-City.mmdb' and
# 'GeoLite2-Country.mmdb').
@skipUnless(HAS_GEOIP2 and getattr(settings, "GEOIP_PATH", None),
            "GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
    """Tests for django.contrib.gis.geoip2 against local MaxMind datasets."""

    # Known US address/hostname used by the lookup tests below.
    addr = '128.249.1.1'
    fqdn = 'tmc.edu'

    def test01_init(self):
        "GeoIP initialization."
        g1 = GeoIP2()  # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP2(path, 0)  # Passing in data path explicitly.
        g3 = GeoIP2.open(path, 0)  # MaxMind Python API syntax.

        # All three constructions should load both databases.
        for g in (g1, g2, g3):
            self.assertTrue(g._country)
            self.assertTrue(g._city)

        # Only passing in the location of one database.
        city = os.path.join(path, 'GeoLite2-City.mmdb')
        cntry = os.path.join(path, 'GeoLite2-Country.mmdb')
        g4 = GeoIP2(city, country='')
        self.assertIsNone(g4._country)
        g5 = GeoIP2(cntry, city='')
        self.assertIsNone(g5._city)

        # Improper parameters.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIP2Exception, GeoIP2, cache=bad)
            if isinstance(bad, six.string_types):
                # A string path that doesn't exist fails with GeoIP2Exception.
                e = GeoIP2Exception
            else:
                e = TypeError
            self.assertRaises(e, GeoIP2, bad, 0)

    def test02_bad_query(self):
        "GeoIP query parameter checking."
        cntry_g = GeoIP2(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIP2Exception, cntry_g.city, 'tmc.edu')
        self.assertRaises(GeoIP2Exception, cntry_g.coords, 'tmc.edu')

        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP2)

    @mock.patch('socket.gethostbyname')
    def test03_country(self, gethostbyname):
        "GeoIP country querying methods."
        # Mock DNS so the fqdn resolves deterministically to the test address.
        gethostbyname.return_value = '128.249.1.1'
        g = GeoIP2(city='<foo>')

        for query in (self.fqdn, self.addr):
            self.assertEqual(
                'US',
                g.country_code(query),
                'Failed for func country_code and query %s' % query
            )
            self.assertEqual(
                'United States',
                g.country_name(query),
                'Failed for func country_name and query %s' % query
            )
            self.assertEqual(
                {'country_code': 'US', 'country_name': 'United States'},
                g.country(query)
            )

    @skipUnless(HAS_GEOS, "Geos is required")
    @mock.patch('socket.gethostbyname')
    def test04_city(self, gethostbyname):
        "GeoIP city querying methods."
        gethostbyname.return_value = '128.249.1.1'
        g = GeoIP2(country='<foo>')

        for query in (self.fqdn, self.addr):
            # Country queries should still work.
            self.assertEqual(
                'US',
                g.country_code(query),
                'Failed for func country_code and query %s' % query
            )
            self.assertEqual(
                'United States',
                g.country_name(query),
                'Failed for func country_name and query %s' % query
            )
            self.assertEqual(
                {'country_code': 'US', 'country_name': 'United States'},
                g.country(query)
            )

            # City information dictionary.
            d = g.city(query)
            self.assertEqual('US', d['country_code'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])

            geom = g.geos(query)
            self.assertIsInstance(geom, GEOSGeometry)
            lon, lat = (-95.4010, 29.7079)
            # lat_lon comes back (lat, lon); swap to compare as (lon, lat).
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)

    @mock.patch('socket.gethostbyname')
    def test05_unicode_response(self, gethostbyname):
        "GeoIP strings should be properly encoded (#16553)."
        gethostbyname.return_value = '194.27.42.76'
        g = GeoIP2()
        d = g.city("nigde.edu.tr")
        self.assertEqual('Niğde', d['city'])
        d = g.country('200.26.205.1')
        # Some databases have only unaccented countries
        self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))

    def test06_ipv6_query(self):
        "GeoIP can lookup IPv6 addresses."
        g = GeoIP2()
        d = g.city('2002:81ed:c9a5::81ed:c9a5')  # IPv6 address for www.nhm.ku.edu
        self.assertEqual('US', d['country_code'])
        self.assertEqual('Lawrence', d['city'])
        self.assertEqual('KS', d['region'])

    def test_repr(self):
        # repr() should include the binary format version and both file paths.
        path = settings.GEOIP_PATH
        g = GeoIP2(path=path)
        meta = g._reader.metadata()
        version = '%s.%s' % (meta.binary_format_major_version, meta.binary_format_minor_version)
        country_path = g._country_file
        city_path = g._city_file
        expected = '<GeoIP2 [v%(version)s] _country_file="%(country)s", _city_file="%(city)s">' % {
            'version': version,
            'country': country_path,
            'city': city_path,
        }
        self.assertEqual(repr(g), expected)
| bsd-3-clause |
maaku/bitcoin | test/functional/p2p-acceptblock.py | 8 | 14265 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
    """Exercise acceptance rules for unrequested blocks (see module docstring).

    Fix applied: the block_h3 acceptance check was duplicated (two identical
    getblock calls with the explanatory comment between them); the stray
    first call has been removed.
    """

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        # node1 requires a minimum chain work, so it ignores low-work blocks.
        self.extra_args = [[], ["-minimumchainwork=0x10"]]

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        # Node2 will be used for non-whitelisted peers to test the interaction
        # with nMinimumChainWork.
        self.setup_nodes()

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        # test_node connects to node0 (not whitelisted)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (whitelisted)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

        network_thread_start()

        # Test logic begins here
        test_node.wait_for_verack()
        min_work_node.wait_for_verack()

        # 1. Have nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))

        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")

        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)

        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))

        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)

        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")

        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))

        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)

        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")

        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as its not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block

        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)

        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)

        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()

        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).

        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        network_thread_join()

        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        network_thread_start()
        test_node.wait_for_verack()

        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()

        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)

        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)

        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))

        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)

        test_node.send_message(msg_block(block_291))

        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()

            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())

            network_thread_start()
            test_node.wait_for_verack()

        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)

        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()

        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 1)
        sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| mit |
authy/authy-python | tests/test_phones.py | 1 | 2463 | import six
import sys
import test_helper
import unittest
if six.PY3:
from unittest.mock import MagicMock
else:
from mock import MagicMock
from authy import AuthyException, AuthyFormatException
from authy.api import AuthyApiClient
from authy.api.resources import Phones
from authy.api.resources import Phone
class PhonesTest(unittest.TestCase):
    """Tests for the Phones resource (phone verification), fully mocked."""

    def setUp(self):
        self.phones = MagicMock(Phones(test_helper.API_URL, test_helper.API_KEY))
        self.response = MagicMock()
        phone = MagicMock(Phone(self.phones, self.response))
        phone.ok = MagicMock(return_value=True)
        phone.errors = MagicMock(return_value={})
        self.phone_number = test_helper.PHONE_NUMBER
        self.country_code = test_helper.COUNTRY_CODE
        # NOTE: the double-underscore attribute names below are name-mangled
        # by the compiler to `_PhonesTest__validate_*` (mangling applies to
        # any `__name` appearing inside a class body).  The test methods
        # access them through the same mangling, so set and get stay
        # consistent even though the spelling suggests Phones' own privates.
        self.phones.__validate_channel = Phones._Phones__validate_channel
        self.phones.__validate_code_length = Phones._Phones__validate_code_length
        self.phones.verification_start = MagicMock(return_value = phone)

    def test_phones(self):
        # Passes because MagicMock(spec) preserves isinstance checks.
        self.assertIsInstance(self.phones, Phones)

    def test_verification_start(self):
        phone = self.phones.verification_start(
            self.phone_number, self.country_code)
        self.assertTrue(phone.ok())
        self.assertEqual(phone.errors(), {})

    def test_verification_start_with_code_length(self):
        # The unbound validator is called with the mock standing in for self.
        cl = self.phones.__validate_code_length(self.phones, code_length=7)
        self.assertEqual(cl, 7)

    def test_verification_start_with_str_code_length(self):
        # Numeric strings are coerced to int.
        cl = self.phones.__validate_code_length(self.phones, code_length='7')
        self.assertEqual(cl, 7)

    def test_verification_start_with_non_numeric_code_length(self):
        self.assertRaises(AuthyFormatException,
                          self.phones.__validate_code_length,
                          self.phones,
                          code_length='foo')

    def test_verification_start_with_too_short_code_length(self):
        self.assertRaises(AuthyFormatException,
                          self.phones.__validate_code_length,
                          self.phones,
                          code_length=2)

    def test_verification_start_with_too_long_code_length(self):
        self.assertRaises(AuthyFormatException,
                          self.phones.__validate_code_length,
                          self.phones,
                          code_length=12)
if __name__ == "__main__":
unittest.main()
| mit |
openstack/trove | trove/common/db/mongodb/models.py | 1 | 5206 | # Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common.db import models
from trove.common.i18n import _
class MongoDBSchema(models.DatastoreSchema):
    """A MongoDB database (schema) and its associated properties."""

    @property
    def _max_schema_name_length(self):
        # MongoDB's documented limit on database name length.
        return 64

    def _is_valid_schema_name(self, value):
        # Characters forbidden in database names, per
        # http://docs.mongodb.org/manual/reference/limits
        forbidden = r'/\. "$'
        return all(char not in forbidden for char in value)
class MongoDBUser(models.DatastoreUser):
    """Represents a MongoDB user and its associated properties.

    MongoDB users are identified using their name and database.
    Trove stores this as <database>.<username>
    """

    root_username = 'admin.root'

    def __init__(self, name=None, password=None, host=None, databases=None,
                 deserializing=False):
        super(MongoDBUser, self).__init__(name=name, password=password,
                                          host=host, databases=databases,
                                          deserializing=deserializing)
        # When deserializing, role setup is deferred; verify_dict() calls
        # _init_roles() afterwards.
        if not deserializing:
            self._init_roles()

    @property
    def username(self):
        return self._username

    @username.setter
    def username(self, value):
        # Setting the username rewrites the combined <database>.<username>.
        self._update_name(username=value)

    @property
    def database(self):
        return MongoDBSchema.deserialize(self._database)

    @database.setter
    def database(self, value):
        # Setting the database rewrites the combined <database>.<username>.
        self._update_name(database=value)

    def _validate_user_name(self, value):
        self._update_name(name=value)

    def _update_name(self, name=None, username=None, database=None):
        """Keep the name, username, and database values in sync."""
        if name:
            # Full name given: derive database and username from it.
            (database, username) = self._parse_name(name)
            if not (database and username):
                missing = 'username' if self.database else 'database'
                raise ValueError(_("MongoDB user's name missing %s.")
                                 % missing)
        else:
            # Only one part given: the other part must already be set.
            if username:
                if not self.database:
                    raise ValueError(_('MongoDB user missing database.'))
                database = self.database.name
            else:  # database
                if not self.username:
                    raise ValueError(_('MongoDB user missing username.'))
                username = self.username
            name = '%s.%s' % (database, username)
        self._name = name
        self._username = username
        self._database = self._build_database_schema(database).serialize()

    @property
    def roles(self):
        return self._roles

    @roles.setter
    def roles(self, value):
        # Accept either a single role dict or a list of them.
        if isinstance(value, list):
            for role in value:
                self._add_role(role)
        else:
            self._add_role(value)

    def revoke_role(self, role):
        if role in self.roles:
            self._roles.remove(role)

    def _init_roles(self):
        # Default: grant readWrite on each of the user's databases.  Skipped
        # if _roles already exists (e.g. restored by deserialization).
        if '_roles' not in self.__dict__:
            self._roles = []
            for db in self._databases:
                self._roles.append({'db': db['_name'], 'role': 'readWrite'})

    def _build_database_schema(self, name):
        return MongoDBSchema(name)

    def deserialize_schema(self, value):
        return MongoDBSchema.deserialize(value)

    @staticmethod
    def _parse_name(value):
        """The name will be <database>.<username>, so split it."""
        # Split only on the first '.'; usernames may themselves contain dots.
        parts = value.split('.', 1)
        if len(parts) != 2:
            raise ValueError(_(
                'MongoDB user name "%s" not in <database>.<username> format.'
            ) % value)
        return parts[0], parts[1]

    @property
    def _max_user_name_length(self):
        return 128

    def _add_role(self, value):
        if not self._is_valid_role(value):
            raise ValueError(_('Role %s is invalid.') % value)
        self._roles.append(value)
        # A readWrite grant implies access to that database as well.
        if value['role'] == 'readWrite':
            self.databases = value['db']

    def _is_valid_role(self, value):
        # A role must be a dict with exactly the keys 'db' and 'role'.
        if not isinstance(value, dict):
            return False
        if not {'db', 'role'} == set(value):
            return False
        return True

    def verify_dict(self):
        super(MongoDBUser, self).verify_dict()
        self._init_roles()

    @property
    def schema_model(self):
        return MongoDBSchema

    def _create_checks(self):
        super(MongoDBUser, self)._create_checks()
        if not self.password:
            raise ValueError(_("MongoDB user to create is missing a "
                               "password."))
| apache-2.0 |
Ensembles/ert | python/python/ert_gui/ide/keywords/definitions/proper_name_format_argument.py | 5 | 1496 | import re
from ert_gui.ide.keywords.definitions import ArgumentDefinition
class ProperNameFormatArgument(ArgumentDefinition):
    """Validates a name-format token: a string containing exactly one %d
    placeholder surrounded only by a restricted character set.

    Fixes applied: the help message now shows the hyphen that the pattern
    actually accepts (it previously displayed an em dash), with balanced
    HTML tags; the regex is a raw string so "\\-" is a proper regex escape
    rather than an invalid string escape.
    """

    NOT_A_VALID_NAME_FORMAT = "The argument must be a valid string containing a %d and only characters of these types:" \
                              "<ul>" \
                              "<li>Letters: <code>A-Z</code> and <code>a-z</code></li>" \
                              "<li>Numbers: <code>0-9</code></li>" \
                              "<li>Underscore: <code>_</code></li>" \
                              "<li>Dash: <code>-</code></li>" \
                              "<li>Period: <code>.</code></li>" \
                              "<li>Brackets: <code><></code></li>" \
                              "</ul>"

    # Literal "%d" must appear once, flanked only by the allowed characters.
    PATTERN = re.compile(r"^[A-Za-z0-9_\-.<>]*(%d)[A-Za-z0-9_\-.<>]*$")

    def __init__(self, **kwargs):
        super(ProperNameFormatArgument, self).__init__(**kwargs)

    def validate(self, token):
        """Validate *token* against PATTERN; on success store it as the
        validation status value, otherwise mark the status failed."""
        validation_status = super(ProperNameFormatArgument, self).validate(token)

        if not validation_status:
            return validation_status
        else:
            match = ProperNameFormatArgument.PATTERN.match(token)

            if match is None:
                validation_status.setFailed()
                validation_status.addToMessage(ProperNameFormatArgument.NOT_A_VALID_NAME_FORMAT)
            else:
                if not validation_status.failed():
                    validation_status.setValue(token)

            return validation_status
| gpl-3.0 |
TRox1972/youtube-dl | youtube_dl/extractor/gfycat.py | 147 | 3555 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
qualities,
ExtractorError,
)
class GfycatIE(InfoExtractor):
    """Extractor for gfycat.com videos (direct and /ifr/ embed URLs)."""
    _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/)?(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',
        'info_dict': {
            'id': 'DeadlyDecisiveGermanpinscher',
            'ext': 'mp4',
            'title': 'Ghost in the Shell',
            'timestamp': 1410656006,
            'upload_date': '20140914',
            'uploader': 'anonymous',
            'duration': 10.4,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'categories': list,
            'age_limit': 0,
        }
    }, {
        'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa',
        'info_dict': {
            'id': 'JauntyTimelyAmazontreeboa',
            'ext': 'mp4',
            'title': 'JauntyTimelyAmazontreeboa',
            'timestamp': 1411720126,
            'upload_date': '20140926',
            'uploader': 'anonymous',
            'duration': 3.52,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'categories': list,
            'age_limit': 0,
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Gfycat's AJAX endpoint returns the item metadata as JSON.
        gfy = self._download_json(
            'http://gfycat.com/cajax/get/%s' % video_id,
            video_id, 'Downloading video info')
        if 'error' in gfy:
            raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)
        gfy = gfy['gfyItem']

        # Fall back to the item's name when no explicit title is set.
        title = gfy.get('title') or gfy['gfyName']
        description = gfy.get('description')
        timestamp = int_or_none(gfy.get('createDate'))
        uploader = gfy.get('userName')
        view_count = int_or_none(gfy.get('views'))
        like_count = int_or_none(gfy.get('likes'))
        dislike_count = int_or_none(gfy.get('dislikes'))
        age_limit = 18 if gfy.get('nsfw') == '1' else 0

        width = int_or_none(gfy.get('width'))
        height = int_or_none(gfy.get('height'))
        fps = int_or_none(gfy.get('frameRate'))
        num_frames = int_or_none(gfy.get('numFrames'))

        # Duration derived from frame count / frame rate when both are known.
        duration = float_or_none(num_frames, fps) if num_frames and fps else None

        categories = gfy.get('tags') or gfy.get('extraLemmas') or []

        # Format preference ordering: gif < webm < mp4.
        FORMATS = ('gif', 'webm', 'mp4')
        quality = qualities(FORMATS)

        formats = []
        for format_id in FORMATS:
            # Metadata keys follow the '<format>Url' / '<format>Size' scheme.
            video_url = gfy.get('%sUrl' % format_id)
            if not video_url:
                continue
            filesize = gfy.get('%sSize' % format_id)
            formats.append({
                'url': video_url,
                'format_id': format_id,
                'width': width,
                'height': height,
                'fps': fps,
                'filesize': filesize,
                'quality': quality(format_id),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'uploader': uploader,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'categories': categories,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense |
chapmanb/cwltool | tests/test_pack.py | 1 | 1183 | import json
import os
import unittest
from functools import partial
import cwltool.pack
import cwltool.workflow
from cwltool.load_tool import fetch_document, validate_document
from cwltool.main import makeRelative
from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
from .util import get_data
class TestPack(unittest.TestCase):
    def test_pack(self):
        """Pack tests/wf/revsort.cwl and compare it to the expected packed JSON."""
        self.maxDiff = None

        document_loader, workflowobj, uri = fetch_document(
            get_data("tests/wf/revsort.cwl"))
        document_loader, avsc_names, processobj, metadata, uri = validate_document(
            document_loader, workflowobj, uri)
        packed = cwltool.pack.pack(document_loader, processobj, uri, metadata)
        with open(get_data("tests/wf/expect_packed.cwl")) as f:
            expect_packed = json.load(f)
        # Rewrite absolute file/directory locations relative to tests/wf so
        # the comparison is independent of where the checkout lives.
        adjustFileObjs(packed, partial(makeRelative,
                                       os.path.abspath(get_data("tests/wf"))))
        adjustDirObjs(packed, partial(makeRelative,
                                      os.path.abspath(get_data("tests/wf"))))
        # $schemas is only checked for presence, then excluded from the
        # equality comparison on both sides.
        self.assertIn("$schemas", packed)
        del packed["$schemas"]
        del expect_packed["$schemas"]
        self.assertEqual(expect_packed, packed)
| apache-2.0 |
blckshrk/Weboob | modules/aum/test.py | 5 | 1520 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.tools.browser import BrowserUnavailable
__all__ = ['AuMTest']
class AuMTest(BackendTest):
    """Smoke tests for the AuM backend."""

    BACKEND = 'aum'

    def test_new_messages(self):
        """Iterating unread messages must not raise (site outages excepted)."""
        try:
            for _message in self.backend.iter_unread_messages():
                pass
        except BrowserUnavailable:
            # The site is frequently unavailable; do not fail the test on that.
            pass

    def test_contacts(self):
        """Fetch the contact list and fill the first entry, when one exists."""
        try:
            contacts = list(self.backend.iter_contacts())
            if not contacts:
                # No contacts on this account, so nothing left to verify.
                return
            first = contacts[0]
            self.backend.fillobj(first, ['photos', 'profile'])
        except BrowserUnavailable:
            # The site is frequently unavailable; do not fail the test on that.
            pass
| agpl-3.0 |
devclone/enigma2-9f38fd6 | lib/python/Plugins/SystemPlugins/DiseqcTester/plugin.py | 63 | 27159 | import random
from Screens.Satconfig import NimSelection
from Screens.Screen import Screen
from Screens.TextBox import TextBox
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import ActionMap, NumberActionMap
from Components.NimManager import nimmanager
from Components.ResourceManager import resourcemanager
from Components.TuneTest import TuneTest
from Components.Sources.List import List
from Components.Sources.Progress import Progress
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, ConfigSelection, ConfigYesNo
from Components.Harddisk import harddiskmanager
# always use:
# setResultType(type)
# setResultParameter(parameter)
# getTextualResult()
# NOTE: Python 2 code (print statements, dict.keys() returning a list).
class ResultParser:
    """Formats the tuning results collected by DiseqcTester into plain text.

    Usage (see the header comment above): call setResultType(type), then
    setResultParameter(parameter), then getTextualResult().

    Relies on attributes provided by the subclass (DiseqcTester):
    ``self.results`` (dict keyed by plane index), and the methods
    ``getTextualIndexRepresentation`` / ``getIndexForTransponder``.
    """
    def __init__(self):
        pass

    # Report scope selectors for setResultType().
    TYPE_BYORBPOS = 0   # all planes of one orbital position
    TYPE_BYINDEX = 1    # a single plane index
    TYPE_ALL = 2        # everything, grouped by orbital position
    def setResultType(self, type):
        # Select which of the TYPE_* report scopes getTextualResult() uses.
        self.type = type

    def setResultParameter(self, parameter):
        # Parameter meaning depends on the selected type: an orbital position
        # for TYPE_BYORBPOS, a plane index for TYPE_BYINDEX (TYPE_ALL takes none).
        if self.type == self.TYPE_BYORBPOS:
            self.orbpos = parameter
        elif self.type == self.TYPE_BYINDEX:
            self.index = parameter

    def getTextualResultForIndex(self, index, logfulltransponders = False):
        """Build the text report for one plane index.

        Transponder records appear to be tuples where [0] is the tested
        transponder, [1] the previously tuned transponder (or None), [2] the
        failure reason, [3] extra data (e.g. tsid/onid for "pids_failed"),
        and [-1] the frontend data — TODO confirm against TuneTest.
        """
        text = ""
        text += "%s:\n" % self.getTextualIndexRepresentation(index)

        failed, successful = self.results[index]["failed"], self.results[index]["successful"]
        countfailed = len(failed)
        countsuccessful = len(successful)
        countall = countfailed + countsuccessful
        # + 0.0001 avoids a ZeroDivisionError when no transponder was tested.
        percentfailed = round(countfailed / float(countall + 0.0001) * 100)
        percentsuccessful = round(countsuccessful / float(countall + 0.0001) * 100)
        text += "Tested %d transponders\n%d (%d %%) transponders succeeded\n%d (%d %%) transponders failed\n" % (countall, countsuccessful, percentsuccessful, countfailed, percentfailed)
        reasons = {}
        completelist = []
        if countfailed > 0:
            # Group the failed transponders by failure reason.
            for transponder in failed:
                completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
                reasons[transponder[2]] = reasons.get(transponder[2], [])
                reasons[transponder[2]].append(transponder)
                if transponder[2] == "pids_failed":
                    print transponder[2], "-", transponder[3]

            text += "The %d unsuccessful tuning attempts failed for the following reasons:\n" % countfailed
            for reason in reasons.keys():
                text += "%s: %d transponders failed\n" % (reason, len(reasons[reason]))

            # Per reason: which plane was tuned before each failing attempt.
            for reason in reasons.keys():
                text += "\n"
                text += "%s previous planes:\n" % reason
                for transponder in reasons[reason]:
                    if transponder[1] is not None:
                        text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
                    else:
                        text += "No transponder tuned"
                    text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
                    text += "\n"
                    if logfulltransponders:
                        text += str(transponder[1])
                        text += " ==> "
                        text += str(transponder[0])
                        text += "\n"
                    if reason == "pids_failed":
                        # Show the mismatch between the ids read from the
                        # satellite and the ones expected from the lamedb file.
                        text += "(tsid, onid): "
                        text += str(transponder[3]['real'])
                        text += "(read from sat) != "
                        text += str(transponder[3]['expected'])
                        text += "(read from file)"
                        text += "\n"
                text += "\n"
        if countsuccessful > 0:
            text += "\n"
            text += "Successfully tuned transponders' previous planes:\n"
            for transponder in successful:
                completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
                if transponder[1] is not None:
                    text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
                else:
                    text += "No transponder tuned"
                text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
                text += "\n"
        text += "------------------------------------------------\n"
        text += "complete transponderlist:\n"
        for entry in completelist:
            text += str(entry["transponder"]) + " -- " + str(entry["fedata"]) + "\n"
        return text

    def getTextualResult(self):
        """Render the report according to the configured TYPE_* scope."""
        text = ""
        if self.type == self.TYPE_BYINDEX:
            text += self.getTextualResultForIndex(self.index)
        elif self.type == self.TYPE_BYORBPOS:
            # index[2] is the orbital position (see getIndexForTransponder).
            for index in self.results.keys():
                if index[2] == self.orbpos:
                    text += self.getTextualResultForIndex(index)
                    text += "\n-----------------------------------------------------\n"
        elif self.type == self.TYPE_ALL:
            # Bucket the plane indices by orbital position, then emit the
            # positions in ascending order with full transponder logging.
            orderedResults = {}
            for index in self.results.keys():
                orbpos = index[2]
                orderedResults[orbpos] = orderedResults.get(orbpos, [])
                orderedResults[orbpos].append(index)
            ordered_orbpos = orderedResults.keys()
            ordered_orbpos.sort()
            for orbpos in ordered_orbpos:
                text += "\n*****************************************\n"
                text += "Orbital position %s:" % str(orbpos)
                text += "\n*****************************************\n"
                for index in orderedResults[orbpos]:
                    text += self.getTextualResultForIndex(index, logfulltransponders = True)
                    text += "\n-----------------------------------------------------\n"
        return text
class DiseqcTester(Screen, TuneTest, ResultParser):
    """Screen that tunes through all transponder planes of one tuner.

    A "plane" is a (band, polarisation, satellite) triple; for each plane the
    TuneTest base class tries transponders until the configured number of
    failures/successes is reached.  Per-plane results are accumulated for
    ResultParser, and counters/progress bars are updated on screen.
    """
    skin = """
        <screen position="90,100" size="520,400" title="DiSEqC Tester" >
        <!--ePixmap pixmap="icons/dish_scan.png" position="5,25" zPosition="0" size="119,110" transparent="1" alphatest="on" />
        <widget source="Frontend" render="Label" position="190,10" zPosition="2" size="260,20" font="Regular;19" halign="center" valign="center" transparent="1">
            <convert type="FrontendInfo">SNRdB</convert>
        </widget>
        <eLabel name="snr" text="SNR:" position="120,35" size="60,22" font="Regular;21" halign="right" transparent="1" />
        <widget source="Frontend" render="Progress" position="190,35" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc">
            <convert type="FrontendInfo">SNR</convert>
        </widget>
        <widget source="Frontend" render="Label" position="460,35" size="60,22" font="Regular;21">
            <convert type="FrontendInfo">SNR</convert>
        </widget>
        <eLabel name="agc" text="AGC:" position="120,60" size="60,22" font="Regular;21" halign="right" transparent="1" />
        <widget source="Frontend" render="Progress" position="190,60" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc">
            <convert type="FrontendInfo">AGC</convert>
        </widget>
        <widget source="Frontend" render="Label" position="460,60" size="60,22" font="Regular;21">
            <convert type="FrontendInfo">AGC</convert>
        </widget>
        <eLabel name="ber" text="BER:" position="120,85" size="60,22" font="Regular;21" halign="right" transparent="1" />
        <widget source="Frontend" render="Progress" position="190,85" size="260,20" pixmap="bar_ber.png" borderWidth="2" borderColor="#cccccc">
            <convert type="FrontendInfo">BER</convert>
        </widget>
        <widget source="Frontend" render="Label" position="460,85" size="60,22" font="Regular;21">
            <convert type="FrontendInfo">BER</convert>
        </widget>
        <eLabel name="lock" text="Lock:" position="120,115" size="60,22" font="Regular;21" halign="right" />
        <widget source="Frontend" render="Pixmap" pixmap="icons/lock_on.png" position="190,110" zPosition="1" size="38,31" alphatest="on">
            <convert type="FrontendInfo">LOCK</convert>
            <convert type="ConditionalShowHide" />
        </widget>
        <widget source="Frontend" render="Pixmap" pixmap="icons/lock_off.png" position="190,110" zPosition="1" size="38,31" alphatest="on">
            <convert type="FrontendInfo">LOCK</convert>
            <convert type="ConditionalShowHide">Invert</convert>
        </widget-->
        <widget source="progress_list" render="Listbox" position="0,0" size="510,150" scrollbarMode="showOnDemand">
            <convert type="TemplatedMultiContent">
                {"template": [
                        MultiContentEntryText(pos = (10, 0), size = (330, 25), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the index name,
                        MultiContentEntryText(pos = (330, 0), size = (150, 25), flags = RT_HALIGN_RIGHT, text = 2) # index 2 is the status,
                    ],
                 "fonts": [gFont("Regular", 20)],
                 "itemHeight": 25
                }
            </convert>
        </widget>
        <eLabel name="overall_progress" text="Overall progress:" position="20,162" size="480,22" font="Regular;21" halign="center" transparent="1" />
        <widget source="overall_progress" render="Progress" position="20,192" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
        <eLabel name="overall_progress" text="Progress:" position="20,222" size="480,22" font="Regular;21" halign="center" transparent="1" />
        <widget source="sub_progress" render="Progress" position="20,252" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
        <eLabel name="" text="Failed:" position="20,282" size="140,22" font="Regular;21" halign="left" transparent="1" />
        <widget source="failed_counter" render="Label" position="160,282" size="100,20" font="Regular;21" />
        <eLabel name="" text="Succeeded:" position="20,312" size="140,22" font="Regular;21" halign="left" transparent="1" />
        <widget source="succeeded_counter" render="Label" position="160,312" size="100,20" font="Regular;21" />
        <eLabel name="" text="With errors:" position="20,342" size="140,22" font="Regular;21" halign="left" transparent="1" />
        <widget source="witherrors_counter" render="Label" position="160,342" size="100,20" font="Regular;21" />
        <eLabel name="" text="Not tested:" position="20,372" size="140,22" font="Regular;21" halign="left" transparent="1" />
        <widget source="untestable_counter" render="Label" position="160,372" size="100,20" font="Regular;21" />
        <widget source="CmdText" render="Label" position="300,282" size="180,200" font="Regular;21" />
    </screen>"""

    # Scan orders selectable in DiseqcTesterTestTypeSelection.
    TEST_TYPE_QUICK = 0     # planes sorted by orbital position
    TEST_TYPE_RANDOM = 1    # planes in random order
    TEST_TYPE_COMPLETE = 2  # every plane-to-plane transition, randomized

    def __init__(self, session, feid, test_type = TEST_TYPE_QUICK, loopsfailed = 3, loopssuccessful = 1, log = False):
        """feid: frontend/tuner id; loopsfailed/loopssuccessful: per-plane
        stop thresholds; log: write the final report to /media/hdd."""
        Screen.__init__(self, session)
        self.feid = feid
        self.test_type = test_type
        self.loopsfailed = loopsfailed
        self.loopssuccessful = loopssuccessful
        self.log = log

        self["actions"] = NumberActionMap(["SetupActions"],
        {
            "ok": self.select,
            "cancel": self.keyCancel,
        }, -2)

        TuneTest.__init__(self, feid, stopOnSuccess = self.loopssuccessful, stopOnError = self.loopsfailed)
        #self["Frontend"] = FrontendStatus(frontend_source = lambda : self.frontend, update_interval = 100)
        self["overall_progress"] = Progress()
        self["sub_progress"] = Progress()
        self["failed_counter"] = StaticText("0")
        self["succeeded_counter"] = StaticText("0")
        self["witherrors_counter"] = StaticText("0")
        self["untestable_counter"] = StaticText("0")
        self.list = []
        self["progress_list"] = List(self.list)
        self["progress_list"].onSelectionChanged.append(self.selectionChanged)
        self["CmdText"] = StaticText(_("Please wait while scanning is in progress..."))

        # Map of plane index -> list of transponders on that plane.
        self.indexlist = {}
        self.readTransponderList()

        self.running = False

        self.results = {}        # consumed by ResultParser
        self.resultsstatus = {}  # status string -> list of plane indices

        # Start scanning once the screen is laid out.
        self.onLayoutFinish.append(self.go)

    def getProgressListComponent(self, index, status):
        # List entry: (plane index, human readable name, status text).
        return index, self.getTextualIndexRepresentation(index), status

    def clearProgressList(self):
        self.list = []
        self["progress_list"].list = self.list

    def addProgressListItem(self, index):
        """Append a "working" entry for index, or reuse an existing one."""
        if index in self.indexlist:
            for entry in self.list:
                if entry[0] == index:
                    self.changeProgressListStatus(index, "working")
                    return
            self.list.append(self.getProgressListComponent(index, _("working")))
            self["progress_list"].list = self.list
            self["progress_list"].setIndex(len(self.list) - 1)

    def changeProgressListStatus(self, index, status):
        """Rebuild the progress list with a new status text for index."""
        self.newlist = []
        count = 0
        indexpos = 0
        for entry in self.list:
            if entry[0] == index:
                self.newlist.append(self.getProgressListComponent(index, status))
                indexpos = count
            else:
                self.newlist.append(entry)
            count += 1
        self.list = self.newlist
        self["progress_list"].list = self.list
        self["progress_list"].setIndex(indexpos)

    def readTransponderList(self):
        """Collect all known transponders of this tuner's satellites,
        normalized (frequency/symbolrate in kHz/kSym) and sorted into planes."""
        for sat in nimmanager.getSatListForNim(self.feid):
            for transponder in nimmanager.getTransponders(sat[0]):
                #print transponder
                mytransponder = (transponder[1] / 1000, transponder[2] / 1000, transponder[3], transponder[4], transponder[7], sat[0], transponder[5], transponder[6], transponder[8], transponder[9], transponder[10], transponder[11])
                self.analyseTransponder(mytransponder)

    def getIndexForTransponder(self, transponder):
        """Derive the plane index (band, polarisation, orbital position)."""
        if transponder[0] < 11700:
            band = 1 # low
        else:
            band = 0 # high
        polarisation = transponder[2]
        sat = transponder[5]
        index = (band, polarisation, sat)
        return index

    # sort the transponder into self.transponderlist
    def analyseTransponder(self, transponder):
        index = self.getIndexForTransponder(transponder)
        if index not in self.indexlist:
            self.indexlist[index] = []
        self.indexlist[index].append(transponder)
        #print "self.indexlist:", self.indexlist

    # returns a string for the user representing a human readable output for index
    def getTextualIndexRepresentation(self, index):
        print "getTextualIndexRepresentation:", index
        text = ""
        text += nimmanager.getSatDescription(index[2]) + ", "
        if index[0] == 1:
            text += "Low Band, "
        else:
            text += "High Band, "
        if index[1] == 0:
            text += "H"
        else:
            text += "V"
        return text

    def fillTransponderList(self):
        """Load the current plane's transponders into TuneTest.

        Returns False when there is no plane left to scan.
        """
        self.clearTransponder()
        print "----------- fillTransponderList"
        print "index:", self.currentlyTestedIndex
        keys = self.indexlist.keys()
        if self.getContinueScanning():
            print "index:", self.getTextualIndexRepresentation(self.currentlyTestedIndex)
            for transponder in self.indexlist[self.currentlyTestedIndex]:
                self.addTransponder(transponder)
            print "transponderList:", self.transponderlist
            return True
        else:
            return False

    def progressCallback(self, progress):
        # progress is (range, value); update the per-plane progress bar.
        if progress[0] != self["sub_progress"].getRange():
            self["sub_progress"].setRange(progress[0])
        self["sub_progress"].setValue(progress[1])

    # logic for scanning order of transponders
    # on go getFirstIndex is called
    def getFirstIndex(self):
        """Initialize the scan order for the selected test type and return
        the first plane index to test."""
        # TODO use other function to scan more randomly
        if self.test_type == self.TEST_TYPE_QUICK:
            self.myindex = 0
            keys = self.indexlist.keys()
            keys.sort(key = lambda a: a[2]) # sort by orbpos
            self["overall_progress"].setRange(len(keys))
            self["overall_progress"].setValue(self.myindex)
            return keys[0]
        elif self.test_type == self.TEST_TYPE_RANDOM:
            self.randomkeys = self.indexlist.keys()
            random.shuffle(self.randomkeys)
            self.myindex = 0
            self["overall_progress"].setRange(len(self.randomkeys))
            self["overall_progress"].setValue(self.myindex)
            return self.randomkeys[0]
        elif self.test_type == self.TEST_TYPE_COMPLETE:
            # Build a walk that visits plane-to-plane transitions: each plane
            # keeps a shuffled list of possible successors, consumed as the
            # walk proceeds; when a plane runs out, jump to one that has
            # successors left, until none remain.
            keys = self.indexlist.keys()
            print "keys:", keys
            successorindex = {}
            for index in keys:
                successorindex[index] = []
                for otherindex in keys:
                    if otherindex != index:
                        successorindex[index].append(otherindex)
                random.shuffle(successorindex[index])
            self.keylist = []
            stop = False
            currindex = None
            while not stop:
                if currindex is None or len(successorindex[currindex]) == 0:
                    oldindex = currindex
                    for index in successorindex.keys():
                        if len(successorindex[index]) > 0:
                            currindex = index
                            self.keylist.append(currindex)
                            break
                    if currindex == oldindex:
                        stop = True
                else:
                    currindex = successorindex[currindex].pop()
                    self.keylist.append(currindex)
            print "self.keylist:", self.keylist
            self.myindex = 0
            self["overall_progress"].setRange(len(self.keylist))
            self["overall_progress"].setValue(self.myindex)
            return self.keylist[0]

    # after each index is finished, getNextIndex is called to get the next index to scan
    def getNextIndex(self):
        """Advance to the next plane of the precomputed order, or None."""
        # TODO use other function to scan more randomly
        if self.test_type == self.TEST_TYPE_QUICK:
            self.myindex += 1
            keys = self.indexlist.keys()
            keys.sort(key = lambda a: a[2]) # sort by orbpos
            self["overall_progress"].setValue(self.myindex)
            if self.myindex < len(keys):
                return keys[self.myindex]
            else:
                return None
        elif self.test_type == self.TEST_TYPE_RANDOM:
            self.myindex += 1
            keys = self.randomkeys
            self["overall_progress"].setValue(self.myindex)
            if self.myindex < len(keys):
                return keys[self.myindex]
            else:
                return None
        elif self.test_type == self.TEST_TYPE_COMPLETE:
            self.myindex += 1
            keys = self.keylist
            self["overall_progress"].setValue(self.myindex)
            if self.myindex < len(keys):
                return keys[self.myindex]
            else:
                return None

    # after each index is finished and the next index is returned by getNextIndex
    # the algorithm checks, if we should continue scanning
    def getContinueScanning(self):
        if self.test_type == self.TEST_TYPE_QUICK or self.test_type == self.TEST_TYPE_RANDOM:
            return self.myindex < len(self.indexlist.keys())
        elif self.test_type == self.TEST_TYPE_COMPLETE:
            return self.myindex < len(self.keylist)

    def addResult(self, index, status, failedTune, successfullyTune):
        """Merge one pass over a plane into self.results[index].

        The status transition table below combines the previous internal
        status with the new pass's status (e.g. "successful" after "failed"
        becomes "with_errors").
        """
        self.results[index] = self.results.get(index, {"failed": [], "successful": [], "status": None, "internalstatus": None})
        self.resultsstatus[status] = self.resultsstatus.get(status, [])

        oldstatus = self.results[index]["internalstatus"]
        if oldstatus is None:
            self.results[index]["status"] = status
        elif oldstatus == "successful":
            if status == "failed":
                self.results[index]["status"] = "with_errors"
            elif status == "successful":
                self.results[index]["status"] = oldstatus
            elif status == "with_errors":
                self.results[index]["status"] = "with_errors"
            elif status == "not_tested":
                self.results[index]["status"] = oldstatus
        elif oldstatus == "failed":
            if status == "failed":
                self.results[index]["status"] = oldstatus
            elif status == "successful":
                self.results[index]["status"] = "with_errors"
            elif status == "with_errors":
                self.results[index]["status"] = "with_errors"
            elif status == "not_tested":
                self.results[index]["status"] = oldstatus
        elif oldstatus == "with_errors":
            if status == "failed":
                self.results[index]["status"] = oldstatus
            elif status == "successful":
                self.results[index]["status"] = oldstatus
            elif status == "with_errors":
                self.results[index]["status"] = oldstatus
            elif status == "not_tested":
                self.results[index]["status"] = oldstatus
        elif oldstatus == "not_tested":
            self.results[index]["status"] = status

        # "working" is transient; only persist final statuses internally.
        if self.results[index]["status"] != "working":
            self.results[index]["internalstatus"] = self.results[index]["status"]
        self.results[index]["failed"] = failedTune + self.results[index]["failed"]
        self.results[index]["successful"] = successfullyTune + self.results[index]["successful"]

        self.resultsstatus[status].append(index)

    def finishedChecking(self):
        """TuneTest callback: one plane is done; record it, update counters,
        then either start the next plane or finish (and optionally log)."""
        print "finishedChecking"
        TuneTest.finishedChecking(self)

        if not self.results.has_key(self.currentlyTestedIndex):
            self.results[self.currentlyTestedIndex] = {"failed": [], "successful": [], "status": None, "internalstatus": None}

        if len(self.failedTune) > 0 and len(self.successfullyTune) > 0:
            self.changeProgressListStatus(self.currentlyTestedIndex, "with errors")
            self["witherrors_counter"].setText(str(int(self["witherrors_counter"].getText()) + 1))
            self.addResult(self.currentlyTestedIndex, "with_errors", self.failedTune, self.successfullyTune)
        elif len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
            self.changeProgressListStatus(self.currentlyTestedIndex, "not tested")
            self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
            self.addResult(self.currentlyTestedIndex, "untestable", self.failedTune, self.successfullyTune)
        elif len(self.failedTune) > 0:
            self.changeProgressListStatus(self.currentlyTestedIndex, "failed")
            #self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune)))
            self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + 1))
            self.addResult(self.currentlyTestedIndex, "failed", self.failedTune, self.successfullyTune)
        else:
            self.changeProgressListStatus(self.currentlyTestedIndex, "successful")
            #self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune)))
            self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + 1))
            self.addResult(self.currentlyTestedIndex, "successful", self.failedTune, self.successfullyTune)

        #self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune)))
        #self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune)))
        #if len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
            #self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
        self.currentlyTestedIndex = self.getNextIndex()
        self.addProgressListItem(self.currentlyTestedIndex)

        if self.fillTransponderList():
            self.run()
        else:
            # All planes done: stop, show the first entry, optionally log.
            self.running = False
            self["progress_list"].setIndex(0)
            print "results:", self.results
            print "resultsstatus:", self.resultsstatus
            if self.log:
                file = open("/media/hdd/diseqctester.log", "w")
                self.setResultType(ResultParser.TYPE_ALL)
                file.write(self.getTextualResult())
                file.close()
                self.session.open(MessageBox, text=_("The results have been written to %s.") % "/media/hdd/diseqctester.log", type = MessageBox.TYPE_INFO)

    def go(self):
        """Start the scan (called once the layout is finished)."""
        # NOTE(review): witherrors_counter is not reset here, unlike the
        # other three counters — presumably an oversight; confirm.
        self.running = True
        self["failed_counter"].setText("0")
        self["succeeded_counter"].setText("0")
        self["untestable_counter"].setText("0")
        self.currentlyTestedIndex = self.getFirstIndex()

        self.clearProgressList()
        self.addProgressListItem(self.currentlyTestedIndex)

        if self.fillTransponderList():
            self.run()

    def keyCancel(self):
        self.close()

    def select(self):
        """OK pressed: after the scan finished, open the per-plane report."""
        print "selectedIndex:", self["progress_list"].getCurrent()[0]
        if not self.running:
            index = self["progress_list"].getCurrent()[0]
            #self.setResultType(ResultParser.TYPE_BYORBPOS)
            #self.setResultParameter(index[2])
            self.setResultType(ResultParser.TYPE_BYINDEX)
            self.setResultParameter(index)
            #self.setResultType(ResultParser.TYPE_ALL)
            self.session.open(TextBox, self.getTextualResult())

    def selectionChanged(self):
        print "selection changed"
        if len(self.list) > 0 and not self.running:
            self["CmdText"].setText(_("Press OK to get further details for %s") % str(self["progress_list"].getCurrent()[1]))
class DiseqcTesterTestTypeSelection(Screen, ConfigListScreen):
    """Settings screen shown before the test: scan order, per-plane stop
    thresholds and optional logging; OK launches DiseqcTester."""

    def __init__(self, session, feid):
        Screen.__init__(self, session)
        # for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
        self.skinName = ["DiseqcTesterTestTypeSelection", "Setup" ]
        self.setup_title = _("DiSEqC-tester settings")
        self.onChangedEntry = [ ]
        self.feid = feid

        self.list = []
        ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
        self["actions"] = ActionMap(["SetupActions", "MenuActions"],
            {
                "cancel": self.keyCancel,
                "save": self.keyOK,
                "ok": self.keyOK,
                "menu": self.closeRecursive,
            }, -2)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self.createSetup()
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setTitle(self.setup_title)

    def createSetup(self):
        """Build the config list entries shown to the user."""
        self.testtype = ConfigSelection(choices={"quick": _("Quick"), "random": _("Random"), "complete": _("Complete")}, default = "quick")
        self.testtypeEntry = getConfigListEntry(_("Test type"), self.testtype)
        self.list.append(self.testtypeEntry)

        self.loopsfailed = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "3")
        self.loopsfailedEntry = getConfigListEntry(_("Stop testing plane after # failed transponders"), self.loopsfailed)
        self.list.append(self.loopsfailedEntry)

        self.loopssuccessful = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "1")
        self.loopssuccessfulEntry = getConfigListEntry(_("Stop testing plane after # successful transponders"), self.loopssuccessful)
        self.list.append(self.loopssuccessfulEntry)

        # Only offer logging when a harddisk is available to write to.
        self.log = ConfigYesNo(False)
        if harddiskmanager.HDDCount() > 0:
            self.logEntry = getConfigListEntry(_("Log results to harddisk"), self.log)
            self.list.append(self.logEntry)

        self["config"].list = self.list
        self["config"].l.setList(self.list)

    def keyOK(self):
        """Map the chosen test type string to the TEST_TYPE_* constant and
        open the tester with the configured parameters."""
        print self.testtype.value
        testtype = DiseqcTester.TEST_TYPE_QUICK
        if self.testtype.value == "quick":
            testtype = DiseqcTester.TEST_TYPE_QUICK
        elif self.testtype.value == "random":
            testtype = DiseqcTester.TEST_TYPE_RANDOM
        elif self.testtype.value == "complete":
            testtype = DiseqcTester.TEST_TYPE_COMPLETE
        self.session.open(DiseqcTester, feid = self.feid, test_type = testtype, loopsfailed = int(self.loopsfailed.value), loopssuccessful = int(self.loopssuccessful.value), log = self.log.value)

    def keyCancel(self):
        self.close()

    # for summary:
    def changedEntry(self):
        for x in self.onChangedEntry:
            x()

    def getCurrentEntry(self):
        return self["config"].getCurrent()[0]

    def getCurrentValue(self):
        return str(self["config"].getCurrent()[1].getText())

    def createSummary(self):
        from Screens.Setup import SetupSummary
        return SetupSummary
class DiseqcTesterNimSelection(NimSelection):
    """Tuner selection screen; selecting a tuner proceeds to the
    DiseqcTesterTestTypeSelection settings screen."""
    skin = """
        <screen position="160,123" size="400,330" title="Select a tuner">
        <widget source="nimlist" render="Listbox" position="0,0" size="380,300" scrollbarMode="showOnDemand">
            <convert type="TemplatedMultiContent">
                {"template": [
                        MultiContentEntryText(pos = (10, 5), size = (360, 30), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the nim name,
                        MultiContentEntryText(pos = (50, 30), size = (320, 30), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is a description of the nim settings,
                    ],
                 "fonts": [gFont("Regular", 20), gFont("Regular", 15)],
                 "itemHeight": 70
                }
            </convert>
        </widget>
    </screen>"""

    def __init__(self, session, args = None):
        NimSelection.__init__(self, session)

    def setResultClass(self):
        # Selecting a tuner opens the settings screen, not the tester directly.
        #self.resultclass = DiseqcTester
        self.resultclass = DiseqcTesterTestTypeSelection

    def showNim(self, nim):
        """Only list DVB-S tuners that are independently configured.

        NOTE(review): the inner "positioner" branch is redundant — every
        "simple" configMode falls through to the same `return True`;
        presumably a leftover from a stricter filter.
        """
        nimConfig = nimmanager.getNimConfig(nim.slot)
        if nim.isCompatible("DVB-S"):
            # Dependent/disabled tuners cannot be tested on their own.
            if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends", "nothing"):
                return False
            if nimConfig.configMode.value == "simple":
                if nimConfig.diseqcMode.value == "positioner":
                    return True
            return True
        return False
def DiseqcTesterMain(session, **kwargs):
    # Plugin entry point: start with the tuner selection screen.
    session.open(DiseqcTesterNimSelection)
def autostart(reason, **kwargs):
    # Register the entry point with the resource manager so other components
    # can launch the tester programmatically.
    resourcemanager.addResource("DiseqcTester", DiseqcTesterMain)
def Plugins(**kwargs):
    # Two descriptors: a visible plugin-menu entry, and an autostart hook
    # that only registers the resource.
    return [ PluginDescriptor(name="DiSEqC Tester", description=_("Test DiSEqC settings"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=DiseqcTesterMain),
            PluginDescriptor(where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart)]
| gpl-2.0 |
jostep/tensorflow | tensorflow/python/estimator/canned/head_test.py | 4 | 97731 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
  """Finalize `scaffold` and run its init/ready ops in the current session.

  Also asserts the scaffold has no custom init hooks and that finalize()
  created a saver. Must be called inside an active (monitored) session.
  """
  scaffold.finalize()
  # No custom initialization hooks are expected on these test scaffolds.
  test_case.assertIsNone(scaffold.init_feed_dict)
  test_case.assertIsNone(scaffold.init_fn)
  # Run the standard init sequence: global init, wait until local init is
  # runnable, local init, then final readiness check.
  scaffold.init_op.run()
  scaffold.ready_for_local_init_op.eval()
  scaffold.local_init_op.run()
  scaffold.ready_op.eval()
  # finalize() is expected to have created a saver.
  test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
                             tol=1e-6):
  """Assert that a serialized summary contains the given simple values.

  Args:
    test_case: test case.
    expected_summaries: Dict of expected tags and simple values.
    summary_str: Serialized `summary_pb2.Summary`.
    tol: Relative and absolute tolerance for the comparison.
  """
  summary = summary_pb2.Summary()
  summary.ParseFromString(summary_str)
  actual_summaries = {}
  for value in summary.value:
    actual_summaries[value.tag] = value.simple_value
  test_case.assertAllClose(
      expected_summaries, actual_summaries, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
def _sigmoid(logits):
return 1 / (1 + np.exp(-logits))
class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
  def setUp(self):
    # Each test builds its own ops; start from a clean default graph.
    ops.reset_default_graph()
def test_n_classes_is_none(self):
with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=None)
def test_n_classes_is_2(self):
with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=2)
  def test_invalid_logits_shape(self):
    """Logits whose last dimension != n_classes are rejected, both when the
    shape is known statically and when it is only known at run time."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)

    # Logits should be shape (batch_size, 3).
    logits_2x2 = np.array(((45., 44.), (41., 42.),))

    # Static shape: the mismatch is caught at graph-construction time.
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': np.array(((30.,), (42.,),))},
          mode=model_fn.ModeKeys.PREDICT,
          logits=logits_2x2)

    # Dynamic shape: with a shapeless placeholder, the mismatch only
    # surfaces as an OpError when the graph is executed.
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((30.,), (42.,),))},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
            logits_placeholder: logits_2x2
        })
  def test_invalid_labels_shape(self):
    """Labels with a wrong trailing dimension are rejected, both statically
    and at run time (via a shapeless placeholder)."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)

    # Logits should be shape (batch_size, 3).
    # Labels should be shape (batch_size, 1).
    labels_2x2 = np.array(((45, 44), (41, 42),), dtype=np.int)
    logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
    features = {'x': np.array(((42.,),))}

    # Static shape: caught at graph-construction time.
    with self.assertRaisesRegexp(ValueError, 'labels shape'):
      head.create_loss(
          features=features,
          mode=model_fn.ModeKeys.EVAL,
          logits=logits_2x3,
          labels=labels_2x2)

    # Dynamic shape: only surfaces as an OpError when the loss is evaluated.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
        unweighted_loss.eval({
            logits_placeholder: logits_2x3,
            labels_placeholder: labels_2x2
        })
  def test_invalid_labels_type(self):
    """Multi-class head rejects float labels; class IDs must be integer dtype."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)
    # Logits should be shape (batch_size, 3).
    # Labels should be shape (batch_size, 1).
    labels_2x1 = np.array(((1.,), (1.,),))
    logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
    features = {'x': np.array(((42.,),))}
    # Static shape.
    with self.assertRaisesRegexp(ValueError, 'Labels dtype'):
      head.create_loss(
          features=features,
          mode=model_fn.ModeKeys.EVAL,
          logits=logits_2x3,
          labels=labels_2x1)
    # Dynamic shape. Dtype is known even for a placeholder, so this still
    # raises at graph-construction time (ValueError, not OpError).
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    with self.assertRaisesRegexp(ValueError, 'Labels dtype'):
      head.create_loss(
          features=features,
          mode=model_fn.ModeKeys.EVAL,
          logits=logits_placeholder,
          labels=labels_placeholder)
  def test_invalid_labels_values(self):
    """Label IDs outside [0, n_classes) fail at run time with an op error."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)
    # 45 >= n_classes and -5 < 0: both out-of-range cases are exercised below.
    labels_2x1_with_large_id = np.array(((45,), (1,),), dtype=np.int)
    labels_2x1_with_negative_id = np.array(((-5,), (1,),), dtype=np.int)
    logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    unweighted_loss, _ = head.create_loss(
        features={'x': np.array(((42.,),))},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesOpError('Label IDs must < n_classes'):
        unweighted_loss.eval({
            labels_placeholder: labels_2x1_with_large_id,
            logits_placeholder: logits_2x3
        })
    with self.test_session():
      with self.assertRaisesOpError('Label IDs must >= 0'):
        unweighted_loss.eval({
            labels_placeholder: labels_2x1_with_negative_id,
            logits_placeholder: logits_2x3
        })
  def test_invalid_labels_sparse_tensor(self):
    """Multi-class head rejects SparseTensor labels with an explicit error."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)
    labels_2x1 = sparse_tensor.SparseTensor(
        values=['english', 'italian'],
        indices=[[0, 0], [1, 0]],
        dense_shape=[2, 1])
    logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))
    with self.assertRaisesRegexp(
        ValueError, 'SparseTensor labels are not supported.'):
      head.create_loss(
          features={'x': np.array(((42.,),))},
          mode=model_fn.ModeKeys.EVAL,
          logits=logits_2x3,
          labels=labels_2x1)
  def test_incompatible_labels_shape(self):
    """Mismatched batch sizes between labels and logits fail, statically and dynamically."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)
    # Logits should be shape (batch_size, 3).
    # Labels should be shape (batch_size, 1).
    # Here batch sizes are different.
    values_3x1 = np.array(((1,), (1,), (1,),))
    values_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
    features = {'x': values_2x3}
    # Static shape.
    with self.assertRaisesRegexp(ValueError, 'Dimensions must be equal'):
      head.create_loss(
          features=features,
          mode=model_fn.ModeKeys.EVAL,
          logits=values_2x3,
          labels=values_3x1)
    # Dynamic shape. With placeholders the mismatch only surfaces at run time.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(
          errors.OpError,
          'logits and labels must have the same first dimension'):
        unweighted_loss.eval({
            labels_placeholder: values_3x1,
            logits_placeholder: values_2x3
        })
  def test_predict(self):
    """PREDICT mode emits logits, softmax probabilities, class ids/strings and export outputs."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    self.assertEqual(n_classes, head.logits_dimension)
    logits = [[1., 0., 0.], [0., 0., 1.]]
    # softmax([1, 0, 0]) == [0.576117, 0.2119416, 0.2119416] (and its mirror).
    expected_probabilities = [[0.576117, 0.2119416, 0.2119416],
                              [0.2119416, 0.2119416, 0.576117]]
    expected_class_ids = [[0], [2]]
    # Without a label vocabulary, class strings are the stringified ids.
    expected_classes = [[b'0'], [b'2']]
    expected_export_classes = [[b'0', b'1', b'2']] * 2
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    self.assertItemsEqual(
        ('', _DEFAULT_SERVING_KEY), spec.export_outputs.keys())
    # Assert predictions and export_outputs.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      predictions = sess.run(spec.predictions)
      self.assertAllClose(logits,
                          predictions[prediction_keys.PredictionKeys.LOGITS])
      self.assertAllClose(
          expected_probabilities,
          predictions[prediction_keys.PredictionKeys.PROBABILITIES])
      self.assertAllClose(expected_class_ids,
                          predictions[prediction_keys.PredictionKeys.CLASS_IDS])
      self.assertAllEqual(expected_classes,
                          predictions[prediction_keys.PredictionKeys.CLASSES])
      self.assertAllClose(
          expected_probabilities,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
      self.assertAllEqual(
          expected_export_classes,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
  def test_predict_with_vocabulary_list(self):
    """With a label vocabulary, predicted class strings come from the vocabulary."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
    logits = [[1., 0., 0.], [0., 0., 1.]]
    # argmax ids 0 and 2 map to vocabulary entries 'aang' and 'zuko'.
    expected_classes = [[b'aang'], [b'zuko']]
    expected_export_classes = [[b'aang', b'iroh', b'zuko']] * 2
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertAllEqual(
          expected_classes,
          sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))
      self.assertAllEqual(
          expected_export_classes,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
  def test_weight_should_not_impact_prediction(self):
    """Example weights affect loss only; PREDICT outputs must be unchanged."""
    n_classes = 3
    logits = [[1., 0., 0.], [0., 0., 1.]]
    expected_probabilities = [[0.576117, 0.2119416, 0.2119416],
                              [0.2119416, 0.2119416, 0.576117]]
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, weight_column='label_weights')
    weights_2x1 = [[1.], [2.]]
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,),), dtype=np.int32),
            'label_weights': weights_2x1,
        },
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      predictions = sess.run(spec.predictions)
      # Same expected values as the unweighted predict test.
      self.assertAllClose(logits,
                          predictions[prediction_keys.PredictionKeys.LOGITS])
      self.assertAllClose(
          expected_probabilities,
          predictions[prediction_keys.PredictionKeys.PROBABILITIES])
  def test_eval_create_loss(self):
    """create_loss in EVAL mode returns per-example (unreduced) cross-entropy."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
    labels = np.array(((1,), (1,)), dtype=np.int64)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = cross_entropy(labels, logits) = [10, 0].
    expected_unreduced_loss = np.array(((10.0,), (0,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_eval(self):
    """EVAL spec has loss and metrics (loss mean, accuracy) but no train_op/exports."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
    labels = np.array(((1,), (1,)), dtype=np.int64)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
    expected_loss = 10.
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    keys = metric_keys.MetricKeys
    expected_metrics = {
        keys.LOSS_MEAN: expected_loss / 2,
        keys.ACCURACY: 0.5,  # 1 of 2 labels is correct.
    }
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # eval_metric_ops values are (value_op, update_op) pairs.
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval()
                             for k in value_ops},
          rtol=tol,
          atol=tol)
  def test_eval_metric_ops_with_head_name(self):
    """With head_name set, metric keys are suffixed with '/<head_name>'."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, head_name='some_multiclass_head')
    logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
    labels = np.array(((1,), (1,)), dtype=np.int64)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    expected_metric_keys = [
        '{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
        '{}/some_multiclass_head'.format(metric_keys.MetricKeys.ACCURACY)
    ]
    self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
  def test_eval_with_label_vocabulary_create_loss(self):
    """String labels are mapped through the vocabulary before the loss is computed."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
    logits = [[10., 0, 0], [0, 10, 0]]
    # b'iroh' maps to class id 1.
    labels = [[b'iroh'], [b'iroh']]
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = cross_entropy(labels, logits) = [10, 0].
    expected_unreduced_loss = np.array(((10.0,), (0,),))
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_eval_with_label_vocabulary(self):
    """EVAL loss and metrics are correct when labels are vocabulary strings."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
    logits = [[10., 0, 0], [0, 10, 0]]
    labels = [[b'iroh'], [b'iroh']]
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
    expected_loss = 10.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    keys = metric_keys.MetricKeys
    expected_metrics = {
        keys.LOSS_MEAN: expected_loss / 2,
        keys.ACCURACY: 0.5,  # 1 of 2 labels is correct.
    }
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      # eval_metric_ops values are (value_op, update_op) pairs.
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval() for k in value_ops},
          rtol=tol, atol=tol)
  def test_weighted_multi_example_eval(self):
    """EVAL loss and metrics apply per-example weights from the weight column."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
    labels = np.array(((1,), (2,), (2,)), dtype=np.int64)
    weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)
    # loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
    #      = sum([10, 10, 0] * [1, 2, 3]) = 30
    expected_loss = 30.
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,),), dtype=np.int32),
            'label_weights': weights_3x1,
        },
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    keys = metric_keys.MetricKeys
    expected_metrics = {
        keys.LOSS_MEAN: expected_loss / np.sum(weights_3x1),
        # Only the third example is classified correctly; weighted accuracy
        # is its weight over the total: 1 * 3.0 / sum weights = 0.5
        keys.ACCURACY: 0.5,
    }
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert loss, and metrics.
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval() for k in value_ops},
          rtol=tol, atol=tol)
  def test_train_create_loss(self):
    """create_loss in TRAIN mode returns per-example (unreduced) cross-entropy."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
    labels = np.array(((1,), (1,)), dtype=np.int64)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = cross_entropy(labels, logits) = [10, 0].
    expected_unreduced_loss = np.array(((10.0,), (0,),))
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_train(self):
    """TRAIN spec wires loss into train_op_fn and writes loss summaries."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
    logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
    labels = np.array(((1,), (1,)), dtype=np.int64)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    expected_train_result = 'my_train_op'
    def _train_op_fn(loss):
      # Stamp the loss value into the "train op" so the test can verify the
      # exact loss tensor was passed through.
      return string_ops.string_join(
          [constant_op.constant(expected_train_result),
           string_ops.as_string(loss, precision=2)])
    # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
    expected_loss = 10.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    self.assertIsNotNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                  spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
      self.assertEqual(
          six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
          train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
      }, summary_str, tol)
  def test_train_summaries_with_head_name(self):
    """With head_name set, TRAIN summary tags are suffixed with '/<head_name>'."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, head_name='some_multiclass_head')
    logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
    labels = np.array(((1,), (1,)), dtype=np.int64)
    # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
    expected_loss = 10.
    features = {'x': np.array(((42,),), dtype=np.int32)}
    def _train_op_fn(loss):
      # Train op contents are irrelevant here; only summaries are checked.
      del loss
      return control_flow_ops.no_op()
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    # Assert summaries.
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      summary_str = sess.run(spec.scaffold.summary_op)
      _assert_simple_summaries(self, {
          '{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS):
              expected_loss,
          '{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS_MEAN):
              expected_loss / 2,
      }, summary_str, tol)
  def test_train_with_one_dim_label_and_weights_create_loss(self):
    """Rank-1 labels/weights are accepted; unreduced loss still comes back 2-D."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, weight_column='label_weights')
    logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
    # Rank-1 (shape (3,)) labels and weights, instead of the usual (3, 1).
    labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)
    weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)
    features = {
        'x': np.array(((42,),), dtype=np.float32),
        'label_weights': weights_rank_1
    }
    # loss = cross_entropy(labels, logits) = [10, 10, 0].
    expected_unreduced_loss = np.array(((10.0,), (10.0,), (0.0,),))
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels_rank_1)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_train_with_one_dim_label_and_weights(self):
    """TRAIN works end-to-end with rank-1 labels and weights; loss is weighted sum."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, weight_column='label_weights')
    logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
    labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)
    weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)
    # Sanity-check the fixtures really are rank 1.
    self.assertEqual((3,), labels_rank_1.shape)
    self.assertEqual((3,), weights_rank_1.shape)
    expected_train_result = 'my_train_op'
    def _train_op_fn(loss):
      # Stamp the loss value into the "train op" so the test can verify the
      # exact loss tensor was passed through.
      return string_ops.string_join(
          [constant_op.constant(expected_train_result),
           string_ops.as_string(loss, precision=2)])
    # loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
    #      = sum([10, 10, 0] * [1, 2, 3]) = 30
    expected_loss = 30.
    features = {
        'x': np.array(((42,),), dtype=np.float32),
        'label_weights': weights_rank_1
    }
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels_rank_1,
        train_op_fn=_train_op_fn)
    self.assertIsNotNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                  spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
      self.assertEqual(
          six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
          train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: (
              expected_loss / np.sum(weights_rank_1)),
      }, summary_str, tol)
  def test_train_with_vocabulary_create_loss(self):
    """TRAIN create_loss maps string labels through the vocabulary before the loss."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
    logits = [[10., 0, 0], [0, 10, 0]]
    # b'iroh' maps to class id 1.
    labels = [[b'iroh'], [b'iroh']]
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = cross_entropy(labels, logits) = [10, 0].
    expected_unreduced_loss = np.array(((10.0,), (0,),))
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_train_with_vocabulary(self):
    """TRAIN spec computes the correct reduced loss for vocabulary string labels."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
    logits = [[10., 0, 0], [0, 10, 0]]
    labels = [[b'iroh'], [b'iroh']]
    features = {'x': np.array(((42,),), dtype=np.int32)}
    def _train_op_fn(loss):
      # Train op contents are irrelevant here; only the loss is checked.
      del loss
      return control_flow_ops.no_op()
    # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
    expected_loss = 10.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      loss = sess.run(spec.loss)
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
  def test_weighted_multi_example_train(self):
    """TRAIN loss and summaries apply per-example weights from the weight column."""
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
        n_classes, weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
    labels = np.array(((1,), (2,), (2,)), dtype=np.int64)
    weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)
    expected_train_result = 'my_train_op'
    # loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
    #      = sum([10, 10, 0] * [1, 2, 3]) = 30
    expected_loss = 30.
    def _train_op_fn(loss):
      # Stamp the loss value into the "train op" so the test can verify the
      # exact loss tensor was passed through.
      return string_ops.string_join(
          [constant_op.constant(expected_train_result),
           string_ops.as_string(loss, precision=2)])
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,),), dtype=np.float32),
            'label_weights': weights_3x1,
        },
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    self.assertIsNotNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                  spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
      self.assertEqual(
          six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
          train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss mean = sum(cross_entropy(labels, logits) * [1,2,3]) / (1+2+3)
          #           = sum([10, 10, 0] * [1, 2, 3]) / 6 = 30 / 6
          metric_keys.MetricKeys.LOSS_MEAN:
              expected_loss / np.sum(weights_3x1),
      }, summary_str, tol)
class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
  def setUp(self):
    """Start every test from a fresh default graph."""
    ops.reset_default_graph()
  def test_threshold_too_small(self):
    """Constructor rejects a threshold of 0; thresholds must be strictly inside (0, 1)."""
    with self.assertRaisesRegexp(ValueError, r'thresholds not in \(0, 1\)'):
      head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
          thresholds=(0., 0.5))
  def test_threshold_too_large(self):
    """Constructor rejects a threshold of 1; thresholds must be strictly inside (0, 1)."""
    with self.assertRaisesRegexp(ValueError, r'thresholds not in \(0, 1\)'):
      head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
          thresholds=(0.5, 1.))
  def test_invalid_logits_shape(self):
    """Binary head rejects logits whose last dim != 1, statically and dynamically."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    self.assertEqual(1, head.logits_dimension)
    # Logits should be shape (batch_size, 1).
    logits_2x2 = np.array(((45., 44.), (41., 42.),))
    # Static shape. Shape mismatch is caught at graph-construction time.
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': np.array(((42.,),))},
          mode=model_fn.ModeKeys.PREDICT,
          logits=logits_2x2)
    # Dynamic shape. With a placeholder the check can only fire at run time.
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42.,),))},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
            logits_placeholder: logits_2x2
        })
  def test_invalid_labels_shape(self):
    """Binary head rejects labels whose last dim != 1, statically and dynamically."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    self.assertEqual(1, head.logits_dimension)
    # Labels and logits should be shape (batch_size, 1).
    labels_2x2 = np.array(((45., 44.), (41., 42.),))
    logits_2x1 = np.array(((45.,), (41.,),))
    # Static shape. Bad labels shape is caught at graph-construction time.
    with self.assertRaisesRegexp(ValueError, 'labels shape'):
      head.create_loss(
          features={'x': np.array(((42.,),))},
          mode=model_fn.ModeKeys.EVAL,
          logits=logits_2x1,
          labels=labels_2x2)
    # Dynamic shape. With placeholders the check can only fire at run time.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    unweighted_loss, _ = head.create_loss(
        features={'x': np.array(((42.,),))},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
        unweighted_loss.eval({
            logits_placeholder: logits_2x1,
            labels_placeholder: labels_2x2
        })
  def test_incompatible_labels_shape(self):
    """Mismatched batch sizes fail in both directions, statically and dynamically."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    self.assertEqual(1, head.logits_dimension)
    # Both logits and labels should be shape (batch_size, 1).
    values_2x1 = np.array(((0.,), (1.,),))
    values_3x1 = np.array(((0.,), (1.,), (0.,),))
    # Static shape: labels batch larger than logits batch.
    with self.assertRaisesRegexp(
        ValueError, 'logits and labels must have the same shape'):
      head.create_loss(
          features={'x': values_2x1},
          mode=model_fn.ModeKeys.EVAL,
          logits=values_2x1,
          labels=values_3x1)
    # Static shape: logits batch larger than labels batch.
    with self.assertRaisesRegexp(
        ValueError, 'logits and labels must have the same shape'):
      head.create_loss(
          features={'x': values_2x1},
          mode=model_fn.ModeKeys.EVAL,
          logits=values_3x1,
          labels=values_2x1)
    # Dynamic shape. With placeholders the mismatch only surfaces at run time.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    unweighted_loss, _ = head.create_loss(
        features={'x': values_2x1},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
        unweighted_loss.eval({
            labels_placeholder: values_2x1,
            logits_placeholder: values_3x1
        })
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
        unweighted_loss.eval({
            labels_placeholder: values_3x1,
            logits_placeholder: values_2x1
        })
  def test_predict(self):
    """Binary head PREDICT emits logits, logistic, two-class probabilities and exports."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = [[0.3], [-0.4]]
    # sigmoid(0.3) == 0.574443, sigmoid(-0.4) == 0.401312.
    expected_logistics = [[0.574443], [0.401312]]
    # Two-class probabilities are [1 - logistic, logistic].
    expected_probabilities = [[0.425557, 0.574443], [0.598688, 0.401312]]
    expected_class_ids = [[1], [0]]
    expected_classes = [[b'1'], [b'0']]
    expected_export_classes = [[b'0', b'1']] * 2
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    # Assert spec contains expected tensors.
    self.assertIsNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNone(spec.train_op)
    # Binary head also exposes 'classification' and 'regression' exports.
    self.assertItemsEqual(('', 'classification', 'regression',
                           _DEFAULT_SERVING_KEY), spec.export_outputs.keys())
    _assert_no_hooks(self, spec)
    # Assert predictions.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      predictions = sess.run(spec.predictions)
      self.assertAllClose(logits,
                          predictions[prediction_keys.PredictionKeys.LOGITS])
      self.assertAllClose(expected_logistics,
                          predictions[prediction_keys.PredictionKeys.LOGISTIC])
      self.assertAllClose(
          expected_probabilities,
          predictions[prediction_keys.PredictionKeys.PROBABILITIES])
      self.assertAllClose(expected_class_ids,
                          predictions[prediction_keys.PredictionKeys.CLASS_IDS])
      self.assertAllEqual(expected_classes,
                          predictions[prediction_keys.PredictionKeys.CLASSES])
      self.assertAllClose(
          expected_probabilities,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
      self.assertAllEqual(
          expected_export_classes,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
      self.assertAllClose(expected_logistics,
                          sess.run(spec.export_outputs['regression'].value))
  def test_predict_with_vocabulary_list(self):
    """Binary head maps predicted class ids to the label vocabulary strings."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        label_vocabulary=['aang', 'iroh'])
    logits = [[1.], [0.]]
    # sigmoid(1.) > 0.5 -> class 1 ('iroh'); sigmoid(0.) == 0.5 -> class 0 ('aang').
    expected_classes = [[b'iroh'], [b'aang']]
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertAllEqual(
          expected_classes,
          sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))
  def test_eval_create_loss(self):
    """Binary create_loss in EVAL mode returns per-example sigmoid cross-entropy."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # loss = cross_entropy(labels, logits) = [0, 41].
    expected_unreduced_loss = np.array(((0.,), (41.,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_eval(self):
    """Binary EVAL spec has loss plus the full binary metric set; no train_op/exports."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    keys = metric_keys.MetricKeys
    expected_metrics = {
        # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
        # loss_mean = loss/2 = 41./2 = 20.5
        keys.LOSS_MEAN: 20.5,
        keys.ACCURACY: 1./2,
        keys.PREDICTION_MEAN: 1./2,
        keys.LABEL_MEAN: 2./2,
        keys.ACCURACY_BASELINE: 2./2,
        keys.AUC: 0.,
        keys.AUC_PR: 1.,
    }
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # eval_metric_ops values are (value_op, update_op) pairs.
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(41., loss)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval() for k in value_ops})
  def test_eval_metric_ops_with_head_name(self):
    """With head_name set, every binary metric key is suffixed with '/<head_name>'."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        head_name='some_binary_head')
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    expected_metric_keys = [
        '{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
        '{}/some_binary_head'.format(metric_keys.MetricKeys.ACCURACY),
        '{}/some_binary_head'.format(metric_keys.MetricKeys.PREDICTION_MEAN),
        '{}/some_binary_head'.format(metric_keys.MetricKeys.LABEL_MEAN),
        '{}/some_binary_head'.format(metric_keys.MetricKeys.ACCURACY_BASELINE),
        '{}/some_binary_head'.format(metric_keys.MetricKeys.AUC),
        '{}/some_binary_head'.format(metric_keys.MetricKeys.AUC_PR)
    ]
    self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
  def test_eval_with_vocabulary_list_create_loss(self):
    """Binary create_loss maps vocabulary string labels before computing the loss."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        label_vocabulary=['aang', 'iroh'])
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    # b'iroh' maps to class 1.
    labels = [[b'iroh'], [b'iroh']]
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      # loss = cross_entropy(labels, logits) = [0, 41].
      self.assertAllClose(np.array(((0.,), (41.,),)), unweighted_loss.eval())
  def test_eval_with_vocabulary_list(self):
    """EVAL metrics should be computed on vocabulary-mapped string labels."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        label_vocabulary=['aang', 'iroh'])
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = [[b'iroh'], [b'iroh']]
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      sess.run(update_ops)
      # Predictions are (1, 0); labels map to (1, 1) => 1 of 2 correct.
      self.assertAllClose(1. / 2,
                          value_ops[metric_keys.MetricKeys.ACCURACY].eval())
  def test_eval_with_thresholds_create_loss(self):
    """create_loss is independent of eval thresholds; checks raw loss values."""
    thresholds = [0.25, 0.5, 0.75]
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        thresholds=thresholds)
    logits = np.array(((-1,), (1,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # probabilities[i] = 1/(1 + exp(-logits[i])) =>
    # probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]
    # Both labels are 1, so:
    # loss = -ln(probabilities[i]) = [-ln(0.269), -ln(0.731)]
    #      = [1.31304389, 0.31334182]
    expected_unreduced_loss = np.array(((1.31304389,), (0.31334182,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_eval_with_thresholds(self):
    """EVAL with thresholds adds accuracy/precision/recall per threshold."""
    thresholds = [0.25, 0.5, 0.75]
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        thresholds=thresholds)
    logits = np.array(((-1,), (1,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.int32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    # probabilities[i] = 1/(1 + exp(-logits[i])) =>
    # probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]
    # Both labels are 1, so:
    # loss = -sum(ln(probabilities[i])) = -ln(0.269) -ln(0.731)
    #      = 1.62652338
    keys = metric_keys.MetricKeys
    expected_metrics = {
        keys.LOSS_MEAN: 1.62652338 / 2.,
        keys.ACCURACY: 1./2,
        keys.PREDICTION_MEAN: 1./2,
        keys.LABEL_MEAN: 2./2,
        keys.ACCURACY_BASELINE: 2./2,
        keys.AUC: 0.,
        keys.AUC_PR: 1.,
        # Probabilities are (0.269, 0.731); at each threshold an example is
        # predicted positive iff its probability exceeds the threshold.
        keys.ACCURACY_AT_THRESHOLD % thresholds[0]: 1.,
        keys.PRECISION_AT_THRESHOLD % thresholds[0]: 1.,
        keys.RECALL_AT_THRESHOLD % thresholds[0]: 1.,
        keys.ACCURACY_AT_THRESHOLD % thresholds[1]: .5,
        keys.PRECISION_AT_THRESHOLD % thresholds[1]: 1.,
        keys.RECALL_AT_THRESHOLD % thresholds[1]: .5,
        keys.ACCURACY_AT_THRESHOLD % thresholds[2]: 0.,
        keys.PRECISION_AT_THRESHOLD % thresholds[2]: 0.,
        keys.RECALL_AT_THRESHOLD % thresholds[2]: 0.,
    }
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    tol = 1e-2
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(1.62652338, loss)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval()
                             for k in value_ops},
          atol=tol,
          rtol=tol)
  def test_train_create_loss(self):
    """create_loss in TRAIN mode returns per-example (unreduced) loss."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.float64)
    features = {'x': np.array(((42,),), dtype=np.float32)}
    # loss = cross_entropy(labels, logits) = [0, 41].
    expected_unreduced_loss = np.array(((0.,), (41.,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(expected_unreduced_loss, unweighted_loss.eval())
  def test_train(self):
    """TRAIN mode: loss, train_op from train_op_fn, and loss summaries."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.float64)
    expected_train_result = b'my_train_op'
    features = {'x': np.array(((42,),), dtype=np.float32)}
    # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
    expected_loss = 41.
    def _train_op_fn(loss):
      # Asserts the loss handed to train_op_fn equals expected_loss, then
      # returns a marker constant so the test can recognize its train_op.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                  spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/2 = 41/2 = 20.5
          metric_keys.MetricKeys.LOSS_MEAN: 20.5,
      }, summary_str)
  def test_train_summaries_with_head_name(self):
    """TRAIN summaries should carry the '/some_binary_head' key suffix."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        head_name='some_binary_head')
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    labels = np.array(((1,), (1,),), dtype=np.float64)
    features = {'x': np.array(((42,),), dtype=np.float32)}
    # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
    expected_loss = 41.
    def _train_op_fn(loss):
      # Train op is irrelevant here; only summaries are checked.
      del loss
      return control_flow_ops.no_op()
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    # Assert summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      summary_str = sess.run(spec.scaffold.summary_op)
      _assert_simple_summaries(
          self,
          {
              '{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS):
                  expected_loss,
              # loss_mean = loss/2 = 41/2 = 20.5
              '{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS_MEAN):
                  20.5,
          },
          summary_str)
  def test_float_labels_train_create_loss(self):
    """Fractional (soft) labels should be accepted by create_loss in TRAIN."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array([[0.5], [-0.3]], dtype=np.float32)
    labels = np.array([[0.8], [0.4]], dtype=np.float32)
    features = {'x': np.array([[42]], dtype=np.float32)}
    # loss = cross_entropy(labels, logits)
    # = -label[i]*log(sigmoid(logit[i])) -(1-label[i])*log(sigmoid(-logit[i]))
    # = [-0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5)),
    #    -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))]
    # = [0.57407698418, 0.67435524446]
    expected_unreduced_loss = np.array(((0.57407698418,), (0.67435524446,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_float_labels_train(self):
    """TRAIN with fractional (soft) labels: summed loss and train_op."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array([[0.5], [-0.3]], dtype=np.float32)
    labels = np.array([[0.8], [0.4]], dtype=np.float32)
    expected_train_result = b'my_train_op'
    features = {'x': np.array([[42]], dtype=np.float32)}
    # loss = sum(cross_entropy(labels, logits))
    # = sum(-label[i]*log(sigmoid(logit[i]))
    #       -(1-label[i])*log(sigmoid(-logit[i])))
    # = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))
    #   -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))
    # = 1.2484322
    expected_loss = 1.2484322
    def _train_op_fn(loss):
      # Asserts the loss handed to train_op_fn is (approximately) the
      # expected value, then returns a marker constant.
      with ops.control_dependencies((dnn_testing_utils.assert_close(
          math_ops.to_float(expected_loss), math_ops.to_float(loss)),)):
        return constant_op.constant(expected_train_result)
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      loss, train_result = sess.run((spec.loss, spec.train_op))
      self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
      self.assertEqual(expected_train_result, train_result)
  def test_float_labels_eval_create_loss(self):
    """Fractional (soft) labels should be accepted by create_loss in EVAL."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array([[0.5], [-0.3]], dtype=np.float32)
    labels = np.array([[0.8], [0.4]], dtype=np.float32)
    features = {'x': np.array([[42]], dtype=np.float32)}
    # loss = cross_entropy(labels, logits)
    # = -label[i]*log(sigmoid(logit[i])) -(1-label[i])*log(sigmoid(-logit[i]))
    # = [-0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5)),
    #    -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))]
    # = [0.57407698418, 0.67435524446]
    expected_unreduced_loss = np.array(((0.57407698418,), (0.67435524446,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_float_labels_eval(self):
    """EVAL with fractional (soft) labels: loss and LOSS_MEAN metric."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    logits = np.array([[0.5], [-0.3]], dtype=np.float32)
    labels = np.array([[0.8], [0.4]], dtype=np.float32)
    features = {'x': np.array([[42]], dtype=np.float32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    # loss = sum(cross_entropy(labels, logits))
    # = sum(-label[i]*log(sigmoid(logit[i]))
    #       -(1-label[i])*log(sigmoid(-logit[i])))
    # = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))
    #   -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))
    # = 1.2484322
    expected_loss = 1.2484322
    # Assert loss.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
      self.assertAlmostEqual(
          expected_loss / 2., metrics[metric_keys.MetricKeys.LOSS_MEAN])
  def test_weighted_multi_example_predict(self):
    """3 examples, 1 batch.

    Weights are supplied but PREDICT-mode predictions ignore them.
    """
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      predictions = sess.run(spec.predictions)
      self.assertAllClose(
          logits.astype(np.float32),
          predictions[prediction_keys.PredictionKeys.LOGITS])
      self.assertAllClose(
          _sigmoid(logits).astype(np.float32),
          predictions[prediction_keys.PredictionKeys.LOGISTIC])
      self.assertAllClose(
          [[0., 1.], [1., 0.],
           [0., 1.]], predictions[prediction_keys.PredictionKeys.PROBABILITIES])
      self.assertAllClose([[1], [0], [1]],
                          predictions[prediction_keys.PredictionKeys.CLASS_IDS])
      self.assertAllEqual([[b'1'], [b'0'], [b'1']],
                          predictions[prediction_keys.PredictionKeys.CLASSES])
  def test_weighted_multi_example_eval(self):
    """3 examples, 1 batch.

    All eval metrics should be weighted by the 'label_weights' column.
    """
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((1,), (1,), (0,)), dtype=np.int32))
    # label_mean = (1*1 + .1*1 + 1.5*0)/(1 + .1 + 1.5) = 1.1/2.6
    # = .42307692307
    expected_label_mean = .42307692307
    keys = metric_keys.MetricKeys
    expected_metrics = {
        # losses = label_weights*cross_entropy(labels, logits)
        #        = (1*0, .1*41, 1.5*44) = (0, 4.1, 66)
        # loss = sum(losses) = 0 + 4.1 + 66 = 70.1
        # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
        # = 70.1/2.6 = 26.9615384615
        keys.LOSS_MEAN: 26.9615384615,
        # accuracy = (1*1 + .1*0 + 1.5*0)/(1 + .1 + 1.5) = 1/2.6 = .38461538461
        keys.ACCURACY: .38461538461,
        # prediction_mean = (1*1 + .1*0 + 1.5*1)/(1 + .1 + 1.5) = 2.5/2.6
        # = .96153846153
        keys.PREDICTION_MEAN: .96153846153,
        keys.LABEL_MEAN: expected_label_mean,
        keys.ACCURACY_BASELINE: 1 - expected_label_mean,
        keys.AUC: .45454565,
        keys.AUC_PR: .6737757325172424,
    }
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(70.1, loss)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval() for k in value_ops})
  def test_train_with_one_dim_labels_and_weights_create_loss(self):
    """3 examples, 1 batch.

    Rank-1 labels/weights should be handled by create_loss; the returned
    unweighted loss is per-example (weights not yet applied).
    """
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
    labels_rank_1 = np.array((1., 1., 0.,))
    weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)
    features = {
        'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
        'label_weights': weights_rank_1,
    }
    # losses = cross_entropy(labels, logits) = [0, 41, 44]
    expected_unreduced_loss = np.array(((0.,), (41,), (44.,),))
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels_rank_1)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      self.assertAllClose(
          expected_unreduced_loss, unweighted_loss.eval(), rtol=1e-2, atol=1e-2)
  def test_train_with_one_dim_labels_and_weights(self):
    """3 examples, 1 batch.

    TRAIN should accept rank-1 labels and weights and produce the same
    weighted loss as the equivalent rank-2 inputs.
    """
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
    labels_rank_1 = np.array((1., 1., 0.,))
    weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)
    self.assertEqual((3,), labels_rank_1.shape)
    self.assertEqual((3,), weights_rank_1.shape)
    features = {
        'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
        'label_weights': weights_rank_1,
    }
    expected_train_result = b'my_train_op'
    # losses = label_weights*cross_entropy(labels, logits)
    #        = (1*0, .1*41, 1.5*44) = (0, 4.1, 66)
    # loss = sum(losses) = 0 + 4.1 + 66 = 70.1
    expected_loss = 70.1
    def _train_op_fn(loss):
      # Asserts the loss handed to train_op_fn equals expected_loss, then
      # returns a marker constant.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels_rank_1,
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertIsNotNone(spec.train_op)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((
          spec.loss, spec.train_op, spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
          # = 70.1/2.6 = 26.9615384615
          metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,
      }, summary_str)
  def test_weighted_multi_example_train(self):
    """3 examples, 1 batch.

    TRAIN loss should be the weighted sum of per-example cross entropies.
    """
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
    expected_train_result = b'my_train_op'
    # losses = label_weights*cross_entropy(labels, logits)
    #        = (1*0, .1*41, 1.5*44) = (0, 4.1, 66)
    # loss = sum(losses) = 0 + 4.1 + 66 = 70.1
    expected_loss = 70.1
    def _train_op_fn(loss):
      # Asserts the loss handed to train_op_fn equals expected_loss, then
      # returns a marker constant.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),
        },
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=np.array(((1.,), (1.,), (0.,))),
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertIsNotNone(spec.train_op)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((
          spec.loss, spec.train_op, spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
          # = 70.1/2.6 = 26.9615384615
          metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,
      }, summary_str)
class RegressionHeadWithMeanSquaredErrorLossTest(test.TestCase):
  def setUp(self):
    # Start each test from a fresh default graph so ops created by one
    # test cannot leak into the next.
    ops.reset_default_graph()
def test_invalid_label_dimension(self):
with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):
head_lib._regression_head_with_mean_squared_error_loss(label_dimension=-1)
with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):
head_lib._regression_head_with_mean_squared_error_loss(label_dimension=0)
  def test_invalid_logits(self):
    """Logits whose last dimension != label_dimension should be rejected.

    Checks both the static-shape (graph construction) and dynamic-shape
    (run time, via placeholder) error paths.
    """
    head = head_lib._regression_head_with_mean_squared_error_loss(
        label_dimension=3)
    self.assertEqual(3, head.logits_dimension)
    logits_1d = np.array(((45.,), (41.,),))
    # Static shape.
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': np.array(((42.,),))},
          mode=model_fn.ModeKeys.PREDICT,
          logits=logits_1d)
    # Dynamic shape.
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42.,),))},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({
            logits_placeholder: logits_1d
        })
  def test_incompatible_labels_eval(self):
    """EVAL with mismatched label/logit shapes should fail.

    Covers static-shape errors at graph construction and dynamic-shape
    errors at run time for both the loss and create_loss paths.
    """
    head = head_lib._regression_head_with_mean_squared_error_loss(
        label_dimension=3)
    self.assertEqual(3, head.logits_dimension)
    values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))
    values_1d = np.array(((43.,), (44.,),))
    # Static shape.
    with self.assertRaisesRegexp(ValueError, 'labels shape'):
      head.create_loss(
          features={'x': values_1d},
          mode=model_fn.ModeKeys.EVAL,
          logits=values_3d,
          labels=values_1d)
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': values_3d}, labels=values_3d,
          mode=model_fn.ModeKeys.EVAL, logits=values_1d, train_op_fn=None)
    # Dynamic shape.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': values_1d},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.loss.eval({
            labels_placeholder: values_3d,
            logits_placeholder: values_1d
        })
    unweighted_loss, _ = head.create_loss(
        features={'x': values_1d},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
        unweighted_loss.eval({
            labels_placeholder: values_1d,
            logits_placeholder: values_3d
        })
  def test_incompatible_labels_train(self):
    """TRAIN with mismatched label/logit shapes should fail.

    Mirrors test_incompatible_labels_eval for ModeKeys.TRAIN.
    """
    head = head_lib._regression_head_with_mean_squared_error_loss(
        label_dimension=3)
    self.assertEqual(3, head.logits_dimension)
    values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))
    values_1d = np.array(((43.,), (44.,),))
    # Static shape.
    with self.assertRaisesRegexp(ValueError, 'labels shape'):
      head.create_loss(
          features={'x': values_1d},
          mode=model_fn.ModeKeys.TRAIN,
          logits=values_3d,
          labels=values_1d)
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': values_3d},
          mode=model_fn.ModeKeys.TRAIN,
          logits=values_1d,
          labels=values_3d,
          train_op_fn=lambda x: x)
    # Dynamic shape.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': values_1d},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits_placeholder,
        labels=labels_placeholder,
        train_op_fn=lambda x: x)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.loss.eval({
            labels_placeholder: values_3d,
            logits_placeholder: values_1d
        })
    unweighted_loss, _ = head.create_loss(
        features={'x': values_1d},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
        unweighted_loss.eval({
            labels_placeholder: values_1d,
            logits_placeholder: values_3d
        })
  def test_predict(self):
    """PREDICT: predictions are the logits cast to float32; no loss/metrics."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,),), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42.,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    # Assert spec contains expected tensors.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertIsNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNone(spec.train_op)
    # Both the default serving key and the '' key should be exported.
    self.assertItemsEqual(
        ('', signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY),
        spec.export_outputs.keys())
    _assert_no_hooks(self, spec)
    # Assert predictions.
    with self.test_session():
      _initialize_variables(self, spec.scaffold)
      self.assertAllClose(logits, spec.predictions[prediction_key].eval())
  def test_eval_create_loss(self):
    """create_loss in EVAL returns per-example squared errors."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    logits = np.array(((45,), (41,),), dtype=np.float32)
    labels = np.array(((43,), (44,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.float32)}
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      # loss = [(43-45)^2, (44-41)^2] = [4, 9]
      self.assertAllClose(np.array(((4.,), (9.,),)), unweighted_loss.eval())
  def test_eval(self):
    """EVAL: predictions, summed loss, and the LOSS_MEAN metric pair."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    self.assertEqual(1, head.logits_dimension)
    logits = np.array(((45,), (41,),), dtype=np.float32)
    labels = np.array(((43,), (44,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.float32)}
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
    # Assert spec contains expected tensors.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
          metric_keys.MetricKeys.LOSS_MEAN]
      predictions, loss, loss_mean = sess.run((
          spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
      self.assertAllClose(logits, predictions)
      # loss = (43-45)^2 + (44-41)^2 = 4+9 = 13
      self.assertAllClose(13., loss)
      # loss_mean = loss/2 = 13/2 = 6.5
      expected_loss_mean = 6.5
      # Check results of both update (in `loss_mean`) and value ops.
      self.assertAllClose(expected_loss_mean, loss_mean)
      self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
  def test_train_create_loss(self):
    """create_loss in TRAIN returns per-example squared errors."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    logits = np.array(((45,), (41,),), dtype=np.float32)
    labels = np.array(((43,), (44,),), dtype=np.int32)
    features = {'x': np.array(((42,),), dtype=np.float32)}
    # Create loss.
    unweighted_loss, _ = head.create_loss(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
    with self.test_session():
      _initialize_variables(self, monitored_session.Scaffold())
      # loss = [(43-45)^2, (44-41)^2] = [4, 9]
      self.assertAllClose(np.array(((4.,), (9.,),)), unweighted_loss.eval())
  def test_train(self):
    """TRAIN: predictions, summed loss, train_op, and loss summaries."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,),), dtype=np.float32)
    labels = np.array(((43.,), (44.,),), dtype=np.float64)
    expected_train_result = b'my_train_op'
    features = {'x': np.array(((42.,),), dtype=np.float32)}
    # loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13
    expected_loss = 13
    def _train_op_fn(loss):
      # Asserts the loss handed to train_op_fn equals expected_loss, then
      # returns a marker constant.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      predictions, loss, train_result, summary_str = sess.run((
          spec.predictions[prediction_key], spec.loss, spec.train_op,
          spec.scaffold.summary_op))
      self.assertAllClose(logits, predictions)
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/2 = 13/2 = 6.5
          metric_keys.MetricKeys.LOSS_MEAN: 6.5,
      }, summary_str)
  def test_train_summaries_with_head_name(self):
    """TRAIN summaries should carry the '/some_regression_head' suffix."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        head_name='some_regression_head')
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,),), dtype=np.float32)
    labels = np.array(((43.,), (44.,),), dtype=np.float64)
    features = {'x': np.array(((42.,),), dtype=np.float32)}
    # loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13
    expected_loss = 13
    def _train_op_fn(loss):
      # Train op is irrelevant here; only summaries are checked.
      del loss
      return control_flow_ops.no_op()
    spec = head.create_estimator_spec(
        features=features,
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels,
        train_op_fn=_train_op_fn)
    # Assert summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      summary_str = sess.run(spec.scaffold.summary_op)
      _assert_simple_summaries(
          self,
          {
              '{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS):
                  expected_loss,
              # loss_mean = loss/2 = 13/2 = 6.5
              '{}/some_regression_head'
              .format(metric_keys.MetricKeys.LOSS_MEAN):
                  6.5,
          },
          summary_str)
  def test_weighted_multi_example_eval(self):
    """1d label, 3 examples, 1 batch.

    EVAL loss and LOSS_MEAN should be weighted by 'label_weights'.
    """
    head = head_lib._regression_head_with_mean_squared_error_loss(
        weight_column='label_weights')
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
    # Assert spec contains expected tensors.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
          metric_keys.MetricKeys.LOSS_MEAN]
      predictions, loss, loss_mean = sess.run((
          spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
      self.assertAllClose(logits, predictions)
      # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
      self.assertAllClose(101.6, loss)
      # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
      expected_loss_mean = 39.0769231
      # Check results of both update (in `loss_mean`) and value ops.
      self.assertAllClose(expected_loss_mean, loss_mean)
      self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
  def test_weight_with_numeric_column(self):
    """1d label, 3 examples, 1 batch.

    The weight column's normalizer_fn (x + 1) should be applied before
    weighting the loss.
    """
    head = head_lib._regression_head_with_mean_squared_error_loss(
        weight_column=feature_column_lib.numeric_column(
            'label_weights', normalizer_fn=lambda x: x + 1.))
    # Create estimator spec.
    logits = np.array(((45,), (41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x':
                np.array(((42,), (43,), (44,)), dtype=np.int32),
            # Raw weights (0, -0.9, 0.5) normalize to (1, .1, 1.5).
            'label_weights':
                np.array(((0.,), (-0.9,), (0.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
    # Assert loss.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      loss = sess.run(spec.loss)
      # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
      self.assertAllClose(101.6, loss)
def test_weighted_multi_example_train(self):
  """1d label, 3 examples, 1 batch."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  self.assertEqual(1, head.logits_dimension)
  # Create estimator spec.
  logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
  expected_train_result = b'my_train_op'
  # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
  expected_loss = 101.6

  def _train_op_fn(loss):
    # Graph-level assertion that the loss handed to train_op_fn equals the
    # expected weighted sum; the marker constant is returned as the train op.
    with ops.control_dependencies((check_ops.assert_equal(
        math_ops.to_float(expected_loss), math_ops.to_float(loss),
        name='assert_loss'),)):
      return constant_op.constant(expected_train_result)

  spec = head.create_estimator_spec(
      features={
          'x': np.array(((42,), (43,), (44,)), dtype=np.float32),
          # Weights deliberately use float64 while labels/logits are float32.
          'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),
      },
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=np.array(((35.,), (42.,), (45.,)), dtype=np.float32),
      train_op_fn=_train_op_fn)

  # Assert spec contains expected tensors.
  prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
  self.assertItemsEqual((prediction_key,), spec.predictions.keys())
  self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertEqual({}, spec.eval_metric_ops)
  self.assertIsNotNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)

  # Assert predictions, loss, train_op, and summaries.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    predictions, loss, train_result, summary_str = sess.run((
        spec.predictions[prediction_key], spec.loss, spec.train_op,
        spec.scaffold.summary_op))
    self.assertAllClose(logits, predictions)
    self.assertAllClose(expected_loss, loss)
    self.assertEqual(expected_train_result, train_result)
    _assert_simple_summaries(self, {
        metric_keys.MetricKeys.LOSS: expected_loss,
        # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
        metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,
    }, summary_str)
def test_test_with_one_dim_label_and_weight_create_loss(self):
  """1d label, 3 examples, 1 batch."""
  # NOTE(review): the doubled "test_test_" prefix looks like a typo, but the
  # name is kept because it is the test's discoverable interface.
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
  # Rank-1 features, weights and labels must be accepted by create_loss.
  feature_vec = np.array((42., 43., 44.,), dtype=np.float32)
  weight_vec = np.array((1., .1, 1.5,), dtype=np.float64)
  label_vec = np.array((35., 42., 45.,))
  unweighted_loss, _ = head.create_loss(
      features={'x': feature_vec, 'label_weights': weight_vec},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=label_vec)
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    # Unreduced loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
    self.assertAllClose(
        np.array(((100.,), (1.,), (1.,),)), unweighted_loss.eval())
def test_with_one_dim_label_and_weight(self):
  """1d label, 3 examples, 1 batch."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  self.assertEqual(1, head.logits_dimension)
  # Create estimator spec.
  logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
  expected_train_result = b'my_train_op'
  # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
  expected_loss = 101.6

  def _train_op_fn(loss):
    # Graph-level assertion that the loss fed to train_op_fn matches the
    # expected weighted sum.
    with ops.control_dependencies((check_ops.assert_equal(
        math_ops.to_float(expected_loss), math_ops.to_float(loss),
        name='assert_loss'),)):
      return constant_op.constant(expected_train_result)

  # Rank-1 features, weights and labels: the head must handle 1-D inputs.
  x_feature_rank_1 = np.array((42., 43., 44.,), dtype=np.float32)
  weight_rank_1 = np.array((1., .1, 1.5,), dtype=np.float64)
  labels_rank_1 = np.array((35., 42., 45.,))
  features = {'x': x_feature_rank_1, 'label_weights': weight_rank_1}
  self.assertEqual((3,), x_feature_rank_1.shape)
  self.assertEqual((3,), weight_rank_1.shape)
  self.assertEqual((3,), labels_rank_1.shape)
  spec = head.create_estimator_spec(
      features=features,
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels_rank_1,
      train_op_fn=_train_op_fn)

  # Assert spec contains expected tensors.
  prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
  self.assertItemsEqual((prediction_key,), spec.predictions.keys())
  self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertEqual({}, spec.eval_metric_ops)
  self.assertIsNotNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)

  # Assert predictions, loss, train_op, and summaries.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    predictions, loss, train_result, summary_str = sess.run((
        spec.predictions[prediction_key], spec.loss, spec.train_op,
        spec.scaffold.summary_op))
    self.assertAllClose(logits, predictions)
    self.assertAllClose(expected_loss, loss)
    self.assertEqual(expected_train_result, train_result)
    _assert_simple_summaries(self, {
        metric_keys.MetricKeys.LOSS: expected_loss,
        # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
        metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,
    }, summary_str)
def test_weighted_multi_value_eval_create_loss(self):
  """3d label, 1 example, 1 batch."""
  # Regression head over a 3-dimensional label with per-dimension weights.
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights', label_dimension=3)
  model_outputs = np.array(((45., 41., 44.),))
  targets = np.array(((35., 42., 45.),))
  input_features = {
      'x': np.array(((42., 43., 44.),)),
      'label_weights': np.array(((1., .1, 1.5),))
  }
  unweighted_loss, _ = head.create_loss(
      features=input_features,
      mode=model_fn.ModeKeys.EVAL,
      logits=model_outputs,
      labels=targets)
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    # Per-element squared errors before weighting:
    # [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
    self.assertAllClose(np.array(((100., 1., 1.,),)), unweighted_loss.eval())
def test_weighted_multi_value_eval(self):
  """3d label, 1 example, 1 batch."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights', label_dimension=3)
  self.assertEqual(3, head.logits_dimension)
  logits = np.array(((45., 41., 44.),))
  labels = np.array(((35., 42., 45.),))
  features = {
      'x': np.array(((42., 43., 44.),)),
      'label_weights': np.array(((1., .1, 1.5),))
  }
  # Create estimator spec.
  spec = head.create_estimator_spec(
      features=features,
      mode=model_fn.ModeKeys.EVAL,
      logits=logits,
      labels=labels)

  # Assert spec contains expected tensors.
  prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
  self.assertItemsEqual((prediction_key,), spec.predictions.keys())
  self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertItemsEqual(
      (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
  # EVAL mode: no train op, no export outputs, no hooks.
  self.assertIsNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)

  # Assert predictions, loss, and metrics.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
        metric_keys.MetricKeys.LOSS_MEAN]
    predictions, loss, loss_mean = sess.run((
        spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
    self.assertAllClose(logits, predictions)
    # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
    self.assertAllClose(101.6, loss)
    # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
    expected_loss_mean = 39.076923
    # Check results of both update (in `loss_mean`) and value ops.
    self.assertAllClose(expected_loss_mean, loss_mean)
    self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
def test_weighted_multi_value_train_create_loss(self):
  """3d label, 1 example, 1 batch."""
  # Same setup as the EVAL variant but exercising TRAIN mode.
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights', label_dimension=3)
  train_logits = np.array(((45., 41., 44.),))
  train_labels = np.array(((35., 42., 45.),))
  train_features = {
      'x': np.array(((42., 43., 44.),)),
      'label_weights': np.array(((1., .1, 1.5),))
  }
  unweighted_loss, _ = head.create_loss(
      features=train_features,
      mode=model_fn.ModeKeys.TRAIN,
      logits=train_logits,
      labels=train_labels)
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    # Per-element squared errors before weighting:
    # [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
    self.assertAllClose(np.array(((100., 1., 1.,),)), unweighted_loss.eval())
def test_weighted_multi_value_train(self):
  """3d label, 1 example, 1 batch."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights', label_dimension=3)
  self.assertEqual(3, head.logits_dimension)
  logits = np.array(((45., 41., 44.),))
  labels = np.array(((35., 42., 45.),))
  expected_train_result = b'my_train_op'
  # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
  expected_loss = 101.6

  def _train_op_fn(loss):
    # Graph-level assertion that the loss handed to train_op_fn equals the
    # expected weighted sum; the marker constant is returned as the train op.
    with ops.control_dependencies((check_ops.assert_equal(
        math_ops.to_float(expected_loss), math_ops.to_float(loss),
        name='assert_loss'),)):
      return constant_op.constant(expected_train_result)

  features = {
      'x': np.array(((42., 43., 44.),)),
      'label_weights': np.array(((1., .1, 1.5),)),
  }
  # Create estimator spec.
  spec = head.create_estimator_spec(
      features=features,
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      train_op_fn=_train_op_fn)

  # Assert spec contains expected tensors.
  prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
  self.assertItemsEqual((prediction_key,), spec.predictions.keys())
  self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertEqual({}, spec.eval_metric_ops)
  self.assertIsNotNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)

  # Evaluate predictions, loss, train_op, and summaries.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    predictions, loss, train_result, summary_str = sess.run((
        spec.predictions[prediction_key], spec.loss, spec.train_op,
        spec.scaffold.summary_op))
    self.assertAllClose(logits, predictions)
    self.assertAllClose(expected_loss, loss)
    self.assertEqual(expected_train_result, train_result)
    _assert_simple_summaries(self, {
        metric_keys.MetricKeys.LOSS: expected_loss,
        # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
        metric_keys.MetricKeys.LOSS_MEAN: 39.076923,
    }, summary_str)
def test_weighted_multi_batch_eval(self):
  """1d label, 1 example, 3 batches."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  self.assertEqual(1, head.logits_dimension)
  # Create estimator spec.
  logits = np.array(((45.,), (41.,), (44.,)))
  input_fn = numpy_io.numpy_input_fn(
      x={
          'x': np.array(((42.,), (43.,), (44.,))),
          'label_weights': np.array(((1.,), (.1,), (1.5,))),
          # 'logits' is not a feature, but we use `numpy_input_fn` to make a
          # batched version of it, and pop it off before passing to
          # `create_estimator_spec`.
          'logits': logits,
      },
      y=np.array(((35.,), (42.,), (45.,))),
      batch_size=1,
      num_epochs=1,
      shuffle=False)
  batched_features, batched_labels = input_fn()
  batched_logits = batched_features.pop('logits')
  spec = head.create_estimator_spec(
      features=batched_features,
      mode=model_fn.ModeKeys.EVAL,
      logits=batched_logits,
      labels=batched_labels,
      train_op_fn=None)

  # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
  # loss = sum(losses) = 100+.1+1.5 = 101.6
  # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
  expected_metrics = {metric_keys.MetricKeys.LOSS_MEAN: 39.076923}

  # Assert spec contains expected tensors.
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
  self.assertIsNone(spec.train_op)
  _assert_no_hooks(self, spec)

  with self.test_session() as sess:
    # Finalize graph and initialize variables.
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    # Queue runners drive the batched numpy input built above.
    queue_runner_impl.start_queue_runners()

    # Run tensors for `steps` steps.
    steps = len(logits)
    results = tuple([
        sess.run((
            spec.loss,
            # The `[1]` gives us the metric update op.
            {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
        )) for _ in range(steps)
    ])

    # Assert losses and metrics.
    self.assertAllClose((100, .1, 1.5), [r[0] for r in results])
    # For metrics, check results of both update (in `results`) and value ops.
    # Note: we only check the result of the last step for streaming metrics.
    self.assertAllClose(expected_metrics, results[steps - 1][1])
    self.assertAllClose(expected_metrics, {
        k: spec.eval_metric_ops[k][0].eval() for k in spec.eval_metric_ops
    })
def test_weighted_multi_batch_train(self):
  """1d label, 1 example, 3 batches."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  self.assertEqual(1, head.logits_dimension)
  # Create estimator spec.
  logits = np.array(((45.,), (41.,), (44.,)))
  input_fn = numpy_io.numpy_input_fn(
      x={
          'x': np.array(((42.,), (43.,), (44.,))),
          'label_weights': np.array(((1.,), (.1,), (1.5,))),
          # 'logits' is not a feature, but we use `numpy_input_fn` to make a
          # batched version of it, and pop it off before passing to
          # `create_estimator_spec`.
          'logits': logits,
      },
      y=np.array(((35.,), (42.,), (45.,))),
      batch_size=1,
      num_epochs=1,
      shuffle=False)
  batched_features, batched_labels = input_fn()
  batched_logits = batched_features.pop('logits')
  # The train op simply scales the loss by -7, so each step's train result
  # directly reflects that batch's weighted loss.
  spec = head.create_estimator_spec(
      features=batched_features,
      mode=model_fn.ModeKeys.TRAIN,
      logits=batched_logits,
      labels=batched_labels,
      train_op_fn=lambda loss: loss * -7.)

  # Assert spec contains expected tensors.
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertIsNotNone(spec.train_op)

  with self.test_session() as sess:
    # Finalize graph and initialize variables.
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    queue_runner_impl.start_queue_runners()
    results = tuple([
        sess.run((spec.loss, spec.train_op)) for _ in range(len(logits))
    ])

    # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
    expected_losses = np.array((100, .1, 1.5))
    self.assertAllClose(expected_losses, [r[0] for r in results])
    self.assertAllClose(expected_losses * -7., [r[1] for r in results])
if __name__ == '__main__':
  # Allow running this test module directly; test.main() discovers and runs
  # all the test cases defined above.
  test.main()
| apache-2.0 |
wenjoy/homePage | node_modules/node-captcha/node_modules/canvas/node_modules/mocha/node_modules/jade/node_modules/stylus/node_modules/mkdirp/node_modules/tap/node_modules/readable-stream/node_modules/string_decoder/node_modules/tap/node_modules/yamlish/yamlish-py/setup.py | 160 | 1406 | # coding: utf-8
from setuptools import setup
# Runtime dependencies; PyYAML supplies the underlying YAML parser.
requires_list = [
    "PyYAML>=3.09"
]

import os.path


def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    CRLF line endings are normalized to LF, and a leading newline is
    prepended so the text can be concatenated onto other metadata.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as inf:
        return "\n" + inf.read().replace("\r\n", "\n")
# Package metadata. The long description is pulled straight from the README
# via read() so PyPI stays in sync with the repository.
setup(
    name='yamlish',
    version="0.17.0",
    description='Python implementation of YAMLish',
    author='Matěj Cepl',
    author_email='mcepl@redhat.com',
    url='https://github.com/mcepl/yamlish/',
    py_modules=['yamlish'],
    long_description=read("README.rst"),
    keywords=['TAP', 'YAML', 'yamlish'],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup",
    ],
    # Tests are discovered from the "test" package/module by setuptools.
    test_suite="test",
    install_requires=requires_list
)
| mit |
digideskio/transitfeed | merge.py | 5 | 64888 | #!/usr/bin/python2.5
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tool for merging two Google Transit feeds.
Given two Google Transit feeds intending to cover two disjoint calendar
intervals, this tool will attempt to produce a single feed by merging as much
of the two feeds together as possible.
For example, most stops remain the same throughout the year. Therefore, many
of the stops given in stops.txt for the first feed represent the same stops
given in the second feed. This tool will try to merge these stops so they
only appear once in the resultant feed.
A note on terminology: The first schedule is referred to as the "old" schedule;
the second as the "new" schedule. The resultant schedule is referred to as
the "merged" schedule. Names of things in the old schedule are variations of
the letter "a" while names of things from the new schedule are variations of
"b". The objects that represents routes, agencies and so on are called
"entities".
usage: merge.py [options] old_feed_path new_feed_path merged_feed_path
Run merge.py --help for a list of the possible options.
"""
__author__ = 'timothy.stranex@gmail.com (Timothy Stranex)'
import datetime
import optparse
import os
import re
import sys
import time
import transitfeed
from transitfeed import util
import webbrowser
# TODO:
# 1. write unit tests that use actual data
# 2. write a proper trip and stop_times merger
# 3. add a serialised access method for stop_times and shapes to transitfeed
# 4. add support for merging schedules which have some service period overlap
def ApproximateDistanceBetweenPoints(pa, pb):
  """Approximate the great-circle distance between two (lat, lon) points.

  The Earth is treated as a sphere; the actual computation is delegated to
  transitfeed.ApproximateDistanceBetweenStops via two temporary Stop objects.

  Args:
    pa: the first (lat, lon) point tuple
    pb: the second (lat, lon) point tuple

  Returns:
    The distance as a float in metres.
  """
  (a_lat, a_lon), (b_lat, b_lon) = pa, pb
  stop_a = transitfeed.Stop(lat=a_lat, lng=a_lon)
  stop_b = transitfeed.Stop(lat=b_lat, lng=b_lon)
  return transitfeed.ApproximateDistanceBetweenStops(stop_a, stop_b)
class Error(Exception):
  """The base exception class for this module; all merge errors derive from it."""
class MergeError(Error):
  """An error produced when two entities could not be merged."""
class MergeProblemWithContext(transitfeed.ExceptionWithContext):
  """The base exception class for problem reporting in the merge module.

  Attributes:
    dataset_merger: The DataSetMerger that generated this problem.
    entity_type_name: The entity type of the dataset_merger. This is just
      dataset_merger.ENTITY_TYPE_NAME.
    ERROR_TEXT: The text used for generating the problem message.
  """

  def __init__(self, dataset_merger, problem_type=transitfeed.TYPE_WARNING,
               **kwargs):
    """Initialise the exception object.

    Args:
      dataset_merger: The DataSetMerger instance that generated this problem.
      problem_type: The problem severity. This should be set to one of the
        corresponding constants in transitfeed.
      kwargs: Keyword arguments to be saved as instance attributes.
    """
    kwargs['type'] = problem_type
    kwargs['entity_type_name'] = dataset_merger.ENTITY_TYPE_NAME
    # No specific row/file context is known at this level, hence the two
    # None arguments to the base constructor.
    transitfeed.ExceptionWithContext.__init__(self, None, None, **kwargs)
    self.dataset_merger = dataset_merger

  def FormatContext(self):
    # Problems are attributed to the pair of input files being merged.
    return "In files '%s'" % self.dataset_merger.FILE_NAME
class SameIdButNotMerged(MergeProblemWithContext):
  # Reported when entities share an id across the two feeds but their
  # attributes conflict; formatting uses the 'id' kwarg (see
  # MergeProblemReporter.SameIdButNotMerged).
  ERROR_TEXT = ("There is a %(entity_type_name)s in the old feed with id "
                "'%(id)s' and one from the new feed with the same id but "
                "they could not be merged:")
class CalendarsNotDisjoint(MergeProblemWithContext):
  # Reported as a hard error (TYPE_ERROR is passed by MergeProblemReporter):
  # overlapping service periods cannot be merged by this tool.
  ERROR_TEXT = ("The service periods could not be merged since they are not "
                "disjoint.")
class MergeNotImplemented(MergeProblemWithContext):
  # Warning emitted for files whose entries are duplicated rather than merged.
  ERROR_TEXT = ("The feed merger does not currently support merging in this "
                "file. The entries have been duplicated instead.")
class FareRulesBroken(MergeProblemWithContext):
  # Warning emitted because fare rule merging is a known limitation.
  ERROR_TEXT = ("The feed merger is currently unable to handle fare rules "
                "properly.")
class MergeProblemReporter(transitfeed.ProblemReporter):
  """The base problem reporter class for the merge module.

  Each method constructs the matching MergeProblemWithContext subclass and
  hands it to the accumulator.
  """

  def __init__(self, accumulator):
    transitfeed.ProblemReporter.__init__(self, accumulator)

  def SameIdButNotMerged(self, dataset, entity_id, reason):
    # Warning: two entities shared an id but their attributes conflicted.
    self.AddToAccumulator(
        SameIdButNotMerged(dataset, id=entity_id, reason=reason))

  def CalendarsNotDisjoint(self, dataset):
    # Hard error: overlapping service periods abort the merge.
    self.AddToAccumulator(
        CalendarsNotDisjoint(dataset, problem_type=transitfeed.TYPE_ERROR))

  def MergeNotImplemented(self, dataset):
    # Warning: the file type is only duplicated, not merged.
    self.AddToAccumulator(MergeNotImplemented(dataset))

  def FareRulesBroken(self, dataset):
    # Warning: fare rules are not handled properly by the merger.
    self.AddToAccumulator(FareRulesBroken(dataset))
class HTMLProblemAccumulator(transitfeed.ProblemAccumulatorInterface):
  """A problem reporter which generates HTML output."""

  def __init__(self):
    """Initialise."""
    self._dataset_warnings = {}  # a map from DataSetMergers to their warnings
    self._dataset_errors = {}    # same mapping, for errors
    self._notices = []           # notices are kept as a flat list
    self._warning_count = 0
    self._error_count = 0
    self._notice_count = 0

  def _Report(self, merge_problem):
    """Record one problem, bucketed by severity and originating merger."""
    # Notices are handled special
    if merge_problem.IsNotice():
      self._notice_count += 1
      self._notices.append(merge_problem)
      return

    if merge_problem.IsWarning():
      dataset_problems = self._dataset_warnings
      self._warning_count += 1
    else:
      dataset_problems = self._dataset_errors
      self._error_count += 1

    # Pre-render each problem as an HTML list item.
    problem_html = '<li>%s</li>' % (
        merge_problem.FormatProblem().replace('\n', '<br>'))
    dataset_problems.setdefault(merge_problem.dataset_merger, []).append(
        problem_html)

  def _GenerateStatsTable(self, feed_merger):
    """Generate an HTML table of merge statistics.

    Args:
      feed_merger: The FeedMerger instance.

    Returns:
      The generated HTML as a string.
    """
    rows = []
    rows.append('<tr><th class="header"/><th class="header">Merged</th>'
                '<th class="header">Copied from old feed</th>'
                '<th class="header">Copied from new feed</th></tr>')
    for merger in feed_merger.GetMergerList():
      stats = merger.GetMergeStats()
      # Mergers may return None to opt out of the statistics table.
      if stats is None:
        continue
      merged, not_merged_a, not_merged_b = stats
      rows.append('<tr><th class="header">%s</th>'
                  '<td class="header">%d</td>'
                  '<td class="header">%d</td>'
                  '<td class="header">%d</td></tr>' %
                  (merger.DATASET_NAME, merged, not_merged_a, not_merged_b))
    return '<table>%s</table>' % '\n'.join(rows)

  def _GenerateSection(self, problem_type):
    """Generate a listing of the given type of problems.

    Args:
      problem_type: The type of problem. This is one of the problem type
                    constants from transitfeed.

    Returns:
      The generated HTML as a string.
    """
    if problem_type == transitfeed.TYPE_WARNING:
      dataset_problems = self._dataset_warnings
      heading = 'Warnings'
    else:
      dataset_problems = self._dataset_errors
      heading = 'Errors'

    if not dataset_problems:
      return ''

    prefix = '<h2 class="issueHeader">%s:</h2>' % heading
    dataset_sections = []
    for dataset_merger, problems in dataset_problems.items():
      dataset_sections.append('<h3>%s</h3><ol>%s</ol>' % (
          dataset_merger.FILE_NAME, '\n'.join(problems)))
    body = '\n'.join(dataset_sections)
    return prefix + body

  def _GenerateSummary(self):
    """Generate a summary of the warnings and errors.

    Returns:
      The generated HTML as a string.
    """
    items = []
    if self._notices:
      items.append('notices: %d' % self._notice_count)
    if self._dataset_errors:
      items.append('errors: %d' % self._error_count)
    if self._dataset_warnings:
      items.append('warnings: %d' % self._warning_count)

    if items:
      return '<p><span class="fail">%s</span></p>' % '<br>'.join(items)
    else:
      return '<p><span class="pass">feeds merged successfully</span></p>'

  def _GenerateNotices(self):
    """Generate a summary of any notices.

    Returns:
      The generated HTML as a string.
    """
    items = []
    for e in self._notices:
      d = e.GetDictToFormat()
      # Linkify a 'url' attribute when the notice carries one.
      if 'url' in d.keys():
        d['url'] = '<a href="%(url)s">%(url)s</a>' % d
      items.append('<li class="notice">%s</li>' %
                   e.FormatProblem(d).replace('\n', '<br>'))
    if items:
      return '<h2>Notices:</h2>\n<ul>%s</ul>\n' % '\n'.join(items)
    else:
      return ''

  def WriteOutput(self, output_file, feed_merger,
                  old_feed_path, new_feed_path, merged_feed_path):
    """Write the HTML output to a file.

    Args:
      output_file: The file object that the HTML output will be written to.
      feed_merger: The FeedMerger instance.
      old_feed_path: The path to the old feed file as a string.
      new_feed_path: The path to the new feed file as a string
      merged_feed_path: The path to the merged feed file as a string. This
                        may be None if no merged feed was written.
    """
    if merged_feed_path is None:
      html_merged_feed_path = ''
    else:
      html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % (
          merged_feed_path)

    # NOTE: the template is interpolated with locals(), so the variable names
    # above must match the %(...)s placeholders below.
    html_header = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<title>Feed Merger Results</title>
<style>
  body {font-family: Georgia, serif; background-color: white}
  .path {color: gray}
  div.problem {max-width: 500px}
  td,th {background-color: khaki; padding: 2px; font-family:monospace}
  td.problem,th.problem {background-color: dc143c; color: white; padding: 2px;
                         font-family:monospace}
  table {border-spacing: 5px 0px; margin-top: 3px}
  h3.issueHeader {padding-left: 1em}
  .notice {background-color: yellow}
  span.pass {background-color: lightgreen}
  span.fail {background-color: yellow}
  .pass, .fail {font-size: 16pt; padding: 3px}
  ol,.unused {padding-left: 40pt}
  .header {background-color: white; font-family: Georgia, serif; padding: 0px}
  th.header {text-align: right; font-weight: normal; color: gray}
  .footer {font-size: 10pt}
</style>
</head>
<body>
<h1>Feed merger results</h1>
<p>Old feed: <code>%(old_feed_path)s</code></p>
<p>New feed: <code>%(new_feed_path)s</code></p>
%(html_merged_feed_path)s""" % locals()

    html_stats = self._GenerateStatsTable(feed_merger)
    html_summary = self._GenerateSummary()
    html_notices = self._GenerateNotices()
    html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)
    html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)

    html_footer = """
<div class="footer">
Generated using transitfeed version %s on %s.
</div>
</body>
</html>""" % (transitfeed.__version__,
              time.strftime('%B %d, %Y at %I:%M %p %Z'))

    # Encode each fragment before writing so unicode problem text is safe.
    output_file.write(transitfeed.EncodeUnicode(html_header))
    output_file.write(transitfeed.EncodeUnicode(html_stats))
    output_file.write(transitfeed.EncodeUnicode(html_summary))
    output_file.write(transitfeed.EncodeUnicode(html_notices))
    output_file.write(transitfeed.EncodeUnicode(html_errors))
    output_file.write(transitfeed.EncodeUnicode(html_warnings))
    output_file.write(transitfeed.EncodeUnicode(html_footer))
def LoadWithoutErrors(path, memory_db):
  """Return a Schedule object loaded from path; sys.exit for any error.

  Args:
    path: Path of the feed to load.
    memory_db: Passed through to transitfeed.Loader; presumably selects an
      in-memory database — confirm against transitfeed.Loader.

  Returns:
    The loaded transitfeed.Schedule.
  """
  # ExceptionProblemAccumulator makes any loading problem surface as the
  # ExceptionWithContext caught below, so a clean return implies no errors.
  accumulator = transitfeed.ExceptionProblemAccumulator()
  loading_problem_handler = MergeProblemReporter(accumulator)
  try:
    schedule = transitfeed.Loader(path,
                                  memory_db=memory_db,
                                  problems=loading_problem_handler,
                                  extra_validation=True).Load()
  except transitfeed.ExceptionWithContext, e:
    print >>sys.stderr, (
        "\n\nFeeds to merge must load without any errors.\n"
        "While loading %s the following error was found:\n%s\n%s\n" %
        (path, e.FormatContext(), transitfeed.EncodeUnicode(e.FormatProblem())))
    sys.exit(1)
  return schedule
class DataSetMerger(object):
"""A DataSetMerger is in charge of merging a set of entities.
This is an abstract class and should be subclassed for each different entity
type.
Attributes:
ENTITY_TYPE_NAME: The name of the entity type like 'agency' or 'stop'.
FILE_NAME: The name of the file containing this data set like 'agency.txt'.
DATASET_NAME: A name for the dataset like 'Agencies' or 'Stops'.
"""
def __init__(self, feed_merger):
  """Initialise.

  Args:
    feed_merger: The FeedMerger.
  """
  self.feed_merger = feed_merger
  # Merge statistics, updated by _MergeSameId and related methods.
  self._num_merged = 0
  # Entities copied into the merged schedule unmerged, from the old (a) and
  # new (b) feeds respectively.
  self._num_not_merged_a = 0
  self._num_not_merged_b = 0
def _MergeIdentical(self, a, b):
  """Merge two values that are required to be identical.

  Args:
    a: The value from the old schedule.
    b: The value from the new schedule.

  Returns:
    The merged value (b; the two are equal).

  Raises:
    MergeError: The values were not identical.
  """
  if a == b:
    return b
  raise MergeError("values must be identical ('%s' vs '%s')" %
                   (transitfeed.EncodeUnicode(a),
                    transitfeed.EncodeUnicode(b)))
def _MergeIdenticalCaseInsensitive(self, a, b):
  """Merge two strings that must match ignoring case.

  The value from the new schedule always wins when the comparison succeeds.

  Args:
    a: The first string.
    b: The second string.

  Returns:
    The merged string; this is equal to b.

  Raises:
    MergeError: The strings were not the same ignoring case.
  """
  if a.lower() == b.lower():
    return b
  raise MergeError("values must be the same (case insensitive) "
                   "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
                                       transitfeed.EncodeUnicode(b)))
def _MergeOptional(self, a, b):
  """Tries to merge two values which may be None.

  If both values are not None, they are required to be the same and the
  merge is trivial. If one of the values is None and the other is not None,
  the merge results in the one which is not None. If both are None, the merge
  results in None.

  Args:
    a: The first value.
    b: The second value.

  Returns:
    The merged value.

  Raises:
    MergeError: If both values are not None and are not the same.
  """
  # NOTE(review): "not set" is detected by truthiness, not an explicit None
  # check, so falsy values such as '' or 0 are treated as missing too; a
  # falsy/truthy pair merges to the truthy value without error. This looks
  # deliberate for feed fields where '' means absent — confirm before
  # tightening to `is not None`.
  if a and b:
    if a != b:
      raise MergeError("values must be identical if both specified "
                       "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
                                           transitfeed.EncodeUnicode(b)))
  return a or b
def _MergeSameAgency(self, a_agency_id, b_agency_id):
  """Merge agency ids to the corresponding agency id in the merged schedule.

  Args:
    a_agency_id: an agency id from the old schedule
    b_agency_id: an agency id from the new schedule

  Returns:
    The agency id of the corresponding merged agency.

  Raises:
    MergeError: If a_agency_id and b_agency_id do not correspond to the same
                merged agency.
    KeyError: Either a_agency_id or b_agency_id is not a valid agency id.
  """
  # Fall back to each schedule's default agency when no id was supplied.
  a_agency_id = (a_agency_id or
                 self.feed_merger.a_schedule.GetDefaultAgency().agency_id)
  b_agency_id = (b_agency_id or
                 self.feed_merger.b_schedule.GetDefaultAgency().agency_id)
  # _migrated_entity points at the copy of each agency in the merged
  # schedule (presumably attached when agencies were merged earlier —
  # confirm against the agency merger).
  a_agency = self.feed_merger.a_schedule.GetAgency(
      a_agency_id)._migrated_entity
  b_agency = self.feed_merger.b_schedule.GetAgency(
      b_agency_id)._migrated_entity
  if a_agency != b_agency:
    raise MergeError('agency must be the same')
  return a_agency.agency_id
def _SchemedMerge(self, scheme, a, b):
  """Tries to merge two entities according to a merge scheme.

  A scheme is specified by a map where the keys are entity attributes and the
  values are merge functions like Merger._MergeIdentical or
  Merger._MergeOptional. The entity is first migrated to the merged schedule.
  Then the attributes are individually merged as specified by the scheme.

  Args:
    scheme: The merge scheme, a map from entity attributes to merge
            functions.
    a: The entity from the old schedule.
    b: The entity from the new schedule.

  Returns:
    The migrated and merged entity.

  Raises:
    MergeError: One of the attributes was not able to be merged.
  """
  # The new-feed entity is the migration base; False means "keep its id"
  # (see the newid argument of _Migrate).
  migrated = self._Migrate(b, self.feed_merger.b_schedule, False)
  for attr, merger in scheme.items():
    # Missing attributes participate in the merge as None.
    a_attr = getattr(a, attr, None)
    b_attr = getattr(b, attr, None)
    try:
      merged_attr = merger(a_attr, b_attr)
    except MergeError, merge_error:
      raise MergeError("Attribute '%s' could not be merged: %s." % (
          attr, merge_error))
    setattr(migrated, attr, merged_attr)
  return migrated
  def _MergeSameId(self):
    """Tries to merge entities based on their ids.

    This tries to merge only the entities from the old and new schedules which
    have the same id. These are added into the merged schedule. Entities which
    do not merge or do not have the same id as another entity in the other
    schedule are simply migrated into the merged schedule.

    This method is less flexible than _MergeDifferentId since it only tries
    to merge entities which have the same id while _MergeDifferentId tries to
    merge everything. However, it is faster and so should be used whenever
    possible.

    This method makes use of various methods like _Merge and _Migrate which
    are not implemented in the abstract DataSetMerger class. These methods
    should be overwritten in a subclass to allow _MergeSameId to work with
    different entity types.

    Returns:
      The number of merged entities.
    """
    a_not_merged = []
    b_not_merged = []
    # Pass 1: for each entity in A, look for a same-id entity in B and try
    # to merge the pair.
    for a in self._GetIter(self.feed_merger.a_schedule):
      try:
        b = self._GetById(self.feed_merger.b_schedule, self._GetId(a))
      except KeyError:
        # there was no entity in B with the same id as a
        a_not_merged.append(a)
        continue
      try:
        self._Add(a, b, self._MergeEntities(a, b))
        self._num_merged += 1
      except MergeError, merge_error:
        # Same id but conflicting contents: both entities will be migrated
        # separately below, under fresh ids.
        a_not_merged.append(a)
        b_not_merged.append(b)
        self._ReportSameIdButNotMerged(self._GetId(a), merge_error)
    # Pass 2: pick up entities of B whose id never appeared in A. Entities
    # of B with a matching id were already handled (merged or queued) above.
    for b in self._GetIter(self.feed_merger.b_schedule):
      try:
        a = self._GetById(self.feed_merger.a_schedule, self._GetId(b))
      except KeyError:
        # there was no entity in A with the same id as b
        b_not_merged.append(b)
    # migrate the remaining entities
    for a in a_not_merged:
      # Generate a fresh id only when the unmerged entity's id collides with
      # an id in the other schedule (newid is a boolean).
      newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
      self._Add(a, None, self._Migrate(a, self.feed_merger.a_schedule, newid))
    for b in b_not_merged:
      newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
      self._Add(None, b, self._Migrate(b, self.feed_merger.b_schedule, newid))
    self._num_not_merged_a = len(a_not_merged)
    self._num_not_merged_b = len(b_not_merged)
    return self._num_merged
  def _MergeByIdKeepNew(self):
    """Migrate all entities, discarding duplicates from the old/a schedule.

    This method migrates all entities from the new/b schedule. It then
    migrates entities in the old schedule where there isn't already an entity
    with the same ID.

    Unlike _MergeSameId this method migrates entities to the merged schedule
    before comparing their IDs. This allows transfers to be compared when they
    refer to stops that had their ID updated by migration.

    This method makes use of various methods like _Migrate and _Add which
    are not implemented in the abstract DataSetMerger class. These methods
    should be overwritten in a subclass to allow _MergeByIdKeepNew to work
    with different entity types.

    Returns:
      The number of merged entities.
    """
    # Maps from migrated ID to tuple(original object, migrated object)
    a_orig_migrated = {}
    b_orig_migrated = {}
    # Migrate everything first; ids are compared *after* migration. Note
    # that _Migrate is called with two arguments here, so subclasses using
    # this method (e.g. TransferMerger) define a two-argument _Migrate.
    for orig in self._GetIter(self.feed_merger.a_schedule):
      migrated = self._Migrate(orig, self.feed_merger.a_schedule)
      a_orig_migrated[self._GetId(migrated)] = (orig, migrated)
    for orig in self._GetIter(self.feed_merger.b_schedule):
      migrated = self._Migrate(orig, self.feed_merger.b_schedule)
      b_orig_migrated[self._GetId(migrated)] = (orig, migrated)
    # Keep everything from b; keep from a only those entities whose migrated
    # id is not already taken by an entity from b.
    for migrated_id, (orig, migrated) in b_orig_migrated.items():
      self._Add(None, orig, migrated)
      self._num_not_merged_b += 1
    for migrated_id, (orig, migrated) in a_orig_migrated.items():
      if migrated_id not in b_orig_migrated:
        self._Add(orig, None, migrated)
        self._num_not_merged_a += 1
    return self._num_merged
  def _MergeDifferentId(self):
    """Tries to merge all possible combinations of entities.

    This tries to merge every entity in the old schedule with every entity in
    the new schedule. Unlike _MergeSameId, the ids do not need to match.
    However, _MergeDifferentId is much slower than _MergeSameId.

    This method makes use of various methods like _Merge and _Migrate which
    are not implemented in the abstract DataSetMerger class. These methods
    should be overwritten in a subclass to allow _MergeDifferentId to work
    with different entity types.

    Returns:
      The number of merged entities.
    """
    # TODO: The same entity from A could merge with multiple from B.
    # This should either generate an error or should be prevented from
    # happening.
    for a in self._GetIter(self.feed_merger.a_schedule):
      for b in self._GetIter(self.feed_merger.b_schedule):
        try:
          self._Add(a, b, self._MergeEntities(a, b))
          self._num_merged += 1
        except MergeError:
          # This pair did not merge; try the next candidate from B.
          continue
    # Anything not registered in the merge maps by _Add above is migrated
    # unmerged, with a fresh id if its id collides with the other schedule.
    for a in self._GetIter(self.feed_merger.a_schedule):
      if a not in self.feed_merger.a_merge_map:
        self._num_not_merged_a += 1
        newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
        self._Add(a, None,
                  self._Migrate(a, self.feed_merger.a_schedule, newid))
    for b in self._GetIter(self.feed_merger.b_schedule):
      if b not in self.feed_merger.b_merge_map:
        self._num_not_merged_b += 1
        newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
        self._Add(None, b,
                  self._Migrate(b, self.feed_merger.b_schedule, newid))
    return self._num_merged
def _ReportSameIdButNotMerged(self, entity_id, reason):
"""Report that two entities have the same id but could not be merged.
Args:
entity_id: The id of the entities.
reason: A string giving a reason why they could not be merged.
"""
self.feed_merger.problem_reporter.SameIdButNotMerged(self,
entity_id,
reason)
def _GetIter(self, schedule):
"""Returns an iterator of entities for this data set in the given schedule.
This method usually corresponds to one of the methods from
transitfeed.Schedule like GetAgencyList() or GetRouteList().
Note: This method must be overwritten in a subclass if _MergeSameId or
_MergeDifferentId are to be used.
Args:
schedule: Either the old or new schedule from the FeedMerger.
Returns:
An iterator of entities.
"""
raise NotImplementedError()
def _GetById(self, schedule, entity_id):
"""Returns an entity given its id.
This method usually corresponds to one of the methods from
transitfeed.Schedule like GetAgency() or GetRoute().
Note: This method must be overwritten in a subclass if _MergeSameId or
_MergeDifferentId are to be used.
Args:
schedule: Either the old or new schedule from the FeedMerger.
entity_id: The id string of the entity.
Returns:
The entity with the given id.
Raises:
KeyError: There is not entity with the given id.
"""
raise NotImplementedError()
def _HasId(self, schedule, entity_id):
"""Check if the schedule has an entity with the given id.
Args:
schedule: The transitfeed.Schedule instance to look in.
entity_id: The id of the entity.
Returns:
True if the schedule has an entity with the id or False if not.
"""
try:
self._GetById(schedule, entity_id)
has = True
except KeyError:
has = False
return has
def _MergeEntities(self, a, b):
"""Tries to merge the two entities.
Note: This method must be overwritten in a subclass if _MergeSameId or
_MergeDifferentId are to be used.
Args:
a: The entity from the old schedule.
b: The entity from the new schedule.
Returns:
The merged migrated entity.
Raises:
MergeError: The entities were not able to be merged.
"""
raise NotImplementedError()
def _Migrate(self, entity, schedule, newid):
"""Migrates the entity to the merge schedule.
This involves copying the entity and updating any ids to point to the
corresponding entities in the merged schedule. If newid is True then
a unique id is generated for the migrated entity using the original id
as a prefix.
Note: This method must be overwritten in a subclass if _MergeSameId or
_MergeDifferentId are to be used.
Args:
entity: The entity to migrate.
schedule: The schedule from the FeedMerger that contains ent.
newid: Whether to generate a new id (True) or keep the original (False).
Returns:
The migrated entity.
"""
raise NotImplementedError()
def _Add(self, a, b, migrated):
"""Adds the migrated entity to the merged schedule.
If a and b are both not None, it means that a and b were merged to create
migrated. If one of a or b is None, it means that the other was not merged
but has been migrated. This mapping is registered with the FeedMerger.
Note: This method must be overwritten in a subclass if _MergeSameId or
_MergeDifferentId are to be used.
Args:
a: The original entity from the old schedule.
b: The original entity from the new schedule.
migrated: The migrated entity for the merged schedule.
"""
raise NotImplementedError()
def _GetId(self, entity):
"""Returns the id of the given entity.
Note: This method must be overwritten in a subclass if _MergeSameId or
_MergeDifferentId are to be used.
Args:
entity: The entity.
Returns:
The id of the entity as a string or None.
"""
raise NotImplementedError()
def MergeDataSets(self):
"""Merge the data sets.
This method is called in FeedMerger.MergeSchedule().
Note: This method must be overwritten in a subclass.
Returns:
A boolean which is False if the dataset was unable to be merged and
as a result the entire merge should be aborted. In this case, the problem
will have been reported using the FeedMerger's problem reporter.
"""
raise NotImplementedError()
def GetMergeStats(self):
"""Returns some merge statistics.
These are given as a tuple (merged, not_merged_a, not_merged_b) where
"merged" is the number of merged entities, "not_merged_a" is the number of
entities from the old schedule that were not merged and "not_merged_b" is
the number of entities from the new schedule that were not merged.
The return value can also be None. This means that there are no statistics
for this entity type.
The statistics are only available after MergeDataSets() has been called.
Returns:
Either the statistics tuple or None.
"""
return (self._num_merged, self._num_not_merged_a, self._num_not_merged_b)
class AgencyMerger(DataSetMerger):
  """A DataSetMerger for agencies."""

  ENTITY_TYPE_NAME = 'agency'
  FILE_NAME = 'agency.txt'
  DATASET_NAME = 'Agencies'

  def _GetIter(self, schedule):
    return schedule.GetAgencyList()

  def _GetById(self, schedule, agency_id):
    return schedule.GetAgency(agency_id)

  def _MergeEntities(self, a, b):
    """Merges two agencies.

    To be merged, they are required to have the same id, name, url and
    timezone. The remaining language attribute is taken from the new agency.

    Args:
      a: The first agency.
      b: The second agency.

    Returns:
      The merged agency.

    Raises:
      MergeError: The agencies could not be merged.
    """

    def _MergeAgencyId(a_agency_id, b_agency_id):
      """Merge two agency ids, treating None and '' as equal.

      This is the only difference from _MergeIdentical().

      Args:
        a_agency_id: The first agency id.
        b_agency_id: The second agency id.

      Returns:
        The merged agency id.

      Raises:
        MergeError: The agency ids could not be merged.
      """
      return self._MergeIdentical(a_agency_id or None, b_agency_id or None)

    scheme = {'agency_id': _MergeAgencyId,
              'agency_name': self._MergeIdentical,
              'agency_url': self._MergeIdentical,
              'agency_timezone': self._MergeIdentical}
    return self._SchemedMerge(scheme, a, b)

  def _Migrate(self, entity, schedule, newid):
    migrated_agency = transitfeed.Agency(field_dict=entity)
    if newid:
      migrated_agency.agency_id = self.feed_merger.GenerateId(
          entity.agency_id)
    return migrated_agency

  def _Add(self, a, b, migrated):
    self.feed_merger.Register(a, b, migrated)
    self.feed_merger.merged_schedule.AddAgencyObject(migrated)

  def _GetId(self, entity):
    return entity.agency_id

  def MergeDataSets(self):
    self._MergeSameId()
    return True
class StopMerger(DataSetMerger):
  """A DataSetMerger for stops.

  Attributes:
    largest_stop_distance: The largest distance allowed between stops that
      will be merged in metres.
  """

  ENTITY_TYPE_NAME = 'stop'
  FILE_NAME = 'stops.txt'
  DATASET_NAME = 'Stops'

  largest_stop_distance = 10.0

  def __init__(self, feed_merger):
    DataSetMerger.__init__(self, feed_merger)
    # Stops are buffered here instead of being added to merged_schedule
    # directly; see _Add and MergeDataSets for why.
    self._merged = []        # (a, b, migrated) triples that merged
    self._a_not_merged = []  # (a, migrated) migrated only from the old feed
    self._b_not_merged = []  # (b, migrated) migrated only from the new feed

  def SetLargestStopDistance(self, distance):
    """Sets largest_stop_distance."""
    self.largest_stop_distance = distance

  def _GetIter(self, schedule):
    return schedule.GetStopList()

  def _GetById(self, schedule, stop_id):
    return schedule.GetStop(stop_id)

  def _MergeEntities(self, a, b):
    """Merges two stops.

    For the stops to be merged, they must have:
      - the same stop_id
      - the same stop_name (case insensitive)
      - the same zone_id
      - locations less than largest_stop_distance apart

    The other attributes can have arbitary changes. The merged attributes are
    taken from the new stop.

    Args:
      a: The first stop.
      b: The second stop.

    Returns:
      The merged stop.

    Raises:
      MergeError: The stops could not be merged.
    """
    distance = transitfeed.ApproximateDistanceBetweenStops(a, b)
    if distance > self.largest_stop_distance:
      raise MergeError("Stops are too far apart: %.1fm "
                       "(largest_stop_distance is %.1fm)." %
                       (distance, self.largest_stop_distance))
    scheme = {'stop_id': self._MergeIdentical,
              'stop_name': self._MergeIdenticalCaseInsensitive,
              'zone_id': self._MergeIdentical,
              'location_type': self._MergeIdentical}
    return self._SchemedMerge(scheme, a, b)

  def _Migrate(self, entity, schedule, newid):
    migrated_stop = transitfeed.Stop(field_dict=entity)
    if newid:
      migrated_stop.stop_id = self.feed_merger.GenerateId(entity.stop_id)
    return migrated_stop

  def _Add(self, a, b, migrated_stop):
    self.feed_merger.Register(a, b, migrated_stop)
    # The migrated_stop will be added to feed_merger.merged_schedule later
    # since adding must be done after the zone_ids have been finalized.
    if a and b:
      self._merged.append((a, b, migrated_stop))
    elif a:
      self._a_not_merged.append((a, migrated_stop))
    elif b:
      self._b_not_merged.append((b, migrated_stop))

  def _GetId(self, entity):
    return entity.stop_id

  def MergeDataSets(self):
    num_merged = self._MergeSameId()
    fm = self.feed_merger

    # Now do all the zone_id and parent_station mapping.

    # The zone_ids for merged stops can be preserved: _MergeEntities
    # guarantees the two zone_ids are identical.
    for (a, b, merged_stop) in self._merged:
      assert a.zone_id == b.zone_id
      fm.a_zone_map[a.zone_id] = a.zone_id
      fm.b_zone_map[b.zone_id] = b.zone_id
      merged_stop.zone_id = a.zone_id
      if merged_stop.parent_station:
        # Merged stop has a parent. Update it to be the parent it had in b.
        parent_in_b = fm.b_schedule.GetStop(b.parent_station)
        merged_stop.parent_station = fm.b_merge_map[parent_in_b].stop_id
      fm.merged_schedule.AddStopObject(merged_stop)

    self._UpdateAndMigrateUnmerged(self._a_not_merged, fm.a_zone_map,
                                   fm.a_merge_map, fm.a_schedule)
    self._UpdateAndMigrateUnmerged(self._b_not_merged, fm.b_zone_map,
                                   fm.b_merge_map, fm.b_schedule)

    # Parenthesized single-argument print produces identical output under
    # Python 2 (print statement) and Python 3 (print function).
    print('Stops merged: %d of %d, %d' % (
        num_merged,
        len(fm.a_schedule.GetStopList()),
        len(fm.b_schedule.GetStopList())))
    return True

  def _UpdateAndMigrateUnmerged(self, not_merged_stops, zone_map, merge_map,
                                schedule):
    """Correct references in migrated unmerged stops and add to merged_schedule.

    For stops migrated from one of the input feeds to the output feed update
    the parent_station and zone_id references to point to objects in the
    output feed. Then add the migrated stop to the new schedule.

    Args:
      not_merged_stops: list of stops from one input feed that have not been
        merged
      zone_map: map from zone_id in the input feed to zone_id in the output
        feed
      merge_map: map from Stop objects in the input feed to Stop objects in
        the output feed
      schedule: the input Schedule object
    """
    # For the unmerged stops, we reuse an already-mapped zone_id if possible;
    # if not, we generate a new one and record it in the map.
    for stop, migrated_stop in not_merged_stops:
      if stop.zone_id in zone_map:
        migrated_stop.zone_id = zone_map[stop.zone_id]
      else:
        migrated_stop.zone_id = self.feed_merger.GenerateId(stop.zone_id)
        zone_map[stop.zone_id] = migrated_stop.zone_id
      if stop.parent_station:
        parent_original = schedule.GetStop(stop.parent_station)
        migrated_stop.parent_station = merge_map[parent_original].stop_id
      self.feed_merger.merged_schedule.AddStopObject(migrated_stop)
class RouteMerger(DataSetMerger):
  """A DataSetMerger for routes."""

  ENTITY_TYPE_NAME = 'route'
  FILE_NAME = 'routes.txt'
  DATASET_NAME = 'Routes'

  def _GetIter(self, schedule):
    return schedule.GetRouteList()

  def _GetById(self, schedule, route_id):
    return schedule.GetRoute(route_id)

  def _MergeEntities(self, a, b):
    """Merge two routes.

    Names, type, id and agency must match; url and colours are merged as
    optional attributes.
    """
    scheme = {'route_short_name': self._MergeIdentical,
              'route_long_name': self._MergeIdentical,
              'agency_id': self._MergeSameAgency,
              'route_type': self._MergeIdentical,
              'route_id': self._MergeIdentical,
              'route_url': self._MergeOptional,
              'route_color': self._MergeOptional,
              'route_text_color': self._MergeOptional}
    return self._SchemedMerge(scheme, a, b)

  def _Migrate(self, entity, schedule, newid):
    migrated_route = transitfeed.Route(field_dict=entity)
    if newid:
      migrated_route.route_id = self.feed_merger.GenerateId(entity.route_id)
    # Point the migrated route at the migrated copy of its agency (falling
    # back to the schedule's default agency when no agency_id is set).
    if entity.agency_id:
      original_agency = schedule.GetAgency(entity.agency_id)
    else:
      original_agency = schedule.GetDefaultAgency()
    migrated_route.agency_id = original_agency._migrated_entity.agency_id
    return migrated_route

  def _Add(self, a, b, migrated_route):
    self.feed_merger.Register(a, b, migrated_route)
    self.feed_merger.merged_schedule.AddRouteObject(migrated_route)

  def _GetId(self, entity):
    return entity.route_id

  def MergeDataSets(self):
    self._MergeSameId()
    return True
class ServicePeriodMerger(DataSetMerger):
  """A DataSetMerger for service periods.

  Attributes:
    require_disjoint_calendars: A boolean specifying whether to require
      disjoint calendars when merging (True) or not (False).
  """

  ENTITY_TYPE_NAME = 'service period'
  FILE_NAME = 'calendar.txt/calendar_dates.txt'
  DATASET_NAME = 'Service Periods'

  def __init__(self, feed_merger):
    DataSetMerger.__init__(self, feed_merger)
    self.require_disjoint_calendars = True

  def _ReportSameIdButNotMerged(self, entity_id, reason):
    # Service periods are never merged (_MergeEntities always raises), so a
    # same-id collision is the expected case and not worth reporting.
    pass

  def _GetIter(self, schedule):
    return schedule.GetServicePeriodList()

  def _GetById(self, schedule, service_id):
    return schedule.GetServicePeriod(service_id)

  def _MergeEntities(self, a, b):
    """Tries to merge two service periods.

    Note: Currently this just raises a MergeError since service periods
    cannot be merged.

    Args:
      a: The first service period.
      b: The second service period.

    Returns:
      The merged service period.

    Raises:
      MergeError: When the service periods could not be merged.
    """
    raise MergeError('Cannot merge service periods')

  def _Migrate(self, original_service_period, schedule, newid):
    # ServicePeriod has no field_dict constructor so fields are copied by
    # hand; day_of_week and date_exceptions are copied so the originals
    # are not shared with the migrated entity.
    migrated_service_period = transitfeed.ServicePeriod()
    migrated_service_period.day_of_week = list(
        original_service_period.day_of_week)
    migrated_service_period.start_date = original_service_period.start_date
    migrated_service_period.end_date = original_service_period.end_date
    migrated_service_period.date_exceptions = dict(
        original_service_period.date_exceptions)
    if newid:
      migrated_service_period.service_id = self.feed_merger.GenerateId(
          original_service_period.service_id)
    else:
      migrated_service_period.service_id = original_service_period.service_id
    return migrated_service_period

  def _Add(self, a, b, migrated_service_period):
    self.feed_merger.Register(a, b, migrated_service_period)
    self.feed_merger.merged_schedule.AddServicePeriodObject(
        migrated_service_period)

  def _GetId(self, entity):
    return entity.service_id

  def MergeDataSets(self):
    if self.require_disjoint_calendars and not self.CheckDisjointCalendars():
      self.feed_merger.problem_reporter.CalendarsNotDisjoint(self)
      return False
    self._MergeSameId()
    self.feed_merger.problem_reporter.MergeNotImplemented(self)
    return True

  def DisjoinCalendars(self, cutoff):
    """Forces the old and new calendars to be disjoint about a cutoff date.

    This truncates the service periods of the old schedule so that service
    stops one day before the given cutoff date and truncates the new schedule
    so that service only begins on the cutoff date.

    Args:
      cutoff: The cutoff date as a string in YYYYMMDD format. The timezone
              is the same as used in the calendar.txt file.
    """

    def TruncatePeriod(service_period, start, end):
      """Truncate the service period into the range [start, end].

      Args:
        service_period: The service period to truncate.
        start: The start date as a string in YYYYMMDD format.
        end: The end date as a string in YYYYMMDD format.
      """
      service_period.start_date = max(service_period.start_date, start)
      service_period.end_date = min(service_period.end_date, end)
      dates_to_delete = []
      for k in service_period.date_exceptions:
        if (k < start) or (k > end):
          dates_to_delete.append(k)
      for k in dates_to_delete:
        del service_period.date_exceptions[k]

    # find the date one day before cutoff
    year = int(cutoff[:4])
    month = int(cutoff[4:6])
    day = int(cutoff[6:8])
    cutoff_date = datetime.date(year, month, day)
    one_day_delta = datetime.timedelta(days=1)
    before = (cutoff_date - one_day_delta).strftime('%Y%m%d')

    for a in self.feed_merger.a_schedule.GetServicePeriodList():
      # '' sorts before every YYYYMMDD string, i.e. "no lower bound". The
      # previous code passed the int 0 here, which only worked because of
      # Python 2's arbitrary int-vs-str ordering.
      TruncatePeriod(a, '', before)
    for b in self.feed_merger.b_schedule.GetServicePeriodList():
      TruncatePeriod(b, cutoff, '9' * 8)

  def CheckDisjointCalendars(self):
    """Check whether any old service periods intersect with any new ones.

    This is a rather coarse check based on
    transitfeed.SevicePeriod.GetDateRange.

    Returns:
      True if the calendars are disjoint or False if not.
    """
    # TODO: Do an exact check here.

    a_service_periods = self.feed_merger.a_schedule.GetServicePeriodList()
    b_service_periods = self.feed_merger.b_schedule.GetServicePeriodList()

    for a_service_period in a_service_periods:
      a_start, a_end = a_service_period.GetDateRange()
      for b_service_period in b_service_periods:
        b_start, b_end = b_service_period.GetDateRange()
        overlap_start = max(a_start, b_start)
        overlap_end = min(a_end, b_end)
        if overlap_end >= overlap_start:
          return False
    return True

  def GetMergeStats(self):
    # Service periods are never merged so there are no useful statistics.
    return None
class FareMerger(DataSetMerger):
  """A DataSetMerger for fares."""

  ENTITY_TYPE_NAME = 'fare attribute'
  FILE_NAME = 'fare_attributes.txt'
  DATASET_NAME = 'Fares'

  def _GetIter(self, schedule):
    return schedule.GetFareAttributeList()

  def _GetById(self, schedule, fare_id):
    return schedule.GetFareAttribute(fare_id)

  def _MergeEntities(self, a, b):
    """Merges the fares if all the attributes are the same."""
    scheme = {'price': self._MergeIdentical,
              'currency_type': self._MergeIdentical,
              'payment_method': self._MergeIdentical,
              'transfers': self._MergeIdentical,
              'transfer_duration': self._MergeIdentical}
    return self._SchemedMerge(scheme, a, b)

  def _Migrate(self, original_fare, schedule, newid):
    migrated_fare = transitfeed.FareAttribute(
        field_dict=original_fare)
    if newid:
      migrated_fare.fare_id = self.feed_merger.GenerateId(
          original_fare.fare_id)
    return migrated_fare

  def _Add(self, a, b, migrated_fare):
    self.feed_merger.Register(a, b, migrated_fare)
    self.feed_merger.merged_schedule.AddFareAttributeObject(migrated_fare)

  def _GetId(self, fare):
    return fare.fare_id

  def MergeDataSets(self):
    num_merged = self._MergeSameId()
    # Parenthesized single-argument print produces identical output under
    # Python 2 and Python 3.
    print('Fares merged: %d of %d, %d' % (
        num_merged,
        len(self.feed_merger.a_schedule.GetFareAttributeList()),
        len(self.feed_merger.b_schedule.GetFareAttributeList())))
    return True
class TransferMerger(DataSetMerger):
  """A DataSetMerger for transfers.

  Copy every transfer from the a/old and b/new schedules into the merged
  schedule, translating from_stop_id and to_stop_id. Where a transfer ID is
  found in both source schedules only the one from the b/new schedule is
  migrated.

  Only one transfer is processed per ID. Duplicates within a schedule are
  ignored.
  """

  ENTITY_TYPE_NAME = 'transfer'
  FILE_NAME = 'transfers.txt'
  DATASET_NAME = 'Transfers'

  def _GetIter(self, schedule):
    return schedule.GetTransferIter()

  def _GetId(self, transfer):
    return transfer._ID()

  def _Migrate(self, original_transfer, schedule):
    """Copy original_transfer and remap its stop ids to the merged schedule.

    Note: unlike other mergers this takes no newid argument because
    _MergeByIdKeepNew migrates entities before comparing their ids.
    """
    # Make a copy of the original and then fix the stop_id references.
    migrated_transfer = transitfeed.Transfer(field_dict=original_transfer)
    if original_transfer.from_stop_id:
      migrated_transfer.from_stop_id = schedule.GetStop(
          original_transfer.from_stop_id)._migrated_entity.stop_id
    # Test the original here for consistency with the from_stop_id branch
    # above; the old code tested migrated_transfer.to_stop_id, which holds
    # the same value at this point but obscured the symmetry.
    if original_transfer.to_stop_id:
      migrated_transfer.to_stop_id = schedule.GetStop(
          original_transfer.to_stop_id)._migrated_entity.stop_id
    return migrated_transfer

  def _Add(self, a, b, migrated_transfer):
    self.feed_merger.Register(a, b, migrated_transfer)
    self.feed_merger.merged_schedule.AddTransferObject(migrated_transfer)

  def MergeDataSets(self):
    # If both schedules contain rows with equivalent from_stop_id and
    # to_stop_id but different transfer_type or min_transfer_time only the
    # transfer from b will be in the output.
    self._MergeByIdKeepNew()
    # Parenthesized single-argument print produces identical output under
    # Python 2 and Python 3.
    print('Transfers merged: %d of %d, %d' % (
        self._num_merged,
        # http://mail.python.org/pipermail/baypiggies/2008-August/003817.html
        # claims this is a good way to find number of items in an iterable.
        sum(1 for _ in self.feed_merger.a_schedule.GetTransferIter()),
        sum(1 for _ in self.feed_merger.b_schedule.GetTransferIter())))
    return True
class ShapeMerger(DataSetMerger):
  """A DataSetMerger for shapes.

  Merging two shapes simply keeps the new shape. Two shapes merge only when
  their shape_ids are equal and both pairs of endpoints lie within
  largest_shape_distance of each other.

  Attributes:
    largest_shape_distance: The largest distance between the endpoints of two
      shapes allowed for them to be merged in metres.
  """

  ENTITY_TYPE_NAME = 'shape'
  FILE_NAME = 'shapes.txt'
  DATASET_NAME = 'Shapes'

  largest_shape_distance = 10.0

  def SetLargestShapeDistance(self, distance):
    """Sets largest_shape_distance."""
    self.largest_shape_distance = distance

  def _GetIter(self, schedule):
    return schedule.GetShapeList()

  def _GetById(self, schedule, shape_id):
    return schedule.GetShape(shape_id)

  def _MergeEntities(self, a, b):
    """Merges the shapes by taking the new shape.

    Args:
      a: The first transitfeed.Shape instance.
      b: The second transitfeed.Shape instance.

    Returns:
      The merged shape.

    Raises:
      MergeError: If the ids are different or if the endpoints are further
        than largest_shape_distance apart.
    """
    if a.shape_id != b.shape_id:
      raise MergeError('shape_id must be the same')

    # Compare only the first and last points of each shape; points are
    # (lat, lon, dist) tuples so [:2] extracts the coordinates.
    start_distance = ApproximateDistanceBetweenPoints(
        a.points[0][:2], b.points[0][:2])
    end_distance = ApproximateDistanceBetweenPoints(
        a.points[-1][:2], b.points[-1][:2])
    distance = max(start_distance, end_distance)
    if distance > self.largest_shape_distance:
      raise MergeError('The shape endpoints are too far away: %.1fm '
                       '(largest_shape_distance is %.1fm)' %
                       (distance, self.largest_shape_distance))

    return self._Migrate(b, self.feed_merger.b_schedule, False)

  def _Migrate(self, original_shape, schedule, newid):
    migrated_shape = transitfeed.Shape(original_shape.shape_id)
    if newid:
      migrated_shape.shape_id = self.feed_merger.GenerateId(
          original_shape.shape_id)
    for (lat, lon, dist) in original_shape.points:
      migrated_shape.AddPoint(lat=lat, lon=lon, distance=dist)
    return migrated_shape

  def _Add(self, a, b, migrated_shape):
    self.feed_merger.Register(a, b, migrated_shape)
    self.feed_merger.merged_schedule.AddShapeObject(migrated_shape)

  def _GetId(self, shape):
    return shape.shape_id

  def MergeDataSets(self):
    self._MergeSameId()
    return True
class TripMerger(DataSetMerger):
  """A DataSetMerger for trips.

  This implementation makes no attempt to merge trips, it simply migrates
  them all to the merged feed.
  """

  ENTITY_TYPE_NAME = 'trip'
  FILE_NAME = 'trips.txt'
  DATASET_NAME = 'Trips'

  def _ReportSameIdButNotMerged(self, trip_id, reason):
    # Trips are never merged (_MergeEntities always raises), so a same-id
    # collision is the expected case and not worth reporting.
    pass

  def _GetIter(self, schedule):
    return schedule.GetTripList()

  def _GetById(self, schedule, trip_id):
    return schedule.GetTrip(trip_id)

  def _MergeEntities(self, a, b):
    """Raises a MergeError because currently trips cannot be merged."""
    raise MergeError('Cannot merge trips')

  def _Migrate(self, original_trip, schedule, newid):
    """Copy original_trip into the merged schedule, remapping references.

    The trip's route_id, service_id, shape_id and stop ids are rewritten to
    point at the migrated counterparts; block_id is prefixed with the source
    schedule name ('a' or 'b') to keep blocks from the two feeds apart.
    """
    migrated_trip = transitfeed.Trip(field_dict=original_trip)
    # Make new trip_id first. AddTripObject reports a problem if it conflicts
    # with an existing id.
    if newid:
      migrated_trip.trip_id = self.feed_merger.GenerateId(
          original_trip.trip_id)
    migrated_trip.original_trip_id = original_trip.trip_id
    # Need to add trip to schedule before copying stoptimes
    self.feed_merger.merged_schedule.AddTripObject(migrated_trip,
                                                   validate=False)
    # Pick the merge map for whichever input schedule this trip came from.
    if schedule == self.feed_merger.a_schedule:
      merge_map = self.feed_merger.a_merge_map
    else:
      merge_map = self.feed_merger.b_merge_map
    original_route = schedule.GetRoute(original_trip.route_id)
    migrated_trip.route_id = merge_map[original_route].route_id
    original_service_period = schedule.GetServicePeriod(
        original_trip.service_id)
    migrated_trip.service_id = merge_map[original_service_period].service_id
    if original_trip.block_id:
      migrated_trip.block_id = '%s_%s' % (
          self.feed_merger.GetScheduleName(schedule),
          original_trip.block_id)
    if original_trip.shape_id:
      original_shape = schedule.GetShape(original_trip.shape_id)
      migrated_trip.shape_id = merge_map[original_shape].shape_id
    # Copy every stop time, pointing it at the migrated stop object.
    for original_stop_time in original_trip.GetStopTimes():
      migrated_stop_time = transitfeed.StopTime(
          None,
          merge_map[original_stop_time.stop],
          original_stop_time.arrival_time,
          original_stop_time.departure_time,
          original_stop_time.stop_headsign,
          original_stop_time.pickup_type,
          original_stop_time.drop_off_type,
          original_stop_time.shape_dist_traveled,
          original_stop_time.arrival_secs,
          original_stop_time.departure_secs)
      migrated_trip.AddStopTimeObject(migrated_stop_time)
    # Copy headway (frequency) entries verbatim.
    for headway_period in original_trip.GetFrequencyTuples():
      migrated_trip.AddFrequency(*headway_period)
    return migrated_trip

  def _Add(self, a, b, migrated_trip):
    # Validate now, since it wasn't done in _Migrate
    migrated_trip.Validate(self.feed_merger.merged_schedule.problem_reporter)
    self.feed_merger.Register(a, b, migrated_trip)

  def _GetId(self, trip):
    return trip.trip_id

  def MergeDataSets(self):
    self._MergeSameId()
    self.feed_merger.problem_reporter.MergeNotImplemented(self)
    return True

  def GetMergeStats(self):
    # Trips are never merged so there are no useful statistics.
    return None
class FareRuleMerger(DataSetMerger):
  """A DataSetMerger for fare rules."""

  ENTITY_TYPE_NAME = 'fare rule'
  FILE_NAME = 'fare_rules.txt'
  DATASET_NAME = 'Fare Rules'

  def MergeDataSets(self):
    """Merge the fare rule datasets.

    The fare rules are first migrated. Merging is done by removing any
    duplicate rules.

    Returns:
      True since fare rules can always be merged.
    """
    rules = set()
    for (schedule, merge_map, zone_map) in ([self.feed_merger.a_schedule,
                                             self.feed_merger.a_merge_map,
                                             self.feed_merger.a_zone_map],
                                            [self.feed_merger.b_schedule,
                                             self.feed_merger.b_merge_map,
                                             self.feed_merger.b_zone_map]):
      for fare in schedule.GetFareAttributeList():
        for fare_rule in fare.GetFareRuleList():
          # Translate each id in the rule to its merged-schedule value; the
          # 'x and ...' forms pass empty/None ids through unchanged.
          fare_id = merge_map[
              schedule.GetFareAttribute(fare_rule.fare_id)].fare_id
          route_id = (fare_rule.route_id and
                      merge_map[schedule.GetRoute(fare_rule.route_id)].route_id)
          origin_id = (fare_rule.origin_id and
                       zone_map[fare_rule.origin_id])
          destination_id = (fare_rule.destination_id and
                            zone_map[fare_rule.destination_id])
          contains_id = (fare_rule.contains_id and
                         zone_map[fare_rule.contains_id])
          # The set de-duplicates identical rules from the two feeds.
          rules.add((fare_id, route_id, origin_id, destination_id,
                     contains_id))
    for fare_rule_tuple in rules:
      migrated_fare_rule = transitfeed.FareRule(*fare_rule_tuple)
      self.feed_merger.merged_schedule.AddFareRuleObject(migrated_fare_rule)

    if rules:
      self.feed_merger.problem_reporter.FareRulesBroken(self)
    # Parenthesized single-argument print produces identical output under
    # Python 2 and Python 3.
    print('Fare Rules: union has %d fare rules' % len(rules))
    return True

  def GetMergeStats(self):
    # Fare rules are de-duplicated rather than merged pairwise, so the
    # standard (merged, not_merged_a, not_merged_b) counters do not apply.
    return None
class FeedMerger(object):
"""A class for merging two whole feeds.
This class takes two instances of transitfeed.Schedule and uses
DataSetMerger instances to merge the feeds and produce the resultant
merged feed.
Attributes:
a_schedule: The old transitfeed.Schedule instance.
b_schedule: The new transitfeed.Schedule instance.
problem_reporter: The merge problem reporter.
merged_schedule: The merged transitfeed.Schedule instance.
a_merge_map: A map from old entities to merged entities.
b_merge_map: A map from new entities to merged entities.
a_zone_map: A map from old zone ids to merged zone ids.
b_zone_map: A map from new zone ids to merged zone ids.
"""
def __init__(self, a_schedule, b_schedule, merged_schedule,
problem_reporter):
"""Initialise the merger.
Once this initialiser has been called, a_schedule and b_schedule should
not be modified.
Args:
a_schedule: The old schedule, an instance of transitfeed.Schedule.
b_schedule: The new schedule, an instance of transitfeed.Schedule.
problem_reporter: The problem reporter, an instance of
transitfeed.ProblemReporter.
"""
self.a_schedule = a_schedule
self.b_schedule = b_schedule
self.merged_schedule = merged_schedule
self.a_merge_map = {}
self.b_merge_map = {}
self.a_zone_map = {}
self.b_zone_map = {}
self._mergers = []
self._idnum = max(self._FindLargestIdPostfixNumber(self.a_schedule),
self._FindLargestIdPostfixNumber(self.b_schedule))
self.problem_reporter = problem_reporter
def _FindLargestIdPostfixNumber(self, schedule):
"""Finds the largest integer used as the ending of an id in the schedule.
Args:
schedule: The schedule to check.
Returns:
The maximum integer used as an ending for an id.
"""
postfix_number_re = re.compile('(\d+)$')
def ExtractPostfixNumber(entity_id):
"""Try to extract an integer from the end of entity_id.
If entity_id is None or if there is no integer ending the id, zero is
returned.
Args:
entity_id: An id string or None.
Returns:
An integer ending the entity_id or zero.
"""
if entity_id is None:
return 0
match = postfix_number_re.search(entity_id)
if match is not None:
return int(match.group(1))
else:
return 0
id_data_sets = {'agency_id': schedule.GetAgencyList(),
'stop_id': schedule.GetStopList(),
'route_id': schedule.GetRouteList(),
'trip_id': schedule.GetTripList(),
'service_id': schedule.GetServicePeriodList(),
'fare_id': schedule.GetFareAttributeList(),
'shape_id': schedule.GetShapeList()}
max_postfix_number = 0
for id_name, entity_list in id_data_sets.items():
for entity in entity_list:
entity_id = getattr(entity, id_name)
postfix_number = ExtractPostfixNumber(entity_id)
max_postfix_number = max(max_postfix_number, postfix_number)
return max_postfix_number
def GetScheduleName(self, schedule):
"""Returns a single letter identifier for the schedule.
This only works for the old and new schedules which return 'a' and 'b'
respectively. The purpose of such identifiers is for generating ids.
Args:
schedule: The transitfeed.Schedule instance.
Returns:
The schedule identifier.
Raises:
KeyError: schedule is not the old or new schedule.
"""
return {self.a_schedule: 'a', self.b_schedule: 'b'}[schedule]
def GenerateId(self, entity_id=None):
"""Generate a unique id based on the given id.
This is done by appending a counter which is then incremented. The
counter is initialised at the maximum number used as an ending for
any id in the old and new schedules.
Args:
entity_id: The base id string. This is allowed to be None.
Returns:
The generated id.
"""
self._idnum += 1
if entity_id:
return '%s_merged_%d' % (entity_id, self._idnum)
else:
return 'merged_%d' % self._idnum
def Register(self, a, b, migrated_entity):
"""Registers a merge mapping.
If a and b are both not None, this means that entities a and b were merged
to produce migrated_entity. If one of a or b are not None, then it means
it was not merged but simply migrated.
The effect of a call to register is to update a_merge_map and b_merge_map
according to the merge. Also the private attributes _migrated_entity of a
and b are set to migrated_entity.
Args:
a: The entity from the old feed or None.
b: The entity from the new feed or None.
migrated_entity: The migrated entity.
"""
# There are a few places where code needs to find the corresponding
# migrated entity of an object without knowing in which original schedule
# the entity started. With a_merge_map and b_merge_map both have to be
# checked. Use of the _migrated_entity attribute allows the migrated entity
# to be directly found without the schedule. The merge maps also require
# that all objects be hashable. GenericGTFSObject is at the moment, but
# this is a bug. See comment in transitfeed.GenericGTFSObject.
if a is not None:
self.a_merge_map[a] = migrated_entity
a._migrated_entity = migrated_entity
if b is not None:
self.b_merge_map[b] = migrated_entity
b._migrated_entity = migrated_entity
def AddMerger(self, merger):
"""Add a DataSetMerger to be run by Merge().
Args:
merger: The DataSetMerger instance.
"""
self._mergers.append(merger)
def AddDefaultMergers(self):
"""Adds the default DataSetMergers defined in this module."""
self.AddMerger(AgencyMerger(self))
self.AddMerger(StopMerger(self))
self.AddMerger(RouteMerger(self))
self.AddMerger(ServicePeriodMerger(self))
self.AddMerger(FareMerger(self))
self.AddMerger(ShapeMerger(self))
self.AddMerger(TripMerger(self))
self.AddMerger(FareRuleMerger(self))
def GetMerger(self, cls):
"""Looks for an added DataSetMerger derived from the given class.
Args:
cls: A class derived from DataSetMerger.
Returns:
The matching DataSetMerger instance.
Raises:
LookupError: No matching DataSetMerger has been added.
"""
for merger in self._mergers:
if isinstance(merger, cls):
return merger
raise LookupError('No matching DataSetMerger found')
def GetMergerList(self):
"""Returns the list of DataSetMerger instances that have been added."""
return self._mergers
def MergeSchedules(self):
"""Merge the schedules.
This is done by running the DataSetMergers that have been added with
AddMerger() in the order that they were added.
Returns:
True if the merge was successful.
"""
for merger in self._mergers:
if not merger.MergeDataSets():
return False
return True
def GetMergedSchedule(self):
"""Returns the merged schedule.
This will be empty before MergeSchedules() is called.
Returns:
The merged schedule.
"""
return self.merged_schedule
def main():
  """Run the merge driver program.

  Parses command-line options, loads the two input GTFS feeds, runs the
  default mergers, writes the merged feed and an HTML report, and optionally
  opens the report in a browser.
  """
  usage = \
"""%prog [options] <input GTFS a.zip> <input GTFS b.zip> <output GTFS.zip>
Merges <input GTFS a.zip> and <input GTFS b.zip> into a new GTFS file
<output GTFS.zip>.
For more information see
https://github.com/google/transitfeed/wiki/Merge
"""
  parser = util.OptionParserLongError(
      usage=usage, version='%prog '+transitfeed.__version__)
  parser.add_option('--cutoff_date',
                    dest='cutoff_date',
                    default=None,
                    help='a transition date from the old feed to the new '
                         'feed in the format YYYYMMDD')
  parser.add_option('--largest_stop_distance',
                    dest='largest_stop_distance',
                    default=StopMerger.largest_stop_distance,
                    help='the furthest distance two stops can be apart and '
                         'still be merged, in metres')
  parser.add_option('--largest_shape_distance',
                    dest='largest_shape_distance',
                    default=ShapeMerger.largest_shape_distance,
                    help='the furthest distance the endpoints of two shapes '
                         'can be apart and the shape still be merged, in metres')
  parser.add_option('--html_output_path',
                    dest='html_output_path',
                    default='merge-results.html',
                    help='write the html output to this file')
  parser.add_option('--no_browser',
                    dest='no_browser',
                    action='store_true',
                    help='prevents the merge results from being opened in a '
                         'browser')
  parser.add_option('--latest_version', dest='latest_version',
                    action='store',
                    help='a version number such as 1.2.1 or None to get the '
                         'latest version from the project page. Output a warning if '
                         'merge.py is older than this version.')
  parser.add_option('-m', '--memory_db', dest='memory_db', action='store_true',
                    help='Use in-memory sqlite db instead of a temporary file. '
                         'It is faster but uses more RAM.')
  parser.set_defaults(memory_db=False)
  (options, args) = parser.parse_args()

  if len(args) != 3:
    parser.error('You did not provide all required command line arguments.')

  old_feed_path = os.path.abspath(args[0])
  new_feed_path = os.path.abspath(args[1])
  merged_feed_path = os.path.abspath(args[2])

  if old_feed_path.find("IWantMyCrash") != -1:
    # See tests/testmerge.py
    raise Exception('For testing the merge crash handler.')

  a_schedule = LoadWithoutErrors(old_feed_path, options.memory_db)
  b_schedule = LoadWithoutErrors(new_feed_path, options.memory_db)
  merged_schedule = transitfeed.Schedule(memory_db=options.memory_db)
  accumulator = HTMLProblemAccumulator()
  problem_reporter = MergeProblemReporter(accumulator)

  util.CheckVersion(problem_reporter, options.latest_version)

  feed_merger = FeedMerger(a_schedule, b_schedule, merged_schedule,
                           problem_reporter)
  feed_merger.AddDefaultMergers()

  feed_merger.GetMerger(StopMerger).SetLargestStopDistance(float(
      options.largest_stop_distance))
  feed_merger.GetMerger(ShapeMerger).SetLargestShapeDistance(float(
      options.largest_shape_distance))

  if options.cutoff_date is not None:
    service_period_merger = feed_merger.GetMerger(ServicePeriodMerger)
    service_period_merger.DisjoinCalendars(options.cutoff_date)

  if feed_merger.MergeSchedules():
    feed_merger.GetMergedSchedule().WriteGoogleTransitFeed(merged_feed_path)
  else:
    merged_feed_path = None

  # Use open() (the 'file' builtin was removed in Python 3) and a 'with'
  # block so the report is closed even if WriteOutput raises.
  with open(options.html_output_path, 'w') as output_file:
    accumulator.WriteOutput(output_file, feed_merger,
                            old_feed_path, new_feed_path, merged_feed_path)

  if not options.no_browser:
    webbrowser.open('file://%s' % os.path.abspath(options.html_output_path))


if __name__ == '__main__':
  util.RunWithCrashHandler(main)
| apache-2.0 |
Feandil/hardened-refpolicy | support/pyplate.py | 8 | 10631 | #!/usr/bin/env python3
"""PyPlate : a simple Python-based templating program
PyPlate parses a file and replaces directives (in double square brackets [[ ... ]])
by various means using a given dictionary of variables. Arbitrary Python code
can be run inside many of the directives, making this system highly flexible.
Usage:
# Load and parse template file
template = pyplate.Template("output") (filename or string)
# Execute it with a dictionary of variables
template.execute_file(output_stream, locals())
PyPlate defines the following directives:
[[...]] evaluate the arbitrary Python expression and insert the
result into the output
[[# ... #]] comment.
[[exec ...]] execute arbitrary Python code in the sandbox namespace
[[if ...]] conditional expressions with usual Python semantics
[[elif ...]]
[[else]]
[[end]]
[[for ... in ...]] for-loop with usual Python semantics
[[end]]
[[def ...(...)]] define a "function" out of other templating elements
[[end]]
[[call ...]] call a templating function (not a regular Python function)
"""
#
# Copyright (C) 2002 Michael Droettboom
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys, re, io
# Matches a whole [[ ... ]] directive.  NOTE(review): '.*' is greedy, so two
# directives on one line are captured as one match -- confirm templates never
# rely on multiple directives per line of literal text.
re_directive = re.compile(r"\[\[(.*)\]\]")
# Directive-body parsers; each captures the expression part of its keyword.
re_for_loop = re.compile(r"for (.*) in (.*)")
re_if = re.compile(r"if (.*)")
re_elif = re.compile(r"elif (.*)")
re_def = re.compile(r"def (.*?)\((.*)\)")
re_call = re.compile(r"call (.*?)\((.*)\)")
re_exec = re.compile(r"exec (.*)")
# Comment directives look like [[# ... #]]; the delimiters are stripped
# before this pattern is applied.
re_comment = re.compile(r"#(.*)#")
############################################################
# Template parser
class ParserException(Exception):
    """Raised when the template parser encounters a malformed directive."""

    def __init__(self, lineno, s):
        # Prefix the offending directive text with its template line number.
        message = "line %d: %s" % (lineno, s)
        Exception.__init__(self, message)
class Template:
    """A parsed PyPlate template.

    Construct with either a filename or a template string; parsing happens
    immediately.  The parser state (self.line, self.lineno) is consumed by
    the TemplateNode classes through parser_get()/parser_eat().
    """

    def __init__(self, filename=None):
        if filename is not None:
            try:
                self.parse_file(filename)
            except Exception:
                # The argument is not a readable file: treat it as the
                # template text itself.  (The original bare 'except:' would
                # also have swallowed KeyboardInterrupt/SystemExit.)
                self.parse_string(filename)

    def parse_file(self, filename):
        """Parse a template read from a file."""
        # 'with' closes the handle even if parsing fails; the local is no
        # longer named 'file' to avoid shadowing the builtin.
        with open(filename, 'r') as stream:
            self.parse(stream)

    def parse_string(self, template):
        """Parse a template held in a string."""
        if sys.version_info >= (3, 0):
            stream = io.StringIO(template)
        else:
            # Python 2: promote the byte string to unicode for StringIO.
            stream = io.StringIO(template.decode('utf-8'))
        self.parse(stream)
        stream.close()

    def parse(self, file):
        """Slurp the whole stream and build the node tree."""
        self.file = file
        self.line = self.file.read()
        self.lineno = 0
        self.functions = {}
        self.tree = TopLevelTemplateNode(self)

    def parser_get(self):
        """Return the unconsumed input, or None when it is exhausted."""
        if self.line == '':
            return None
        return self.line

    def parser_eat(self, chars):
        """Consume 'chars' characters of input, tracking the line number."""
        self.lineno = self.lineno + self.line[:chars].count("\n")
        self.line = self.line[chars:]

    def parser_exception(self, s):
        """Raise a ParserException annotated with the current line number."""
        raise ParserException(self.lineno, s)

    def execute_file(self, filename, data):
        """Execute the template, writing the output to a file."""
        with open(filename, 'w') as stream:
            self.execute(stream, data)

    def execute_string(self, data):
        """Execute the template and return the output as a string."""
        s = io.StringIO()
        self.execute(s, data)
        return s.getvalue()

    def execute_stdout(self, data):
        """Execute the template, writing the output to stdout."""
        self.execute(sys.stdout, data)

    def execute(self, stream=None, data=None):
        """Execute the template against 'data', writing to 'stream'.

        'data' must not default to a shared mutable {}: directives such as
        [[for]] and [[exec]] mutate it, so the original default leaked state
        between calls.  stream=None binds sys.stdout at call time, so
        redirection of sys.stdout is respected.
        """
        if stream is None:
            stream = sys.stdout
        if data is None:
            data = {}
        self.tree.execute(stream, data)

    def __repr__(self):
        return repr(self.tree)
############################################################
# NODES
class TemplateNode:
    """Internal node of the parsed template tree.

    Parsing is recursive descent: the constructor keeps pulling child nodes
    from TemplateNodeFactory until add_node() signals (by returning a true
    value) that this block was closed, normally by an [[end]] directive.
    """
    def __init__(self, parent, s):
        # parent: the Template being parsed; s: the directive text that
        # opened this block (kept for error messages).
        self.parent = parent
        self.s = s
        self.node_list = []
        while 1:
            new_node = TemplateNodeFactory(parent)
            if self.add_node(new_node):
                break
    def add_node(self, node):
        """Append a child; return a true value when this block is complete."""
        if node == 'end':
            return 1
        elif node != None:
            self.node_list.append(node)
        else:
            # End of input before [[end]].  parser_exception() raises by
            # itself, so the surrounding 'raise' never receives a value.
            raise self.parent.parser_exception(
                "[[%s]] does not have a matching [[end]]" % self.s)
    def execute(self, stream, data):
        """Execute all children in order, writing their output to stream."""
        for node in self.node_list:
            node.execute(stream, data)
    def __repr__(self):
        r = "<" + self.__class__.__name__ + " "
        for i in self.node_list:
            r = r + repr(i)
        r = r + ">"
        return r
class TopLevelTemplateNode(TemplateNode):
    """Root of a template tree.

    Unlike an ordinary block it is terminated by end-of-input rather than
    by an [[end]] directive.
    """

    def __init__(self, parent):
        # The root has no opening directive, hence the empty string.
        TemplateNode.__init__(self, parent, '')

    def add_node(self, node):
        # None means the parser ran out of input, which ends the top level.
        # Anything else (including a stray 'end' marker) is kept as a child,
        # matching the historical behaviour.
        if node is None:
            return 1
        self.node_list.append(node)
class ForTemplateNode(TemplateNode):
    """[[for vars in expr]] ... [[end]]: execute children once per item."""
    def __init__(self, parent, s):
        TemplateNode.__init__(self, parent, s)
        match = re_for_loop.match(s)
        if match == None:
            # parser_exception() raises internally; the 'raise' keyword here
            # never actually receives a value.
            raise self.parent.parser_exception(
                "[[%s]] is not a valid for-loop expression" % self.s)
        else:
            # Comma-separated loop variables, whitespace-stripped.
            self.vars_temp = match.group(1).split(",")
            self.vars = []
            for v in self.vars_temp:
                self.vars.append(v.strip())
            #print self.vars
            self.expression = match.group(2)
    def execute(self, stream, data):
        # Save any loop variables that already exist so they can be restored
        # afterwards.  NOTE(review): loop variables that were NOT previously
        # defined are left behind in 'data' after the loop -- confirm callers
        # tolerate that.
        remember_vars = {}
        for var in self.vars:
            if var in data:
                remember_vars[var] = data[var]
        # 'list' shadows the builtin; kept as-is for byte-compatibility.
        for list in eval(self.expression, globals(), data):
            if is_sequence(list):
                # Multiple loop variables: unpack the item positionally.
                for index, value in enumerate(list):
                    data[self.vars[index]] = value
            else:
                data[self.vars[0]] = list
            TemplateNode.execute(self, stream, data)
        for key, value in remember_vars.items():
            data[key] = value
class IfTemplateNode(TemplateNode):
    """[[if expr]] ... optionally [[elif]]/[[else]] ... [[end]]."""
    def __init__(self, parent, s):
        # Must be set before TemplateNode.__init__, which may assign it
        # while pulling child nodes.
        self.else_node = None
        TemplateNode.__init__(self, parent, s)
        match = re_if.match(s)
        if match == None:
            raise self.parent.parser_exception(
                "[[%s]] is not a valid if expression" % self.s)
        else:
            self.expression = match.group(1)
    def add_node(self, node):
        """Collect children until [[end]], [[else]] or [[elif]] closes us."""
        if node == 'end':
            return 1
        elif isinstance(node, ElseTemplateNode):
            # The alternate branch is chained off this node rather than
            # appended as a child, so execute() can choose exactly one path.
            self.else_node = node
            return 1
        elif isinstance(node, ElifTemplateNode):
            # An elif is itself an IfTemplateNode, so chains of elif/else
            # nest naturally through else_node.
            self.else_node = node
            return 1
        elif node != None:
            self.node_list.append(node)
        else:
            raise self.parent.parser_exception(
                "[[%s]] does not have a matching [[end]]" % self.s)
    def execute(self, stream, data):
        if eval(self.expression, globals(), data):
            TemplateNode.execute(self, stream, data)
        elif self.else_node != None:
            self.else_node.execute(stream, data)
class ElifTemplateNode(IfTemplateNode):
    """[[elif expr]]: behaves like a nested if chained via else_node."""
    def __init__(self, parent, s):
        self.else_node = None
        # Deliberately skips IfTemplateNode.__init__ (which would apply the
        # 'if' regex) and re-parses with the elif pattern instead.
        TemplateNode.__init__(self, parent, s)
        match = re_elif.match(s)
        if match == None:
            # Unlike IfTemplateNode there is no 'raise' keyword here, but
            # parser_exception() raises internally, so parsing still aborts.
            self.parent.parser_exception(
                "[[%s]] is not a valid elif expression" % self.s)
        else:
            self.expression = match.group(1)
class ElseTemplateNode(TemplateNode):
    # [[else]] block: its children run via the owning IfTemplateNode when
    # that node's condition is false; no behaviour beyond TemplateNode.
    pass
class FunctionTemplateNode(TemplateNode):
    """[[def name(args)]] ... [[end]]: a named, reusable template fragment."""
    def __init__(self, parent, s):
        TemplateNode.__init__(self, parent, s)
        match = re_def.match(s)
        if match == None:
            # parser_exception() raises internally, so parsing aborts here.
            self.parent.parser_exception(
                "[[%s]] is not a valid function definition" % self.s)
        self.function_name = match.group(1)
        # Comma-separated parameter names, whitespace-stripped.
        self.vars_temp = match.group(2).split(",")
        self.vars = []
        for v in self.vars_temp:
            self.vars.append(v.strip())
        #print self.vars
        # Register on the owning Template so [[call ...]] can find it.
        self.parent.functions[self.function_name] = self
    def execute(self, stream, data):
        # A definition produces no output at the point where it appears.
        pass
    def call(self, args, stream, data):
        """Execute the body with 'args' bound to the declared parameters."""
        remember_vars = {}
        for index, var in enumerate(self.vars):
            if var in data:
                remember_vars[var] = data[var]
            data[var] = args[index]
        TemplateNode.execute(self, stream, data)
        # NOTE(review): parameters that did not exist in 'data' before the
        # call remain defined afterwards -- confirm that is acceptable.
        for key, value in remember_vars.items():
            data[key] = value
class LeafTemplateNode(TemplateNode):
    """Terminal node holding a literal chunk of template text."""

    def __init__(self, parent, s):
        # Deliberately does NOT call TemplateNode.__init__: a leaf has no
        # children and must not pull further nodes from the parser.
        self.parent = parent
        self.s = s

    def execute(self, stream, data):
        # Emit the stored text verbatim.
        stream.write(self.s)

    def __repr__(self):
        return "<%s>" % self.__class__.__name__
class CommentTemplateNode(LeafTemplateNode):
    """[[# ... #]]: parsed and stored but produces no output."""
    def execute(self, stream, data):
        pass
class ExpressionTemplateNode(LeafTemplateNode):
    """[[expr]]: evaluate a Python expression; write str() of the result."""
    def execute(self, stream, data):
        if sys.version_info >= (3,0):
            stream.write(str(eval(self.s, globals(), data)))
        else:
            # Python 2: decode so the stream receives unicode, matching the
            # unicode stream created by Template.parse_string().
            stream.write(str(eval(self.s, globals(), data)).decode('utf-8'))
class ExecTemplateNode(LeafTemplateNode):
    """[[exec stmt]]: run an arbitrary statement in the data namespace."""
    def __init__(self, parent, s):
        LeafTemplateNode.__init__(self, parent, s)
        match = re_exec.match(s)
        if match == None:
            # parser_exception() raises internally, so parsing aborts here.
            self.parent.parser_exception(
                "[[%s]] is not a valid statement" % self.s)
        # Keep only the statement text, dropping the 'exec ' keyword.
        self.s = match.group(1)
    def execute(self, stream, data):
        # Security: executes template-supplied code.  Only ever feed this
        # class trusted templates.
        exec(self.s, globals(), data)
class CallTemplateNode(LeafTemplateNode):
    """[[call name(args)]]: invoke a fragment declared with [[def ...]]."""
    def __init__(self, parent, s):
        LeafTemplateNode.__init__(self, parent, s)
        match = re_call.match(s)
        if match == None:
            # parser_exception() raises internally, so parsing aborts here.
            self.parent.parser_exception(
                "[[%s]] is not a valid function call" % self.s)
        self.function_name = match.group(1)
        # Build an argument-tuple expression; the trailing comma stops a
        # single argument from collapsing to a bare parenthesised value.
        self.vars = "(" + match.group(2).strip() + ",)"
    def execute(self, stream, data):
        # Arguments are evaluated lazily, at execution time, in 'data'.
        self.parent.functions[self.function_name].call(
            eval(self.vars, globals(), data), stream, data)
############################################################
# Node factory
# Dispatch table: leading keyword of a directive -> node class handling it.
# TemplateNodeFactory matches by prefix, so order of lookup does not matter
# here, but 'elif' must exist as its own key (a plain prefix match on 'else'
# would not fire because 'elif' is tested as a whole-word prefix).
template_factory_type_map = {
    'if' : IfTemplateNode,
    'for' : ForTemplateNode,
    'elif' : ElifTemplateNode,
    'else' : ElseTemplateNode,
    'def' : FunctionTemplateNode,
    'call' : CallTemplateNode,
    'exec' : ExecTemplateNode }
# Dict keys view; only ever iterated, never indexed.
template_factory_types = template_factory_type_map.keys()
def TemplateNodeFactory(parent):
    """Consume the next chunk of parser input and build the matching node.

    Returns a node instance, the marker string 'end' for an [[end]]
    directive, or None when the input is exhausted.
    """
    src = parent.parser_get()

    if src == None:
        return None
    match = re_directive.search(src)

    if match == None:
        # No directive anywhere in the remaining text: all literal.
        parent.parser_eat(len(src))
        return LeafTemplateNode(parent, src)
    elif src == '' or match.start() != 0:
        # Literal text precedes the next directive; emit that text first.
        parent.parser_eat(match.start())
        return LeafTemplateNode(parent, src[:match.start()])
    else:
        # Strip the [[ ]] delimiters from the matched directive.
        directive = match.group()[2:-2].strip()
        parent.parser_eat(match.end())

        if directive == 'end':
            return 'end'
        elif re_comment.match(directive):
            return CommentTemplateNode(parent, directive)
        else:
            # Prefix match against the keyword table; anything unmatched is
            # treated as a plain expression directive.
            for i in template_factory_types:
                if directive[0:len(i)] == i:
                    return template_factory_type_map[i](parent, directive)

            return ExpressionTemplateNode(parent, directive)
def is_sequence(object):
    """Return True if *object* supports slicing, i.e. looks like a sequence.

    Strings, lists and tuples qualify; dicts, sets and scalars do not.
    """
    try:
        object[0:0]
    except TypeError:
        # Non-sequences raise TypeError on slicing ('unhashable type: slice'
        # for mappings, 'not subscriptable' for scalars).  The original bare
        # 'except:' also swallowed KeyboardInterrupt/SystemExit.
        return False
    else:
        return True
| gpl-2.0 |
lothian/psi4 | psi4/driver/p4util/fcidump.py | 6 | 17412 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility function for dumping the Hamiltonian to file in FCIDUMP format."""
import numpy as np
from psi4.driver import psifiles as psif
from psi4.driver.p4util.testing import compare_integers, compare_values, compare_recursive
from psi4.driver.procrouting.proc_util import check_iwl_file_from_scf_type
from psi4 import core
from .exceptions import ValidationError, TestComparisonError
def fcidump(wfn, fname='INTDUMP', oe_ints=None):
    """Save integrals to file in FCIDUMP format as defined in Comp. Phys. Commun. 54 75 (1989)

    Additional one-electron integrals, including orbital energies, can also be saved.
    This latter format can be used with the HANDE QMC code but is not standard.

    :returns: None

    :raises: ValidationError when SCF wavefunction is not RHF or UHF

    :type wfn: :py:class:`~psi4.core.Wavefunction`
    :param wfn: set of molecule, basis, orbitals from which to generate the integrals
    :param fname: name of the integrals file, defaults to INTDUMP
    :param oe_ints: list of additional one-electron integrals to save to file.
        So far only EIGENVALUES is a valid option.

    :examples:

    >>> # [1] Save one- and two-electron integrals to standard FCIDUMP format
    >>> E, wfn = energy('scf', return_wfn=True)
    >>> fcidump(wfn)

    >>> # [2] Save orbital energies, one- and two-electron integrals.
    >>> E, wfn = energy('scf', return_wfn=True)
    >>> fcidump(wfn, oe_ints=['EIGENVALUES'])
    """
    # Get some options
    reference = core.get_option('SCF', 'REFERENCE')
    ints_tolerance = core.get_global_option('INTS_TOLERANCE')
    # Some sanity checks
    if reference not in ['RHF', 'UHF']:
        raise ValidationError('FCIDUMP not implemented for {} references\n'.format(reference))
    if oe_ints is None:
        oe_ints = []

    molecule = wfn.molecule()
    docc = wfn.doccpi()
    frzcpi = wfn.frzcpi()
    frzvpi = wfn.frzvpi()
    # "Active" dimensions exclude frozen-core and frozen-virtual orbitals.
    active_docc = docc - frzcpi
    active_socc = wfn.soccpi()
    active_mopi = wfn.nmopi() - frzcpi - frzvpi

    # For UHF the dump is in spin orbitals, hence twice as many functions.
    nbf = active_mopi.sum() if wfn.same_a_b_orbs() else 2 * active_mopi.sum()
    nirrep = wfn.nirrep()
    nelectron = 2 * active_docc.sum() + active_socc.sum()
    irrep_map = _irrep_map(wfn)

    # Wavefunction irrep: XOR of the irreps of the singly occupied orbitals.
    wfn_irrep = 0
    for h, n_socc in enumerate(active_socc):
        if n_socc % 2 == 1:
            wfn_irrep ^= h

    core.print_out('Writing integrals in FCIDUMP format to ' + fname + '\n')
    # Generate FCIDUMP header
    header = '&FCI\n'
    header += 'NORB={:d},\n'.format(nbf)
    header += 'NELEC={:d},\n'.format(nelectron)
    header += 'MS2={:d},\n'.format(wfn.nalpha() - wfn.nbeta())
    # .upper() applies to the formatted fragment, yielding 'UHF=.TRUE.,'.
    header += 'UHF=.{}.,\n'.format(not wfn.same_a_b_orbs()).upper()
    orbsym = ''
    for h in range(active_mopi.n()):
        for n in range(frzcpi[h], frzcpi[h] + active_mopi[h]):
            orbsym += '{:d},'.format(irrep_map[h])
            # Spin-orbital dumps list each spatial label twice (alpha, beta).
            if not wfn.same_a_b_orbs():
                orbsym += '{:d},'.format(irrep_map[h])
    header += 'ORBSYM={}\n'.format(orbsym)
    header += 'ISYM={:d},\n'.format(irrep_map[wfn_irrep])
    header += '&END\n'
    with open(fname, 'w') as intdump:
        intdump.write(header)

    # Get an IntegralTransform object
    check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), wfn)
    spaces = [core.MOSpace.all()]
    trans_type = core.IntegralTransform.TransformationType.Restricted
    if not wfn.same_a_b_orbs():
        trans_type = core.IntegralTransform.TransformationType.Unrestricted
    ints = core.IntegralTransform(wfn, spaces, trans_type)
    ints.transform_tei(core.MOSpace.all(), core.MOSpace.all(), core.MOSpace.all(), core.MOSpace.all())
    core.print_out('Integral transformation complete!\n')

    DPD_info = {'instance_id': ints.get_dpd_id(), 'alpha_MO': ints.DPD_ID('[A>=A]+'), 'beta_MO': 0}
    if not wfn.same_a_b_orbs():
        DPD_info['beta_MO'] = ints.DPD_ID("[a>=a]+")
    # Write TEI to fname in FCIDUMP format
    core.fcidump_tei_helper(nirrep, wfn.same_a_b_orbs(), DPD_info, ints_tolerance, fname)

    # Read-in OEI and write them to fname in FCIDUMP format
    # Indexing functions to translate from zero-based (C and Python) to
    # one-based (Fortran); alpha spin orbitals take odd indices, beta even.
    mo_idx = lambda x: x + 1
    alpha_mo_idx = lambda x: 2 * x + 1
    beta_mo_idx = lambda x: 2 * (x + 1)

    with open(fname, 'a') as intdump:
        core.print_out('Writing frozen core operator in FCIDUMP format to ' + fname + '\n')
        if reference == 'RHF':
            PSIF_MO_FZC = 'MO-basis Frozen-Core Operator'
            moH = core.Matrix(PSIF_MO_FZC, wfn.nmopi(), wfn.nmopi())
            moH.load(core.IO.shared_object(), psif.PSIF_OEI)
            mo_slice = core.Slice(frzcpi, active_mopi)
            MO_FZC = moH.get_block(mo_slice, mo_slice)
            offset = 0
            # Per-irrep blocks; only the unique lower triangle is written.
            for h, block in enumerate(MO_FZC.nph):
                il = np.tril_indices(block.shape[0])
                for index, x in np.ndenumerate(block[il]):
                    row = mo_idx(il[0][index] + offset)
                    col = mo_idx(il[1][index] + offset)
                    if (abs(x) > ints_tolerance):
                        intdump.write('{:29.20E} {:4d} {:4d} {:4d} {:4d}\n'.format(x, row, col, 0, 0))
                offset += block.shape[0]

            # Additional one-electron integrals as requested in oe_ints
            # Orbital energies
            core.print_out('Writing orbital energies in FCIDUMP format to ' + fname + '\n')
            if 'EIGENVALUES' in oe_ints:
                eigs_dump = write_eigenvalues(wfn.epsilon_a().get_block(mo_slice).to_array(), mo_idx)
                intdump.write(eigs_dump)
        else:
            PSIF_MO_A_FZC = 'MO-basis Alpha Frozen-Core Oper'
            moH_A = core.Matrix(PSIF_MO_A_FZC, wfn.nmopi(), wfn.nmopi())
            moH_A.load(core.IO.shared_object(), psif.PSIF_OEI)
            mo_slice = core.Slice(frzcpi, active_mopi)
            MO_FZC_A = moH_A.get_block(mo_slice, mo_slice)
            offset = 0
            for h, block in enumerate(MO_FZC_A.nph):
                il = np.tril_indices(block.shape[0])
                for index, x in np.ndenumerate(block[il]):
                    row = alpha_mo_idx(il[0][index] + offset)
                    col = alpha_mo_idx(il[1][index] + offset)
                    if (abs(x) > ints_tolerance):
                        intdump.write('{:29.20E} {:4d} {:4d} {:4d} {:4d}\n'.format(x, row, col, 0, 0))
                offset += block.shape[0]
            PSIF_MO_B_FZC = 'MO-basis Beta Frozen-Core Oper'
            moH_B = core.Matrix(PSIF_MO_B_FZC, wfn.nmopi(), wfn.nmopi())
            moH_B.load(core.IO.shared_object(), psif.PSIF_OEI)
            mo_slice = core.Slice(frzcpi, active_mopi)
            MO_FZC_B = moH_B.get_block(mo_slice, mo_slice)
            offset = 0
            for h, block in enumerate(MO_FZC_B.nph):
                il = np.tril_indices(block.shape[0])
                for index, x in np.ndenumerate(block[il]):
                    row = beta_mo_idx(il[0][index] + offset)
                    col = beta_mo_idx(il[1][index] + offset)
                    if (abs(x) > ints_tolerance):
                        intdump.write('{:29.20E} {:4d} {:4d} {:4d} {:4d}\n'.format(x, row, col, 0, 0))
                offset += block.shape[0]

            # Additional one-electron integrals as requested in oe_ints
            # Orbital energies
            core.print_out('Writing orbital energies in FCIDUMP format to ' + fname + '\n')
            if 'EIGENVALUES' in oe_ints:
                alpha_eigs_dump = write_eigenvalues(wfn.epsilon_a().get_block(mo_slice).to_array(), alpha_mo_idx)
                beta_eigs_dump = write_eigenvalues(wfn.epsilon_b().get_block(mo_slice).to_array(), beta_mo_idx)
                intdump.write(alpha_eigs_dump + beta_eigs_dump)

        # Dipole integrals
        #core.print_out('Writing dipole moment OEI in FCIDUMP format to ' + fname + '\n')
        # Traceless quadrupole integrals
        #core.print_out('Writing traceless quadrupole moment OEI in FCIDUMP format to ' + fname + '\n')
        # Frozen core + nuclear repulsion energy
        core.print_out('Writing frozen core + nuclear repulsion energy in FCIDUMP format to ' + fname + '\n')
        e_fzc = ints.get_frozen_core_energy()
        e_nuc = molecule.nuclear_repulsion_energy(wfn.get_dipole_field_strength())
        # FCIDUMP convention: the constant term is the line with all-zero
        # indices (conventionally the last line of the file).
        intdump.write('{: 29.20E} {:4d} {:4d} {:4d} {:4d}\n'.format(e_fzc + e_nuc, 0, 0, 0, 0))
    core.print_out('Done generating {} with integrals in FCIDUMP format.\n'.format(fname))
def write_eigenvalues(eigs, mo_idx):
    """Format one-particle eigenvalues as FCIDUMP lines.

    *eigs* is an iterable of per-irrep arrays; *mo_idx* converts the running
    zero-based orbital counter into the one-based FCIDUMP orbital index.
    Returns the concatenated lines as a single string.
    """
    lines = []
    iorb = 0
    for block in eigs:
        # np.ravel visits elements in the same C order as np.ndenumerate.
        for x in np.ravel(block):
            lines.append('{: 29.20E} {:4d} {:4d} {:4d} {:4d}\n'.format(x, mo_idx(iorb), 0, 0, 0))
            iorb += 1
    return ''.join(lines)
def _irrep_map(wfn):
"""Returns an array of irrep indices that maps from Psi4's ordering convention to the standard FCIDUMP convention.
"""
symm = wfn.molecule().point_group().symbol()
psi2dump = {'c1' : [1], # A
'ci' : [1,2], # Ag Au
'c2' : [1,2], # A B
'cs' : [1,2], # A' A"
'd2' : [1,4,3,2], # A B1 B2 B3
'c2v' : [1,4,2,3], # A1 A2 B1 B2
'c2h' : [1,4,2,3], # Ag Bg Au Bu
'd2h' : [1,4,6,7,8,5,3,2] # Ag B1g B2g B3g Au B1u B2u B3u
}
irrep_map = psi2dump[symm]
return np.array(irrep_map, dtype='int')
def fcidump_from_file(fname):
    """Function to read in a FCIDUMP file.

    :returns: a dictionary with FCIDUMP header and integrals

      - 'norb' : number of basis functions
      - 'nelec' : number of electrons
      - 'ms2' : spin polarization of the system
      - 'isym' : symmetry of state (if present in FCIDUMP)
      - 'orbsym' : list of symmetry labels of each orbital
      - 'uhf' : whether restricted or unrestricted
      - 'enuc' : nuclear repulsion plus frozen core energy
      - 'epsilon' : orbital energies
      - 'hcore' : core Hamiltonian
      - 'eri' : electron-repulsion integrals

    :param fname: FCIDUMP file name

    NOTE(review): this parser assumes the layout produced by fcidump() above:
    ERIs first, then Hcore, then orbital energies, then the Enuc+Efzc line
    last -- confirm before feeding dumps from other programs.
    """
    intdump = {}
    # Parse the header, counting how many lines to skip when reading the
    # numerical payload with np.genfromtxt below.
    with open(fname, 'r') as handle:
        assert '&FCI' == handle.readline().strip()
        skiplines = 1
        read = True  # NOTE(review): never used; kept for compatibility.
        while True:
            skiplines += 1
            line = handle.readline()
            if 'END' in line:
                break
            key, value = line.split('=')
            value = value.strip().rstrip(',')
            if key == 'UHF':
                value = 'TRUE' in value
            elif key == 'ORBSYM':
                value = [int(x) for x in value.split(',')]
            else:
                value = int(value.replace(',', ''))
            intdump[key.lower()] = value
    # Read the data and index, skip header
    raw_ints = np.genfromtxt(fname, skip_header=skiplines)
    # Read last line, i.e. Enuc + Efzc
    intdump['enuc'] = raw_ints[-1, 0]
    # Read in integrals and indices
    ints = raw_ints[:-1, 0]
    # Get dimensions and indices (shifted to zero-based; absent indices
    # become -1 after the shift).
    nbf = intdump['norb']
    idxs = raw_ints[:, 1:].astype(int) - 1
    # Slices: the last nbf data lines are the orbital energies.
    sl = slice(ints.shape[0] - nbf, ints.shape[0])
    # Extract orbital energies
    epsilon = np.zeros(nbf)
    epsilon[idxs[sl, 0]] = ints[sl]
    intdump['epsilon'] = epsilon
    # Count how many 2-index intdump we have (rows whose last two indices
    # are absent); at most nbf*nbf lines precede the eigenvalue block.
    sl = slice(sl.start - nbf * nbf, sl.stop - nbf)
    two_index = np.all(idxs[sl, 2:] == -1, axis=1).sum()
    sl = slice(sl.stop - two_index, sl.stop)
    # Extract Hcore (symmetrised from the stored triangle).
    Hcore = np.zeros((nbf, nbf))
    Hcore[(idxs[sl, 0], idxs[sl, 1])] = ints[sl]
    Hcore[(idxs[sl, 1], idxs[sl, 0])] = ints[sl]
    intdump['hcore'] = Hcore
    # Extract ERIs, expanding the stored unique integrals to the full
    # 8-fold permutational symmetry of real orbitals.
    sl = slice(0, sl.start)
    eri = np.zeros((nbf, nbf, nbf, nbf))
    eri[(idxs[sl, 0], idxs[sl, 1], idxs[sl, 2], idxs[sl, 3])] = ints[sl]
    eri[(idxs[sl, 0], idxs[sl, 1], idxs[sl, 3], idxs[sl, 2])] = ints[sl]
    eri[(idxs[sl, 1], idxs[sl, 0], idxs[sl, 2], idxs[sl, 3])] = ints[sl]
    eri[(idxs[sl, 1], idxs[sl, 0], idxs[sl, 3], idxs[sl, 2])] = ints[sl]
    eri[(idxs[sl, 2], idxs[sl, 3], idxs[sl, 0], idxs[sl, 1])] = ints[sl]
    eri[(idxs[sl, 3], idxs[sl, 2], idxs[sl, 0], idxs[sl, 1])] = ints[sl]
    eri[(idxs[sl, 2], idxs[sl, 3], idxs[sl, 1], idxs[sl, 0])] = ints[sl]
    eri[(idxs[sl, 3], idxs[sl, 2], idxs[sl, 1], idxs[sl, 0])] = ints[sl]
    intdump['eri'] = eri
    return intdump
def compare_fcidumps(expected, computed, label):
    """Function to compare two FCIDUMP files. Prints success
    when value *computed* matches value *expected*.
    Performs a system exit on failure. Used in input files in the test suite.

    NOTE(review): despite the description below, this function returns None;
    the energies are only used internally for the comparisons.

    - 'NUCLEAR REPULSION ENERGY' : nuclear repulsion plus frozen core energy
    - 'ONE-ELECTRON ENERGY' : SCF one-electron energy
    - 'TWO-ELECTRON ENERGY' : SCF two-electron energy
    - 'SCF TOTAL ENERGY' : SCF total energy
    - 'MP2 CORRELATION ENERGY' : MP2 correlation energy

    :param expected: reference FCIDUMP file
    :param computed: computed FCIDUMP file
    :param label: string labelling the test
    """
    # Grab expected header and integrals
    ref_intdump = fcidump_from_file(expected)
    intdump = fcidump_from_file(computed)

    # Compare headers only; the numerical payload is compared indirectly
    # through the derived energies below.
    compare_recursive(
        ref_intdump,
        intdump,
        'FCIDUMP header',
        forgive=['enuc', 'hcore', 'eri', 'epsilon'])

    ref_energies = energies_from_fcidump(ref_intdump)
    energies = energies_from_fcidump(intdump)

    pass_1el = compare_values(ref_energies['ONE-ELECTRON ENERGY'], energies['ONE-ELECTRON ENERGY'], 7,
                              label + '. 1-electron energy')
    pass_2el = compare_values(ref_energies['TWO-ELECTRON ENERGY'], energies['TWO-ELECTRON ENERGY'], 7,
                              label + '. 2-electron energy')
    pass_scf = compare_values(ref_energies['SCF TOTAL ENERGY'], energies['SCF TOTAL ENERGY'], 10,
                              label + '. SCF total energy')
    pass_mp2 = compare_values(ref_energies['MP2 CORRELATION ENERGY'], energies['MP2 CORRELATION ENERGY'], 10,
                              label + '. MP2 correlation energy')
    # A single summary comparison that fails if any component differed.
    compare_integers(True, (pass_1el and pass_2el and pass_scf and pass_mp2), label)
def energies_from_fcidump(intdump):
    """Recompute SCF and MP2 energies from a parsed FCIDUMP dictionary."""
    energies = {}
    energies['NUCLEAR REPULSION ENERGY'] = intdump['enuc']
    epsilon = intdump['epsilon']
    Hcore = intdump['hcore']
    eri = intdump['eri']

    # Compute SCF energy.  Occupied orbitals are identified by negative
    # orbital energies -- NOTE(review): this assumes no positive-energy
    # occupied (or negative-energy virtual) orbitals in the dump.
    energies['ONE-ELECTRON ENERGY'], energies['TWO-ELECTRON ENERGY'] = _scf_energy(Hcore, eri,
                                                                                   np.where(epsilon < 0)[0],
                                                                                   intdump['uhf'])
    # yapf: disable
    energies['SCF TOTAL ENERGY'] = energies['ONE-ELECTRON ENERGY'] + energies['TWO-ELECTRON ENERGY'] + energies['NUCLEAR REPULSION ENERGY']
    # yapf: enable
    # Compute MP2 energy
    energies['MP2 CORRELATION ENERGY'] = _mp2_energy(eri, epsilon, intdump['uhf'])

    return energies
def _scf_energy(Hcore, ERI, occ_sl, unrestricted):
scf_1el_e = np.einsum('ii->', Hcore[np.ix_(occ_sl, occ_sl)])
if not unrestricted:
scf_1el_e *= 2
coulomb = np.einsum('iijj->', ERI[np.ix_(occ_sl, occ_sl, occ_sl, occ_sl)])
exchange = np.einsum('ijij->', ERI[np.ix_(occ_sl, occ_sl, occ_sl, occ_sl)])
if unrestricted:
scf_2el_e = 0.5 * (coulomb - exchange)
else:
scf_2el_e = 2.0 * coulomb - exchange
return scf_1el_e, scf_2el_e
def _mp2_energy(ERI, epsilon, unrestricted):
    """Return the MP2 correlation energy for the given MO integrals.

    Occupied orbitals have negative entries in `epsilon`, virtuals positive.
    The UHF branch uses the spin-orbital formula with a 1/2 prefactor.
    """
    # Occupied and virtual slices
    occupied = np.where(epsilon < 0)[0]
    virtual = np.where(epsilon > 0)[0]
    e_occ = epsilon[occupied]
    e_vir = epsilon[virtual]
    # Energy denominators e_i - e_a + e_j - e_b, broadcast to (o, v, o, v).
    denom = 1 / (e_occ.reshape(-1, 1, 1, 1) - e_vir.reshape(-1, 1, 1) +
                 e_occ.reshape(-1, 1) - e_vir)
    mo = ERI[np.ix_(occupied, virtual, occupied, virtual)]
    exchanged = mo - mo.swapaxes(1, 3)
    if unrestricted:
        return 0.5 * np.einsum("abrs,abrs,abrs->", mo, exchanged, denom)
    return (np.einsum('iajb,iajb,iajb->', mo, mo, denom) +
            np.einsum('iajb,iajb,iajb->', exchanged, mo, denom))
| lgpl-3.0 |
MDBrothers/FluidMechanicsToys | ideal_flow_2D/postprocessing.py | 1 | 1127 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import sys
pressure = sys.argv[1]
def load(fname):
    """Load comma/space separated numbers from *fname*.

    Each line is split on commas and spaces and all tokens across all
    lines are flattened into a single 1-D float64 array.

    :param fname: path of the text file to read
    :return: numpy.ndarray of dtype float64
    """
    data = []
    # Context manager guarantees the handle is closed even if parsing fails;
    # the original open()/close() pair leaked the descriptor on error.
    with open(fname, 'r') as f:
        for line in f:
            data.append(line.replace('\n', '').replace(',', ' ').strip(' ').split(' '))
    return np.array(np.concatenate(data), dtype=np.float64)
def main():
    """Read the pressure file named on the command line and scatter-plot it.

    The input file holds flattened triples; every third entry starting at
    offsets 0/1/2 forms one coordinate/value stream.
    """
    #output files will be loaded into 1-D arrays left to right, top to bottom
    #grid_shape_data = np.loadtxt(boundary_conditions_filename, delimiter = ' ,', unpack = True)
    pressure_values = load(pressure);
    # Python 2 print statements: dump the raw data and the three stream sizes.
    print pressure_values
    print " "
    print pressure_values[0:pressure_values.size:3].size
    print pressure_values[1:pressure_values.size:3].size
    print pressure_values[2:pressure_values.size:3].size
    # Re-import shadows the module-level pyplot import; kept as-is.
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # NOTE(review): only the first 66 entries (22 points) are plotted --
    # presumably a fixed grid size; confirm against the solver's output.
    ax.scatter(pressure_values[0:66:3], pressure_values[1:66:3], pressure_values[2:66:3], label='parametric curve')
    ax.legend()
    plt.show()

# Script entry point.
if __name__ == "__main__":
    main()
| mit |
ashemedai/ansible | lib/ansible/modules/cloud/openstack/os_floating_ip.py | 49 | 10134 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Author: Davide Guerri <davide.guerri@hp.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_floating_ip
version_added: "2.0"
author: "Davide Guerri <davide.guerri@hp.com>"
short_description: Add/Remove floating IP from an instance
extends_documentation_fragment: openstack
description:
- Add or Remove a floating IP to an instance
options:
server:
description:
- The name or ID of the instance to which the IP address
should be assigned.
required: true
network:
description:
- The name or ID of a neutron external network or a nova pool name.
required: false
floating_ip_address:
description:
- A floating IP address to attach or to detach. Required only if I(state)
is absent. When I(state) is present can be used to specify a IP address
to attach.
required: false
reuse:
description:
- When I(state) is present, and I(floating_ip_address) is not present,
this parameter can be used to specify whether we should try to reuse
a floating IP address already allocated to the project.
required: false
default: false
fixed_address:
description:
- To which fixed IP of server the floating IP address should be
attached to.
required: false
nat_destination:
description:
- The name or id of a neutron private network that the fixed IP to
attach floating IP is on
required: false
default: None
aliases: ["fixed_network", "internal_network"]
version_added: "2.3"
wait:
description:
- When attaching a floating IP address, specify whether we should
wait for it to appear as attached.
required: false
default: false
timeout:
description:
- Time to wait for an IP address to appear as attached. See wait.
required: false
default: 60
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
required: false
default: present
purge:
description:
- When I(state) is absent, indicates whether or not to delete the floating
IP completely, or only detach it from the server. Default is to detach only.
required: false
default: false
version_added: "2.1"
availability_zone:
description:
     - Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Assign a floating IP to the fist interface of `cattle001` from an exiting
# external network or nova pool. A new floating IP from the first available
# external network is allocated to the project.
- os_floating_ip:
cloud: dguerri
server: cattle001
# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
# `cattle001`. If a free floating IP is already allocated to the project, it is
# reused; if not, a new one is created.
- os_floating_ip:
cloud: dguerri
state: present
reuse: yes
server: cattle001
network: ext_net
fixed_address: 192.0.2.3
wait: true
timeout: 180
# Assign a new floating IP from the network `ext_net` to the instance fixed
# ip in network `private_net` of `cattle001`.
- os_floating_ip:
cloud: dguerri
state: present
server: cattle001
network: ext_net
nat_destination: private_net
wait: true
timeout: 180
# Detach a floating IP address from a server
- os_floating_ip:
cloud: dguerri
state: absent
floating_ip_address: 203.0.113.2
server: cattle001
'''
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _get_floating_ip(cloud, floating_ip_address):
    """Return the floating IP record matching *floating_ip_address*.

    Returns the first match found via the cloud's search API, or None
    when no floating IP with that address exists.
    """
    matches = cloud.search_floating_ips(
        filters={'floating_ip_address': floating_ip_address})
    return matches[0] if matches else None
def main():
    """Ansible entry point: attach or detach a floating IP on an instance.

    Builds the argument spec, validates the shade library, then either
    ensures a floating IP is attached (state=present) or detached and
    optionally deleted (state=absent). Exits via module.exit_json /
    module.fail_json in every path.
    """
    argument_spec = openstack_full_argument_spec(
        server=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
        network=dict(required=False, default=None),
        floating_ip_address=dict(required=False, default=None),
        reuse=dict(required=False, type='bool', default=False),
        fixed_address=dict(required=False, default=None),
        nat_destination=dict(required=False, default=None,
                             aliases=['fixed_network', 'internal_network']),
        wait=dict(required=False, type='bool', default=False),
        timeout=dict(required=False, type='int', default=60),
        purge=dict(required=False, type='bool', default=False),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    if (module.params['nat_destination'] and
            StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
        # FIX: the two fragments previously concatenated to
        # "version ofthe shade library" (missing space).
        module.fail_json(msg="To utilize nat_destination, the installed version of "
                             "the shade library MUST be >= 1.8.0")

    server_name_or_id = module.params['server']
    state = module.params['state']
    network = module.params['network']
    floating_ip_address = module.params['floating_ip_address']
    reuse = module.params['reuse']
    fixed_address = module.params['fixed_address']
    nat_destination = module.params['nat_destination']
    wait = module.params['wait']
    timeout = module.params['timeout']
    purge = module.params['purge']

    cloud = shade.openstack_cloud(**module.params)

    try:
        server = cloud.get_server(server_name_or_id)
        if server is None:
            module.fail_json(
                msg="server {0} not found".format(server_name_or_id))

        if state == 'present':
            # If f_ip already assigned to server, check that it matches
            # requirements.
            public_ip = cloud.get_server_public_ip(server)
            f_ip = _get_floating_ip(cloud, public_ip) if public_ip else public_ip
            if f_ip:
                if network:
                    network_id = cloud.get_network(name_or_id=network)["id"]
                else:
                    network_id = None
                # NOTE(review): `f_ip.internal_network == fixed_address` looks
                # like it was meant to compare against nat_destination;
                # behaviour preserved pending confirmation.
                if all([(fixed_address and f_ip.fixed_ip_address == fixed_address) or
                        (nat_destination and f_ip.internal_network == fixed_address),
                        network, f_ip.network != network_id]):
                    # Current state definitely conflicts with requirements.
                    # FIX: format string previously read "{network: {fip}"
                    # (missing closing brace), which raised at runtime.
                    module.fail_json(msg="server {server} already has a "
                                         "floating-ip on requested "
                                         "interface but it doesn't match "
                                         "requested network {network}: {fip}"
                                     .format(server=server_name_or_id,
                                             network=network,
                                             fip=remove_values(f_ip,
                                                               module.no_log_values)))
                if not network or f_ip.network == network_id:
                    # Requirements are met
                    module.exit_json(changed=False, floating_ip=f_ip)

                # Requirements are vague enough to ignore existing f_ip and try
                # to create a new f_ip to the server.

            server = cloud.add_ips_to_server(
                server=server, ips=floating_ip_address, ip_pool=network,
                reuse=reuse, fixed_address=fixed_address, wait=wait,
                timeout=timeout, nat_destination=nat_destination)
            fip_address = cloud.get_server_public_ip(server)
            # Update the floating IP status
            f_ip = _get_floating_ip(cloud, fip_address)
            module.exit_json(changed=True, floating_ip=f_ip)

        elif state == 'absent':
            if floating_ip_address is None:
                if not server_name_or_id:
                    module.fail_json(msg="either server or floating_ip_address are required")
                server = cloud.get_server(server_name_or_id)
                floating_ip_address = cloud.get_server_public_ip(server)

            f_ip = _get_floating_ip(cloud, floating_ip_address)

            if not f_ip:
                # Nothing to detach
                module.exit_json(changed=False)
            changed = False
            if f_ip["fixed_ip_address"]:
                cloud.detach_ip_from_server(
                    server_id=server['id'], floating_ip_id=f_ip['id'])
                # Update the floating IP status
                f_ip = cloud.get_floating_ip(id=f_ip['id'])
                changed = True
            if purge:
                cloud.delete_floating_ip(f_ip['id'])
                module.exit_json(changed=True)
            module.exit_json(changed=changed, floating_ip=f_ip)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ynotstartups/Wanhao | plugins/USBPrinting/avr_isp/intelHex.py | 5 | 1655 | """
Module to read intel hex files into binary data blobs.
IntelHex files are commonly used to distribute firmware
See: http://en.wikipedia.org/wiki/Intel_HEX
This is a python 3 conversion of the code created by David Braam for the Cura project.
"""
import io
from UM.Logger import Logger
def readHex(filename):
    """
    Read and verify an Intel hex file. Return the data as a list of bytes.

    Handles data (00), end-of-file (01) and extended segment address (02)
    records; any other record type is logged and skipped. Raises an
    Exception on malformed lines or checksum failures.
    """
    data = []
    extra_addr = 0
    # Context manager closes the file even when a parse error raises; the
    # original open()/close() pair leaked the handle on failure.
    with io.open(filename, "r") as f:
        for line in f:
            line = line.strip()
            if len(line) < 1:
                continue
            if line[0] != ":":
                raise Exception("Hex file has a line not starting with ':'")
            rec_len = int(line[1:3], 16)
            addr = int(line[3:7], 16) + extra_addr
            rec_type = int(line[7:9], 16)
            if len(line) != rec_len * 2 + 11:
                raise Exception("Error in hex file: " + line)
            # Verify the two's-complement checksum over all record bytes.
            check_sum = 0
            for i in range(0, rec_len + 5):
                check_sum += int(line[i*2+1:i*2+3], 16)
            check_sum &= 0xFF
            if check_sum != 0:
                raise Exception("Checksum error in hex file: " + line)

            if rec_type == 0:  # Data record
                # Zero-fill any gap so data[addr + i] is addressable.
                while len(data) < addr + rec_len:
                    data.append(0)
                for i in range(0, rec_len):
                    data[addr + i] = int(line[i*2+9:i*2+11], 16)
            elif rec_type == 1:  # End Of File record
                pass
            elif rec_type == 2:  # Extended Segment Address Record
                extra_addr = int(line[9:13], 16) * 16
            else:
                Logger.log("d", "%s, %s, %s, %s, %s", rec_type, rec_len, addr, check_sum, line)
    return data
| agpl-3.0 |
robertbreker/xscontainer | src/xscontainer/util/tls_secret.py | 1 | 4924 | from xscontainer import api_helper
from xscontainer import util
from xscontainer.util import log
import os
import XenAPI
XSCONTAINER_TLS_CLIENT_CERT = 'xscontainer-tls-client-cert'
XSCONTAINER_TLS_CLIENT_KEY = 'xscontainer-tls-client-key'
XSCONTAINER_TLS_CA_CERT = 'xscontainer-tls-ca-cert'
XSCONTAINER_TLS_KEYS = [XSCONTAINER_TLS_CLIENT_CERT,
XSCONTAINER_TLS_CLIENT_KEY,
XSCONTAINER_TLS_CA_CERT]
TEMP_FILE_PATH = '/tmp/xscontainer/tls/'
def remove_if_refcount_less_or_equal(session, tls_secret_uuid,
                                     refcount_threshold):
    """Destroy the TLS secret unless more than *refcount_threshold* VMs
    currently reference it in their other-config."""
    current_refcount = _get_refcount(session, tls_secret_uuid)
    if current_refcount > refcount_threshold:
        # Still referenced by more VMs than the threshold allows - keep it.
        log.info("refcount for secret uuid %s is larger than threshold with %d"
                 % (tls_secret_uuid, current_refcount))
        return
    try:
        secret_ref = session.xenapi.secret.get_by_uuid(tls_secret_uuid)
        session.xenapi.secret.destroy(secret_ref)
        log.info("Deleted secret uuid %s with refcount %d"
                 % (tls_secret_uuid, current_refcount))
    except XenAPI.Failure:
        # Deletion is best-effort; log and continue.
        log.exception("Failed to delete secret uuid %s, moving on..."
                      % (tls_secret_uuid))
def _get_refcount(session, secret_uuid):
    """Return the number of VMs whose other-config references *secret_uuid*
    under any of the TLS keys."""
    total = 0
    vm_records = api_helper.get_vm_records(session)
    for record in vm_records.values():
        other_config = record['other_config']
        total += sum(1 for key in XSCONTAINER_TLS_KEYS
                     if key in other_config
                     and other_config[key] == secret_uuid)
    return total
def set_for_vm(session, vm_uuid, client_cert_content,
               client_key_content, ca_cert_content):
    """Replace the VM's TLS client cert/key/CA cert secrets with new content,
    storing the new secret UUIDs in the VM's other-config."""
    # Release any previously stored secrets first.
    _destroy_for_vm(session, vm_uuid)
    log.info("set_vm_tls_secrets is updating certs and keys for %s" %
             (vm_uuid))
    secret_contents = {
        XSCONTAINER_TLS_CLIENT_CERT: client_cert_content,
        XSCONTAINER_TLS_CLIENT_KEY: client_key_content,
        XSCONTAINER_TLS_CA_CERT: ca_cert_content,
    }
    other_config = dict(
        (key, api_helper.create_secret_return_uuid(session, value))
        for key, value in secret_contents.items())
    api_helper.update_vm_other_config(session, vm_uuid, other_config)
def export_for_vm(session, vm_uuid):
    """Materialise the VM's TLS secrets as temp files and return their paths.

    Resolves the secret UUIDs stored in the VM's other-config through the
    XAPI secret records, then (re)writes them under TEMP_FILE_PATH/<vm_uuid>/
    when the exported client cert is missing or stale. Returns the path dict
    produced by _get_temptlspaths.
    """
    other_config = api_helper.get_vm_other_config(session, vm_uuid)
    # Resolve each stored secret uuid to its plaintext value.
    secretdict = {}
    for key, value in other_config.items():
        if key in XSCONTAINER_TLS_KEYS:
            secret_uuid = value
            secret_ref = session.xenapi.secret.get_by_uuid(secret_uuid)
            secret_record = session.xenapi.secret.get_record(secret_ref)
            secretdict[key] = secret_record['value']
    temptlspaths = _get_temptlspaths(vm_uuid)
    # The three files are always written together below, so checking only
    # the client cert for staleness suffices.
    if util.file_old_or_none_existent(temptlspaths['client_cert']):
        if not os.path.exists(temptlspaths['parent']):
            os.makedirs(temptlspaths['parent'])
        # NOTE(review): 0600 (Python 2 octal) on a *directory* strips the
        # execute bit needed to enter it -- presumably 0700 was intended;
        # confirm before changing.
        os.chmod(temptlspaths['parent'], 0600)
        util.write_file(
            temptlspaths['client_cert'],
            secretdict[XSCONTAINER_TLS_CLIENT_CERT])
        util.write_file(
            temptlspaths['client_key'],
            secretdict[XSCONTAINER_TLS_CLIENT_KEY])
        util.write_file(
            temptlspaths['ca_cert'],
            secretdict[XSCONTAINER_TLS_CA_CERT])
    return temptlspaths
def _get_temptlspaths(vm_uuid):
    """Return the per-VM temp paths used for exported TLS material."""
    base = os.path.join(TEMP_FILE_PATH, vm_uuid)
    paths = {'parent': base}
    for name, keyname in (('client_cert', XSCONTAINER_TLS_CLIENT_CERT),
                          ('client_key', XSCONTAINER_TLS_CLIENT_KEY),
                          ('ca_cert', XSCONTAINER_TLS_CA_CERT)):
        paths[name] = os.path.join(base, keyname)
    return paths
def _destroy_for_vm(session, vm_uuid):
    """Remove the VM's TLS secrets and any exported temp files.

    A secret is only destroyed when no VM other than this one references it
    (refcount <= 1).
    """
    log.info("destroy_tls_secrets is wiping certs and keys for %s" % (vm_uuid))
    other_config = api_helper.get_vm_other_config(session, vm_uuid)
    for key in XSCONTAINER_TLS_KEYS:
        if key in other_config:
            tls_secret_uuid = other_config[key]
            # remove if there is no VMs other than this one who use the secret
            remove_if_refcount_less_or_equal(session, tls_secret_uuid, 1)
    temptlspaths = _get_temptlspaths(vm_uuid)
    # BUG FIX: the original iterated the dict directly, yielding the *keys*
    # ('parent', 'client_cert', ...) instead of the paths, so the exported
    # files were never deleted. Iterate the path values, removing files
    # before the parent directory so os.rmdir sees an empty dir.
    for path in temptlspaths.values():
        if os.path.exists(path) and not os.path.isdir(path):
            os.remove(path)
    parent = temptlspaths['parent']
    if os.path.isdir(parent):
        os.rmdir(parent)
| bsd-2-clause |
aselle/tensorflow | tensorflow/contrib/distribute/python/cross_tower_ops_test.py | 1 | 23082 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossTowerOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import cross_tower_utils
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import values as value_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import device_util
def _make_per_device(values, devices):
  """Place each value on its corresponding device and wrap as PerDevice."""
  devices = cross_tower_ops_lib.get_devices_from(devices)
  assert len(values) == len(devices)
  index = {}
  for device, value in zip(devices, values):
    with ops.device(device):
      index[device] = array_ops.identity(value)
  return value_lib.PerDevice(index)
# pylint: disable=g-doc-args,g-doc-return-or-yield
def _fake_mirrored(value, devices):
  """Create a faked Mirrored object for testing.

  Unlike a real Mirrored, every device here maps to the very same object.
  """
  devices = cross_tower_ops_lib.get_devices_from(devices)
  return value_lib.Mirrored(dict((d, value) for d in devices))
def _make_indexed_slices(values, indices, dense_shape, device):
  """Build an IndexedSlices whose component tensors live on *device*."""
  with ops.device(device):
    return ops.IndexedSlices(
        values=constant_op.constant(values),
        indices=constant_op.constant(indices),
        dense_shape=constant_op.constant(dense_shape))
def _make_mirrored_indexed_slices(devices, values, indices, dense_shape):
  """Mirror the same IndexedSlices content onto every device in *devices*."""
  index = {}
  for device in devices:
    index[device] = _make_indexed_slices(values, indices, dense_shape, device)
  return value_lib.Mirrored(index)
_cpu_device = "/device:CPU:0"
class CrossTowerOpsTestBase(test.TestCase, parameterized.TestCase):
  """Shared assertion helpers and reduce/broadcast checks for CrossTowerOps."""

  def _assert_indexed_slices_equal(self, left, right):
    """Assert both are IndexedSlices on the same device with equal contents."""
    self.assertIsInstance(left, ops.IndexedSlices)
    self.assertIsInstance(right, ops.IndexedSlices)
    self.assertEqual(device_util.resolve(left.device),
                     device_util.resolve(right.device))
    self.assertAllEqual(
        self.evaluate(ops.convert_to_tensor(left)),
        self.evaluate(ops.convert_to_tensor(right)))

  def _assert_values_equal(self, left, right):
    """Recursively compare (lists of) distributed values per device."""
    if isinstance(left, list):
      for l, r in zip(left, right):
        self._assert_values_equal(l, r)
    else:
      self.assertEqual(type(left), type(right))
      self.assertEqual(set(left.devices), set(right.devices))
      # Dispatch on the component type: IndexedSlices need device-aware
      # comparison; dense tensors differ between eager and graph mode.
      if isinstance(list(left._index.values())[0], ops.IndexedSlices):
        for (d, v) in left._index.items():
          self._assert_indexed_slices_equal(v, right._index[d])
      elif context.executing_eagerly():
        self.assertEqual([v.numpy() for v in left._index.values()],
                         list(right._index.values()))
      else:
        with self.test_session() as sess:
          self.assertEqual(
              sess.run(list(left._index.values())), list(right._index.values()))

  def _testReductionAndBroadcast(self, cross_tower_ops, distribution):
    """Exercise reduce(), batch_reduce() and broadcast() against every
    destination kind (None, Mirrored, different device, string, list)."""
    devices = distribution.worker_devices

    values = [constant_op.constant(float(d)) for d in range(len(devices))]
    per_device = _make_per_device(values, devices)
    mean = (len(devices) - 1.) / 2.

    values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
    per_device_2 = _make_per_device(values_2, devices)
    mean_2 = mean + 1.

    destination_mirrored = _fake_mirrored(1., devices)
    destination_different = _fake_mirrored(1., _cpu_device)
    destination_str = _cpu_device
    destination_list = devices

    all_destinations = [
        None, destination_mirrored, destination_different, destination_str,
        destination_list
    ]

    # test reduce()
    for destinations in all_destinations:
      self._assert_values_equal(
          cross_tower_ops.reduce(
              vs.VariableAggregation.MEAN,
              per_device,
              destinations=destinations),
          _fake_mirrored(mean, destinations or per_device))
      self._assert_values_equal(
          cross_tower_ops.reduce(
              vs.VariableAggregation.MEAN,
              per_device_2,
              destinations=destinations),
          _fake_mirrored(mean_2, destinations or per_device))
      self._assert_values_equal(
          cross_tower_ops.reduce(
              vs.VariableAggregation.SUM, per_device,
              destinations=destinations),
          _fake_mirrored(mean * len(devices), destinations or per_device))
      self._assert_values_equal(
          cross_tower_ops.reduce(
              vs.VariableAggregation.SUM,
              per_device_2,
              destinations=destinations),
          _fake_mirrored(mean_2 * len(devices), destinations or per_device))

    # test batch_reduce() over all destination pairs
    for d1, d2 in itertools.product(all_destinations, all_destinations):
      self._assert_values_equal(
          cross_tower_ops.batch_reduce(vs.VariableAggregation.MEAN,
                                       [(per_device, d1), (per_device_2, d2)]),
          [
              _fake_mirrored(mean, d1 or per_device),
              _fake_mirrored(mean_2, d2 or per_device_2)
          ])
      self._assert_values_equal(
          cross_tower_ops.batch_reduce(vs.VariableAggregation.SUM,
                                       [(per_device, d1), (per_device_2, d2)]),
          [
              _fake_mirrored(mean * len(devices), d1 or per_device),
              _fake_mirrored(mean_2 * len(devices), d2 or per_device_2)
          ])

    # test broadcast()
    for destinations in all_destinations:
      if destinations is None:
        continue
      else:
        self._assert_values_equal(
            cross_tower_ops.broadcast(constant_op.constant(1.), destinations),
            _fake_mirrored(1., destinations))
class SingleWorkerCrossTowerOpsTest(CrossTowerOpsTestBase):
  """Single-machine CrossTowerOps tests (one device up to two GPUs)."""

  # TODO(yuefengz): decouple the num_gpus check from distribution in
  # combinations module so that we can pass in devices instead of a distribution
  # strategy.
  reduction_to_one_combinations = combinations.combine(
      cross_tower_ops=[
          combinations.NamedObject(
              "DefaultReductionToOneDeviceCrossTowerOps",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps()),
          combinations.NamedObject(
              "ReductionToCPUDeviceCrossTowerOps",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps(
                  reduce_to_device=_cpu_device)),
          combinations.NamedObject(
              "AccumulateNCrossTowerOp",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps(
                  accumulation_fn=math_ops.accumulate_n)),
      ],
      distribution=[
          combinations.one_device_strategy,
          combinations.mirrored_strategy_with_gpu_and_cpu,
          combinations.mirrored_strategy_with_two_gpus
      ],
      mode=["graph", "eager"])
  allreduce_combinations = combinations.combine(
      cross_tower_ops=[
          combinations.NamedObject(
              "AllReduce",
              cross_tower_ops_lib.AllReduceCrossTowerOps("nccl", 1, 0, 0)),
          combinations.NamedObject(
              "HierarchicalCopy",
              cross_tower_ops_lib.AllReduceCrossTowerOps(
                  "hierarchical_copy", 8, 0, 0)),
          combinations.NamedObject(
              "AllReduceNoGradientRepacking",
              cross_tower_ops_lib.AllReduceCrossTowerOps("nccl", 0, 0, 0)),
          combinations.NamedObject(
              "HierarchicalCopyAggregateSmallTensors",
              cross_tower_ops_lib.AllReduceCrossTowerOps(
                  "hierarchical_copy", 0, 100, 10))
      ],
      distribution=[combinations.mirrored_strategy_with_two_gpus],
      mode=["graph", "eager"])

  @combinations.generate(reduction_to_one_combinations + allreduce_combinations)
  def testReductionAndBroadcast(self, cross_tower_ops, distribution):
    """Run the shared reduce/batch_reduce/broadcast checks per combination."""
    with distribution.scope():
      self._testReductionAndBroadcast(cross_tower_ops, distribution)

  def testChooseAlgorithm(self):
    """Verify the all-reduce algorithm heuristic on several GPU topologies."""
    # DGX-1-like 8-GPU link topology -> hierarchical copy with 8 packs.
    device_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
                    [0, 5, 6, 7], [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "hierarchical_copy")
    self.assertEqual(result._num_packs, 8)

    # if there are only 4 devices
    device_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "nccl")
    self.assertEqual(result._num_packs, 1)

    # if devices links contain each device itself
    device_links = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 5], [0, 1, 2, 3, 6],
                    [0, 1, 2, 3, 7], [0, 4, 5, 6, 7], [1, 4, 5, 6, 7],
                    [2, 4, 5, 6, 7], [3, 4, 5, 6, 7]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "hierarchical_copy")
    self.assertEqual(result._num_packs, 8)

    # if not dgx1-like links
    device_links = [[0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7], [0, 5, 6, 7],
                    [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6], [1, 2, 3, 4]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "nccl")
    self.assertEqual(result._num_packs, 1)

  @combinations.generate(combinations.combine(
      mode=["graph", "eager"],
      required_gpus=1))
  def testSimpleReduceWithIndexedSlices(self):
    """Sum-reduce two IndexedSlices (with a duplicated index) to one device."""
    devices = ["/cpu:0", "/gpu:0"]
    t0 = _make_indexed_slices([[1., 2.]], [1], [5, 2], devices[0])
    t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], [5, 2], devices[1])
    per_device = value_lib.PerDevice({devices[0]: t0, devices[1]: t1})
    result = cross_tower_ops_lib._simple_reduce(
        per_device, devices[0], math_ops.add_n, vs.VariableAggregation.SUM)

    # Test that the result is semantically equal to both the concatenated
    # IndexedSlices with and without duplicate indices.
    total_with_dups = _make_indexed_slices(
        [[1., 2.], [3., 4.], [5., 6.]], [1, 1, 3], [5, 2], devices[0])
    total_without_dups = _make_indexed_slices(
        [[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
    self._assert_indexed_slices_equal(total_with_dups, result)
    self._assert_indexed_slices_equal(total_without_dups, result)

  @combinations.generate(
      combinations.combine(
          cross_tower_ops_instance=[
              combinations.NamedObject(
                  "ReductionToOneDeviceCrossTowerOps",
                  cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps()),
              combinations.NamedObject(
                  "AllReduceCrossTowerOps",
                  cross_tower_ops_lib.AllReduceCrossTowerOps())
          ],
          aggregation=[vs.VariableAggregation.SUM, vs.VariableAggregation.MEAN],
          batch_reduce=[True, False],
          mode=["graph", "eager"],
          required_gpus=1))
  def testIndexedSlicesAllReduce(self, cross_tower_ops_instance, aggregation,
                                 batch_reduce):
    """Reduce sparse gradients across devices via reduce()/batch_reduce()."""
    devices = ["/cpu:0", "/gpu:0"]
    dense_shape = [5, 2]
    t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
    t1 = _make_indexed_slices(
        [[3., 4.], [5., 6.]], [1, 3], dense_shape, devices[1])
    per_device = value_lib.PerDevice({devices[0]: t0, devices[1]: t1})

    if batch_reduce:
      result = cross_tower_ops_instance.batch_reduce(aggregation,
                                                     [(per_device, devices)])
    else:
      result = cross_tower_ops_instance.reduce(aggregation, per_device, devices)

    total_indices_with_dups = [1, 1, 3]
    total_indices_without_dups = [1, 3]

    if aggregation == vs.VariableAggregation.SUM:
      total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
      total_values_without_dups = [[4., 6.], [5., 6.]]
    else:
      assert aggregation == vs.VariableAggregation.MEAN
      total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
      total_values_without_dups = [[2., 3.], [2.5, 3.]]

    total_mirrored_with_dups = _make_mirrored_indexed_slices(
        devices, total_values_with_dups, total_indices_with_dups, dense_shape)
    total_mirrored_without_dups = _make_mirrored_indexed_slices(
        devices, total_values_without_dups, total_indices_without_dups,
        dense_shape)

    # Test that the result is semantically equal to both the concatenated
    # IndexedSlices, as well as when the duplicate indices are summed up.
    if batch_reduce:
      total_mirrored_with_dups = [total_mirrored_with_dups]
      total_mirrored_without_dups = [total_mirrored_without_dups]

    self._assert_values_equal(total_mirrored_with_dups, result)
    self._assert_values_equal(total_mirrored_without_dups, result)
class MultiWorkerCrossTowerOpsTest(multi_worker_test_base.MultiWorkerTestBase,
                                   CrossTowerOpsTestBase):
  """Multi-worker all-reduce variants of the shared reduction checks."""

  worker_devices = [
      "/job:worker/replica:0/task:0", "/job:worker/replica:0/task:1"
  ]
  # Each NamedObject varies the all-reduce spec, packing and aggregation
  # thresholds of MultiWorkerAllReduce across two workers.
  multi_worker_allreduce_combinations = combinations.combine(
      cross_tower_ops=[
          combinations.NamedObject(
              "MultiWorkerAllReduce",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 0, 0)),
          combinations.NamedObject(
              "MultiWorkerAllReducePack",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, ("pscpu/pscpu", 2, -1), 1, 0, 0)),
          combinations.NamedObject(
              "MultiWorkerAllReduceAggregation",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 100, 10)),
          combinations.NamedObject(
              "MultiWorkerAllReduceMultipleSpecs",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, [("pscpu/pscpu", 2, 100),
                                      ("xring", 2, -1)], 0, 0, 0)),
      ],
      distribution=[
          combinations.multi_worker_strategy_with_cpu,
          combinations.multi_worker_strategy_with_one_gpu,
          combinations.multi_worker_strategy_with_two_gpus
      ],
      mode=["graph"])

  @combinations.generate(multi_worker_allreduce_combinations)
  def testReductionAndBroadcast(self, cross_tower_ops, distribution):
    """Run the shared reduce/batch_reduce/broadcast checks per combination."""
    with distribution.scope():
      self._testReductionAndBroadcast(cross_tower_ops, distribution)
class MultiWorkerCollectiveAllReduceTest(
multi_worker_test_base.MultiWorkerTestBase, parameterized.TestCase):
collective_key_base = 10000
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers."""
cls._workers, cls._ps = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=0)
cls._cluster_spec = {
run_config.TaskType.WORKER: [
"fake_worker_0", "fake_worker_1", "fake_worker_2"
]
}
def setUp(self):
super(MultiWorkerCollectiveAllReduceTest, self).setUp()
# Reusing keys are not supported well. So we have to give a different
# collective key base for different tests.
MultiWorkerCollectiveAllReduceTest.collective_key_base += 100000
def _get_test_objects(self, task_type, task_id, num_gpus=0, local_mode=False):
collective_keys = cross_tower_utils.CollectiveKeys(
group_key_start=10 * num_gpus +
MultiWorkerCollectiveAllReduceTest.collective_key_base,
instance_key_start=num_gpus * 100 +
MultiWorkerCollectiveAllReduceTest.collective_key_base,
instance_key_with_id_start=num_gpus * 10000 +
MultiWorkerCollectiveAllReduceTest.collective_key_base)
if local_mode:
collective_all_reduce_ops = cross_tower_ops_lib.CollectiveAllReduce(
1, num_gpus, collective_keys=collective_keys)
if num_gpus:
devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
else:
devices = ["/device:CPU:0"]
return collective_all_reduce_ops, devices, "local"
else:
collective_all_reduce_ops = cross_tower_ops_lib.CollectiveAllReduce(
3, num_gpus, collective_keys=collective_keys)
if num_gpus:
devices = [
"/job:%s/task:%d/device:GPU:%d" % (task_type, task_id, i)
for i in range(num_gpus)
]
else:
devices = ["/job:%s/task:%d" % (task_type, task_id)]
return collective_all_reduce_ops, devices, self._workers[task_id].target
def _assert_values_equal(self, left, right, sess):
if isinstance(left, list):
for l, r in zip(left, right):
self._assert_values_equal(l, r, sess)
else:
self.assertEqual(type(left), type(right))
self.assertEqual(set(left.devices), set(right.devices))
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 6
left_values = np.array(
sess.run(list(left._index.values()), options=run_options)).flatten()
right_values = np.array(list(right._index.values())).flatten()
self.assertEqual(len(left_values), len(right_values))
for l, r in zip(left_values, right_values):
self.assertEqual(l, r)
  def _test_reduction(self, task_type, task_id, num_gpus, local_mode=False):
    """Runs reduce() and batch_reduce() against every destination kind.

    Executed once per between-graph client.  Returns True so the harness
    can tell the client finished successfully.
    """
    collective_all_reduce, devices, master_target = self._get_test_objects(
        task_type, task_id, num_gpus, local_mode=local_mode)
    if local_mode:
      num_workers = 1
      worker_device = None
    else:
      num_workers = len(self._workers)
      worker_device = "/job:%s/task:%d" % (task_type, task_id)
    with ops.Graph().as_default(), \
         ops.device(worker_device), \
         self.test_session(target=master_target) as sess:
      # Collective ops doesn't support scalar tensors, so we have to construct
      # 1-d tensors.
      values = [constant_op.constant([float(d)]) for d in range(len(devices))]
      per_device = _make_per_device(values, devices)
      # Mean of 0..len(devices)-1, as a 1-element array.
      mean = np.array([(len(devices) - 1.) / 2.])

      values_2 = [constant_op.constant([d + 1.0]) for d in range(len(devices))]
      per_device_2 = _make_per_device(values_2, devices)
      mean_2 = np.array([mean[0] + 1.])

      # Every shape a destination argument can take: None (reduce in place),
      # a mirrored value on the same devices, a mirrored value elsewhere,
      # a device string and a device list.
      destination_mirrored = _fake_mirrored(1., devices)
      destination_different = _fake_mirrored(1., _cpu_device)
      destination_str = _cpu_device
      destination_list = devices

      all_destinations = [
          None, destination_mirrored, destination_different, destination_str,
          destination_list
      ]

      # test reduce()
      for destinations in all_destinations:
        self._assert_values_equal(
            collective_all_reduce.reduce(
                vs.VariableAggregation.MEAN,
                per_device,
                destinations=destinations),
            _fake_mirrored(mean, destinations or per_device), sess)
        self._assert_values_equal(
            collective_all_reduce.reduce(
                vs.VariableAggregation.MEAN,
                per_device_2,
                destinations=destinations),
            _fake_mirrored(mean_2, destinations or per_device), sess)
        # SUM over all workers multiplies by the total replica count.
        self._assert_values_equal(
            collective_all_reduce.reduce(
                vs.VariableAggregation.SUM,
                per_device,
                destinations=destinations),
            _fake_mirrored(mean * len(devices) * num_workers, destinations or
                           per_device), sess)
        self._assert_values_equal(
            collective_all_reduce.reduce(
                vs.VariableAggregation.SUM,
                per_device_2,
                destinations=destinations),
            _fake_mirrored(mean_2 * len(devices) * num_workers, destinations or
                           per_device), sess)

      # test batch_reduce() over every pair of destination kinds.
      for d1, d2 in itertools.product(all_destinations, all_destinations):
        self._assert_values_equal(
            collective_all_reduce.batch_reduce(vs.VariableAggregation.MEAN,
                                               [(per_device, d1),
                                                (per_device_2, d2)]),
            [
                _fake_mirrored(mean, d1 or per_device),
                _fake_mirrored(mean_2, d2 or per_device_2)
            ], sess)
        self._assert_values_equal(
            collective_all_reduce.batch_reduce(vs.VariableAggregation.SUM,
                                               [(per_device, d1),
                                                (per_device_2, d2)]),
            [
                _fake_mirrored(mean * len(devices) * num_workers, d1 or
                               per_device),
                _fake_mirrored(mean_2 * len(devices) * num_workers, d2 or
                               per_device_2)
            ], sess)

    return True
  @combinations.generate(
      combinations.combine(mode=["graph"], num_gpus=[0, 1, 2]))
  def testReductionDistributed(self, num_gpus):
    """Runs the reduction test across the 3-worker fake cluster."""
    # Skip silently when the host has fewer GPUs than this combination needs.
    if context.num_gpus() < num_gpus:
      return
    self._run_between_graph_clients(self._test_reduction, self._cluster_spec,
                                    num_gpus)
  # Collective ops doesn't support strategy with one device.
  def testReductionLocal(self, num_gpus=2):
    """Runs the reduction test in single-worker (local) mode, 2 GPUs."""
    if context.num_gpus() < num_gpus:
      return
    self._run_between_graph_clients(
        self._test_reduction, self._cluster_spec, num_gpus, local_mode=True)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
uw-it-aca/bridge-sis-provisioner | sis_provisioner/tests/account_managers/test_gws_bridge.py | 1 | 7653 | from django.test import TransactionTestCase
from restclients_core.exceptions import DataFailureException
from sis_provisioner.dao.hrp import get_worker
from sis_provisioner.dao.pws import get_person
from sis_provisioner.models import UwAccount, get_now
from sis_provisioner.account_managers.bridge_worker import BridgeWorker
from sis_provisioner.account_managers.gws_bridge import GwsBridgeLoader
from sis_provisioner.tests import (
fdao_pws_override, fdao_gws_override, fdao_bridge_override)
from sis_provisioner.tests.dao import get_mock_bridge_user
from sis_provisioner.tests.account_managers import (
set_uw_account, set_db_records, set_db_err_records)
@fdao_bridge_override
@fdao_gws_override
@fdao_pws_override
class TestGwsBridgeLoader(TransactionTestCase):
    """Tests GwsBridgeLoader against mocked PWS/GWS/Bridge backends."""

    def test_del_bridge_account(self):
        """Only accounts the loader deems deletable are removed from Bridge."""
        loader = GwsBridgeLoader(BridgeWorker())
        ellen = loader.get_bridge().get_user_by_uwnetid('ellen')
        self.assertFalse(loader.del_bridge_account(ellen))
        retiree = loader.get_bridge().get_user_by_uwnetid('retiree')
        self.assertTrue(loader.del_bridge_account(retiree))
        self.assertEqual(loader.get_deleted_count(), 1)

    def test_fetch_users(self):
        """The GWS group fetch yields the seven expected netids."""
        with self.settings(BRIDGE_GWS_CACHE='/tmp/gwsuser1'):
            loader = GwsBridgeLoader(BridgeWorker())
            user_list = loader.fetch_users()
            self.assertEqual(len(user_list), 7)
            self.assertEqual(sorted(user_list),
                             ['affiemp', 'error500', 'faculty', 'javerage',
                              'not_in_pws', 'retiree', 'staff'])

    def test_is_priority_change(self):
        """New, renamed, terminated, and disabled accounts are all priority."""
        loader = GwsBridgeLoader(BridgeWorker())
        # Never provisioned before.
        uw_acc = UwAccount.objects.create(netid="affiemp")
        self.assertTrue(loader.is_priority_change(uw_acc))
        # Netid has changed.
        uw_acc = set_uw_account("faculty")
        uw_acc.prev_netid = "tyler"
        self.assertTrue(loader.is_priority_change(uw_acc))
        # Scheduled for termination.
        uw_acc = set_uw_account("leftuw")
        uw_acc.terminate_at = get_now()
        self.assertTrue(loader.is_priority_change(uw_acc))
        # Disabled in Bridge.
        uw_acc = set_uw_account("staff")
        uw_acc.disabled = True
        self.assertTrue(loader.is_priority_change(uw_acc))

    def test_match_bridge_account(self):
        """Covers the lookup branches: error, absent, deleted, renamed."""
        # 500 error
        uw_acc = set_uw_account("error500")
        loader = GwsBridgeLoader(BridgeWorker())
        self.assertRaises(DataFailureException,
                          loader.match_bridge_account,
                          uw_acc)
        # account not exist
        uw_acc = set_uw_account("affiemp")
        loader = GwsBridgeLoader(BridgeWorker())
        bri_acc = loader.match_bridge_account(uw_acc)
        self.assertIsNone(bri_acc)
        # account is deleted
        uw_acc = set_uw_account("staff")
        bri_acc = loader.match_bridge_account(uw_acc)
        self.assertIsNone(bri_acc)
        # exists an account with a prior netid
        uw_acc = set_uw_account("faculty")
        uw_acc.prev_netid = 'tyler'
        bri_acc = loader.match_bridge_account(uw_acc)
        self.assertEqual(bri_acc.netid, 'tyler')
        # exists two accounts (one with Learning History one without),
        # pick the one with LH
        uw_acc = set_uw_account("retiree")
        uw_acc.bridge_id = 204
        uw_acc.prev_netid = "ellen"
        uw_acc1 = set_uw_account("ellen")
        bri_acc = loader.match_bridge_account(uw_acc)
        self.assertEqual(bri_acc.netid, 'ellen')
        self.assertEqual(bri_acc.bridge_id, 194)

    def test_apply_change_to_bridge(self):
        """Create, restore and netid-change paths bump the right counters."""
        loader = GwsBridgeLoader(BridgeWorker())
        # add new account
        uw_acc = set_uw_account("affiemp")
        affiemp = get_person("affiemp")
        loader.apply_change_to_bridge(uw_acc, affiemp)
        self.assertEqual(loader.get_new_user_count(), 1)
        self.assertEqual(loader.get_updated_count(), 0)
        # restore
        uw_acc = set_uw_account("staff")
        uw_acc.set_bridge_id(196)
        uw_acc.set_disable()
        staff = get_person("staff")
        loader.apply_change_to_bridge(uw_acc, staff)
        self.assertEqual(loader.get_restored_count(), 1)
        self.assertEqual(loader.get_updated_count(), 1)
        # change uid and update
        uw_acc = set_uw_account('faculty')
        uw_acc.prev_netid = 'tyler'
        uw_acc.set_bridge_id(198)
        faculty = get_person("faculty")
        loader.apply_change_to_bridge(uw_acc, faculty)
        self.assertEqual(loader.get_netid_changed_count(), 1)
        self.assertEqual(loader.get_updated_count(), 2)
        # change uid and update
        uw_acc = set_uw_account("retiree")
        uw_acc.bridge_id = 204
        uw_acc.prev_netid = "ellen"
        retiree = get_person("retiree")
        loader.apply_change_to_bridge(uw_acc, retiree)
        self.assertEqual(loader.get_netid_changed_count(), 2)
        self.assertEqual(loader.get_updated_count(), 3)

    def test_load_gws(self):
        """Full load over the fixture data produces the expected counters."""
        with self.settings(ERRORS_TO_ABORT_LOADER=[],
                           BRIDGE_USER_WORK_POSITIONS=2,
                           BRIDGE_GWS_CACHE='/tmp/gwsuser2'):
            set_db_records()
            loader = GwsBridgeLoader(BridgeWorker())
            loader.load()
            self.assertEqual(loader.get_total_count(), 7)
            self.assertEqual(loader.get_total_checked_users(), 6)
            self.assertEqual(loader.get_new_user_count(), 1)
            self.assertEqual(loader.get_restored_count(), 1)
            self.assertEqual(loader.get_netid_changed_count(), 2)
            self.assertEqual(loader.get_updated_count(), 3)
            # One fixture user is designed to fail, so the loader records it.
            self.assertTrue(loader.has_error())

    def test_load_abort(self):
        """A 500 listed in ERRORS_TO_ABORT_LOADER aborts the whole load."""
        with self.settings(ERRORS_TO_ABORT_LOADER=[500],
                           BRIDGE_USER_WORK_POSITIONS=2,
                           BRIDGE_GWS_CACHE='/tmp/gwsuser3'):
            set_db_err_records()
            loader = GwsBridgeLoader(BridgeWorker())
            self.assertRaises(DataFailureException, loader.load)

    def test_account_not_changed(self):
        """An up-to-date Bridge account is recognized as unchanged."""
        with self.settings(BRIDGE_USER_WORK_POSITIONS=2):
            set_db_records()
            loader = GwsBridgeLoader(BridgeWorker())
            person = get_person('javerage')
            hrp_wkr = get_worker(person)
            bridge_account = loader.get_bridge().get_user_by_uwnetid(
                'javerage')
            self.assertTrue(
                loader.account_not_changed(bridge_account, person, hrp_wkr))

    def test_field_not_changed(self):
        """Field-level comparators agree for a matching account and
        disagree for a stale one (tyler vs. faculty)."""
        with self.settings(BRIDGE_USER_WORK_POSITIONS=2):
            loader = GwsBridgeLoader(BridgeWorker())
            person = get_person('javerage')
            hrp_wkr = get_worker(person)
            bridge_account = loader.get_bridge().get_user_by_uwnetid(
                'javerage')
            self.assertTrue(loader.regid_not_changed(bridge_account, person))
            self.assertTrue(loader.eid_not_changed(bridge_account, person))
            self.assertTrue(loader.sid_not_changed(bridge_account, person))
            self.assertTrue(loader.pos_data_not_changed(
                bridge_account, hrp_wkr))

            person = get_person('faculty')
            hrp_wkr = get_worker(person)
            bridge_account = loader.get_bridge().get_user_by_uwnetid('tyler')
            self.assertFalse(loader.regid_not_changed(bridge_account, person))
            self.assertFalse(loader.eid_not_changed(bridge_account, person))
            self.assertFalse(loader.sid_not_changed(bridge_account, person))
            self.assertFalse(loader.pos_data_not_changed(
                bridge_account, hrp_wkr))
| apache-2.0 |
evernym/zeno | plenum/test/consensus/view_change/helper.py | 2 | 4270 | from functools import partial
from typing import Optional, List
import base58
from plenum.common.messages.internal_messages import NewViewCheckpointsApplied
from plenum.common.messages.node_messages import PrePrepare, Checkpoint
from plenum.server.consensus.view_change_service import ViewChangeService
from plenum.server.consensus.batch_id import BatchID
from plenum.test.consensus.helper import SimPool
from plenum.test.simulation.sim_random import SimRandom
def some_checkpoint(random: SimRandom, view_no: int, pp_seq_no: int) -> Checkpoint:
    """Build a Checkpoint for instance 0 covering exactly one seq_no.

    The digest is a random 32-character string, base58-encoded so that it
    resembles a realistic state digest.
    """
    fake_digest = base58.b58encode(random.string(32)).decode()
    return Checkpoint(instId=0,
                      viewNo=view_no,
                      seqNoStart=pp_seq_no,
                      seqNoEnd=pp_seq_no,
                      digest=fake_digest)
def some_pool(random: SimRandom) -> (SimPool, List):
    """Create a random simulated pool with a partially-synced ordering history.

    Returns the pool together with the list of batches that have reached a
    prepared certificate across the pool (and are therefore expected to be
    committed after the next view change).
    """
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)
    view_no = pool._initial_view_no
    log_size = pool.nodes[0].config.LOG_SIZE

    # Create simulated history
    # TODO: Move into helper?
    faulty = (pool_size - 1) // 3
    seq_no_per_cp = 10
    max_batches = 50

    batches = [BatchID(view_no, view_no, n, random.string(40)) for n in range(1, max_batches)]
    checkpoints = [some_checkpoint(random, view_no, n) for n in range(0, max_batches, seq_no_per_cp)]

    # Preprepares: each node has seen some random prefix of the batches.
    # Taking the (faulty)-th smallest count bounds what the weakest honest
    # quorum member can have.
    pp_count = [random.integer(0, len(batches)) for _ in range(pool_size)]
    max_pp = sorted(pp_count)[faulty]
    # Prepares: a node cannot have prepared more than it has pre-prepared.
    p_count = [random.integer(0, min(max_pp, pp)) for pp in pp_count]
    max_p = sorted(p_count)[faulty]
    # Checkpoints: at least one checkpoint (seq 0) per node.
    cp_count = [1 + random.integer(0, min(max_p, p)) // seq_no_per_cp for p in pp_count]
    max_stable_cp_indx = sorted(cp_count)[faulty]
    stable_cp = [checkpoints[random.integer(0, min(max_stable_cp_indx, cp) - 1)].seqNoEnd for cp in cp_count]

    # Initialize consensus data: everything beyond the node's watermark
    # window (stable checkpoint + LOG_SIZE) is truncated.
    for i, node in enumerate(pool.nodes):
        high_watermark = stable_cp[i] + log_size
        node._data.preprepared = batches[:min(high_watermark, pp_count[i])]
        node._data.prepared = batches[:min(high_watermark, p_count[i])]
        node._data.checkpoints.clear()
        node._data.checkpoints.update(checkpoints[:cp_count[i]])
        node._data.stable_checkpoint = stable_cp[i]

    # Mock Ordering service to update preprepares for new view: when the
    # view change completes, rewrite each batch's view_no to the new view.
    for node in pool.nodes:
        def update_shared_data(node, msg: NewViewCheckpointsApplied):
            x = [
                BatchID(view_no=msg.view_no, pp_view_no=batch_id.pp_view_no, pp_seq_no=batch_id.pp_seq_no,
                        pp_digest=batch_id.pp_digest)
                for batch_id in msg.batches
            ]
            node._orderer._data.preprepared = x

        # partial(..., node) binds the current node; a plain closure would
        # late-bind and always reference the last node of the loop.
        node._orderer._subscription.subscribe(node._orderer._stasher, NewViewCheckpointsApplied, partial(update_shared_data, node))

    # A batch is expected to commit in the next view iff at least n - f
    # nodes have it in their prepared list.
    committed = []
    for i in range(1, max_batches):
        prepare_count = sum(1 for node in pool.nodes if i <= len(node._data.prepared))
        has_prepared_cert = prepare_count >= pool_size - faulty
        if has_prepared_cert:
            batch_id = batches[i - 1]
            committed.append(BatchID(batch_id.view_no + 1, batch_id.pp_view_no, batch_id.pp_seq_no, batch_id.pp_digest))

    return pool, committed
def calc_committed(view_changes, max_pp_seq_no, n, f) -> List[BatchID]:
    """Compute the batches that must be committed in the new view.

    A batch qualifies for a given pp_seq_no when at least ``n - f`` of the
    view-change messages list it in *both* their prepared and preprepared
    sets.  Batches are collected in pp_seq_no order, 1 .. max_pp_seq_no - 1.
    """
    quorum = n - f

    def _endorsed_by(vc, candidate):
        # A view-change message endorses a batch only if the batch appears
        # in its prepared AND its preprepared lists.
        return candidate in vc.prepared and candidate in vc.preprepared

    def _quorumed_batch(seq_no):
        # Scan candidates for this seq_no in message order and return the
        # first one endorsed by a quorum, or None.
        candidates = (
            bid
            for vc in view_changes
            for bid in vc.prepared
            if bid[2] == seq_no
        )
        for candidate in candidates:
            endorsements = sum(_endorsed_by(vc, candidate) for vc in view_changes)
            if endorsements >= quorum:
                return candidate
        return None

    committed = []
    for seq_no in range(1, max_pp_seq_no):
        found = _quorumed_batch(seq_no)
        if found is not None:
            committed.append(BatchID(*found))
    return committed
| apache-2.0 |
j-carl/boto | tests/integration/directconnect/test_directconnect.py | 114 | 1570 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from tests.compat import unittest
class DirectConnectTest(unittest.TestCase):
    """
    A very basic test to make sure signatures and
    basic calls work.
    """
    def test_basic(self):
        """Smoke-tests describe_connections against the live service."""
        conn = boto.connect_directconnect()
        response = conn.describe_connections()
        # A successfully signed call returns a dict with a 'connections' list.
        self.assertTrue(response)
        self.assertTrue('connections' in response)
        self.assertIsInstance(response['connections'], list)
| mit |
SDM-OS/playlist | cherrymusicserver/useroptiondb.py | 1 | 5796 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import json
from cherrymusicserver import log
from cherrymusicserver import configuration as cfg
from cherrymusicserver import database as db
from cherrymusicserver.database.connect import BoundConnector
DBNAME = 'useroptions'
class UserOptionDB:
    """Per-user options stored in the ``useroptions`` database.

    Values are stored JSON-encoded, one row per (userid, name).  Options a
    user never set fall back to ``DEFAULTS``, the configuration tree built
    in ``__init__``.  Use :meth:`forUser` to get a proxy bound to one user.
    """

    def __init__(self, connector=None):
        """user configuration:

        hidden values can not be set by the user in the options,
        but might be subject of being set automatically, e.g. the
        heartbeat.
        """
        db.require(DBNAME, '0')
        c = cfg.ConfigBuilder()
        with c['keyboard_shortcuts'] as kbs:
            kbs.valid = r'\d\d?\d?'  # 1-3 digit key codes
            kbs['prev'].value = 89
            kbs['play'].value = 88
            kbs['pause'].value = 67
            kbs['stop'].value = 86
            kbs['next'].value = 66
            kbs['search'].value = 83
        with c['misc.show_playlist_download_buttons'] as pl_download_buttons:
            pl_download_buttons.value = False
        with c['misc.autoplay_on_add'] as autoplay_on_add:
            autoplay_on_add.value = False
        with c['custom_theme.primary_color'] as primary_color:
            primary_color.value = '#F02E75'
            primary_color.valid = '#[0-9a-fA-F]{6}'
        with c['custom_theme.white_on_black'] as white_on_black:
            white_on_black.value = False
        with c['media.may_download'] as may_download:
            may_download.value = False
        with c['ui.confirm_quit_dialog'] as confirm_quit_dialog:
            confirm_quit_dialog.value = True
        with c['last_time_online'] as last_time_online:
            last_time_online.value = 0
            last_time_online.valid = r'\d+'
            last_time_online.hidden = True
            last_time_online.doc = "UNIX TIME (1.1.1970 = never)"
        self.DEFAULTS = c.to_configuration()
        self.conn = BoundConnector(DBNAME, connector).connection()

    def getOptionFromMany(self, key, userids):
        """Return ``{userid: value}`` for ``key`` over several users.

        Bugfix: this used to read ``self.useroptiondb.conn`` — an attribute
        that exists only on ``UserOptionProxy`` — so it always raised
        AttributeError; it also returned the raw DB row instead of the
        decoded value, inconsistent with the ``DEFAULTS[key]`` fallback.
        """
        result = {}
        for userid in userids:
            val = self.conn.execute(
                '''SELECT value FROM option WHERE userid = ? AND name = ?''',
                (userid, key,)).fetchone()
            if val:
                # Stored values are JSON-encoded (see setOptions).
                result[userid] = json.loads(val[0])
            else:
                result[userid] = self.DEFAULTS[key]
        return result

    def forUser(self, userid):
        """Return a proxy exposing this DB scoped to one user's options."""
        return UserOptionDB.UserOptionProxy(self, userid)

    class UserOptionProxy:
        """View of the option table restricted to a single userid."""

        def __init__(self, useroptiondb, userid):
            self.useroptiondb = useroptiondb
            self.userid = userid

        def getChangableOptions(self):
            """Return non-hidden options as a nested dict (for the UI)."""
            opts = self.getOptions()
            visible_props = (p for p in opts.to_properties() if not p.hidden)
            return cfg.from_list(visible_props).to_nested_dict()

        def getOptions(self):
            """Return the user's options merged over the defaults.

            Unparsable stored values are dropped via delete_bad_option.
            """
            results = self.useroptiondb.conn.execute(
                '''SELECT name, value FROM option WHERE userid = ?''',
                (self.userid,)).fetchall()
            useropts = dict((r[0], json.loads(r[1])) for r in results)
            return self.useroptiondb.DEFAULTS.replace(
                useropts,
                on_error=self.delete_bad_option)

        def getOptionValue(self, key):
            """Return the effective value of a single option."""
            return self.getOptions()[key]

        def setOption(self, key, value):
            """Set one option (validated against the defaults schema)."""
            opts = self.getOptions().replace({key: value})
            self.setOptions(opts)

        def setOptions(self, c):
            """Persist a whole configuration: UPDATE existing rows,
            INSERT missing ones, then commit once."""
            for k in cfg.to_list(c):
                value = json.dumps(k.value)
                key = k.key
                sel = self.useroptiondb.conn.execute(
                    '''SELECT name, value FROM option
                    WHERE userid = ? AND name = ?''',
                    (self.userid, key)).fetchone()
                if sel:
                    self.useroptiondb.conn.execute(
                        '''UPDATE option SET value = ?
                        WHERE userid = ? AND name = ?''',
                        (value, self.userid, key))
                else:
                    self.useroptiondb.conn.execute(
                        '''INSERT INTO option (userid, name, value) VALUES
                        (?,?,?)''', (self.userid, key, value))
            self.useroptiondb.conn.commit()

        def deleteOptionIfExists(self, key):
            """Remove the stored row for ``key`` (no-op when absent)."""
            stmt = """DELETE FROM option WHERE userid = ? AND name = ?;"""
            with self.useroptiondb.conn as conn:
                conn.execute(stmt, (self.userid, key))

        def delete_bad_option(self, error):
            """Error hook for getOptions: drop a corrupt value and log it."""
            self.deleteOptionIfExists(error.key)
            log.warning('deleted bad option %r for userid %r (%s)',
                        error.key, self.userid, error.msg)
| gpl-3.0 |
multicoins/marycoin | test/functional/invalidateblock.py | 35 | 2817 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class InvalidateTest(BitcoinTestFramework):
    """Exercises the invalidateblock RPC across three initially
    disconnected nodes that mine competing chains."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self):
        # Start nodes WITHOUT connecting them so each mines its own chain.
        self.setup_nodes()

    def run_test(self):
        self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
        self.log.info("Mine 4 blocks on Node 0")
        self.nodes[0].generate(4)
        assert(self.nodes[0].getblockcount() == 4)
        besthash = self.nodes[0].getbestblockhash()

        self.log.info("Mine competing 6 blocks on Node 1")
        self.nodes[1].generate(6)
        assert(self.nodes[1].getblockcount() == 6)

        self.log.info("Connect nodes to force a reorg")
        connect_nodes_bi(self.nodes,0,1)
        sync_blocks(self.nodes[0:2])
        # Node 0 reorgs to node 1's longer (6-block) chain.
        assert(self.nodes[0].getblockcount() == 6)
        badhash = self.nodes[1].getblockhash(2)

        self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
        self.nodes[0].invalidateblock(badhash)
        newheight = self.nodes[0].getblockcount()
        newhash = self.nodes[0].getbestblockhash()
        if (newheight != 4 or newhash != besthash):
            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))

        self.log.info("Make sure we won't reorg to a lower work chain:")
        connect_nodes_bi(self.nodes,1,2)
        self.log.info("Sync node 2 to node 1 so both have 6 blocks")
        sync_blocks(self.nodes[1:3])
        assert(self.nodes[2].getblockcount() == 6)
        self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert(self.nodes[1].getblockcount() == 4)
        self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert(self.nodes[2].getblockcount() == 2)
        self.log.info("..and then mine a block")
        self.nodes[2].generate(1)
        self.log.info("Verify all nodes are at the right height")
        # Give propagation a moment, then confirm nobody reorged down to
        # node 2's shorter (3-block) chain.
        time.sleep(5)
        assert_equal(self.nodes[2].getblockcount(), 3)
        assert_equal(self.nodes[0].getblockcount(), 4)
        node1height = self.nodes[1].getblockcount()
        if node1height < 4:
            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
# Standard test-framework entry point.
if __name__ == '__main__':
    InvalidateTest().main()
| mit |
qedsoftware/commcare-hq | custom/reports/mc/reports/definitions.py | 2 | 6090 | from django.utils.translation import ugettext_noop as _
# XMLNS of the weekly summary form; sections marked type 'form_lookup' pull
# their columns from submissions of this form instead of the standard source.
WEEKLY_SUMMARY_XMLNS = 'http://openrosa.org/formdesigner/7EFB54F1-337B-42A7-9C6A-460AE8B0CDD8'

# Each report definition below is a list of section dicts:
#   'section'      - translatable section heading
#   'total_column' - optional indicator holding the section total
#   'columns'      - translatable column slugs
#   'type'/'xmlns' - optional: column values come from a form lookup
# All strings are wrapped in ugettext_noop so they are marked for
# translation without being translated at import time.

# Monthly report for health facilities (HF).
HF_MONTHLY_REPORT = [
    {
        'section': _('mc_section_home_visits'),
        'total_column': _('home_visits_total'),
        'columns': [
            _('home_visits_pregnant'),
            _('home_visits_postpartem'),
            _('home_visits_newborn'),
            _('home_visits_children'),
            _('home_visits_other'),
        ]
    },
    {
        'section': _('mc_section_rdt'),
        'total_column': _('rdt_total'),
        'columns': [
            _('rdt_positive_children'),
            _('rdt_positive_adults'),
            _('rdt_others'),
        ]
    },
    {
        'section': _('mc_section_diagnosed_cases'),
        'total_column': _('diagnosed_total'),
        'columns': [
            _('diagnosed_malaria_child'),
            _('diagnosed_malaria_adult'),
            _('diagnosed_diarrhea'),
            _('diagnosed_ari'),
        ]
    },
    {
        'section': _('mc_section_treated_cases'),
        'total_column': _('treated_total'),
        'columns': [
            _('treated_malaria'),
            _('treated_diarrhea'),
            _('treated_ari'),
        ]
    },
    {
        'section': _('mc_section_transfers'),
        'total_column': _('transfer_total'),
        'columns': [
            _('transfer_malnutrition'),
            _('transfer_incomplete_vaccination'),
            _('transfer_danger_signs'),
            _('transfer_prenatal_consult'),
            _('transfer_missing_malaria_meds'),
            _('transfer_other'),
        ]
    },
    {
        'section': _('mc_section_deaths'),
        'total_column': _('deaths_total'),
        'columns': [
            _('deaths_newborn'),
            _('deaths_children'),
            _('deaths_mothers'),
            _('deaths_other'),
        ]
    },
    {
        # Health education has no meaningful total, hence no total_column.
        'section': _('mc_section_health_ed'),
        'columns': [
            _('heath_ed_talks'),
            _('heath_ed_participants'),
        ]
    },
]

# District monthly report: the HF sections plus a stock-balance section
# looked up on the weekly summary form.
DISTRICT_MONTHLY_REPORT = HF_MONTHLY_REPORT + [
    {
        'section': _('mc_section_stock_balance'),
        'type': 'form_lookup',
        'xmlns': WEEKLY_SUMMARY_XMLNS,
        'columns': [
            _('stock_amox_pink'),
            _('stock_amox_green'),
            _('stock_ors'),
            _('stock_ra_50'),
            _('stock_ra_200'),
            _('stock_zinc'),
            _('stock_coartem_yellow'),
            _('stock_coartem_blue'),
            _('stock_coartem_green'),
            _('stock_coartem_brown'),
            _('stock_paracetamol_250'),
            _('stock_paracetamol_500'),
            _('stock_rdt'),
            _('stock_gloves'),
        ]
    },
]

# Weekly report for districts.
DISTRICT_WEEKLY_REPORT = [
    {
        'section': _('mc_section_home_visits'),
        'total_column': _('home_visits_total'),
        'columns': [
            _('home_visits_newborn'),
            _('home_visits_children'),
            _('home_visits_pregnant'),
            _('home_visits_non_pregnant'),
            _('home_visits_followup'),
        ]
    },
    {
        'section': _('mc_section_deaths_in_community'),
        'columns': [
            _('deaths_children'),
        ]
    },
    {
        'section': _('mc_section_stock_balance'),
        'type': 'form_lookup',
        'xmlns': WEEKLY_SUMMARY_XMLNS,
        'columns': [
            _('stock_coartem_yellow'),
            _('stock_coartem_blue'),
        ]
    },
    {
        'section': _('mc_section_validation'),
        'columns': [
            _('patients_given_pneumonia_meds'),
            _('patients_given_diarrhoea_meds'),
            _('patients_given_malaria_meds'),
            _('patients_correctly_referred'),
            _('cases_rdt_not_done'),
            _('cases_danger_signs_not_referred'),
            _('cases_no_malaria_meds'),
        ]
    },
]

# Weekly report for health facilities.
HF_WEEKLY_REPORT = [
    {
        'section': _('mc_section_home_visits'),
        'total_column': _('home_visits_total'),
        'columns': [
            _('home_visits_newborn'),
            _('home_visits_children'),
            _('home_visits_adult'),
        ]
    },
    {
        'section': _('mc_section_transfers'),
        'columns': [
            _('cases_transferred'),
            _('home_visits_followup'),
            _('patients_given_pneumonia_meds'),
            _('patients_given_diarrhoea_meds'),
            _('patients_given_malaria_meds'),
            _('patients_correctly_referred'),
            _('cases_rdt_not_done'),
        ]
    },
]

# for now this is just a lookup for translations; messages are formatted
# elsewhere with .format(number=...).
HF_WEEKLY_MESSAGES = {
    'msg_children': _('Congratulations! This APE has visited {number} children this week. Call and congratulate them! Please help other supervisors learn from your success.'),
    'msg_pneumonia': _('This APE has treated {number} of patients with the incorrect medicine for pneumonia. Please contact him/her and find out why and provide supportive supervision on use of amoxicillin.'),
    'msg_diarrhoea': _('This APE has treated {number} of patients with the incorrect medicine for diarrhoea. Please contact them and find out why and provide supportive supervision on use of zinc and ORS.'),
    'msg_malaria': _('This APE has treated {number} of patients with the incorrect medicine for malaria. Please contact them and find out why and provide supportive supervision on use of Coartem and Paracetamol.'),
    'msg_good_referrals': _('Congratulations! This APE has correctly referred all children they visited this week. Call those APEs to congratulate them! Please help other supervisors learn from your success.'),
    'msg_bad_referrals': _('This APE incorrectly referred {number} patients they visited this week. Please contact them and find out why and provide supportive supervision on correct referral.'),
    'msg_rdt': _('This APE did not perform a RDT on {number} patients with fever this week. Please contact them and find out why and provide supportive supervision on when to perform a RDT.'),
}
| bsd-3-clause |
slisson/intellij-community | plugins/hg4idea/testData/bin/mercurial/statichttprepo.py | 91 | 5175 | # statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import changelog, byterange, url, error
import localrepo, manifest, util, scmutil, store
import urllib, urllib2, errno, os
class httprangereader(object):
    """Read-only, file-like object that fetches byte ranges over HTTP."""

    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def seek(self, pos):
        self.pos = pos

    def read(self, bytes=None):
        """Read `bytes` octets from the current position (or to EOF)."""
        req = urllib2.Request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
        try:
            f = self.opener.open(req)
            data = f.read()
            # Python 2.6+ defines a getcode() function, and 2.4 and
            # 2.5 appear to always have an undocumented code attribute
            # set. If we can't read either of those, fall back to 206
            # and hope for the best.
            code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
        except urllib2.HTTPError, inst:
            # Map 404 to ENOENT so callers see a normal missing-file error.
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data

    def __iter__(self):
        # splitlines(1) keeps the line endings, like file iteration does.
        return iter(self.read().splitlines(1))

    def close(self):
        pass
def build_opener(ui, authinfo):
    """Return a vfs class whose instances read files via HTTP range requests."""
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpvfs(scmutil.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode="r", atomictemp=None):
            # The remote store is read-only; refuse any write mode.
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urllib.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return os.path.join(self.base, path)
            else:
                return self.base

    return statichttpvfs
class statichttppeer(localrepo.localpeer):
    """Peer for static-http repositories: never local, never pushable."""

    def local(self):
        return None

    def canpush(self):
        return False
class statichttprepository(localrepo.localrepository):
    """Read-only repository accessed over plain (static) HTTP."""

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        opener = build_opener(ui, authinfo)
        self.opener = opener(self.path)
        self.vfs = self.opener
        self._phasedefaults = []

        try:
            requirements = scmutil.readrequires(self.opener, self.supported)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

            # check if it is a non-empty old-style repository
            try:
                fp = self.opener("00changelog.i")
                fp.read(1)
                fp.close()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # we do not care about empty old-style repositories here
                msg = _("'%s' does not appear to be an hg repository") % path
                raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        # Lazily-filled caches, mirroring localrepository's attributes.
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self.encodepats = None
        self.decodepats = None

    def _restrictcapabilities(self, caps):
        # A static repo cannot accept pushkey requests.
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def lock(self, wait=True):
        # There is no server-side state to lock over static HTTP.
        raise util.Abort(_('cannot lock static-http repository'))
def instance(ui, path, create):
    """Repository factory: static-http repos can be opened but not created."""
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    # path[7:] drops the leading 7-character scheme prefix (presumably
    # 'static-', leaving a plain http URL) -- TODO confirm against the
    # scheme registration for this module.
    return statichttprepository(ui, path[7:])
| apache-2.0 |
t0x1cigyt/Illinil | Illinil.py | 1 | 52419 | #!/usr/bin/python
import os
import sys, traceback
def main():
try:
print ('''
$$$$$$\ $$\ $$\ $$\ $$\ $$\
\_$$ _|$$ |$$ |\__| \__|$$ |
$$ | $$ |$$ |$$\ $$$$$$$\ $$\ $$ |
$$ | $$ |$$ |$$ |$$ __$$\ $$ |$$ |
$$ | $$ |$$ |$$ |$$ | $$ |$$ |$$ |
$$ | $$ |$$ |$$ |$$ | $$ |$$ |$$ |
$$$$$$\ $$ |$$ |$$ |$$ | $$ |$$ |$$ |
\______|\__|\__|\__|\__| \__|\__|\__|
V1.1 \033[1;m
\033[1;32m+ -- -- +=[ Developer: t0x1c | Homepage: www.instagram.com/_t0x1c\033[1;m
\033[1;32m+ -- -- +=[ 331 Tools \033[1;m
\033[1;91m[W] Before updating your system , please remove all Kali-linux repositories to avoid any kind of problem .\033[1;m
''')
def inicio1():
while True:
print ('''
1) Add Kali repositories & Update
2) View Categories
3) Install classicmenu indicator
4) Install Kali menu
5) Help
''')
opcion0 = raw_input("\033[1;36mkat > \033[1;m")
while opcion0 == "1":
print ('''
1) Add kali linux repositories
2) Update
3) Remove all kali linux repositories
4) View the contents of sources.list file
''')
repo = raw_input("\033[1;32mWhat do you want to do ?> \033[1;m")
if repo == "1":
cmd1 = os.system("apt-key adv --keyserver pgp.mit.edu --recv-keys ED444FF07D8D0BF6")
cmd2 = os.system("echo '# Kali linux repositories | Added by Katoolin\ndeb http://http.kali.org/kali kali-rolling main contrib non-free' >> /etc/apt/sources.list")
elif repo == "2":
cmd3 = os.system("apt-get update -m")
elif repo == "3":
infile = "/etc/apt/sources.list"
outfile = "/etc/apt/sources.list"
delete_list = ["# Kali linux repositories | Added by Katoolin\n", "deb http://http.kali.org/kali kali-rolling main contrib non-free\n"]
fin = open(infile)
os.remove("/etc/apt/sources.list")
fout = open(outfile, "w+")
for line in fin:
for word in delete_list:
line = line.replace(word, "")
fout.write(line)
fin.close()
fout.close()
print ("\033[1;31m\nAll kali linux repositories have been deleted !\n\033[1;m")
elif repo == "back":
inicio1()
elif repo == "gohome":
inicio1()
elif repo == "4":
file = open('/etc/apt/sources.list', 'r')
print (file.read())
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
if opcion0 == "3":
print ('''
ClassicMenu Indicator is a notification area applet (application indicator) for the top panel of Ubuntu's Unity desktop environment.
It provides a simple way to get a classic GNOME-style application menu for those who prefer this over the Unity dash menu.
Like the classic GNOME menu, it includes Wine games and applications if you have those installed.
For more information , please visit : http://www.florian-diesch.de/software/classicmenu-indicator/
''')
repo = raw_input("\033[1;32mDo you want to install classicmenu indicator ? [y/n]> \033[1;m")
if repo == "y":
cmd1 = os.system("add-apt-repository ppa:diesch/testing && apt-get update")
cmd = os.system("sudo apt-get install classicmenu-indicator")
elif opcion0 == "4" :
repo = raw_input("\033[1;32mDo you want to install Kali menu ? [y/n]> \033[1;m")
if repo == "y":
cmd1 = os.system("apt-get install kali-menu")
elif opcion0 == "5":
print ('''
****************** +Commands+ ******************
\033[1;32mback\033[1;m \033[1;33mGo back\033[1;m
\033[1;32mgohome\033[1;m \033[1;33mGo to the main menu\033[1;m
''')
def inicio():
while opcion0 == "2":
print ('''
\033[1;36m**************************** All Categories *****************************\033[1;m
1) Information Gathering 8) Exploitation Tools
2) Vulnerability Analysis 9) Forensics Tools
3) Wireless Attacks 10) Stress Testing
4) Web Applications 11) Password Attacks
5) Sniffing & Spoofing 12) Reverse Engineering
6) Maintaining Access 13) Hardware Hacking
7) Reporting Tools 14) Extra
0) All
''')
print ("\033[1;32mSelect a category or press (0) to install all Kali linux tools .\n\033[1;m")
opcion1 = raw_input("\033[1;36mkat > \033[1;m")
if opcion1 == "back":
inicio1()
elif opcion1 == "gohome":
inicio1()
elif opcion1 == "0":
cmd = os.system("apt-get -f install acccheck ace-voip amap automater braa casefile cdpsnarf cisco-torch cookie-cadger copy-router-config dmitry dnmap dnsenum dnsmap dnsrecon dnstracer dnswalk dotdotpwn enum4linux enumiax exploitdb fierce firewalk fragroute fragrouter ghost-phisher golismero goofile lbd maltego-teeth masscan metagoofil miranda nmap p0f parsero recon-ng set smtp-user-enum snmpcheck sslcaudit sslsplit sslstrip sslyze thc-ipv6 theharvester tlssled twofi urlcrazy wireshark wol-e xplico ismtp intrace hping3 bbqsql bed cisco-auditing-tool cisco-global-exploiter cisco-ocs cisco-torch copy-router-config doona dotdotpwn greenbone-security-assistant hexorbase jsql lynis nmap ohrwurm openvas-cli openvas-manager openvas-scanner oscanner powerfuzzer sfuzz sidguesser siparmyknife sqlmap sqlninja sqlsus thc-ipv6 tnscmd10g unix-privesc-check yersinia aircrack-ng asleap bluelog blueranger bluesnarfer bully cowpatty crackle eapmd5pass fern-wifi-cracker ghost-phisher giskismet gqrx kalibrate-rtl killerbee kismet mdk3 mfcuk mfoc mfterm multimon-ng pixiewps reaver redfang spooftooph wifi-honey wifitap wifite apache-users arachni bbqsql blindelephant burpsuite cutycapt davtest deblaze dirb dirbuster fimap funkload grabber jboss-autopwn joomscan jsql maltego-teeth padbuster paros parsero plecost powerfuzzer proxystrike recon-ng skipfish sqlmap sqlninja sqlsus ua-tester uniscan vega w3af webscarab websploit wfuzz wpscan xsser zaproxy burpsuite dnschef fiked hamster-sidejack hexinject iaxflood inviteflood ismtp mitmproxy ohrwurm protos-sip rebind responder rtpbreak rtpinsertsound rtpmixsound sctpscan siparmyknife sipp sipvicious sniffjoke sslsplit sslstrip thc-ipv6 voiphopper webscarab wifi-honey wireshark xspy yersinia zaproxy cryptcat cymothoa dbd dns2tcp http-tunnel httptunnel intersect nishang polenum powersploit pwnat ridenum sbd u3-pwn webshells weevely casefile cutycapt dos2unix dradis keepnote magictree metagoofil nipper-ng pipal armitage backdoor-factory 
cisco-auditing-tool cisco-global-exploiter cisco-ocs cisco-torch crackle jboss-autopwn linux-exploit-suggester maltego-teeth set shellnoob sqlmap thc-ipv6 yersinia beef-xss binwalk bulk-extractor chntpw cuckoo dc3dd ddrescue dumpzilla extundelete foremost galleta guymager iphone-backup-analyzer p0f pdf-parser pdfid pdgmail peepdf volatility xplico dhcpig funkload iaxflood inviteflood ipv6-toolkit mdk3 reaver rtpflood slowhttptest t50 termineter thc-ipv6 thc-ssl-dos acccheck burpsuite cewl chntpw cisco-auditing-tool cmospwd creddump crunch findmyhash gpp-decrypt hash-identifier hexorbase john johnny keimpx maltego-teeth maskprocessor multiforcer ncrack oclgausscrack pack patator polenum rainbowcrack rcracki-mt rsmangler statsprocessor thc-pptp-bruter truecrack webscarab wordlists zaproxy apktool dex2jar python-distorm3 edb-debugger jad javasnoop jd ollydbg smali valgrind yara android-sdk apktool arduino dex2jar sakis3g smali && wget http://www.morningstarsecurity.com/downloads/bing-ip2hosts-0.4.tar.gz && tar -xzvf bing-ip2hosts-0.4.tar.gz && cp bing-ip2hosts-0.4/bing-ip2hosts /usr/local/bin/")
while opcion1 == "1":
print ('''
\033[1;36m=+[ Information Gathering\033[1;m
1) acccheck 30) lbd
2) ace-voip 31) Maltego Teeth
3) Amap 32) masscan
4) Automater 33) Metagoofil
5) bing-ip2hosts 34) Miranda
6) braa 35) Nmap
7) CaseFile 36) ntop
8) CDPSnarf 37) p0f
9) cisco-torch 38) Parsero
10) Cookie Cadger 39) Recon-ng
11) copy-router-config 40) SET
12) DMitry 41) smtp-user-enum
13) dnmap 42) snmpcheck
14) dnsenum 43) sslcaudit
15) dnsmap 44) SSLsplit
16) DNSRecon 45) sslstrip
17) dnstracer 46) SSLyze
18) dnswalk 47) THC-IPV6
19) DotDotPwn 48) theHarvester
20) enum4linux 49) TLSSLed
21) enumIAX 50) twofi
22) exploitdb 51) URLCrazy
23) Fierce 52) Wireshark
24) Firewalk 53) WOL-E
25) fragroute 54) Xplico
26) fragrouter 55) iSMTP
27) Ghost Phisher 56) InTrace
28) GoLismero 57) hping3
29) goofile
0) Install all Information Gathering tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install acccheck")
elif opcion2 == "2":
cmd = os.system("apt-get install ace-voip")
elif opcion2 == "3":
cmd = os.system("apt-get install amap")
elif opcion2 == "4":
cmd = os.system("apt-get install automater")
elif opcion2 == "5":
cmd = os.system("wget http://www.morningstarsecurity.com/downloads/bing-ip2hosts-0.4.tar.gz && tar -xzvf bing-ip2hosts-0.4.tar.gz && cp bing-ip2hosts-0.4/bing-ip2hosts /usr/local/bin/")
elif opcion2 == "6":
cmd = os.system("apt-get install braa")
elif opcion2 == "7":
cmd = os.system("apt-get install casefile")
elif opcion2 == "8":
cmd = os.system("apt-get install cdpsnarf")
elif opcion2 == "9":
cmd = os.system("apt-get install cisco-torch")
elif opcion2 == "10":
cmd = os.system("apt-get install cookie-cadger")
elif opcion2 == "11":
cmd = os.system("apt-get install copy-router-config")
elif opcion2 == "12":
cmd = os.system("apt-get install dmitry")
elif opcion2 == "13":
cmd = os.system("apt-get install dnmap")
elif opcion2 == "14":
cmd = os.system("apt-get install dnsenum")
elif opcion2 == "15":
cmd = os.system("apt-get install dnsmap")
elif opcion2 == "16":
cmd = os.system("apt-get install dnsrecon")
elif opcion2 == "17":
cmd = os.system("apt-get install dnstracer")
elif opcion2 == "18":
cmd = os.system("apt-get install dnswalk")
elif opcion2 == "19":
cmd = os.system("apt-get install dotdotpwn")
elif opcion2 == "20":
cmd = os.system("apt-get install enum4linux")
elif opcion2 == "21":
cmd = os.system("apt-get install enumiax")
elif opcion2 == "22":
cmd = os.system("apt-get install exploitdb")
elif opcion2 == "23":
cmd = os.system("apt-get install fierce")
elif opcion2 == "24":
cmd = os.system("apt-get install firewalk")
elif opcion2 == "25":
cmd = os.system("apt-get install fragroute")
elif opcion2 == "26":
cmd = os.system("apt-get install fragrouter")
elif opcion2 == "27":
cmd = os.system("apt-get install ghost-phisher")
elif opcion2 == "28":
cmd = os.system("apt-get install golismero")
elif opcion2 == "29":
cmd = os.system("apt-get install goofile")
elif opcion2 == "30":
cmd = os.system("apt-get install lbd")
elif opcion2 == "31":
cmd = os.system("apt-get install maltego-teeth")
elif opcion2 == "32":
cmd = os.system("apt-get install masscan")
elif opcion2 == "33":
cmd = os.system("apt-get install metagoofil")
elif opcion2 == "34":
cmd = os.system("apt-get install miranda")
elif opcion2 == "35":
cmd = os.system("apt-get install nmap")
elif opcion2 == "36":
print ('ntop is unavailable')
elif opcion2 == "37":
cmd = os.system("apt-get install p0f")
elif opcion2 == "38":
cmd = os.system("apt-get install parsero")
elif opcion2 == "39":
cmd = os.system("apt-get install recon-ng")
elif opcion2 == "40":
cmd = os.system("apt-get install set")
elif opcion2 == "41":
cmd = os.system("apt-get install smtp-user-enum")
elif opcion2 == "42":
cmd = os.system("apt-get install snmpcheck")
elif opcion2 == "43":
cmd = os.system("apt-get install sslcaudit")
elif opcion2 == "44":
cmd = os.system("apt-get install sslsplit")
elif opcion2 == "45":
cmd = os.system("apt-get install sslstrip")
elif opcion2 == "46":
cmd = os.system("apt-get install sslyze")
elif opcion2 == "47":
cmd = os.system("apt-get install thc-ipv6")
elif opcion2 == "48":
cmd = os.system("apt-get install theharvester")
elif opcion2 == "49":
cmd = os.system("apt-get install tlssled")
elif opcion2 == "50":
cmd = os.system("apt-get install twofi")
elif opcion2 == "51":
cmd = os.system("apt-get install urlcrazy")
elif opcion2 == "52":
cmd = os.system("apt-get install wireshark")
elif opcion2 == "53":
cmd = os.system("apt-get install wol-e")
elif opcion2 == "54":
cmd = os.system("apt-get install xplico")
elif opcion2 == "55":
cmd = os.system("apt-get install ismtp")
elif opcion2 == "56":
cmd = os.system("apt-get install intrace")
elif opcion2 == "57":
cmd = os.system("apt-get install hping3")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y acccheck ace-voip amap automater braa casefile cdpsnarf cisco-torch cookie-cadger copy-router-config dmitry dnmap dnsenum dnsmap dnsrecon dnstracer dnswalk dotdotpwn enum4linux enumiax exploitdb fierce firewalk fragroute fragrouter ghost-phisher golismero goofile lbd maltego-teeth masscan metagoofil miranda nmap p0f parsero recon-ng set smtp-user-enum snmpcheck sslcaudit sslsplit sslstrip sslyze thc-ipv6 theharvester tlssled twofi urlcrazy wireshark wol-e xplico ismtp intrace hping3 && wget http://www.morningstarsecurity.com/downloads/bing-ip2hosts-0.4.tar.gz && tar -xzvf bing-ip2hosts-0.4.tar.gz && cp bing-ip2hosts-0.4/bing-ip2hosts /usr/local/bin/")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "2":
print ('''
\033[1;36m=+[ Vulnerability Analysis\033[1;m
1) BBQSQL 18) Nmap
2) BED 19)ohrwurm
3) cisco-auditing-tool 20) openvas-administrator
4) cisco-global-exploiter 21) openvas-cli
5) cisco-ocs 22) openvas-manager
6) cisco-torch 23) openvas-scanner
7) copy-router-config 24) Oscanner
8) commix 25) Powerfuzzer
9) DBPwAudit 26) sfuzz
10) DoonaDot 27) SidGuesser
11) DotPwn 28) SIPArmyKnife
12) Greenbone Security Assistant 29) sqlmap
13) GSD 30) Sqlninja
14) HexorBase 31) sqlsus
15) Inguma 32) THC-IPV6
16) jSQL 33) tnscmd10g
17) Lynis 34) unix-privesc-check
35) Yersinia
0) Install all Vulnerability Analysis tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install bbqsql")
elif opcion2 == "2":
cmd = os.system("apt-get install bed")
elif opcion2 == "3":
cmd = os.system("apt-get install cisco-auditing-tool")
elif opcion2 == "4":
cmd = os.system("apt-get install cisco-global-exploiter")
elif opcion2 == "5":
cmd = os.system("apt-get install cisco-ocs")
elif opcion2 == "6":
cmd = os.system("apt-get install cisco-torch")
elif opcion2 == "7":
cmd = os.system("apt-get install copy-router-config")
elif opcion2 == "8":
cmd = os.system("apt-get install git && git clone https://github.com/stasinopoulos/commix.git commix && cd commix && python ./commix.py --install")
elif opcion2 == "9":
cmd = os.system("echo 'download page : http://www.cqure.net/wp/tools/database/dbpwaudit/'")
elif opcion2 == "10":
cmd = os.system("apt-get install doona")
elif opcion2 == "11":
cmd = os.system("apt-get install dotdotpwn")
elif opcion2 == "12":
cmd = os.system("apt-get install greenbone-security-assistant")
elif opcion2 == "13":
cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/gsd.git")
elif opcion2 == "14":
cmd = os.system("apt-get install hexorbase")
elif opcion2 == "15":
print ("Please download inguma from : http://inguma.sourceforge.net")
elif opcion2 == "16":
cmd = os.system("apt-get install jsql")
elif opcion2 == "17":
cmd = os.system("apt-get install lynis")
elif opcion2 == "18":
cmd = os.system("apt-get install nmap")
elif opcion2 == "19":
cmd = os.system("apt-get install ohrwurm")
elif opcion2 == "20":
cmd = os.system("apt-get install openvas-administrator")
elif opcion2 == "21":
cmd = os.system("apt-get install openvas-cli")
elif opcion2 == "22":
cmd = os.system("apt-get install openvas-manager")
elif opcion2 == "23":
cmd = os.system("apt-get install openvas-scanner")
elif opcion2 == "24":
cmd = os.system("apt-get install oscanner")
elif opcion2 == "25":
cmd = os.system("apt-get install powerfuzzer")
elif opcion2 == "26":
cmd = os.system("apt-get install sfuzz")
elif opcion2 == "27":
cmd = os.system("apt-get install sidguesser")
elif opcion2 == "28":
cmd = os.system("apt-get install siparmyknife")
elif opcion2 == "29":
cmd = os.system("apt-get install sqlmap")
elif opcion2 == "30":
cmd = os.system("apt-get install sqlninja")
elif opcion2 == "31":
cmd = os.system("apt-get install sqlsus")
elif opcion2 == "32":
cmd = os.system("apt-get install thc-ipv6")
elif opcion2 == "33":
cmd = os.system("apt-get install tnscmd10g")
elif opcion2 == "34":
cmd = os.system("apt-get install unix-privesc-check")
elif opcion2 == "35":
cmd = os.system("apt-get install yersinia")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y bbqsql bed cisco-auditing-tool cisco-global-exploiter cisco-ocs cisco-torch copy-router-config doona dotdotpwn greenbone-security-assistant hexorbase jsql lynis nmap ohrwurm openvas-cli openvas-manager openvas-scanner oscanner powerfuzzer sfuzz sidguesser siparmyknife sqlmap sqlninja sqlsus thc-ipv6 tnscmd10g unix-privesc-check yersinia")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "3":
print ('''
\033[1;36m=+[ Wireless Attacks\033[1;m
1) Aircrack-ng 17) kalibrate-rtl
2) Asleap 18) KillerBee
3) Bluelog 19) Kismet
4) BlueMaho 20) mdk3
5) Bluepot 21) mfcuk
6) BlueRanger 22) mfoc
7) Bluesnarfer 23) mfterm
8) Bully 24) Multimon-NG
9) coWPAtty 25) PixieWPS
10) crackle 26) Reaver
11) eapmd5pass 27) redfang
12) Fern Wifi Cracker 28) RTLSDR Scanner
13) Ghost Phisher 29) Spooftooph
14) GISKismet 30) Wifi Honey 31) Wifitap
16) gr-scan 32) Wifite
0) Install all Wireless Attacks tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install aircrack-ng")
elif opcion2 == "2":
cmd = os.system("apt-get install asleap")
elif opcion2 == "3":
cmd = os.system("apt-get install bluelog")
elif opcion2 == "4":
cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/bluemaho.git")
elif opcion2 == "5":
cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/bluepot.git")
elif opcion2 == "6":
cmd = os.system("apt-get install blueranger")
elif opcion2 == "7":
cmd = os.system("apt-get install bluesnarfer")
elif opcion2 == "8":
cmd = os.system("apt-get install bully")
elif opcion2 == "9":
cmd = os.system("apt-get install cowpatty")
elif opcion2 == "10":
cmd = os.system("apt-get install crackle")
elif opcion2 == "11":
cmd = os.system("apt-get install eapmd5pass")
elif opcion2 == "12":
cmd = os.system("apt-get install fern-wifi-cracker")
elif opcion2 == "13":
cmd = os.system("apt-get install ghost-phisher")
elif opcion2 == "14":
cmd = os.system("apt-get install giskismet")
elif opcion2 == "16":
cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/gr-scan.git")
elif opcion2 == "17":
cmd = os.system("apt-get install kalibrate-rtl")
elif opcion2 == "18":
cmd = os.system("apt-get install killerbee")
elif opcion2 == "19":
cmd = os.system("apt-get install kismet")
elif opcion2 == "20":
cmd = os.system("apt-get install mdk3")
elif opcion2 == "21":
cmd = os.system("apt-get install mfcuk")
elif opcion2 == "22":
cmd = os.system("apt-get install mfoc")
elif opcion2 == "23":
cmd = os.system("apt-get install mfterm")
elif opcion2 == "24":
cmd = os.system("apt-get install multimon-ng")
elif opcion2 == "25":
cmd = os.system("apt-get install pixiewps")
elif opcion2 == "26":
cmd = os.system("apt-get install reaver")
elif opcion2 == "27":
cmd = os.system("apt-get install redfang")
elif opcion2 == "28":
cmd = os.system("apt-get install rtlsdr-scanner")
elif opcion2 == "29":
cmd = os.system("apt-get install spooftooph")
elif opcion2 == "30":
cmd = os.system("apt-get install wifi-honey")
elif opcion2 == "31":
cmd = os.system("apt-get install wifitap")
elif opcion2 == "32":
cmd = os.system("apt-get install wifite")
elif opcion2 == "0":
cmd = os.system("apt-get install -y aircrack-ng asleap bluelog blueranger bluesnarfer bully cowpatty crackle eapmd5pass fern-wifi-cracker ghost-phisher giskismet gqrx kalibrate-rtl killerbee kismet mdk3 mfcuk mfoc mfterm multimon-ng pixiewps reaver redfang spooftooph wifi-honey wifitap wifite")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "4":
print ('''
\033[1;36m=+[ Web Applications\033[1;m
1) apache-users 21) Parsero
2) Arachni 22) plecost
3) BBQSQL 23) Powerfuzzer
4) BlindElephant 24) ProxyStrike
5) Burp Suite 25) Recon-ng
6) commix 26) Skipfish
7) CutyCapt 27) sqlmap
8) DAVTest 28) Sqlninja
9) deblaze 29) sqlsus
10) DIRB 30) ua-tester
11) DirBuster 31) Uniscan
12) fimap 32) Vega
13) FunkLoad 33) w3af
14) Grabber 34) WebScarab
15) jboss-autopwn 35) Webshag
16) joomscan 36) WebSlayer
17) jSQL 37) WebSploit
18) Maltego Teeth 38) Wfuzz
19) PadBuster 39) WPScan
20) Paros 40) XSSer
41) zaproxy
0) Install all Web Applications tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install apache-users")
elif opcion2 == "2":
cmd = os.system("apt-get install arachni")
elif opcion2 == "3":
cmd = os.system("apt-get install bbqsql")
elif opcion2 == "4":
cmd = os.system("apt-get install blindelephant")
elif opcion2 == "5":
cmd = os.system("apt-get install burpsuite")
elif opcion2 == "6":
cmd = os.system("apt-get install cutycapt")
elif opcion2 == "7":
cmd = os.system("apt-get install git && git clone https://github.com/stasinopoulos/commix.git commix && cd commix && python ./commix.py --install")
elif opcion2 == "8":
cmd = os.system("apt-get install davtest")
elif opcion2 == "9":
cmd = os.system("apt-get install deblaze")
elif opcion2 == "10":
cmd = os.system("apt-get install dirb")
elif opcion2 == "11":
cmd = os.system("apt-get install dirbuster")
elif opcion2 == "12":
cmd = os.system("apt-get install fimap")
elif opcion2 == "13":
cmd = os.system("apt-get install funkload")
elif opcion2 == "14":
cmd = os.system("apt-get install grabber")
elif opcion2 == "15":
cmd = os.system("apt-get install jboss-autopwn")
elif opcion2 == "16":
cmd = os.system("apt-get install joomscan")
elif opcion2 == "17":
cmd = os.system("apt-get install jsql")
elif opcion2 == "18":
cmd = os.system("apt-get install maltego-teeth")
elif opcion2 == "19":
cmd = os.system("apt-get install padbuster")
elif opcion2 == "20":
cmd = os.system("apt-get install paros")
elif opcion2 == "21":
cmd = os.system("apt-get install parsero")
elif opcion2 == "22":
cmd = os.system("apt-get install plecost")
elif opcion2 == "23":
cmd = os.system("apt-get install powerfuzzer")
elif opcion2 == "24":
cmd = os.system("apt-get install proxystrike")
elif opcion2 == "25":
cmd = os.system("apt-get install recon-ng")
elif opcion2 == "26":
cmd = os.system("apt-get install skipfish")
elif opcion2 == "27":
cmd = os.system("apt-get install sqlmap")
elif opcion2 == "28":
cmd = os.system("apt-get install sqlninja")
elif opcion2 == "29":
cmd = os.system("apt-get install sqlsus")
elif opcion2 == "30":
cmd = os.system("apt-get install ua-tester")
elif opcion2 == "31":
cmd = os.system("apt-get install uniscan")
elif opcion2 == "32":
cmd = os.system("apt-get install vega")
elif opcion2 == "33":
cmd = os.system("apt-get install w3af")
elif opcion2 == "34":
cmd = os.system("apt-get install webscarab")
elif opcion2 == "35":
print ("Webshag is unavailable")
elif opcion2 == "36":
cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/webslayer.git")
elif opcion2 == "37":
cmd = os.system("apt-get install websploit")
elif opcion2 == "38":
cmd = os.system("apt-get install wfuzz")
elif opcion2 == "39":
cmd = os.system("apt-get install wpscan")
elif opcion2 == "40":
cmd = os.system("apt-get install xsser")
elif opcion2 == "41":
cmd = os.system("apt-get install zaproxy")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y apache-users arachni bbqsql blindelephant burpsuite cutycapt davtest deblaze dirb dirbuster fimap funkload grabber jboss-autopwn joomscan jsql maltego-teeth padbuster paros parsero plecost powerfuzzer proxystrike recon-ng skipfish sqlmap sqlninja sqlsus ua-tester uniscan vega w3af webscarab websploit wfuzz wpscan xsser zaproxy")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "5":
print ('''
\033[1;36m=+[ Sniffing & Spoofing\033[1;m
1) Burp Suite 17) rtpmixsound
2) DNSChef 18) sctpscan
3) fiked 19) SIPArmyKnife
4) hamster-sidejack 20) SIPp
5) HexInject 21) SIPVicious
6) iaxflood 22) SniffJoke
7) inviteflood 23) SSLsplit
8) iSMTP 24) sslstrip
9) isr-evilgrade 25) THC-IPV6
10) mitmproxy 26) VoIPHopper
11) ohrwurm 27) WebScarab
12) protos-sip 28) Wifi Honey
13) rebind 29) Wireshark
14) responder 30) xspy
15) rtpbreak 31) Yersinia
16) rtpinsertsound 32) zaproxy
0) Install all Sniffing & Spoofing tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install burpsuite")
elif opcion2 == "2":
cmd = os.system("apt-get install dnschef")
elif opcion2 == "3":
cmd = os.system("apt-get install fiked")
elif opcion2 == "4":
cmd = os.system("apt-get install hamster-sidejack")
elif opcion2 == "5":
cmd = os.system("apt-get install hexinject")
elif opcion2 == "6":
cmd = os.system("apt-get install iaxflood")
elif opcion2 == "7":
cmd = os.system("apt-get install inviteflood")
elif opcion2 == "8":
cmd = os.system("apt-get install ismtp")
elif opcion2 == "9":
cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/isr-evilgrade.git")
elif opcion2 == "10":
cmd = os.system("apt-get install mitmproxy")
elif opcion2 == "11":
cmd = os.system("apt-get install ohrwurm")
elif opcion2 == "12":
cmd = os.system("apt-get install protos-sip")
elif opcion2 == "13":
cmd = os.system("apt-get install rebind")
elif opcion2 == "14":
cmd = os.system("apt-get install responder")
elif opcion2 == "15":
cmd = os.system("apt-get install rtpbreak")
elif opcion2 == "16":
cmd = os.system("apt-get install rtpinsertsound")
elif opcion2 == "17":
cmd = os.system("apt-get install rtpmixsound")
elif opcion2 == "18":
cmd = os.system("apt-get install sctpscan")
elif opcion2 == "19":
cmd = os.system("apt-get install siparmyknife")
elif opcion2 == "20":
cmd = os.system("apt-get install sipp")
elif opcion2 == "21":
cmd = os.system("apt-get install sipvicious")
elif opcion2 == "22":
cmd = os.system("apt-get install sniffjoke")
elif opcion2 == "23":
cmd = os.system("apt-get install sslsplit")
elif opcion2 == "24":
cmd = os.system("apt-get install sslstrip")
elif opcion2 == "25":
cmd = os.system("apt-get install thc-ipv6")
elif opcion2 == "26":
cmd = os.system("apt-get install voiphopper")
elif opcion2 == "27":
cmd = os.system("apt-get install webscarab")
elif opcion2 == "28":
cmd = os.system("apt-get install wifi-honey")
elif opcion2 == "29":
cmd = os.system("apt-get install wireshark")
elif opcion2 == "30":
cmd = os.system("apt-get install xspy")
elif opcion2 == "31":
cmd = os.system("apt-get install yersinia")
elif opcion2 == "32":
cmd = os.system("apt-get install zaproxy")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y burpsuite dnschef fiked hamster-sidejack hexinject iaxflood inviteflood ismtp mitmproxy ohrwurm protos-sip rebind responder rtpbreak rtpinsertsound rtpmixsound sctpscan siparmyknife sipp sipvicious sniffjoke sslsplit sslstrip thc-ipv6 voiphopper webscarab wifi-honey wireshark xspy yersinia zaproxy")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "6":
print ('''
\033[1;36m=+[ Maintaining Access\033[1;m
1) CryptCat
2) Cymothoa
3) dbd
4) dns2tcp
5) http-tunnel
6) HTTPTunnel
7) Intersect
8) Nishang
9) polenum
10) PowerSploit
11) pwnat
12) RidEnum
13) sbd
14) U3-Pwn
15) Webshells
16) Weevely
0) Install all Maintaining Access tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install cryptcat")
elif opcion2 == "2":
cmd = os.system("apt-get install cymothoa")
elif opcion2 == "3":
cmd = os.system("apt-get install dbd")
elif opcion2 == "4":
cmd = os.system("apt-get install dns2tcp")
elif opcion2 == "5":
cmd = os.system("apt-get install http-tunnel")
elif opcion2 == "6":
cmd = os.system("apt-get install httptunnel")
elif opcion2 == "7":
cmd = os.system("apt-get install intersect")
elif opcion2 == "8":
cmd = os.system("apt-get install nishang")
elif opcion2 == "9":
cmd = os.system("apt-get install polenum")
elif opcion2 == "10":
cmd = os.system("apt-get install powersploit")
elif opcion2 == "11":
cmd = os.system("apt-get install pwnat")
elif opcion2 == "12":
cmd = os.system("apt-get install ridenum")
elif opcion2 == "13":
cmd = os.system("apt-get install sbd")
elif opcion2 == "14":
cmd = os.system("apt-get install u3-pwn")
elif opcion2 == "15":
cmd = os.system("apt-get install webshells")
elif opcion2 == "16":
cmd = os.system("apt-get install weevely")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y cryptcat cymothoa dbd dns2tcp http-tunnel httptunnel intersect nishang polenum powersploit pwnat ridenum sbd u3-pwn webshells weevely")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "7":
print ('''
\033[1;36m=+[ Reporting Tools\033[1;m
1) CaseFile
2) CutyCapt
3) dos2unix
4) Dradis
5) KeepNote
6) MagicTree
7) Metagoofil
8) Nipper-ng
9) pipal
0) Install all Reporting Tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install casefile")
elif opcion2 == "2":
cmd = os.system("apt-get install cutycapt")
elif opcion2 == "3":
cmd = os.system("apt-get install dos2unix")
elif opcion2 == "4":
cmd = os.system("apt-get install dradis")
elif opcion2 == "5":
cmd = os.system("apt-get install keepnote")
elif opcion2 == "6":
cmd = os.system("apt-get install magictree")
elif opcion2 == "7":
cmd = os.system("apt-get install metagoofil")
elif opcion2 == "8":
cmd = os.system("apt-get install nipper-ng")
elif opcion2 == "9":
cmd = os.system("apt-get install pipal")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y casefile cutycapt dos2unix dradis keepnote magictree metagoofil nipper-ng pipal")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
while opcion1 == "8":
print ('''
\033[1;36m=+[ Exploitation Tools\033[1;m
1) Armitage
2) Backdoor Factory
3) BeEF
4) cisco-auditing-tool
5) cisco-global-exploiter
6) cisco-ocs
7) cisco-torch
8) commix
9) crackle
10) jboss-autopwn
11) Linux Exploit Suggester
12) Maltego Teeth
13) SET
14) ShellNoob
15) sqlmap
16) THC-IPV6
17) Yersinia
0) Install all Exploitation Tools
''')
print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
opcion2 = raw_input("\033[1;36mkat > \033[1;m")
if opcion2 == "1":
cmd = os.system("apt-get install armitage")
elif opcion2 == "2":
cmd = os.system("apt-get install backdoor-factory")
elif opcion2 == "3":
cmd = os.system("apt-get install beef-xss")
elif opcion2 == "4":
cmd = os.system("apt-get install cisco-auditing-tool")
elif opcion2 == "5":
cmd = os.system("apt-get install cisco-global-exploiter")
elif opcion2 == "6":
cmd = os.system("apt-get install cisco-ocs")
elif opcion2 == "7":
cmd = os.system("apt-get install cisco-torch")
elif opcion2 == "8":
cmd = os.system("apt-get install git && git clone https://github.com/stasinopoulos/commix.git commix && cd commix && python ./commix.py --install")
elif opcion2 == "9":
cmd = os.system("apt-get install crackle")
elif opcion2 == "10":
cmd = os.system("apt-get install jboss-autopwn")
elif opcion2 == "11":
cmd = os.system("apt-get install linux-exploit-suggester")
elif opcion2 == "12":
cmd = os.system("apt-get install maltego-teeth")
elif opcion2 == "13":
cmd = os.system("apt-get install set")
elif opcion2 == "14":
cmd = os.system("apt-get install shellnoob")
elif opcion2 == "15":
cmd = os.system("apt-get install sqlmap")
elif opcion2 == "16":
cmd = os.system("apt-get install thc-ipv6")
elif opcion2 == "17":
cmd = os.system("apt-get install yersinia")
elif opcion2 == "back":
inicio()
elif opcion2 == "gohome":
inicio1()
elif opcion2 == "0":
cmd = os.system("apt-get install -y armitage backdoor-factory cisco-auditing-tool cisco-global-exploiter cisco-ocs cisco-torch crackle jboss-autopwn linux-exploit-suggester maltego-teeth set shellnoob sqlmap thc-ipv6 yersinia beef-xss")
else:
print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
# Category 9: Forensics Tools.  Re-prints the menu after every action until
# the user types "back" (return to category menu via inicio()) or "gohome"
# (main menu via inicio1()); numeric choices install packages with apt-get.
while opcion1 == "9":
    print ('''
\033[1;36m=+[ Forensics Tools\033[1;m
1) Binwalk 11) extundelete
2) bulk-extractor 12) Foremost
3) Capstone 13) Galleta
4) chntpw 14) Guymager
5) Cuckoo 15) iPhone Backup Analyzer
6) dc3dd 16) p0f
7) ddrescue 17) pdf-parser
8) DFF 18) pdfid
9) diStorm3 19) pdgmail
10) Dumpzilla 20) peepdf
21) RegRipper
22) Volatility
23) Xplico
0) Install all Forensics Tools
''')
    print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
    opcion2 = raw_input("\033[1;36mkat > \033[1;m")  # Python 2 prompt
    if opcion2 == "1":
        cmd = os.system("apt-get install binwalk")
    elif opcion2 == "2":
        cmd = os.system("apt-get install bulk-extractor")
    elif opcion2 == "3":
        # no apt package: clone from the Kali git mirror instead
        cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/capstone.git")
    elif opcion2 == "4":
        cmd = os.system("apt-get install chntpw")
    elif opcion2 == "5":
        cmd = os.system("apt-get install cuckoo")
    elif opcion2 == "6":
        cmd = os.system("apt-get install dc3dd")
    elif opcion2 == "7":
        cmd = os.system("apt-get install ddrescue")
    elif opcion2 == "8":
        print ('dff is unavailable')
    elif opcion2 == "9":
        # no apt package: clone from the Kali git mirror instead
        cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/distorm3.git")
    elif opcion2 == "10":
        cmd = os.system("apt-get install dumpzilla")
    elif opcion2 == "11":
        cmd = os.system("apt-get install extundelete")
    elif opcion2 == "12":
        cmd = os.system("apt-get install foremost")
    elif opcion2 == "13":
        cmd = os.system("apt-get install galleta")
    elif opcion2 == "14":
        cmd = os.system("apt-get install guymager")
    elif opcion2 == "15":
        cmd = os.system("apt-get install iphone-backup-analyzer")
    elif opcion2 == "16":
        cmd = os.system("apt-get install p0f")
    elif opcion2 == "17":
        cmd = os.system("apt-get install pdf-parser")
    elif opcion2 == "18":
        cmd = os.system("apt-get install pdfid")
    elif opcion2 == "19":
        cmd = os.system("apt-get install pdgmail")
    elif opcion2 == "20":
        cmd = os.system("apt-get install peepdf")
    elif opcion2 == "21":
        print ("Regripper is unavailable")
    elif opcion2 == "22":
        cmd = os.system("apt-get install volatility")
    elif opcion2 == "23":
        cmd = os.system("apt-get install xplico")
    elif opcion2 == "back":
        inicio()   # back to the category menu
    elif opcion2 == "gohome":
        inicio1()  # back to the main menu
    elif opcion2 == "0":
        # batch install of every available tool in this category
        cmd = os.system("apt-get install -y binwalk bulk-extractor chntpw cuckoo dc3dd ddrescue dumpzilla extundelete foremost galleta guymager iphone-backup-analyzer p0f pdf-parser pdfid pdgmail peepdf volatility xplico")
    else:
        print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
# Category 10: Stress Testing.  Same loop pattern as the other categories:
# re-prompts until "back"/"gohome"; digits map to apt-get installs.
while opcion1 == "10":
    print ('''
\033[1;36m=+[ Stress Testing\033[1;m
1) DHCPig
2) FunkLoad
3) iaxflood
4) Inundator
5) inviteflood
6) ipv6-toolkit
7) mdk3
8) Reaver
9) rtpflood
10) SlowHTTPTest
11) t50
12) Termineter
13) THC-IPV6
14) THC-SSL-DOS
0) Install all Stress Testing tools
''')
    print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
    opcion2 = raw_input("\033[1;36mkat > \033[1;m")  # Python 2 prompt
    if opcion2 == "1":
        cmd = os.system("apt-get install dhcpig")
    elif opcion2 == "2":
        cmd = os.system("apt-get install funkload")
    elif opcion2 == "3":
        cmd = os.system("apt-get install iaxflood")
    elif opcion2 == "4":
        # no apt package: clone from the Kali git mirror instead
        cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/inundator.git")
    elif opcion2 == "5":
        cmd = os.system("apt-get install inviteflood")
    elif opcion2 == "6":
        cmd = os.system("apt-get install ipv6-toolkit")
    elif opcion2 == "7":
        cmd = os.system("apt-get install mdk3")
    elif opcion2 == "8":
        cmd = os.system("apt-get install reaver")
    elif opcion2 == "9":
        cmd = os.system("apt-get install rtpflood")
    elif opcion2 == "10":
        cmd = os.system("apt-get install slowhttptest")
    elif opcion2 == "11":
        cmd = os.system("apt-get install t50")
    elif opcion2 == "12":
        cmd = os.system("apt-get install termineter")
    elif opcion2 == "13":
        cmd = os.system("apt-get install thc-ipv6")
    elif opcion2 == "14":
        cmd = os.system("apt-get install thc-ssl-dos ")
    elif opcion2 == "back":
        inicio()   # back to the category menu
    elif opcion2 == "gohome":
        inicio1()  # back to the main menu
    elif opcion2 == "0":
        # batch install of every available tool in this category
        cmd = os.system("apt-get install -y dhcpig funkload iaxflood inviteflood ipv6-toolkit mdk3 reaver rtpflood slowhttptest t50 termineter thc-ipv6 thc-ssl-dos")
    else:
        print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
# Category 11: Password Attacks.  Same loop pattern; a few entries (14, 25)
# only echo a download URL and entry 30 is unavailable.
while opcion1 == "11":
    print ('''
\033[1;36m=+[ Password Attacks\033[1;m
1) acccheck 19) Maskprocessor
2) Burp Suite 20) multiforcer
3) CeWL 21) Ncrack
4) chntpw 22) oclgausscrack
5) cisco-auditing-tool 23) PACK
6) CmosPwd 24) patator
7) creddump 25) phrasendrescher
8) crunch 26) polenum
9) DBPwAudit 27) RainbowCrack
10) findmyhash 28) rcracki-mt
11) gpp-decrypt 29) RSMangler
12) hash-identifier 30) SQLdict
13) HexorBase 31) Statsprocessor
14) THC-Hydra 32) THC-pptp-bruter
15) John the Ripper 33) TrueCrack
16) Johnny 34) WebScarab
17) keimpx 35) wordlists
18) Maltego Teeth 36) zaproxy
0) Install all Password Attacks tools
''')
    print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
    opcion2 = raw_input("\033[1;36mkat > \033[1;m")  # Python 2 prompt
    if opcion2 == "1":
        cmd = os.system("apt-get install acccheck")
    elif opcion2 == "2":
        cmd = os.system("apt-get install burpsuite")
    elif opcion2 == "3":
        cmd = os.system("apt-get install cewl")
    elif opcion2 == "4":
        cmd = os.system("apt-get install chntpw")
    elif opcion2 == "5":
        cmd = os.system("apt-get install cisco-auditing-tool")
    elif opcion2 == "6":
        cmd = os.system("apt-get install cmospwd")
    elif opcion2 == "7":
        cmd = os.system("apt-get install creddump")
    elif opcion2 == "8":
        cmd = os.system("apt-get install crunch")
    elif opcion2 == "9":
        # no apt package: clone from the Kali git mirror instead
        cmd = os.system("apt-get install git && git clone git://git.kali.org/packages/dbpwaudit.git")
    elif opcion2 == "10":
        cmd = os.system("apt-get install findmyhash")
    elif opcion2 == "11":
        cmd = os.system("apt-get install gpp-decrypt")
    elif opcion2 == "12":
        cmd = os.system("apt-get install hash-identifier")
    elif opcion2 == "13":
        cmd = os.system("apt-get install hexorbase")
    elif opcion2 == "14":
        # not packaged: point the user at the upstream site
        cmd = os.system("echo 'please visit : https://www.thc.org/thc-hydra/' ")
    elif opcion2 == "15":
        cmd = os.system("apt-get install john")
    elif opcion2 == "16":
        cmd = os.system("apt-get install johnny")
    elif opcion2 == "17":
        cmd = os.system("apt-get install keimpx")
    elif opcion2 == "18":
        cmd = os.system("apt-get install maltego-teeth")
    elif opcion2 == "19":
        cmd = os.system("apt-get install maskprocessor")
    elif opcion2 == "20":
        cmd = os.system("apt-get install multiforcer")
    elif opcion2 == "21":
        cmd = os.system("apt-get install ncrack")
    elif opcion2 == "22":
        cmd = os.system("apt-get install oclgausscrack")
    elif opcion2 == "23":
        cmd = os.system("apt-get install pack")
    elif opcion2 == "24":
        cmd = os.system("apt-get install patator")
    elif opcion2 == "25":
        # not packaged: point the user at the upstream site
        cmd = os.system("echo 'please visit : http://www.leidecker.info/projects/phrasendrescher/index.shtml' ")
    elif opcion2 == "26":
        cmd = os.system("apt-get install polenum")
    elif opcion2 == "27":
        cmd = os.system("apt-get install rainbowcrack")
    elif opcion2 == "28":
        cmd = os.system("apt-get install rcracki-mt")
    elif opcion2 == "29":
        cmd = os.system("apt-get install rsmangler")
    elif opcion2 == "30":
        print ("Sqldict is unavailable")
    elif opcion2 == "31":
        cmd = os.system("apt-get install statsprocessor")
    elif opcion2 == "32":
        cmd = os.system("apt-get install thc-pptp-bruter")
    elif opcion2 == "33":
        cmd = os.system("apt-get install truecrack")
    elif opcion2 == "34":
        cmd = os.system("apt-get install webscarab")
    elif opcion2 == "35":
        cmd = os.system("apt-get install wordlists")
    elif opcion2 == "36":
        cmd = os.system("apt-get install zaproxy")
    elif opcion2 == "back":
        inicio()   # back to the category menu
    elif opcion2 == "gohome":
        inicio1()  # back to the main menu
    elif opcion2 == "0":
        # batch install of every available tool in this category
        cmd = os.system("apt-get install -y acccheck burpsuite cewl chntpw cisco-auditing-tool cmospwd creddump crunch findmyhash gpp-decrypt hash-identifier hexorbase john johnny keimpx maltego-teeth maskprocessor multiforcer ncrack oclgausscrack pack patator polenum rainbowcrack rcracki-mt rsmangler statsprocessor thc-pptp-bruter truecrack webscarab wordlists zaproxy")
    else:
        print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
# Category 12: Reverse Engineering.  Same loop pattern.  NOTE(review): several
# package names here are capitalized (JD, OllyDbg, Valgrind, YARA) and may not
# match real apt package names -- confirm before relying on these installs.
while opcion1 == "12" :
    print ('''
\033[1;36m=+[ Reverse Engineering\033[1;m
1) apktool
2) dex2jar
3) diStorm3
4) edb-debugger
5) jad
6) javasnoop
7) JD-GUI
8) OllyDbg
9) smali
10) Valgrind
11) YARA
0) Install all Reverse Engineering tools
''')
    print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
    opcion2 = raw_input("\033[1;36mkat > \033[1;m")  # Python 2 prompt
    if opcion2 == "1":
        cmd = os.system("apt-get install apktool")
    elif opcion2 == "2":
        cmd = os.system("apt-get install dex2jar")
    elif opcion2 == "3":
        cmd = os.system("apt-get install python-diStorm3")
    elif opcion2 == "4":
        cmd = os.system("apt-get install edb-debugger")
    elif opcion2 == "5":
        cmd = os.system("apt-get install jad")
    elif opcion2 == "6":
        cmd = os.system("apt-get install javasnoop")
    elif opcion2 == "7":
        cmd = os.system("apt-get install JD")
    elif opcion2 == "8":
        cmd = os.system("apt-get install OllyDbg")
    elif opcion2 == "9":
        cmd = os.system("apt-get install smali")
    elif opcion2 == "10":
        cmd = os.system("apt-get install Valgrind")
    elif opcion2 == "11":
        cmd = os.system("apt-get install YARA")
    elif opcion2 == "back":
        inicio()   # back to the category menu
    elif opcion2 == "gohome":
        inicio1()  # back to the main menu
    elif opcion2 == "0":
        # batch install of every tool in this category
        cmd = os.system("apt-get install -y apktool dex2jar python-diStorm3 edb-debugger jad javasnoop JD OllyDbg smali Valgrind YARA")
    else:
        print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
# Category 13: Hardware Hacking.  Same loop pattern as the other categories.
while opcion1 == "13" :
    print ('''
\033[1;36m=+[ Hardware Hacking\033[1;m
1) android-sdk
2) apktool
3) Arduino
4) dex2jar
5) Sakis3G
6) smali
0) Install all Hardware Hacking tools
''')
    print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
    opcion2 = raw_input("\033[1;36mkat > \033[1;m")  # Python 2 prompt
    if opcion2 == "1":
        cmd = os.system("apt-get install android-sdk")
    elif opcion2 == "2":
        cmd = os.system("apt-get install apktool")
    elif opcion2 == "3":
        cmd = os.system("apt-get install arduino")
    elif opcion2 == "4":
        cmd = os.system("apt-get install dex2jar")
    elif opcion2 == "5":
        cmd = os.system("apt-get install sakis3g")
    elif opcion2 == "6":
        cmd = os.system("apt-get install smali")
    elif opcion2 == "back":
        inicio()   # back to the category menu
    elif opcion2 == "gohome":
        inicio1()  # back to the main menu
    elif opcion2 == "0":
        # batch install of every tool in this category
        cmd = os.system("apt-get install -y android-sdk apktool arduino dex2jar sakis3g smali")
    else:
        print ("\033[1;31mSorry, that was an invalid command!\033[1;m")
# Category 14: Extra.  Unlike the other category loops this one has no "0"
# (install-all) option and no invalid-input message: any unrecognized input
# simply re-prints the menu.
while opcion1 == "14" :
    print ('''
\033[1;36m=+[ Extra\033[1;m
1) Wifresti
2) Squid3
''')
    print ("\033[1;32mInsert the number of the tool to install it .\n\033[1;m")
    opcion2 = raw_input("\033[1;36mkat > \033[1;m")  # Python 2 prompt
    if opcion2 == "1":
        # install wifresti from GitHub into /usr/bin and run it immediately
        cmd = os.system("git clone https://github.com/LionSec/wifresti.git && cp wifresti/wifresti.py /usr/bin/wifresti && chmod +x /usr/bin/wifresti && wifresti")
        print (" ")
    elif opcion2 == "2":
        cmd = os.system("apt-get install squid3")
        print (" ")
    elif opcion2 == "back":
        inicio()   # back to the category menu
    elif opcion2 == "gohome":
        inicio1()  # back to the main menu
inicio()
inicio1()
except KeyboardInterrupt:
print ("Shutdown requested...Goodbye...")
except Exception:
traceback.print_exc(file=sys.stdout)
sys.exit(0)
# Script entry point: launch the interactive installer menu.
if __name__ == "__main__":
    main()
| gpl-3.0 |
Toreny/UAV | Tools/LogAnalyzer/tests/TestCompass.py | 124 | 6034 | from LogAnalyzer import Test,TestResult
import DataflashLog
import math
class TestCompass(Test):
    '''test for compass offsets and throttle interference'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Compass"

    def run(self, logdata, verbose):
        '''Evaluate compass health for one dataflash log.

        Checks, in order:
          1. the static COMPASS_OFS_X/Y/Z parameters against warn/fail limits,
          2. the largest offsets recorded in the MAG channel,
          3. the variation of the magnetic field length over the flight.

        The verdict (GOOD/WARN/FAIL) and human-readable findings are
        accumulated in self.result.
        '''
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        def vec_len(x):
            # Euclidean length of a 3-vector.
            # BUGFIX: the original called bare sqrt(), which is undefined in
            # this module (only "import math" is in scope) and raised
            # NameError as soon as the test ran.
            return math.sqrt(x[0]**2 + x[1]**2 + x[2]**2)

        def FAIL():
            self.result.status = TestResult.StatusType.FAIL

        def WARN():
            # Never downgrade an already-failed result to a warning.
            if self.result.status != TestResult.StatusType.FAIL:
                self.result.status = TestResult.StatusType.WARN

        try:
            warnOffset = 300
            failOffset = 500

            # 1. static compass offset parameters
            param_offsets = (
                logdata.parameters["COMPASS_OFS_X"],
                logdata.parameters["COMPASS_OFS_Y"],
                logdata.parameters["COMPASS_OFS_Z"]
            )

            if vec_len(param_offsets) > failOffset:
                FAIL()
                self.result.statusMessage = "FAIL: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0],param_offsets[1],param_offsets[2])
            elif vec_len(param_offsets) > warnOffset:
                WARN()
                self.result.statusMessage = "WARN: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0],param_offsets[1],param_offsets[2])

            # 2. largest offsets actually logged in the MAG channel
            if "MAG" in logdata.channels:
                max_log_offsets = zip(
                    map(lambda x: x[1],logdata.channels["MAG"]["OfsX"].listData),
                    map(lambda x: x[1],logdata.channels["MAG"]["OfsY"].listData),
                    map(lambda x: x[1],logdata.channels["MAG"]["OfsZ"].listData)
                )
                # keep the sample with the largest vector length
                max_log_offsets = reduce(lambda x,y: x if vec_len(x) > vec_len(y) else y, max_log_offsets)

                if vec_len(max_log_offsets) > failOffset:
                    FAIL()
                    self.result.statusMessage += "FAIL: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0],max_log_offsets[1],max_log_offsets[2])
                elif vec_len(max_log_offsets) > warnOffset:
                    WARN()
                    self.result.statusMessage += "WARN: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0],max_log_offsets[1],max_log_offsets[2])

            # 3. check for mag field length change, and length outside of
            # recommended range
            if "MAG" in logdata.channels:
                percentDiffThresholdWARN = 0.25
                percentDiffThresholdFAIL = 0.35
                minMagFieldThreshold = 120.0
                maxMagFieldThreshold = 550.0
                index = 0
                length = len(logdata.channels["MAG"]["MagX"].listData)
                magField = []
                (minMagField, maxMagField) = (None,None)
                (minMagFieldLine, maxMagFieldLine) = (None,None)
                zerosFound = False
                while index<length:
                    mx = logdata.channels["MAG"]["MagX"].listData[index][1]
                    my = logdata.channels["MAG"]["MagY"].listData[index][1]
                    mz = logdata.channels["MAG"]["MagZ"].listData[index][1]
                    if ((mx==0) and (my==0) and (mz==0)):
                        # sometimes they're zero, not sure why, same reason as
                        # why we get NaNs as offsets?
                        zerosFound = True
                    else:
                        mf = math.sqrt(mx*mx + my*my + mz*mz)
                        magField.append(mf)
                        # BUGFIX: the original only seeded min/max when the very
                        # first sample (index 0) was non-zero; an all-zero first
                        # sample left them as None for the whole log.
                        if minMagField is None or mf < minMagField:
                            minMagField = mf
                            minMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
                        if maxMagField is None or mf > maxMagField:
                            maxMagField = mf
                            maxMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
                    index += 1

                if magField:
                    percentDiff = (maxMagField-minMagField) / minMagField
                    if percentDiff > percentDiffThresholdFAIL:
                        FAIL()
                        self.result.statusMessage = self.result.statusMessage + "Large change in mag_field (%.2f%%)\n" % (percentDiff*100)
                    elif percentDiff > percentDiffThresholdWARN:
                        WARN()
                        self.result.statusMessage = self.result.statusMessage + "Moderate change in mag_field (%.2f%%)\n" % (percentDiff*100)
                    else:
                        self.result.statusMessage = self.result.statusMessage + "mag_field interference within limits (%.2f%%)\n" % (percentDiff*100)

                    if minMagField < minMagFieldThreshold:
                        self.result.statusMessage = self.result.statusMessage + "Min mag field length (%.2f) < recommended (%.2f)\n" % (minMagField,minMagFieldThreshold)
                    if maxMagField > maxMagFieldThreshold:
                        self.result.statusMessage = self.result.statusMessage + "Max mag field length (%.2f) > recommended (%.2f)\n" % (maxMagField,maxMagFieldThreshold)
                else:
                    # ROBUSTNESS: every sample was (0,0,0); the ratio checks
                    # above would previously have crashed on None arithmetic.
                    WARN()
                    self.result.statusMessage = self.result.statusMessage + "No non-zero MAG X/Y/Z samples, unable to test mag_field interference\n"

                if zerosFound:
                    WARN()
                    self.result.statusMessage = self.result.statusMessage + "All zeros found in MAG X/Y/Z log data\n"

                if verbose and magField:
                    self.result.statusMessage = self.result.statusMessage + "Min mag_field of %.2f on line %d\n" % (minMagField,minMagFieldLine)
                    self.result.statusMessage = self.result.statusMessage + "Max mag_field of %.2f on line %d\n" % (maxMagField,maxMagFieldLine)
            else:
                self.result.statusMessage = self.result.statusMessage + "No MAG data, unable to test mag_field interference\n"

        except KeyError as e:
            self.result.status = TestResult.StatusType.FAIL
            self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
johndpope/tensorflow | tensorflow/python/debug/cli/stepper_cli_test.py | 47 | 19722 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Stepper CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.debug.lib import stepper
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
# Regex pattern for a node line in the stepper CLI output: any line that
# contains a "(...)" group followed by a "[...]" status field.
NODE_LINE_PATTERN = re.compile(r".*\(.*\).*\[.*\].*")
def _parse_sorted_nodes_list(lines):
  """Extract node names, status labels and the next-node pointer.

  Args:
    lines: (list of str) Lines from which the node list and associated
      information will be extracted.

  Returns:
    (list of str) The list of node names.
    (list of str) The list of status labels.
    (int) 0-based index among the nodes for the node pointed by the next-node
      pointer. If no such node exists, -1.
  """
  names = []
  labels = []
  pointer = -1

  row = 0
  for text in lines:
    if not NODE_LINE_PATTERN.match(text):
      continue

    # The node name is the last space-separated token on the line; the
    # status label sits between the square brackets.
    names.append(text.split(" ")[-1])
    labels.append(text[text.index("[") + 1:text.index("]")])

    if text.strip().startswith(
        stepper_cli.NodeStepperCLI.NEXT_NODE_POINTER_STR):
      pointer = row
    row += 1

  return names, labels, pointer
def _parsed_used_feeds(lines):
  """Parse the "Stepper used feeds:" section of stepper CLI output.

  Args:
    lines: (list of str) The output text lines to be parsed.

  Returns:
    (dict) Mapping from feed name to feed type. Empty if the section is
    absent from `lines`.
  """
  feed_types = {}

  begin_line = -1
  for i, line in enumerate(lines):
    if line.startswith("Stepper used feeds:"):
      begin_line = i + 1
      break

  if begin_line == -1:
    return feed_types

  for line in lines[begin_line:]:
    line = line.strip()
    if not line:
      # A blank line terminates the section.
      return feed_types
    feed_name = line.split(" : ")[0].strip()
    feed_type = line.split(" : ")[1].strip()
    feed_types[feed_name] = feed_type

  # BUGFIX: the original fell off the end and implicitly returned None when
  # the feeds section extended to the last output line (no trailing blank).
  return feed_types
def _parse_updated(lines):
  """Parse the Updated section in the output text lines.

  Args:
    lines: (list of str) The output text lines to be parsed.

  Returns:
    If the Updated section does not exist, returns None.
    Otherwise, returns the Tensor names included in the section.
  """
  for idx, text in enumerate(lines):
    if not text.startswith("Updated:"):
      continue
    # Collect every non-blank line after the header; a blank line (or the
    # end of the output) closes the section.
    collected = []
    for entry in lines[idx + 1:]:
      entry = entry.strip()
      if not entry:
        break
      collected.append(entry)
    return collected

  return None
class NodeStepperSimpleGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self.a = variables.Variable(10.0, name="a")
self.b = variables.Variable(20.0, name="b")
self.c = math_ops.add(self.a, self.b, name="c") # Should be 30.0.
self.d = math_ops.subtract(self.a, self.c, name="d") # Should be -20.0.
self.e = math_ops.multiply(self.c, self.d, name="e") # Should be -600.0.
self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
self.f = math_ops.multiply(self.e, self.ph, name="f")
self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
self.e, name="opt")
self.sess = session.Session()
self.sess.run(self.a.initializer)
self.sess.run(self.b.initializer)
def tearDown(self):
ops.reset_default_graph()
def _assert_nodes_topologically_sorted_with_target_e(self, node_names):
"""Check the topologically sorted order of the node names."""
self.assertGreaterEqual(len(node_names), 7)
self.assertLess(node_names.index("a"), node_names.index("a/read"))
self.assertLess(node_names.index("b"), node_names.index("b/read"))
self.assertLess(node_names.index("a/read"), node_names.index("c"))
self.assertLess(node_names.index("b/read"), node_names.index("c"))
self.assertLess(node_names.index("a/read"), node_names.index("d"))
self.assertLess(node_names.index("c"), node_names.index("d"))
self.assertLess(node_names.index("c"), node_names.index("e"))
self.assertLess(node_names.index("d"), node_names.index("e"))
def _assert_nodes_topologically_sorted_with_target_f(self, node_names):
self._assert_nodes_topologically_sorted_with_target_e(node_names)
self.assertGreaterEqual(len(node_names), 9)
self.assertLess(node_names.index("ph"), node_names.index("f"))
self.assertLess(node_names.index("e"), node_names.index("f"))
def testListingSortedNodesPresentsTransitveClosure(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self._assert_nodes_topologically_sorted_with_target_e(node_names)
self.assertEqual(len(node_names), len(stat_labels))
for stat_label in stat_labels:
self.assertEqual(" ", stat_label)
self.assertEqual(0, node_pointer)
def testListingSortedNodesLabelsPlaceholders(self):
with stepper.NodeStepper(self.sess, self.f) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self._assert_nodes_topologically_sorted_with_target_f(node_names)
index_ph = node_names.index("ph")
self.assertEqual(len(node_names), len(stat_labels))
for i in xrange(len(stat_labels)):
if index_ph == i:
self.assertIn(stepper_cli.NodeStepperCLI.STATE_IS_PLACEHOLDER,
stat_labels[i])
else:
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_IS_PLACEHOLDER,
stat_labels[i])
self.assertEqual(0, node_pointer)
def testContToNonexistentNodeShouldError(self):
with stepper.NodeStepper(self.sess, self.f) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["foobar"])
self.assertEqual([
"ERROR: foobar is not in the transitive closure of this stepper "
"instance."
], output.lines)
def testContToNodeOutsideTransitiveClosureShouldError(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["f"])
self.assertEqual([
"ERROR: f is not in the transitive closure of this stepper "
"instance."
], output.lines)
def testContToValidNodeShouldUpdateStatus(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
index_c = node_names.index("c")
self.assertEqual(" ", stat_labels[index_c])
self.assertEqual(0, node_pointer)
output = cli.cont("c")
self.assertIsNone(_parse_updated(output.lines))
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self.assertGreaterEqual(len(node_names), 3)
self.assertIn("c", node_names)
index_c = node_names.index("c")
self.assertEqual(index_c, node_pointer)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[index_c])
output = cli.cont("d")
self.assertIsNone(_parse_updated(output.lines))
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
used_feed_types = _parsed_used_feeds(output.lines)
self.assertEqual({
"c:0": stepper.NodeStepper.FEED_TYPE_HANDLE,
"a/read:0": stepper.NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, used_feed_types)
self.assertGreaterEqual(len(node_names), 3)
self.assertIn("d", node_names)
index_d = node_names.index("d")
self.assertEqual(index_d, node_pointer)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[index_d])
def testSteppingOneStepAtATimeShouldUpdateStatus(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.list_sorted_nodes([])
orig_node_names, _, node_pointer = _parse_sorted_nodes_list(output.lines)
self.assertEqual(0, node_pointer)
for i in xrange(len(orig_node_names)):
output = cli.step([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
next_node_name = node_names[node_pointer]
self.assertEqual(orig_node_names[i], next_node_name)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT,
stat_labels[node_pointer])
# The order in which the nodes are listed should not change as the
# stepping happens.
output = cli.list_sorted_nodes([])
node_names, _, node_pointer = _parse_sorted_nodes_list(output.lines)
self.assertEqual(orig_node_names, node_names)
if i < len(orig_node_names) - 1:
self.assertEqual(i + 1, node_pointer)
else:
# Stepped over the limit. Pointer should be at -1.
self.assertEqual(-1, node_pointer)
# Attempt to step once more after the end has been reached should error
# out.
output = cli.step([])
self.assertEqual([
"ERROR: Cannot step any further because the end of the sorted "
"transitive closure has been reached."
], output.lines)
def testSteppingMultipleStepsUpdatesStatus(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.list_sorted_nodes([])
orig_node_names, _, _ = _parse_sorted_nodes_list(output.lines)
output = cli.step(["-t", "3"])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self.assertEqual(orig_node_names[2], node_names[node_pointer])
for i in xrange(node_pointer):
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[i])
for i in xrange(node_pointer + 1, len(stat_labels)):
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[i])
def testContToNodeWithoutOutputTensorInClosureShowsNoHandleCached(self):
with stepper.NodeStepper(self.sess, self.opt) as node_stepper:
sorted_nodes = node_stepper.sorted_nodes()
closure_elements = node_stepper.closure_elements()
# Find a node which is in the list of sorted nodes, but whose output
# Tensor is not in the transitive closure.
no_output_node = None
for node in sorted_nodes:
if (node + ":0" not in closure_elements and
node + ":1" not in closure_elements):
no_output_node = node
break
self.assertIsNotNone(no_output_node)
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont([no_output_node])
self.assertIsNone(_parse_updated(output.lines))
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self.assertEqual(no_output_node, node_names[node_pointer])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_CONT,
stat_labels[node_pointer])
def testContToUpdateNodeWithTrackingLeadsToDirtyVariableLabel(self):
with stepper.NodeStepper(self.sess, self.opt) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["opt/update_b/ApplyGradientDescent", "-i"])
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
def testContToUpdateNodeWithoutTrackingLeadsToNoDirtyVariableLabel(self):
with stepper.NodeStepper(self.sess, self.opt) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["opt/update_b/ApplyGradientDescent"])
self.assertItemsEqual([self.b.name], _parse_updated(output.lines))
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
def testContWithRestoreVariablesOptionShouldRestoreVariableValue(self):
with stepper.NodeStepper(self.sess, self.opt) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["opt/update_a/ApplyGradientDescent",
"--invalidate_from_updated_variables"])
self.assertItemsEqual([self.a.name], _parse_updated(output.lines))
# After cont() call on .../update_a/..., Variable a should have been
# marked as dirty, whereas b should not have.
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
output = cli.cont(["opt/update_b/ApplyGradientDescent", "-r", "-i"])
self.assertItemsEqual([self.b.name], _parse_updated(output.lines))
# After cont() call on .../update_b/... with the -r flag, Variable b
# should have been marked as dirty, whereas Variable a should not be
# because it should have been restored.
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
def testPrintTensorShouldWorkWithTensorName(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
cli.cont("d")
output = cli.print_tensor(["d:0"])
self.assertEqual("Tensor \"d:0\":", output.lines[0])
self.assertEqual("-20.0", output.lines[-1])
def testPrintTensorShouldWorkWithNodeNameWithOutputTensor(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
cli.cont("d")
output = cli.print_tensor(["d"])
self.assertEqual("Tensor \"d:0\":", output.lines[0])
self.assertEqual("-20.0", output.lines[-1])
def testPrintTensorShouldWorkSlicingString(self):
ph_value = np.array([[1.0, 0.0], [0.0, 2.0]])
with stepper.NodeStepper(
self.sess, self.f, feed_dict={self.ph: ph_value}) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.print_tensor(["ph:0[:, 1]"])
self.assertEqual("Tensor \"ph:0[:, 1]\":", output.lines[0])
self.assertEqual(repr(ph_value[:, 1]), output.lines[-1])
output = cli.print_tensor(["ph[:, 1]"])
self.assertEqual("Tensor \"ph:0[:, 1]\":", output.lines[0])
self.assertEqual(repr(ph_value[:, 1]), output.lines[-1])
def testPrintTensorWithNonexistentTensorShouldError(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.print_tensor(["foobar"])
self.assertEqual([
"ERROR: foobar is not in the transitive closure of this stepper "
"instance."
], output.lines)
def testPrintTensorWithNoHandleShouldError(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.print_tensor("e")
self.assertEqual([
"This stepper instance does not have access to the value of tensor "
"\"e:0\""
], output.lines)
def testInjectTensorValueByTensorNameShouldBeReflected(self):
with stepper.NodeStepper(self.sess, self.e) as node_stepper:
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["d"])
node_names, _, node_pointer = _parse_sorted_nodes_list(output.lines)
self.assertEqual("d", node_names[node_pointer])
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
index_d = node_names.index("d")
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[index_d])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_OVERRIDDEN,
stat_labels[index_d])
self.assertAllClose(-20.0, node_stepper.get_tensor_value("d:0"))
output = cli.inject_value(["d:0", "20.0"])
# Verify that the override is available.
self.assertEqual(["d:0"], node_stepper.override_names())
# Verify that the list of sorted nodes reflects the existence of the value
# override (i.e., injection).
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
index_d = node_names.index("d")
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_CONT,
stat_labels[index_d])
self.assertIn(stepper_cli.NodeStepperCLI.STATE_OVERRIDDEN,
stat_labels[index_d])
def testInjectTensorValueByNodeNameShouldBeReflected(self):
    """Injecting by bare node name resolves to the node's output slot 0."""
    with stepper.NodeStepper(self.sess, self.e) as node_stepper:
        ui = stepper_cli.NodeStepperCLI(node_stepper)
        ui.inject_value(["d", "20.0"])
        self.assertEqual(["d:0"], node_stepper.override_names())
def testInjectToNonexistentTensorShouldError(self):
    """Injecting into a tensor outside the closure must emit an error."""
    with stepper.NodeStepper(self.sess, self.e) as node_stepper:
        ui = stepper_cli.NodeStepperCLI(node_stepper)
        screen_output = ui.inject_value(["foobar:0", "20.0"])
        expected_lines = [
            "ERROR: foobar:0 is not in the transitive closure of this stepper "
            "instance."
        ]
        self.assertEqual(expected_lines, screen_output.lines)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
clearcare/salesforce-python-toolkit | tests/test_base.py | 17 | 33361 | # coding: utf-8
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Written by: David Lanstein ( dlanstein gmail com )
import datetime
import re
import string
import sys
import unittest
sys.path.append('../')
from sforce.base import SforceBaseClient
from suds import WebFault
# strings we can look for to ensure headers sent
ALLOW_FIELD_TRUNCATION_HEADER_STRING = '<tns:allowFieldTruncation>false</tns:allowFieldTruncation>'
ASSIGNMENT_RULE_HEADER_STRING = '<tns:useDefaultRule>true</tns:useDefaultRule>'
CALL_OPTIONS_STRING = '<tns:defaultNamespace>*DEVELOPER NAMESPACE PREFIX*</tns:defaultNamespace>'
EMAIL_HEADER_STRING = '<tns:triggerAutoResponseEmail>true</tns:triggerAutoResponseEmail>'
LOCALE_OPTIONS_STRING = '<tns:language>en_US</tns:language>'
# starting in 0.3.7, xsi:type="ns1:ID" is omitted from opening tag
LOGIN_SCOPE_HEADER_STRING = '>00D000xxxxxxxxx</tns:organizationId>'
MRU_HEADER_STRING = '<tns:updateMru>true</tns:updateMru>'
PACKAGE_VERSION_HEADER_STRING = '<tns:namespace>SFGA</tns:namespace>'
QUERY_OPTIONS_STRING = '<tns:batchSize>200</tns:batchSize>'
SESSION_HEADER_STRING = '</tns:sessionId>'
# starting in 0.3.7, xsi:type="ns1:ID" is omitted from opening tag
USER_TERRITORY_DELETE_HEADER_STRING = '>005000xxxxxxxxx</tns:transferToUserId>'
class SforceBaseClientTest(unittest.TestCase):
def setUp(self):
pass
def checkHeaders(self, call):
    """Assert that the last SOAP request carried every header that
    setHeaders() attaches for the given API call.

    :param call: name of the API call that was just issued (e.g. 'create').
    """
    # Which calls are expected to carry each SOAP header; these sets mirror
    # the dispatch in setHeaders() and replace the original long chains of
    # `call == 'x' or call == 'y' ...` comparisons.
    truncation_calls = frozenset(('convertLead', 'create', 'merge', 'process',
                                  'undelete', 'update', 'upsert'))
    assignment_calls = frozenset(('create', 'merge', 'update', 'upsert'))
    call_option_calls = frozenset((
        'convertLead', 'create', 'delete', 'describeGlobal', 'describeLayout',
        'describeSObject', 'describeSObjects', 'describeTabs', 'getDeleted',
        'getServerTimestamp', 'getUpdated', 'getUserInfo', 'login', 'merge',
        'process', 'query', 'queryAll', 'queryMore', 'resetPassword',
        'retrieve', 'search', 'setPassword', 'undelete', 'update', 'upsert'))
    email_calls = frozenset(('create', 'delete', 'resetPassword', 'update',
                             'upsert'))
    locale_calls = frozenset(('describeSObject', 'describeSObjects'))
    mru_calls = frozenset(('create', 'merge', 'query', 'retrieve', 'update',
                           'upsert'))
    package_version_calls = frozenset((
        'convertLead', 'create', 'delete', 'describeGlobal', 'describeLayout',
        'describeSObject', 'describeSObjects', 'describeTabs', 'merge',
        'process', 'query', 'retrieve', 'search', 'undelete', 'update',
        'upsert'))
    query_option_calls = frozenset(('query', 'queryAll', 'queryMore',
                                    'retrieve'))

    result = self.h.getLastRequest()
    # Every call except login() rides on an established session.
    if call != 'login':
        self.assertTrue(result.find(SESSION_HEADER_STRING) != -1)
    if call in truncation_calls:
        self.assertTrue(result.find(ALLOW_FIELD_TRUNCATION_HEADER_STRING) != -1)
    if call in assignment_calls:
        self.assertTrue(result.find(ASSIGNMENT_RULE_HEADER_STRING) != -1)
    # CallOptions will only ever be set by the SforcePartnerClient
    if self.wsdlFormat == 'Partner' and call in call_option_calls:
        self.assertTrue(result.find(CALL_OPTIONS_STRING) != -1)
    if call in email_calls:
        self.assertTrue(result.find(EMAIL_HEADER_STRING) != -1)
    if call in locale_calls:
        self.assertTrue(result.find(LOCALE_OPTIONS_STRING) != -1)
    if call == 'login':
        self.assertTrue(result.find(LOGIN_SCOPE_HEADER_STRING) != -1)
    if call in mru_calls:
        self.assertTrue(result.find(MRU_HEADER_STRING) != -1)
    if call in package_version_calls:
        self.assertTrue(result.find(PACKAGE_VERSION_HEADER_STRING) != -1)
    if call in query_option_calls:
        self.assertTrue(result.find(QUERY_OPTIONS_STRING) != -1)
    if call == 'delete':
        self.assertTrue(result.find(USER_TERRITORY_DELETE_HEADER_STRING) != -1)
def createLead(self, returnLead=False):
    """Create one test Lead; optionally return the sObject with the result."""
    lead = self.h.generateObject('Lead')
    lead.FirstName = u'Joë'
    lead.LastName = u'Möke'
    lead.Company = u'你好公司'
    lead.Email = 'joe@example.com'
    if not returnLead:
        return self.h.create(lead)
    result = self.h.create(lead)
    lead.Id = result.id
    return (result, lead)
def createLeads(self, returnLeads=False):
    """Create two test Leads; optionally return the sObjects with the result."""
    first = self.h.generateObject('Lead')
    first.FirstName = u'Joë'
    first.LastName = u'Möke'
    first.Company = u'你好公司'
    first.Email = 'joe@example.com'
    second = self.h.generateObject('Lead')
    second.FirstName = u'Böb'
    second.LastName = u'Möke'
    second.Company = u'你好公司'
    second.Email = 'bob@example.com'
    if not returnLeads:
        return self.h.create((first, second))
    result = self.h.create((first, second))
    first.Id = result[0].id
    second.Id = result[1].id
    return (result, (first, second))
# Set SOAP headers
def setHeaders(self, call):
    """Attach every SOAP header applicable to the given API call.

    checkHeaders() later verifies that each attached header actually
    appeared in the outgoing SOAP envelope.

    :param call: name of the API call about to be issued (e.g. 'create').
    """
    # no need to manually attach session ID, will happen after login automatically
    # Which calls get each header; set membership replaces the original long
    # chains of `call == 'x' or call == 'y' ...` comparisons.
    truncation_calls = frozenset(('convertLead', 'create', 'merge', 'process',
                                  'undelete', 'update', 'upsert'))
    assignment_calls = frozenset(('create', 'merge', 'update', 'upsert'))
    call_option_calls = frozenset((
        'convertLead', 'create', 'delete', 'describeGlobal', 'describeLayout',
        'describeSObject', 'describeSObjects', 'describeTabs', 'getDeleted',
        'getServerTimestamp', 'getUpdated', 'getUserInfo', 'login', 'merge',
        'process', 'query', 'queryAll', 'queryMore', 'resetPassword',
        'retrieve', 'search', 'setPassword', 'undelete', 'update', 'upsert'))
    email_calls = frozenset(('create', 'delete', 'resetPassword', 'update',
                             'upsert'))
    locale_calls = frozenset(('describeSObject', 'describeSObjects'))
    mru_calls = frozenset(('create', 'merge', 'query', 'retrieve', 'update',
                           'upsert'))
    package_version_calls = frozenset((
        'convertLead', 'create', 'delete', 'describeGlobal', 'describeLayout',
        'describeSObject', 'describeSObjects', 'describeTabs', 'merge',
        'process', 'query', 'retrieve', 'search', 'undelete', 'update',
        'upsert'))
    query_option_calls = frozenset(('query', 'queryAll', 'queryMore',
                                    'retrieve'))

    if call in truncation_calls:
        self.setAllowFieldTruncationHeader()
    if call in assignment_calls:
        self.setAssignmentRuleHeader()
    # CallOptions will only ever be set by the SforcePartnerClient
    if self.wsdlFormat == 'Partner' and call in call_option_calls:
        self.setCallOptions()
    if call in email_calls:
        self.setEmailHeader()
    if call in locale_calls:
        self.setLocaleOptions()
    if call == 'login':
        self.setLoginScopeHeader()
    if call in mru_calls:
        self.setMruHeader()
    if call in package_version_calls:
        self.setPackageVersionHeader()
    if call in query_option_calls:
        self.setQueryOptions()
    if call == 'delete':
        self.setUserTerritoryDeleteHeader()
def setAllowFieldTruncationHeader(self):
    """Attach an AllowFieldTruncationHeader with truncation disabled."""
    header = self.h.generateHeader('AllowFieldTruncationHeader')  # stray ';' removed
    header.allowFieldTruncation = False
    self.h.setAllowFieldTruncationHeader(header)
def setAssignmentRuleHeader(self):
    """Attach an AssignmentRuleHeader that uses the org's default rule."""
    header = self.h.generateHeader('AssignmentRuleHeader')  # stray ';' removed
    header.useDefaultRule = True
    self.h.setAssignmentRuleHeader(header)
def setCallOptions(self):
    """Attach a CallOptions header with placeholder client/namespace values.

    Note that this header only applies to the Partner WSDL, so this is a
    no-op for other WSDL formats (the dead ``else: pass`` branch was removed).
    """
    if self.wsdlFormat == 'Partner':
        header = self.h.generateHeader('CallOptions')
        header.client = '*MY CLIENT STRING*'
        header.defaultNamespace = '*DEVELOPER NAMESPACE PREFIX*'
        self.h.setCallOptions(header)
def setEmailHeader(self):
    """Attach an EmailHeader enabling all trigger-email options."""
    header = self.h.generateHeader('EmailHeader')  # stray ';' removed
    header.triggerAutoResponseEmail = True
    header.triggerOtherEmail = True
    header.triggerUserEmail = True
    self.h.setEmailHeader(header)
def setLocaleOptions(self):
    """Attach LocaleOptions requesting en_US labels."""
    header = self.h.generateHeader('LocaleOptions')  # stray ';' removed
    header.language = 'en_US'
    self.h.setLocaleOptions(header)
def setLoginScopeHeader(self):
    """Attach a LoginScopeHeader with a placeholder organization id."""
    header = self.h.generateHeader('LoginScopeHeader')  # stray ';' removed
    header.organizationId = '00D000xxxxxxxxx'
    #header.portalId = '00D000xxxxxxxxx'
    self.h.setLoginScopeHeader(header)
def setMruHeader(self):
    """Attach an MruHeader so calls update the Most Recently Used list."""
    header = self.h.generateHeader('MruHeader')  # stray ';' removed
    header.updateMru = True
    self.h.setMruHeader(header)
def setPackageVersionHeader(self):
    """Attach a PackageVersionHeader pinning package SFGA to version 1.2."""
    header = self.h.generateHeader('PackageVersionHeader')  # stray ';' removed
    # Same mapping the original built key-by-key, as a single literal.
    header.packageVersions = {
        'majorNumber': 1,
        'minorNumber': 2,
        'namespace': 'SFGA',
    }
    self.h.setPackageVersionHeader(header)
def setQueryOptions(self):
    """Attach QueryOptions with a batch size of 200 records."""
    header = self.h.generateHeader('QueryOptions')  # stray ';' removed
    header.batchSize = 200
    self.h.setQueryOptions(header)
def setSessionHeader(self):
    """Attach a SessionHeader with a placeholder piggybacked session id."""
    header = self.h.generateHeader('SessionHeader')  # stray ';' removed
    header.sessionId = '*PIGGYBACK SESSION ID HERE*'
    self.h.setSessionHeader(header)
def setUserTerritoryDeleteHeader(self):
    """Attach a UserTerritoryDeleteHeader with a placeholder transfer user."""
    header = self.h.generateHeader('UserTerritoryDeleteHeader')  # stray ';' removed
    header.transferToUserId = '005000xxxxxxxxx'
    self.h.setUserTerritoryDeleteHeader(header)
# Core calls
def testConvertLead(self):
result = self.createLead()
self.setHeaders('convertLead')
leadConvert = self.h.generateObject('LeadConvert')
leadConvert.leadId = result.id
leadConvert.convertedStatus = 'Qualified'
result = self.h.convertLead(leadConvert)
self.assertTrue(result.success)
self.assertTrue(result.accountId[0:3] == '001')
self.assertTrue(result.contactId[0:3] == '003')
self.assertTrue(result.leadId[0:3] == '00Q')
self.assertTrue(result.opportunityId[0:3] == '006')
self.checkHeaders('convertLead')
def testCreateCustomObject(self):
case = self.h.generateObject('Case')
result = self.h.create(case)
self.assertTrue(result.success)
self.assertTrue(result.id[0:3] == '500')
caseNote = self.h.generateObject('Case_Note__c')
caseNote.case__c = result.id
caseNote.subject__c = 'my subject'
caseNote.description__c = 'description here'
result = self.h.create(caseNote)
self.assertTrue(result.success)
self.assertTrue(result.id[0:3] == 'a0E')
def testCreateLead(self):
self.setHeaders('create')
result = self.createLead()
self.assertTrue(result.success)
self.assertTrue(result.id[0:3] == '00Q')
self.checkHeaders('create')
def testCreateLeads(self):
result = self.createLeads()
self.assertTrue(result[0].success)
self.assertTrue(result[0].id[0:3] == '00Q')
self.assertTrue(result[1].success)
self.assertTrue(result[1].id[0:3] == '00Q')
def testDeleteLead(self):
self.setHeaders('delete')
(result, lead) = self.createLead(True)
result = self.h.delete(result.id)
self.assertTrue(result.success)
self.assertEqual(result.id, lead.Id)
self.checkHeaders('delete')
def testDeleteLeads(self):
(result, (lead, lead2)) = self.createLeads(True)
result = self.h.delete((result[0].id, result[1].id))
self.assertTrue(result[0].success)
self.assertEqual(result[0].id, lead.Id)
self.assertTrue(result[1].success)
self.assertEqual(result[1].id, lead2.Id)
def testEmptyRecycleBinOneObject(self):
(result, lead) = self.createLead(True)
result = self.h.delete(result.id)
result = self.h.emptyRecycleBin(result.id)
self.assertTrue(result.success)
self.assertEqual(result.id, lead.Id)
def testEmptyRecycleBinTwoObjects(self):
(result, (lead, lead2)) = self.createLeads(True)
result = self.h.delete((result[0].id, result[1].id))
result = self.h.emptyRecycleBin((result[0].id, result[1].id))
self.assertTrue(result[0].success)
self.assertEqual(result[0].id, lead.Id)
self.assertTrue(result[1].success)
self.assertEqual(result[1].id, lead2.Id)
def testGetDeleted(self):
    """getDeleted() should report the lead deleted inside the query window."""
    self.setHeaders('getDeleted')
    start = datetime.datetime.utcnow()
    # End the window one day in the future instead of the original
    # hard-coded '2019-01-01T23:01:01Z', which silently became an invalid
    # (past) end date once that calendar date passed.
    end = start + datetime.timedelta(days=1)
    result = self.createLead()
    result = self.h.delete(result.id)
    result = self.h.getDeleted('Lead', start.isoformat(), end.isoformat())
    # This will nearly always be one single result
    self.assertTrue(len(result.deletedRecords) > 0)
    for record in result.deletedRecords:
        self.assertTrue(isinstance(record.deletedDate, datetime.datetime))
        self.assertEqual(len(record.id), 18)
    self.checkHeaders('getDeleted')
def testGetUpdated(self):
    """getUpdated() should report the lead updated inside the query window."""
    self.setHeaders('getUpdated')
    start = datetime.datetime.utcnow()
    # End the window one day in the future instead of the original
    # hard-coded '2019-01-01T23:01:01Z', which silently became an invalid
    # (past) end date once that calendar date passed.
    end = start + datetime.timedelta(days=1)
    (result, lead) = self.createLead(True)
    result = self.h.update(lead)
    result = self.h.getUpdated('Lead', start.isoformat(), end.isoformat())
    # This will nearly always be one single result
    self.assertTrue(len(result.ids) > 0)
    for record_id in result.ids:  # renamed: 'id' shadowed the builtin
        self.assertEqual(len(record_id), 18)
    self.checkHeaders('getUpdated')
def testInvalidateSession(self):
result = self.h.invalidateSessions(self.h.getSessionId())
self.assertTrue(result.success)
def testInvalidateSessions(self):
result = self.h.invalidateSessions((self.h.getSessionId(), 'foo'))
self.assertTrue(result[0].success)
self.assertFalse(result[1].success)
def testLogin(self):
# This is really only here to test the login() SOAP headers
self.setHeaders('login')
try:
self.h.login('foo', 'bar', 'baz')
except WebFault:
pass
self.checkHeaders('login')
def testLogout(self):
result = self.h.logout()
self.assertEqual(result, None)
def testMerge(self):
self.setHeaders('merge')
(result, (lead, lead2)) = self.createLeads(True)
mergeRequest = self.h.generateObject('MergeRequest')
mergeRequest.masterRecord = lead
mergeRequest.recordToMergeIds = result[1].id
result = self.h.merge(mergeRequest)
self.assertTrue(result.success)
self.assertEqual(result.id, lead.Id)
self.assertEqual(result.mergedRecordIds[0], lead2.Id)
self.checkHeaders('merge')
def testProcessSubmitRequestMalformedId(self):
self.setHeaders('process')
processRequest = self.h.generateObject('ProcessSubmitRequest')
processRequest.objectId = '*ID OF OBJECT PROCESS REQUEST AFFECTS*'
processRequest.comments = 'This is what I think.'
result = self.h.process(processRequest)
self.assertFalse(result.success)
self.assertEqual(result.errors[0].statusCode, 'MALFORMED_ID')
self.checkHeaders('process')
def testProcessSubmitRequestInvalidId(self):
processRequest = self.h.generateObject('ProcessSubmitRequest')
processRequest.objectId = '00Q000xxxxxxxxx'
processRequest.comments = 'This is what I think.'
result = self.h.process(processRequest)
self.assertFalse(result.success)
self.assertEqual(result.errors[0].statusCode, 'INSUFFICIENT_ACCESS_ON_CROSS_REFERENCE_ENTITY')
def testProcessWorkitemRequestMalformedId(self):
processRequest = self.h.generateObject('ProcessWorkitemRequest')
processRequest.action = 'Approve'
processRequest.workitemId = '*ID OF OBJECT PROCESS REQUEST AFFECTS*'
processRequest.comments = 'I approved this request.'
result = self.h.process(processRequest)
self.assertFalse(result.success)
self.assertEqual(result.errors[0].statusCode, 'MALFORMED_ID')
def testProcessWorkitemRequestInvalidId(self):
processRequest = self.h.generateObject('ProcessWorkitemRequest')
processRequest.action = 'Approve'
processRequest.workitemId = '00Q000xxxxxxxxx'
processRequest.comments = 'I approved this request.'
result = self.h.process(processRequest)
self.assertFalse(result.success)
self.assertEqual(result.errors[0].statusCode, 'INVALID_CROSS_REFERENCE_KEY')
# Note that Lead.LastName, Lead.Company, Account.Name can never equal NULL, they are required both
# via API and UI
#
# Also, SOQL does not return fields that are NULL
def testQueryNoResults(self):
self.setHeaders('query')
result = self.h.query('SELECT FirstName, LastName FROM Lead LIMIT 0')
self.assertFalse(hasattr(result, 'records'))
self.assertEqual(result.size, 0)
self.checkHeaders('query')
def testQueryOneResultWithFirstName(self):
result = self.h.query('SELECT FirstName, LastName FROM Lead WHERE FirstName != NULL LIMIT 1')
self.assertEqual(len(result.records), 1)
self.assertEqual(result.size, 1)
self.assertTrue(hasattr(result.records[0], 'FirstName'))
self.assertTrue(hasattr(result.records[0], 'LastName'))
self.assertFalse(isinstance(result.records[0].FirstName, list))
self.assertFalse(isinstance(result.records[0].LastName, list))
'''
See explanation below.
def testQueryOneResultWithoutFirstName(self):
result = self.h.query('SELECT FirstName, LastName FROM Lead WHERE FirstName = NULL LIMIT 1')
self.assertEqual(len(result.records), 1)
self.assertEqual(result.size, 1)
self.assertFalse(hasattr(result.records[0], 'FirstName'))
self.assertTrue(hasattr(result.records[0], 'LastName'))
self.assertFalse(isinstance(result.records[0].FirstName, list))
self.assertFalse(isinstance(result.records[0].LastName, list))
'''
def testQueryTwoResults(self):
result = self.h.query('SELECT FirstName, LastName FROM Lead WHERE FirstName != NULL LIMIT 2')
self.assertTrue(len(result.records) > 1)
self.assertTrue(result.size > 1)
for record in result.records:
self.assertTrue(hasattr(record, 'FirstName'))
self.assertTrue(hasattr(record, 'LastName'))
self.assertFalse(isinstance(record.FirstName, list))
self.assertFalse(isinstance(record.LastName, list))
def testQueryAllNoResults(self):
self.setHeaders('queryAll')
result = self.h.queryAll('SELECT Account.Name, FirstName, LastName FROM Contact LIMIT 0')
self.assertFalse(hasattr(result, 'records'))
self.assertEqual(result.size, 0)
self.checkHeaders('queryAll')
def testQueryAllOneResultWithFirstName(self):
result = self.h.queryAll('SELECT Account.Name, FirstName, LastName FROM Contact WHERE FirstName != NULL LIMIT 1')
self.assertEqual(len(result.records), 1)
self.assertEqual(result.size, 1)
self.assertTrue(hasattr(result.records[0], 'FirstName'))
self.assertTrue(hasattr(result.records[0], 'LastName'))
self.assertTrue(hasattr(result.records[0].Account, 'Name'))
self.assertFalse(isinstance(result.records[0].FirstName, list))
self.assertFalse(isinstance(result.records[0].LastName, list))
self.assertFalse(isinstance(result.records[0].Account.Name, list))
'''
There's a bug with Salesforce where the query in this test where the Partner WSDL includes
FirstName in the SOAP response, but the Enterprise WSDL does not.
Will report a bug once self-service portal is back up.
Partner:
"<?xml version="1.0" encoding="UTF-8"?><soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns="urn:partner.soap.sforce.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:sf="urn:sobject.partner.soap.sforce.com"><soapenv:Body><queryAllResponse><result xsi:type="QueryResult"><done>true</done><queryLocator xsi:nil="true"/><records xsi:type="sf:sObject"><sf:type>Contact</sf:type><sf:Id xsi:nil="true"/><sf:Account xsi:type="sf:sObject"><sf:type>Account</sf:type><sf:Id xsi:nil="true"/><sf:Name>Unknown</sf:Name></sf:Account><sf:FirstName xsi:nil="true"/><sf:LastName>Administrator</sf:LastName></records><size>1</size></result></queryAllResponse></soapenv:Body></soapenv:Envelope>"
Enterprise:
"<?xml version="1.0" encoding="UTF-8"?><soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns="urn:enterprise.soap.sforce.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:sf="urn:sobject.enterprise.soap.sforce.com"><soapenv:Body><queryAllResponse><result><done>true</done><queryLocator xsi:nil="true"/><records xsi:type="sf:Contact"><sf:Account xsi:type="sf:Account"><sf:Name>Unknown</sf:Name></sf:Account><sf:LastName>Administrator</sf:LastName></records><size>1</size></result></queryAllResponse></soapenv:Body></soapenv:Envelope>"
def testQueryAllOneResultWithoutFirstName(self):
result = self.h.queryAll('SELECT Account.Name, FirstName, LastName FROM Contact WHERE FirstName = NULL LIMIT 1')
print result
self.assertEqual(len(result.records), 1)
self.assertEqual(result.size, 1)
self.assertFalse(hasattr(result.records[0], 'FirstName'))
self.assertTrue(hasattr(result.records[0], 'LastName'))
self.assertTrue(hasattr(result.records[0].Account, 'Name'))
self.assertFalse(isinstance(result.records[0].FirstName, list))
self.assertFalse(isinstance(result.records[0].LastName, list))
self.assertFalse(isinstance(result.records[0].Account.Name, list))
'''
def testQueryAllTwoResults(self):
result = self.h.queryAll('SELECT Account.Name, FirstName, LastName FROM Contact WHERE FirstName != NULL LIMIT 2')
self.assertTrue(len(result.records) > 1)
self.assertTrue(result.size > 1)
for record in result.records:
self.assertTrue(hasattr(record, 'FirstName'))
self.assertTrue(hasattr(record, 'LastName'))
self.assertTrue(hasattr(record.Account, 'Name'))
self.assertFalse(isinstance(record.FirstName, list))
self.assertFalse(isinstance(record.LastName, list))
self.assertFalse(isinstance(record.Account.Name, list))
def testQueryMore(self):
    """Page through all leads, 200 per batch, until the query is done."""
    self.setHeaders('queryMore')
    result = self.h.queryAll('SELECT FirstName, LastName FROM Lead')
    # PEP 8: 'while not x' instead of '== False'; 'is not None' instead
    # of '!= None'.
    while not result.done:
        # Each full batch must come with a locator for the next page.
        self.assertTrue(result.queryLocator is not None)
        self.assertEqual(len(result.records), 200)
        result = self.h.queryMore(result.queryLocator)
    self.assertTrue(len(result.records) > 1)
    self.assertTrue(len(result.records) <= 200)
    self.assertTrue(result.done)
    self.assertEqual(result.queryLocator, None)
    self.checkHeaders('queryMore')
def testRetrievePassingList(self):
self.setHeaders('retrieve')
(result, lead) = self.createLead(True)
result = self.h.retrieve('FirstName, LastName, Company, Email', 'Lead', [result.id])
self.assertEqual(result.Id, lead.Id)
self.assertEqual(result.type, 'Lead')
self.assertEqual(result.FirstName, lead.FirstName)
self.assertEqual(result.LastName, lead.LastName)
self.assertEqual(result.Company, lead.Company)
self.assertEqual(result.Email, lead.Email)
self.checkHeaders('retrieve')
def testRetrievePassingString(self):
(result, lead) = self.createLead(True)
result = self.h.retrieve('FirstName, LastName, Company, Email', 'Lead', result.id)
self.assertEqual(result.Id, lead.Id)
self.assertEqual(result.type, 'Lead')
self.assertEqual(result.FirstName, lead.FirstName)
self.assertEqual(result.LastName, lead.LastName)
self.assertEqual(result.Company, lead.Company)
self.assertEqual(result.Email, lead.Email)
def testRetrievePassingTuple(self):
(result, lead) = self.createLead(True)
result = self.h.retrieve('FirstName, LastName, Company, Email', 'Lead', (result.id))
self.assertEqual(result.Id, lead.Id)
self.assertEqual(result.type, 'Lead')
self.assertEqual(result.FirstName, lead.FirstName)
self.assertEqual(result.LastName, lead.LastName)
self.assertEqual(result.Company, lead.Company)
self.assertEqual(result.Email, lead.Email)
def testRetrievePassingListOfTwoIds(self):
self.setHeaders('retrieve')
(result, lead) = self.createLead(True)
result = self.h.retrieve('FirstName, LastName, Company, Email', 'Lead', [result.id, result.id])
self.assertEqual(result[0].Id, lead.Id)
self.assertEqual(result[0].type, 'Lead')
self.assertEqual(result[0].FirstName, lead.FirstName)
self.assertEqual(result[0].LastName, lead.LastName)
self.assertEqual(result[0].Company, lead.Company)
self.assertEqual(result[0].Email, lead.Email)
self.assertEqual(result[1].Id, lead.Id)
self.assertEqual(result[1].type, 'Lead')
self.assertEqual(result[1].FirstName, lead.FirstName)
self.assertEqual(result[1].LastName, lead.LastName)
self.assertEqual(result[1].Company, lead.Company)
self.assertEqual(result[1].Email, lead.Email)
self.checkHeaders('retrieve')
def testSearchNoResults(self):
self.setHeaders('search')
result = self.h.search('FIND {asdfasdffdsaasdl;fjkwelhnfd} IN Name Fields RETURNING Lead(Name, Phone)')
self.assertEqual(len(result.searchRecords), 0)
self.checkHeaders('search')
def testUndeleteLead(self):
self.setHeaders('undelete')
(result, lead) = self.createLead(True)
result = self.h.delete(result.id)
result = self.h.undelete(result.id)
self.assertTrue(result.success)
self.assertEqual(result.id, lead.Id)
self.checkHeaders('undelete')
def testUndeleteLeads(self):
(result, (lead, lead2)) = self.createLeads(True)
result = self.h.delete((result[0].id, result[1].id))
result = self.h.undelete((result[0].id, result[1].id))
self.assertTrue(result[0].success)
self.assertEqual(result[0].id, lead.Id)
self.assertTrue(result[1].success)
self.assertEqual(result[1].id, lead2.Id)
def testUpdateNoFieldsToNull(self):
self.setHeaders('update')
(result, lead) = self.createLead(True)
lead.fieldsToNull = ()
result = self.h.update(lead)
self.assertTrue(result.success)
self.assertEqual(result.id, lead.Id)
self.checkHeaders('update')
def testUpsertCreate(self):
self.setHeaders('upsert')
lead = self.h.generateObject('Lead')
lead.FirstName = u'Joë'
lead.LastName = u'Möke'
lead.Company = u'你好公司'
lead.Email = 'joe@example.com'
result = self.h.upsert('Id', lead)
self.assertTrue(result.created)
self.assertTrue(result.id[0:3] == '00Q')
self.assertTrue(result.success)
self.checkHeaders('upsert')
def testUpsertUpdate(self):
(result, lead) = self.createLead(True)
result = self.h.upsert('Id', lead)
self.assertFalse(result.created)
self.assertEqual(result.id, lead.Id)
self.assertTrue(result.success)
# Describe calls
def testDescribeGlobal(self):
self.setHeaders('describeGlobal')
result = self.h.describeGlobal()
self.assertTrue(hasattr(result, 'encoding'))
self.assertTrue(hasattr(result, 'maxBatchSize'))
foundAccount = False
for object in result.sobjects:
if object.name == 'Account':
foundAccount = True
self.assertTrue(foundAccount)
self.checkHeaders('describeGlobal')
def testDescribeLayout(self):
self.setHeaders('describeLayout')
result = self.h.describeLayout('Lead', '012000000000000AAA') # Master Record Type
self.assertEqual(result[1][0].recordTypeId, '012000000000000AAA')
self.checkHeaders('describeLayout')
def testDescribeSObject(self):
self.setHeaders('describeSObject')
result = self.h.describeSObject('Lead')
self.assertTrue(hasattr(result, 'activateable'))
self.assertTrue(hasattr(result, 'childRelationships'))
self.assertEqual(result.keyPrefix, '00Q')
self.assertEqual(result.name, 'Lead')
self.checkHeaders('describeSObject')
def testDescribeSObjects(self):
self.setHeaders('describeSObjects')
result = self.h.describeSObjects(('Contact', 'Account'))
self.assertTrue(hasattr(result[0], 'activateable'))
self.assertTrue(hasattr(result[0], 'childRelationships'))
self.assertEqual(result[0].keyPrefix, '003')
self.assertEqual(result[0].name, 'Contact')
self.assertTrue(hasattr(result[1], 'activateable'))
self.assertTrue(hasattr(result[1], 'childRelationships'))
self.assertEqual(result[1].keyPrefix, '001')
self.assertEqual(result[1].name, 'Account')
self.checkHeaders('describeSObjects')
def testDescribeTabs(self):
self.setHeaders('describeTabs')
result = self.h.describeTabs()
self.assertTrue(hasattr(result[0], 'tabs'))
self.checkHeaders('describeTabs')
# Utility calls
def testGetServerTimestamp(self):
self.setHeaders('getServerTimestamp')
result = self.h.getServerTimestamp()
self.assertTrue(isinstance(result.timestamp, datetime.datetime))
self.checkHeaders('getServerTimestamp')
def testGetUserInfo(self):
self.setHeaders('getUserInfo')
result = self.h.getUserInfo()
self.assertTrue(hasattr(result, 'userEmail'))
self.assertTrue(hasattr(result, 'userId'))
self.checkHeaders('getUserInfo')
def testResetPassword(self):
self.setHeaders('resetPassword')
try:
self.h.resetPassword('005000xxxxxxxxx')
self.fail('WebFault not thrown')
except WebFault:
pass
self.checkHeaders('resetPassword')
def testSendSingleEmailFail(self):
self.setHeaders('sendEmail')
email = self.h.generateObject('SingleEmailMessage')
email.toAddresses = 'joeexample.com'
email.subject = 'This is my subject.'
email.plainTextBody = 'This is the plain-text body of my email.'
result = self.h.sendEmail([email])
self.assertFalse(result.success)
self.assertEqual(result.errors[0].statusCode, 'INVALID_EMAIL_ADDRESS')
self.checkHeaders('sendEmail')
def testSendSingleEmailPass(self):
email = self.h.generateObject('SingleEmailMessage')
email.toAddresses = 'joe@example.com'
email.subject = 'This is my subject.'
email.plainTextBody = 'This is the plain-text body of my email.'
result = self.h.sendEmail([email])
self.assertTrue(result.success)
def testSendMassEmailFail(self):
email = self.h.generateObject('MassEmailMessage')
email.targetObjectIds = (('*LEAD OR CONTACT ID TO EMAIL*', '*ANOTHER LEAD OR CONTACT TO EMAIL*'))
email.templateId = '*EMAIL TEMPLATE ID TO USE*'
result = self.h.sendEmail([email])
self.assertFalse(result.success)
self.assertEqual(result.errors[0].statusCode, 'INVALID_ID_FIELD')
# To make these tests as portable as possible, we won't depend on a particular templateId
# to test to make sure our mass emails succeed. From the failure message we can gather that
# SFDC is successfully receiving our SOAP message, and reasonably infer that our code works.
def testSetPassword(self):
self.setHeaders('setPassword')
try:
self.h.setPassword('*USER ID HERE*', '*NEW PASSWORD HERE*')
self.fail('WebFault not thrown')
except WebFault:
pass
self.checkHeaders('setPassword')
# Toolkit-Specific Utility Calls:
def testGenerateHeader(self):
header = self.h.generateHeader('SessionHeader')
self.assertEqual(header.sessionId, None)
def testGenerateObject(self):
account = self.h.generateObject('Account')
self.assertEqual(account.fieldsToNull, [])
self.assertEqual(account.Id, None)
self.assertEqual(account.type, 'Account')
def testGetLastRequest(self):
self.h.getServerTimestamp()
result = self.h.getLastRequest()
self.assertTrue(result.find(':getServerTimestamp/>') != -1)
def testGetLastResponse(self):
self.h.getServerTimestamp()
result = self.h.getLastResponse()
self.assertTrue(result.find('<getServerTimestampResponse>') != -1)
# SOAP Headers tested as part of the method calls
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
ehashman/oh-mainline | vendor/packages/Django/tests/regressiontests/admin_util/models.py | 115 | 1303 | from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class Article(models.Model):
    """
    A simple Article model for testing
    """
    # Field declaration order is significant to Django, so it is kept as-is.
    site = models.ForeignKey('sites.Site', related_name="admin_articles")
    title = models.CharField(max_length=100)
    # Second title field exposed under a custom verbose_name.
    title2 = models.CharField(max_length=100, verbose_name="another name")
    created = models.DateTimeField()

    def test_from_model(self):
        # Plain model method returning a constant; presumably exercised by
        # the admin_util regression tests for callable lookups.
        return "nothing"

    def test_from_model_with_override(self):
        # Same constant, but with a short_description override attached below.
        return "nothing"
    test_from_model_with_override.short_description = "not What you Expect"
@python_2_unicode_compatible
class Count(models.Model):
    # Small positive integer with an optional self-referential parent link.
    num = models.PositiveSmallIntegerField()
    parent = models.ForeignKey('self', null=True)

    def __str__(self):
        # six.text_type keeps the result a unicode string on both Py2 and Py3.
        return six.text_type(self.num)
class Event(models.Model):
    # Timestamp is filled in automatically on first save (auto_now_add).
    date = models.DateTimeField(auto_now_add=True)
class Location(models.Model):
    # One-to-one link to Event carrying a custom verbose_name.
    event = models.OneToOneField(Event, verbose_name='awesome event')
class Guest(models.Model):
    event = models.OneToOneField(Event)
    name = models.CharField(max_length=255)

    class Meta:
        # Custom model-level verbose_name (as opposed to Location, which
        # customizes the field's verbose_name instead).
        verbose_name = "awesome guest"
class EventGuide(models.Model):
    # DO_NOTHING: deleting the Event leaves this FK pointing at a gone row.
    event = models.ForeignKey(Event, on_delete=models.DO_NOTHING)
| agpl-3.0 |
DarkArtek/FFXIVITAFC | allauth/socialaccount/providers/persona/provider.py | 10 | 1367 | import json
from django.template.loader import render_to_string
from django.utils.html import escapejs
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.base import Provider, ProviderAccount
class PersonaAccount(ProviderAccount):
    """Social account wrapper for Mozilla Persona logins."""

    def to_str(self):
        # Persona exposes no display name; fall back to the unique id.
        return self.account.uid
class PersonaProvider(Provider):
    """Authentication provider for Mozilla Persona (BrowserID)."""

    id = 'persona'
    name = 'Persona'
    account_class = PersonaAccount

    def media_js(self, request):
        """Render the JS bootstrap snippet for the Persona login widget."""
        request_parameters = self.get_settings().get('REQUEST_PARAMETERS', {})
        context = {'request_parameters': json.dumps(request_parameters)}
        return render_to_string('persona/auth.html', context, request=request)

    def get_login_url(self, request, **kwargs):
        """Build a javascript: URL that triggers the Persona login flow."""
        next_value = kwargs.get('next') or ''
        process_value = kwargs.get('process') or 'login'
        next_url = "'%s'" % escapejs(next_value)
        process = "'%s'" % escapejs(process_value)
        return 'javascript:allauth.persona.login(%s, %s)' % (next_url, process)

    def extract_uid(self, data):
        """The verified e-mail address doubles as the unique account id."""
        return data['email']

    def extract_common_fields(self, data):
        """Only the e-mail address is available from a Persona assertion."""
        return {'email': data['email']}

    def extract_email_addresses(self, data):
        """Persona e-mail addresses are verified by design."""
        return [EmailAddress(email=data['email'],
                             verified=True,
                             primary=True)]
provider_classes = [PersonaProvider]
| unlicense |
mildass/tlsfuzzer | scripts/test-certificate-request.py | 1 | 12109 | # Author: Hubert Kario, (c) 2016
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Test with CertificateRequest"""
from __future__ import print_function
import traceback
import sys
import getopt
import re
from itertools import chain
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, \
CertificateGenerator, CertificateVerifyGenerator, \
AlertGenerator
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectCertificateRequest, \
ExpectApplicationData
from tlsfuzzer.utils.lists import natural_sort_keys
from tlsfuzzer.helpers import sig_algs_to_ids, RSA_SIG_ALL
from tlslite.extensions import SignatureAlgorithmsExtension, \
SignatureAlgorithmsCertExtension
from tlslite.constants import CipherSuite, AlertDescription, \
HashAlgorithm, SignatureAlgorithm, ExtensionType, SignatureScheme
from tlslite.utils.keyfactory import parsePEMKey
from tlslite.x509 import X509
from tlslite.x509certchain import X509CertChain
version = 2
def help_msg():
    """Print the command-line usage summary to stdout."""
    usage_lines = (
        "Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]",
        " -h hostname name of the host to run the test against",
        " localhost by default",
        " -p port port number to use for connection, 4433 by default",
        " probe-name if present, will run only the probes with given",
        " names and not all of them, e.g \"sanity\"",
        " -e probe-name exclude the probe from the list of the ones run",
        " may be specified multiple times",
        " -s sigalgs hash and signature algorithm pairs that the server",
        " is expected to support. \"sha512+rsa sha384+rsa ",
        " sha256+rsa sha224+rsa sha1+rsa\" by default",
        " -k keyfile file with private key of client",
        " -c certfile file with the certificate of client",
        " --help this message",
    )
    for usage_line in usage_lines:
        print(usage_line)
def main():
    """Check what signature algorithms the server advertises.

    Parses the command line, builds three conversations (sanity run with
    an empty client Certificate, a run with a real client certificate if
    one was provided, and a check of the signature algorithms advertised
    in CertificateRequest) and runs them against the server, exiting
    non-zero if any conversation failed.
    """
    hostname = "localhost"
    port = 4433
    run_exclude = set()
    cert = None
    private_key = None
    # Signature algorithms the server is expected to advertise in
    # CertificateRequest; can be overridden with the -s option.
    sigalgs = [SignatureScheme.rsa_pss_rsae_sha512,
               SignatureScheme.rsa_pss_pss_sha512,
               SignatureScheme.rsa_pss_rsae_sha384,
               SignatureScheme.rsa_pss_pss_sha384,
               SignatureScheme.rsa_pss_rsae_sha256,
               SignatureScheme.rsa_pss_pss_sha256,
               (HashAlgorithm.sha512, SignatureAlgorithm.rsa),
               (HashAlgorithm.sha384, SignatureAlgorithm.rsa),
               (HashAlgorithm.sha256, SignatureAlgorithm.rsa),
               (HashAlgorithm.sha224, SignatureAlgorithm.rsa),
               (HashAlgorithm.sha1, SignatureAlgorithm.rsa)]

    argv = sys.argv[1:]
    opts, args = getopt.getopt(argv, "h:p:e:s:k:c:", ["help"])
    for opt, arg in opts:
        if opt == '-h':
            # BUG FIX: this previously assigned to an unused local named
            # "host", so the -h option was silently ignored and the
            # connection always went to "localhost".
            hostname = arg
        elif opt == '-p':
            port = int(arg)
        elif opt == '-e':
            run_exclude.add(arg)
        elif opt == '--help':
            help_msg()
            sys.exit(0)
        elif opt == '-s':
            sigalgs = sig_algs_to_ids(arg)
        elif opt == '-k':
            # use a context manager so the file descriptor is not leaked
            with open(arg, 'rb') as key_file:
                text_key = key_file.read()
            if sys.version_info[0] >= 3:
                text_key = str(text_key, 'utf-8')
            private_key = parsePEMKey(text_key, private=True)
        elif opt == '-c':
            with open(arg, 'rb') as cert_file:
                text_cert = cert_file.read()
            if sys.version_info[0] >= 3:
                text_cert = str(text_cert, 'utf-8')
            cert = X509()
            cert.parse(text_cert)
        else:
            raise ValueError("Unknown option: {0}".format(opt))

    if args:
        run_only = set(args)
    else:
        run_only = None

    conversations = {}

    # sanity check - server must accept an empty client Certificate message
    conversation = Connect(hostname, port)
    node = conversation
    ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
               CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
    sigs = [SignatureScheme.rsa_pss_rsae_sha256,
            SignatureScheme.rsa_pss_rsae_sha384,
            SignatureScheme.rsa_pss_rsae_sha512,
            SignatureScheme.rsa_pss_pss_sha256,
            SignatureScheme.rsa_pss_pss_sha384,
            SignatureScheme.rsa_pss_pss_sha512,
            (HashAlgorithm.sha512, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha384, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha256, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha224, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha1, SignatureAlgorithm.rsa),
            (HashAlgorithm.md5, SignatureAlgorithm.rsa)]
    ext = {ExtensionType.signature_algorithms:
           SignatureAlgorithmsExtension().create(sigs),
           ExtensionType.signature_algorithms_cert:
           SignatureAlgorithmsCertExtension().create(RSA_SIG_ALL)}
    node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
    node = node.add_child(ExpectServerHello(version=(3, 3)))
    node = node.add_child(ExpectCertificate())
    node = node.add_child(ExpectCertificateRequest())
    node = node.add_child(ExpectServerHelloDone())
    node = node.add_child(CertificateGenerator())
    node = node.add_child(ClientKeyExchangeGenerator())
    node = node.add_child(ChangeCipherSpecGenerator())
    node = node.add_child(FinishedGenerator())
    node = node.add_child(ExpectChangeCipherSpec())
    node = node.add_child(ExpectFinished())
    node = node.add_child(ApplicationDataGenerator(b"GET / HTTP/1.0\n\n"))
    node = node.add_child(ExpectApplicationData())
    node = node.add_child(AlertGenerator(AlertDescription.close_notify))
    node = node.add_child(ExpectClose())
    node.next_sibling = ExpectAlert()
    node.next_sibling.add_child(ExpectClose())
    conversations["sanity"] = conversation

    if cert and private_key:
        # same handshake, but with a real client certificate and
        # CertificateVerify
        conversation = Connect(hostname, port)
        node = conversation
        ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
                   CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
        sigs = [SignatureScheme.rsa_pss_rsae_sha256,
                SignatureScheme.rsa_pss_rsae_sha384,
                SignatureScheme.rsa_pss_rsae_sha512,
                SignatureScheme.rsa_pss_pss_sha256,
                SignatureScheme.rsa_pss_pss_sha384,
                SignatureScheme.rsa_pss_pss_sha512,
                (HashAlgorithm.sha512, SignatureAlgorithm.rsa),
                (HashAlgorithm.sha384, SignatureAlgorithm.rsa),
                (HashAlgorithm.sha256, SignatureAlgorithm.rsa),
                (HashAlgorithm.sha224, SignatureAlgorithm.rsa),
                (HashAlgorithm.sha1, SignatureAlgorithm.rsa),
                (HashAlgorithm.md5, SignatureAlgorithm.rsa)]
        ext = {ExtensionType.signature_algorithms:
               SignatureAlgorithmsExtension().create(sigs),
               ExtensionType.signature_algorithms_cert:
               SignatureAlgorithmsCertExtension().create(RSA_SIG_ALL)}
        node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
        node = node.add_child(ExpectServerHello(version=(3, 3)))
        node = node.add_child(ExpectCertificate())
        node = node.add_child(ExpectCertificateRequest())
        node = node.add_child(ExpectServerHelloDone())
        node = node.add_child(CertificateGenerator(X509CertChain([cert])))
        node = node.add_child(ClientKeyExchangeGenerator())
        node = node.add_child(CertificateVerifyGenerator(private_key))
        node = node.add_child(ChangeCipherSpecGenerator())
        node = node.add_child(FinishedGenerator())
        node = node.add_child(ExpectChangeCipherSpec())
        node = node.add_child(ExpectFinished())
        node = node.add_child(ApplicationDataGenerator(b"GET / HTTP/1.0\n\n"))
        node = node.add_child(ExpectApplicationData())
        node = node.add_child(AlertGenerator(AlertDescription.close_notify))
        node = node.add_child(ExpectClose())
        node.next_sibling = ExpectAlert()
        node.next_sibling.add_child(ExpectClose())
        conversations["with certificate"] = conversation

    # verify the hashes advertised in CertificateRequest
    conversation = Connect(hostname, port)
    node = conversation
    ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
               CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
    sigs = [SignatureScheme.rsa_pss_rsae_sha256,
            SignatureScheme.rsa_pss_rsae_sha384,
            SignatureScheme.rsa_pss_rsae_sha512,
            SignatureScheme.rsa_pss_pss_sha256,
            SignatureScheme.rsa_pss_pss_sha384,
            SignatureScheme.rsa_pss_pss_sha512,
            (HashAlgorithm.sha512, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha384, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha256, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha224, SignatureAlgorithm.rsa),
            (HashAlgorithm.sha1, SignatureAlgorithm.rsa),
            (HashAlgorithm.md5, SignatureAlgorithm.rsa)]
    ext = {ExtensionType.signature_algorithms:
           SignatureAlgorithmsExtension().create(sigs),
           ExtensionType.signature_algorithms_cert:
           SignatureAlgorithmsCertExtension().create(RSA_SIG_ALL)}
    node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
    node = node.add_child(ExpectServerHello(version=(3, 3)))
    node = node.add_child(ExpectCertificate())
    # here the expectation carries the user-configurable sigalgs list
    node = node.add_child(ExpectCertificateRequest(sigalgs))
    node = node.add_child(ExpectServerHelloDone())
    node = node.add_child(CertificateGenerator())
    node = node.add_child(ClientKeyExchangeGenerator())
    node = node.add_child(ChangeCipherSpecGenerator())
    node = node.add_child(FinishedGenerator())
    node = node.add_child(ExpectChangeCipherSpec())
    node = node.add_child(ExpectFinished())
    node = node.add_child(ApplicationDataGenerator(b"GET / HTTP/1.0\n\n"))
    node = node.add_child(ExpectApplicationData())
    node = node.add_child(AlertGenerator(AlertDescription.close_notify))
    node = node.add_child(ExpectClose())
    node.next_sibling = ExpectAlert()
    node.next_sibling.add_child(ExpectClose())
    conversations["check sigalgs in cert request"] = conversation

    # run the conversations
    good = 0
    bad = 0
    failed = []
    # make sure that the sanity test is run first and last
    # to verify that the server was running and kept running throughout
    sanity_test = ('sanity', conversations['sanity'])
    ordered_tests = chain([sanity_test],
                          filter(lambda x: x[0] != 'sanity',
                                 conversations.items()),
                          [sanity_test])

    for c_name, c_test in ordered_tests:
        if run_only and c_name not in run_only or c_name in run_exclude:
            continue
        print("{0} ...".format(c_name))
        runner = Runner(c_test)
        res = True
        try:
            runner.run()
        except Exception:
            # narrow from bare "except:" so KeyboardInterrupt/SystemExit
            # still abort the whole script instead of being counted as a
            # test failure
            print("Error while processing")
            print(traceback.format_exc())
            res = False
        if res:
            good += 1
            print("OK\n")
        else:
            bad += 1
            failed.append(c_name)

    print("Test to verify if server accepts empty certificate messages and")
    # typo fix: "algotithms" -> "algorithms"
    print("advertises only expected signature algorithms in Certificate")
    print("Request message\n")
    print("version: {0}\n".format(version))
    print("Test end")
    print("successful: {0}".format(good))
    print("failed: {0}".format(bad))
    failed_sorted = sorted(failed, key=natural_sort_keys)
    print(" {0}".format('\n '.join(repr(i) for i in failed_sorted)))

    if bad > 0:
        sys.exit(1)
if __name__ == "__main__":
main()
| gpl-2.0 |
kasioumis/invenio | invenio/legacy/websubmit/functions/Mail_Approval_Request_to_Referee.py | 13 | 20251 | # This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Mail_Approval_Request_to_Referee: A function to send an email to the referee
of a document informing him/her that a request for its approval has been
submitted by the user.
"""
__revision__ = "$Id$"
import os
import re
import sre_constants
from invenio.legacy.websubmit.db_layer import get_approval_request_notes
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError, \
CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.config import CFG_CERN_SITE, \
CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_RECORD
from invenio.modules.access.control import acc_get_role_users, acc_get_role_id
from invenio.legacy.websubmit.functions.Shared_Functions import ParamFromFile
from invenio.ext.logging import register_exception
from invenio.legacy.search_engine import print_record
from invenio.ext.email import send_email
# Body template of the notification e-mail sent to the referee; the
# %(...)s placeholders are filled in by Mail_Approval_Request_to_Referee.
CFG_MAIL_BODY = """
A request for the approval of a document in the %(site-name)s has been
made and requires your attention as a referee. The details are as
follows:
Reference Number: [%(report-number)s]
Title: %(title)s
Author(s): %(authors)s
You can see the details of the record at the following address:
<%(site-url)s/%(CFG_SITE_RECORD)s/%(record-id)s>
Please register your decision by following the instructions at the
following address:
<%(site-url)s/submit/direct?%(report-number-fieldname)s=%(report-number)s&sub=%(approval-action)s%(doctype)s&combo%(doctype)s=%(category)s>
Below, you may find some additional information about the approval request:
%(notes)s
"""
def Mail_Approval_Request_to_Referee(parameters, curdir, form, user_info=None):
    """
    This function sends an email to the referee of a document informing
    him/her that a request for its approval has been submitted by the
    user.

    Relies on the module-level globals ``rn`` (report number) and
    ``sysno`` (record id) being set by the WebSubmit machinery before it
    is called.

    @param categ_file_appreq: (string) - some document types are
     separated into different categories, each of which has its own
     referee(s).
     In such document types, it's necessary to know the document-
     type's category in order to choose the referee.
     This parameter provides a means by which the category information
     can be extracted from a file in the current submission's working
     directory. It should therefore be a filename.

    @param categ_rnseek_appreq: (string) - some document types are
     separated into different categories, each of which has its own
     referee(s).
     In such document types, it's necessary to know the document-
     type's category in order to choose the referee.
     This parameter provides a means by which the category information
     can be extracted from the document's reference number.
     It is in fact a string that will be compiled into a regexp and
     an attempt will be made to match it against the document's reference
     number starting from the left-most position.
     The only pre-requisite is that the segment in which the category is
     sought should be indicated with <CATEGORY>.
     Thus, an example might be as follows:
        ATL(-COM)?-<CATEGORY>-.+
     This would allow "PHYS" in the following reference number to be
     recognised as the category:
        ATL-COM-PHYS-2008-001

    @param edsrn: (string) - the name of the field in which the report
     number should be placed when the referee visits the form for making
     a decision.

    @return: (string) - empty string.
    """
    ## Get the reference number (as global rn - sorry!) and the document type:
    global sysno, rn
    doctype = form['doctype']

    ########
    ## Get the parameters from the list:
    ########
    ## Get the name of the report-number file:
    ########
    try:
        edsrn_file = parameters["edsrn"]
    except KeyError:
        ## No value given for the edsrn file:
        msg = "Error in Mail_Approval_Request_to_Referee function: unable " \
              "to determine the name of the file in which the document's " \
              "report number should be stored."
        raise InvenioWebSubmitFunctionError(msg)
    else:
        ## basename() guards against path components being smuggled in
        ## through the parameter value.
        edsrn_file = str(edsrn_file)
        edsrn_file = os.path.basename(edsrn_file).strip()
        if edsrn_file == "":
            msg = "Error in Mail_Approval_Request_to_Referee function: " \
                  "unable to determine the name of the file in which " \
                  "the document's report number should be stored."
            raise InvenioWebSubmitFunctionError(msg)
    ########
    ## Get the name of the category file:
    #######
    try:
        ## If it has been provided, get the name of the file in which the
        ## category is stored:
        category_file = parameters["categ_file_appreq"]
    except KeyError:
        ## No value given for the category file:
        category_file = None
    else:
        if category_file is not None:
            category_file = str(category_file)
            category_file = os.path.basename(category_file).strip()
            if category_file == "":
                category_file = None
    ########
    ## Get the regexp that is used to find the category in the report number:
    ########
    try:
        ## If it has been provided, get the regexp used for identifying
        ## a document-type's category from its reference number:
        category_rn_regexp = parameters["categ_rnseek_appreq"]
    except KeyError:
        ## No value given for the category regexp:
        category_rn_regexp = None
    else:
        if category_rn_regexp is not None:
            category_rn_regexp = str(category_rn_regexp).strip()
            if category_rn_regexp == "":
                category_rn_regexp = None
    #######
    ## Resolve the document type's category:
    ##
    ## This is a long process. The end result is that the category is extracted
    ## either from a file in curdir, or from the report number.
    ## If it's taken from the report number, the admin must configure the
    ## function to accept a regular expression that is used to find the
    ## category in the report number.
    ##
    ## NOTE(review): the error messages below refer to the
    ## "Register_Approval_Request function" - apparently copy-pasted from
    ## that sibling function; the text is misleading but harmless.
    if category_file is not None and category_rn_regexp is not None:
        ## It is not valid to have both a category file and a pattern
        ## describing how to extract the category from a report number.
        ## raise an InvenioWebSubmitFunctionError
        msg = "Error in Register_Approval_Request function: received " \
              "instructions to search for the document's category in " \
              "both its report number AND in a category file. Could " \
              "not determine which to use - please notify the " \
              "administrator."
        raise InvenioWebSubmitFunctionError(msg)
    elif category_file is not None:
        ## Attempt to recover the category information from a file in the
        ## current submission's working directory:
        category = ParamFromFile("%s/%s" % (curdir, category_file))
        if category is not None:
            category = category.strip()
        if category in (None, ""):
            ## The category cannot be resolved.
            msg = "Error in Register_Approval_Request function: received " \
                  "instructions to search for the document's category in " \
                  "a category file, but could not recover the category " \
                  "from that file. An approval request therefore cannot " \
                  "be registered for the document."
            raise InvenioWebSubmitFunctionError(msg)
    elif category_rn_regexp is not None:
        ## Attempt to recover the category information from the document's
        ## reference number using the regexp in category_rn_regexp:
        ##
        ## Does the category regexp contain the key-phrase "<CATEG>"?
        if category_rn_regexp.find("<CATEG>") != -1:
            ## Yes. Replace "<CATEG>" with "(?P<category>.+?)".
            ## For example, this:
            ##    ATL(-COM)?-<CATEG>-
            ## Will be transformed into this:
            ##    ATL(-COM)?-(?P<category>.+?)-
            category_rn_final_regexp = \
                category_rn_regexp.replace("<CATEG>", r"(?P<category>.+?)", 1)
        else:
            ## The regexp for category didn't contain "<CATEG>", but this is
            ## mandatory.
            msg = "Error in Register_Approval_Request function: The " \
                  "[%(doctype)s] submission has been configured to search " \
                  "for the document type's category in its reference number, " \
                  "using a poorly formed search expression (no marker for " \
                  "the category was present.) Since the document's category " \
                  "therefore cannot be retrieved, an approval request cannot " \
                  "be registered for it. Please report this problem to the " \
                  "administrator." \
                  % { 'doctype' : doctype, }
            raise InvenioWebSubmitFunctionError(msg)
        ##
        try:
            ## Attempt to compile the regexp for finding the category:
            re_categ_from_rn = re.compile(category_rn_final_regexp)
        except sre_constants.error:
            ## The expression passed to this function could not be compiled
            ## into a regexp. Register this exception and raise an
            ## InvenioWebSubmitFunctionError:
            exception_prefix = "Error in Register_Approval_Request function: " \
                               "The [%(doctype)s] submission has been " \
                               "configured to search for the document type's " \
                               "category in its reference number, using the " \
                               "following regexp: /%(regexp)s/. This regexp, " \
                               "however, could not be compiled correctly " \
                               "(created it from %(categ-search-term)s.)" \
                               % { 'doctype' : doctype, \
                                   'regexp' : category_rn_final_regexp, \
                                   'categ-search-term' : category_rn_regexp, }
            register_exception(prefix=exception_prefix)
            msg = "Error in Register_Approval_Request function: The " \
                  "[%(doctype)s] submission has been configured to search " \
                  "for the document type's category in its reference number, " \
                  "using a poorly formed search expression. Since the " \
                  "document's category therefore cannot be retrieved, an " \
                  "approval request cannot be registered for it. Please " \
                  "report this problem to the administrator." \
                  % { 'doctype' : doctype, }
            raise InvenioWebSubmitFunctionError(msg)
        else:
            ## Now attempt to recover the category from the RN string:
            m_categ_from_rn = re_categ_from_rn.match(rn)
            if m_categ_from_rn is not None:
                ## The pattern matched in the string.
                ## Extract the category from the match:
                try:
                    category = m_categ_from_rn.group("category")
                except IndexError:
                    ## There was no "category" group. That group is mandatory.
                    exception_prefix = \
                        "Error in Register_Approval_Request function: The " \
                        "[%(doctype)s] submission has been configured to " \
                        "search for the document type's category in its " \
                        "reference number using the following regexp: " \
                        "/%(regexp)s/. The search produced a match, but " \
                        "there was no \"category\" group in the match " \
                        "object although this group is mandatory. The " \
                        "regexp was compiled from the following string: " \
                        "[%(categ-search-term)s]." \
                        % { 'doctype' : doctype, \
                            'regexp' : category_rn_final_regexp, \
                            'categ-search-term' : category_rn_regexp, }
                    register_exception(prefix=exception_prefix)
                    msg = "Error in Register_Approval_Request function: The " \
                          "[%(doctype)s] submission has been configured to " \
                          "search for the document type's category in its " \
                          "reference number, using a poorly formed search " \
                          "expression (there was no category marker). Since " \
                          "the document's category therefore cannot be " \
                          "retrieved, an approval request cannot be " \
                          "registered for it. Please report this problem to " \
                          "the administrator." \
                          % { 'doctype' : doctype, }
                    raise InvenioWebSubmitFunctionError(msg)
                else:
                    category = category.strip()
                    if category == "":
                        msg = "Error in Register_Approval_Request function: " \
                              "The [%(doctype)s] submission has been " \
                              "configured to search for the document type's " \
                              "category in its reference number, but no " \
                              "category was found. The request for approval " \
                              "cannot be registered. Please report this " \
                              "problem to the administrator." \
                              % { 'doctype' : doctype, }
                        raise InvenioWebSubmitFunctionError(msg)
            else:
                ## No match. Cannot find the category and therefore cannot
                ## continue:
                msg = "Error in Register_Approval_Request function: The " \
                      "[%(doctype)s] submission has been configured to " \
                      "search for the document type's category in its " \
                      "reference number, but no match was made. The request " \
                      "for approval cannot be registered. Please report " \
                      "this problem to the administrator." \
                      % { 'doctype' : doctype, }
                raise InvenioWebSubmitFunctionError(msg)
    else:
        ## The document type has no category.
        category = ""
    ##
    ## End of category recovery
    #######
    #######
    ## Get the title and author(s) from the record:
    #######
    ## Author(s):
    rec_authors = ""
    rec_first_author = print_record(int(sysno), 'tm', "100__a")
    rec_other_authors = print_record(int(sysno), 'tm', "700__a")
    if rec_first_author != "":
        rec_authors += "".join(["%s\n" % author.strip() for \
                                author in rec_first_author.split("\n")])
    if rec_other_authors != "":
        rec_authors += "".join(["%s\n" % author.strip() for \
                                author in rec_other_authors.split("\n")])
    ## Title:
    rec_title = "".join(["%s\n" % title.strip() for title in \
                         print_record(int(sysno), 'tm', "245__a").split("\n")])
    ##
    #######
    ## the normal approval action
    approve_act = 'APP'
    ## Get notes about the approval request:
    approval_notes = get_approval_request_notes(doctype, rn)
    ## Get the referee email address:
    if CFG_CERN_SITE:
        ## The referees system in CERN now works with listbox membership.
        ## List names should take the format
        ## "service-cds-referee-doctype-category@cern.ch"
        ## Make sure that your list exists!
        ## FIXME - to be replaced by a mailing alias in webaccess in the
        ## future.
        if doctype == 'ATN': ## Special case of 'RPR' action for doctype ATN
            action = ParamFromFile("%s/%s" % (curdir,'act')).strip()
            if action == 'RPR':
                notetype = ParamFromFile("%s/%s" % (curdir,'ATN_NOTETYPE')).strip()
                if notetype not in ('SLIDE','PROC'):
                    raise InvenioWebSubmitFunctionError('ERROR function Mail_Approval_Request_to_Referee:: do not recognize notetype ' + notetype)
                if notetype == 'PROC':
                    approve_act = 'APR' # RPR PROC requires APR action to approve
                    referee_listname = "service-cds-referee-atn-proc@cern.ch"
                elif notetype == 'SLIDE': ## SLIDES approval
                    approve_act = 'APS' # RPR SLIDE requires APS action to approve
                    referee_listname = "atlas-speakers-comm@cern.ch"
                else:
                    raise InvenioWebSubmitFunctionError('ERROR function Mail_Approval_Request_to_Referee:: do not understand notetype: ' +notetype)
        else:
            referee_listname = "service-cds-referee-%s" % doctype.lower()
            if category != "":
                referee_listname += "-%s" % category.lower()
        ## NOTE(review): for the ATN special cases above, referee_listname
        ## already ends in "@cern.ch", so appending the domain again here
        ## looks wrong (and for ATN with action != 'RPR' the name may be
        ## unbound) - verify against the production behaviour.
        mailto_addresses = referee_listname + "@cern.ch"
        if category == 'CDSTEST':
            referee_listname = "service-cds-referee-%s" % doctype.lower()
            referee_listname += "-%s" % category.lower()
            mailto_addresses = referee_listname + "@cern.ch"
    else:
        referee_address = ""
        ## Try to retrieve the referee's email from the referee's database:
        for user in \
            acc_get_role_users(acc_get_role_id("referee_%s_%s" \
                                               % (doctype, category))):
            referee_address += user[1] + ","
        ## And if there are general referees:
        for user in \
            acc_get_role_users(acc_get_role_id("referee_%s_*" % doctype)):
            referee_address += user[1] + ","
        referee_address = re.sub(",$", "", referee_address)
        # Creation of the mail for the referee
        mailto_addresses = ""
        if referee_address != "":
            mailto_addresses = referee_address + ","
        else:
            mailto_addresses = re.sub(",$", "", mailto_addresses)
    ##
    ## Send the email:
    mail_subj = "Request for approval of [%s]" % rn
    mail_body = CFG_MAIL_BODY % \
                { 'site-name' : CFG_SITE_NAME,
                  'CFG_SITE_RECORD' : CFG_SITE_RECORD,
                  'report-number-fieldname' : edsrn_file,
                  'report-number' : rn,
                  'title' : rec_title,
                  'authors' : rec_authors,
                  'site-url' : CFG_SITE_URL,
                  'record-id' : sysno,
                  'approval-action' : approve_act,
                  'doctype' : doctype,
                  'notes' : approval_notes,
                  'category' : category,
                }
    send_email(CFG_SITE_SUPPORT_EMAIL,
               mailto_addresses,
               mail_subj,
               mail_body,
               copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
    ##
    return ""
| gpl-2.0 |
MartinHjelmare/home-assistant | homeassistant/components/pjlink/media_player.py | 7 | 5409 | """Support for controlling projector via the PJLink protocol."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ENCODING = 'encoding'
DEFAULT_PORT = 4352
DEFAULT_ENCODING = 'utf-8'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
})
SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the PJLink platform."""
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    encoding = config.get(CONF_ENCODING)
    password = config.get(CONF_PASSWORD)

    # Per-integration registry of already-configured devices, keyed by
    # "host:port" so the same projector is never added twice.
    hass_data = hass.data.setdefault('pjlink', {})
    device_label = "{}:{}".format(host, port)
    if device_label in hass_data:
        return

    device = PjLinkDevice(host, port, name, encoding, password)
    hass_data[device_label] = device
    add_entities([device], True)
def format_input_source(input_source_name, input_source_number):
    """Format input source for display in UI."""
    return "%s %s" % (input_source_name, input_source_number)
class PjLinkDevice(MediaPlayerDevice):
    """Representation of a PJLink device."""

    def __init__(self, host, port, name, encoding, password):
        """Initialize the PJLink device."""
        self._host = host
        self._port = port
        self._name = name
        self._password = password
        self._encoding = encoding
        self._muted = False
        self._pwstate = STATE_OFF
        self._current_source = None
        # Query the projector once at setup time to learn its name (when
        # not configured explicitly) and the available inputs.
        with self.projector() as projector:
            if not self._name:
                self._name = projector.get_name()
            inputs = projector.get_inputs()
            # Map "name number" display labels back to the (name, number)
            # tuples that pypjlink's set_input() expects.
            self._source_name_mapping = \
                {format_input_source(*x): x for x in inputs}
            self._source_list = sorted(self._source_name_mapping.keys())

    def projector(self):
        """Create PJLink Projector instance.

        Returns a freshly connected, authenticated Projector; callers use
        it as a context manager so the connection is closed afterwards.
        """
        from pypjlink import Projector
        projector = Projector.from_address(
            self._host, self._port, self._encoding)
        projector.authenticate(self._password)
        return projector

    def update(self):
        """Get the latest state from the device."""
        from pypjlink.projector import ProjectorError
        with self.projector() as projector:
            try:
                pwstate = projector.get_power()
                if pwstate in ('on', 'warm-up'):
                    self._pwstate = STATE_ON
                else:
                    self._pwstate = STATE_OFF
                self._muted = projector.get_mute()[1]
                self._current_source = \
                    format_input_source(*projector.get_input())
            except KeyError as err:
                # pypjlink can raise KeyError("'OK'") on status queries -
                # presumably when the projector is powered down; treat it
                # as "off" rather than an error. TODO(review): confirm.
                if str(err) == "'OK'":
                    self._pwstate = STATE_OFF
                    self._muted = False
                    self._current_source = None
                else:
                    raise
            except ProjectorError as err:
                # "unavailable time": projector refuses status queries
                # (e.g. while warming up / cooling down); report as off.
                if str(err) == 'unavailable time':
                    self._pwstate = STATE_OFF
                    self._muted = False
                    self._current_source = None
                else:
                    raise

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._pwstate

    @property
    def is_volume_muted(self):
        """Return boolean indicating mute status."""
        return self._muted

    @property
    def source(self):
        """Return current input source."""
        return self._current_source

    @property
    def source_list(self):
        """Return all available input sources."""
        return self._source_list

    @property
    def supported_features(self):
        """Return projector supported features."""
        return SUPPORT_PJLINK

    def turn_off(self):
        """Turn projector off."""
        with self.projector() as projector:
            projector.set_power('off')

    def turn_on(self):
        """Turn projector on."""
        with self.projector() as projector:
            projector.set_power('on')

    def mute_volume(self, mute):
        """Mute (true) or unmute (false) media player."""
        with self.projector() as projector:
            from pypjlink import MUTE_AUDIO
            projector.set_mute(MUTE_AUDIO, mute)

    def select_source(self, source):
        """Set the input source."""
        # Translate the display label back to pypjlink's (name, number).
        source = self._source_name_mapping[source]
        with self.projector() as projector:
            projector.set_input(*source)
| apache-2.0 |
brandonium21/snowflake | snowflakeEnv/lib/python2.7/site-packages/sqlalchemy/__init__.py | 23 | 2072 | # sqlalchemy/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .sql import (
alias,
and_,
asc,
between,
bindparam,
case,
cast,
collate,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
false,
func,
insert,
intersect,
intersect_all,
join,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
text,
true,
tuple_,
type_coerce,
union,
union_all,
update,
)
from .types import (
BIGINT,
BINARY,
BLOB,
BOOLEAN,
BigInteger,
Binary,
Boolean,
CHAR,
CLOB,
DATE,
DATETIME,
DECIMAL,
Date,
DateTime,
Enum,
FLOAT,
Float,
INT,
INTEGER,
Integer,
Interval,
LargeBinary,
NCHAR,
NVARCHAR,
NUMERIC,
Numeric,
PickleType,
REAL,
SMALLINT,
SmallInteger,
String,
TEXT,
TIME,
TIMESTAMP,
Text,
Time,
TypeDecorator,
Unicode,
UnicodeText,
VARBINARY,
VARCHAR,
)
from .schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
DDL,
)
from .inspection import inspect
from .engine import create_engine, engine_from_config
__version__ = '0.9.8'
def __go(lcls):
    """Populate ``__all__`` from the module namespace *lcls*.

    Keeps every name that is neither private (leading underscore) nor a
    module object, then resolves sqlalchemy's lazy cross-module
    dependencies.
    """
    global __all__

    from . import events
    from . import util as _sa_util

    import inspect as _inspect

    public = [
        name
        for name, obj in lcls.items()
        if not name.startswith('_') and not _inspect.ismodule(obj)
    ]
    __all__ = sorted(public)

    _sa_util.dependencies.resolve_all("sqlalchemy")


__go(locals())
| bsd-2-clause |
lukeiwanski/tensorflow | tensorflow/contrib/opt/python/training/model_average_optimizer_test.py | 14 | 7814 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ModelAverageOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import portpicker
from tensorflow.contrib.opt.python.training import model_average_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import server_lib
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
  """Start in-process gRPC servers forming a local test cluster.

  Args:
    num_workers: number of worker tasks to start.
    num_ps: number of parameter-server tasks to start.
    protocol: wire protocol for the servers (default "grpc").

  Returns:
    Tuple (cluster_dict, workers, ps_servers).
  """
  def _pick_ports(count):
    # One free localhost port per task.
    return [portpicker.pick_unused_port() for _ in range(count)]

  worker_ports = _pick_ports(num_workers)
  ps_ports = _pick_ports(num_ps)
  cluster_dict = {
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports]
  }
  cs = server_lib.ClusterSpec(cluster_dict)

  def _start_job(job_name, count):
    return [
        server_lib.Server(
            cs, job_name=job_name, protocol=protocol, task_index=ix,
            start=True)
        for ix in range(count)
    ]

  workers = _start_job("worker", num_workers)
  ps_servers = _start_job("ps", num_ps)
  return cluster_dict, workers, ps_servers
# Create the workers and return their sessions, graphs and train_ops.
# The chief worker applies the global update last.
def _get_workers(num_workers, steps, workers):
  """Build a graph, MonitoredSession and train_op for every worker.

  Args:
    num_workers: number of workers (worker_id 0 is the chief).
    steps: interval_steps for ModelAverageOptimizer, i.e. how many local
      steps happen between two global averaging updates.
    workers: in-process servers returned by create_local_cluster().

  Returns:
    Tuple of lists (sessions, graphs, train_ops), indexed by worker id.
  """
  sessions = []
  graphs = []
  train_ops = []
  for worker_id in range(num_workers):
    graph = ops.Graph()
    is_chief = (worker_id == 0)
    with graph.as_default():
      worker_device = "/job:worker/task:%d/cpu:0" % (worker_id)
      ma_coustom = model_average_optimizer.ModelAverageCustomGetter(
          worker_device=worker_device)
      # Local variables live on the worker; the custom getter mirrors
      # them with global copies on the single ps task.
      with variable_scope.variable_scope(
          "", custom_getter=ma_coustom), ops.device(
              device_setter.replica_device_setter(
                  worker_device=worker_device,
                  ps_device="/job:ps/task:0/cpu:0",
                  ps_tasks=1)):
        global_step = variables.Variable(0, name="global_step", trainable=False)
        var_0 = variable_scope.get_variable(initializer=0.0, name="v0")
        var_1 = variable_scope.get_variable(initializer=1.0, name="v1")
      with ops.device("/job:worker/task:" + str(worker_id)):
        # Fixed fake gradients: worker 0 pushes -1.0, all others -2.0.
        if worker_id == 0:
          grads_0 = constant_op.constant(-1.0)
          grads_1 = constant_op.constant(-1.0)
        else:
          grads_0 = constant_op.constant(-2.0)
          grads_1 = constant_op.constant(-2.0)
        sgd_opt = gradient_descent.GradientDescentOptimizer(1.0)
        opt = model_average_optimizer.ModelAverageOptimizer(
            opt=sgd_opt,
            num_worker=num_workers,
            ma_custom_getter=ma_coustom,
            is_chief=is_chief,
            interval_steps=steps)
        train_op = [
            opt.apply_gradients([[grads_0, var_0], [grads_1, var_1]],
                                global_step)
        ]
      # The hook performs the periodic model-average synchronization.
      easgd_hook = opt.make_session_run_hook()
    # Creates MonitoredSession
    sess = training.MonitoredTrainingSession(
        workers[worker_id].target, hooks=[easgd_hook])

    sessions.append(sess)
    graphs.append(graph)
    train_ops.append(train_op)

  return sessions, graphs, train_ops
class ModelAverageOptimizerTest(test.TestCase):
  """Integration tests for ModelAverageOptimizer on an in-process cluster."""

  def _run(self, train_op, sess):
    # Target function for checkedThread: run a single training step.
    sess.run(train_op)

  def test1Workers2Period(self):
    """With interval_steps=2 the globals only average on the second step."""
    num_workers = 2
    steps = 2
    num_ps = 1
    _, workers, _ = create_local_cluster(
        num_workers=num_workers, num_ps=num_ps)

    sessions, graphs, train_ops = _get_workers(num_workers, steps, workers)

    var_0 = graphs[0].get_tensor_by_name("v0:0")
    var_1 = graphs[0].get_tensor_by_name("v1:0")
    global_step = training_util.get_global_step(graphs[0])
    global_var_0 = graphs[0].get_tensor_by_name(
        model_average_optimizer.GLOBAL_VARIABLE_NAME + "/v0:0")
    global_var_1 = graphs[0].get_tensor_by_name(
        model_average_optimizer.GLOBAL_VARIABLE_NAME + "/v1:0")

    # Verify the initialized value.
    self.assertAllEqual(0.0, sessions[0].run(var_0))
    self.assertAllEqual(1.0, sessions[0].run(var_1))
    self.assertAllEqual(0.0, sessions[0].run(global_var_0))
    self.assertAllEqual(1.0, sessions[0].run(global_var_1))
    self.assertAllEqual(0, sessions[0].run(global_step))

    # Iteration 1: worker 0 applies gradient -1.0 with lr 1.0, so its
    # local vars advance by 1 while the global copies stay untouched.
    sessions[0].run(train_ops[0])
    sessions[1].run(train_ops[1])

    self.assertAllEqual(1.0, sessions[0].run(var_0))
    self.assertAllEqual(2.0, sessions[0].run(var_1))
    self.assertAllEqual(0.0, sessions[0].run(global_var_0))
    self.assertAllEqual(1.0, sessions[0].run(global_var_1))
    self.assertAllEqual(0, sessions[0].run(global_step))

    # iteration 2, global variable update
    # Both workers step concurrently; the averaged value 3.0 is
    # consistent with worker locals of 2 and 4 being averaged —
    # presumably into both the globals and the locals (see asserts).
    thread_0 = self.checkedThread(
        target=self._run, args=(train_ops[0], sessions[0]))
    thread_1 = self.checkedThread(
        target=self._run, args=(train_ops[1], sessions[1]))
    thread_0.start()
    thread_1.start()
    thread_0.join()
    thread_1.join()

    self.assertAllEqual(3.0, sessions[0].run(var_0))
    self.assertAllEqual(4.0, sessions[0].run(var_1))
    self.assertAllEqual(3.0, sessions[0].run(global_var_0))
    self.assertAllEqual(4.0, sessions[0].run(global_var_1))
    self.assertAllEqual(1, sessions[0].run(global_step))

    # iteration 3: a single worker step again moves only the locals.
    sessions[0].run(train_ops[0])
    self.assertAllEqual(4.0, sessions[0].run(var_0))
    self.assertAllEqual(5.0, sessions[0].run(var_1))
    self.assertAllEqual(3.0, sessions[0].run(global_var_0))
    self.assertAllEqual(4.0, sessions[0].run(global_var_1))
    self.assertAllEqual(1, sessions[0].run(global_step))

  def testPS2TasksWithClusterSpecClass(self):
    """Locals stay on the worker; global twins spread over the ps tasks."""
    cluster_spec = server_lib.ClusterSpec({
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
    })
    worker_device = "/job:worker/task:0"
    ma_coustom = model_average_optimizer.ModelAverageCustomGetter(
        worker_device=worker_device)
    # (Redundant re-import: device_setter is already imported at module
    # scope.)
    from tensorflow.python.training import device_setter
    with ops.device(
        device_setter.replica_device_setter(cluster=cluster_spec,
                                            worker_device=worker_device,
                                            ps_device="/job:ps")), \
        variable_scope.variable_scope("", custom_getter=ma_coustom):
      v = variable_scope.get_variable(initializer=[1, 2], name="v")
      w = variable_scope.get_variable(initializer=[2, 1], name="w")
      v_g, w_g = ma_coustom._local_2_global[v], ma_coustom._local_2_global[w]
      self.assertDeviceEqual("/job:worker/task:0", v.device)
      self.assertDeviceEqual("job:ps/task:0", v_g.device)
      self.assertDeviceEqual("/job:worker/task:0", w.device)
      self.assertDeviceEqual("job:ps/task:1", w_g.device)
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
MyAOSP/external_chromium_org | third_party/protobuf/python/google/protobuf/internal/descriptor_database_test.py | 213 | 2872 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_database."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import unittest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
class DescriptorDatabaseTest(unittest.TestCase):
  """Tests for descriptor_database.DescriptorDatabase."""

  def testAdd(self):
    """Added files are findable by file name and by any contained symbol."""
    db = descriptor_database.DescriptorDatabase()
    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test2_pb2.DESCRIPTOR.serialized_pb)
    db.Add(file_desc_proto)

    # assertEqual instead of the deprecated assertEquals alias
    # (the alias was removed in Python 3.12).
    self.assertEqual(file_desc_proto, db.FindFileByName(
        'net/proto2/python/internal/factory_test2.proto'))
    # Top-level message.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'net.proto2.python.internal.Factory2Message'))
    # Nested message.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'net.proto2.python.internal.Factory2Message.NestedFactory2Message'))
    # Top-level enum.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'net.proto2.python.internal.Factory2Enum'))
    # Nested enum.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'net.proto2.python.internal.Factory2Message.NestedFactory2Enum'))
# Run the tests when the module is executed directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
changbindu/linux-ok6410 | tools/perf/scripts/python/netdev-times.py | 1544 | 15191 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # every tracepoint event handled by this script
irq_dic = {}; # key: cpu, value: stack (list) of in-flight hardirq records
              # that may raise the NET_RX softirq
net_rx_dic = {}; # key: cpu, value: entry time of the NET_RX softirq plus
                 # the list of receive events seen inside it
receive_hunk_list = []; # completed NET_RX hunks (irqs + softirq + events)
rx_skb_list = []; # received packets awaiting a matching
                  # skb_copy_datagram_iovec (or kfree_skb)
buffer_budget = 65536; # maximum length of rx_skb_list, tx_queue_list and
                       # tx_xmit_list before the oldest entry is dropped
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # packets which passed through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # packets which passed through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # packets whose transmit lifetime is complete (freed)

# command-line options (parsed in trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # device name given by the "dev=" option (0 means "no filter")
debug = 0;

# indices of the event_info tuple built by the tracepoint callbacks
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
def diff_msec(src, dst):
    """Return the interval from src(nsec) to dst(nsec) in milliseconds."""
    NSEC_PER_MSEC = 1000000.0
    return (dst - src) / NSEC_PER_MSEC
# Display the life of one transmitted packet:
# queue (dev_queue_xmit) -> xmit (dev_hard_start_xmit) -> free.
def print_transmit(hunk):
    # honour the "dev=" filter: skip packets of other devices
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format strings (ASCII-art tree prefixes) used when displaying how an
# rx packet was processed; all offsets are msec relative to the first irq.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be shown (the "dev=" filter matches the
    # name of any irq in the hunk)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # 1) the hardirqs, with any netif_rx events they queued
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # 2) the NET_RX softirq itself
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    # 3) events recorded while the softirq ran
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # a received skb is either copied to userspace ('comm') or
            # freed/consumed by the stack ('handle')
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse the script options from sys.argv before tracing starts."""
    global show_tx
    global show_rx
    global dev
    global debug

    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1

    # Neither chart was requested explicitly: show both.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Sort the recorded events, replay them through the handlers and print."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events: dispatch on the tracepoint name
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # with "debug": report how full the matching buffers got
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event.
# Every callback below only packs its arguments into an event_info
# tuple and appends it to all_event_list; the real processing happens
# after sorting, in trace_end().
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    # NOTE(review): "irq__softirq_entry" is deliberately reused as the
    # event name for the vec symbol lookup — presumably all softirq
    # events share the same vec symbol table; confirm in symbol_str().
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           callchain, irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new hardirq record onto this cpu's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    irq_dic.setdefault(cpu, []).append(record)
def handle_irq_handler_exit(event_info):
    """Close the hardirq record opened by handle_irq_handler_entry."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        # mismatched exit: the popped record is silently discarded
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Mark the hardirq on top of this cpu's stack as raising NET_RX."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic or not irq_dic[cpu]:
        return
    irq_record = irq_dic[cpu].pop()
    # setdefault covers both the first raise and any subsequent ones.
    irq_record.setdefault('event_list', []).append(
        {'time': time, 'event': 'sirq_raise'})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """A NET_RX softirq started on this cpu: open a fresh event collector."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """A NET_RX softirq finished: merge its irqs and events into one hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        # incomplete hunk: no triggering irq or no softirq entry seen
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a napi poll that happened inside a NET_RX softirq."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    net_rx_dic[cpu]['event_list'].append(
        {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the hardirq on top of this cpu's stack."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic or not irq_dic[cpu]:
        return
    irq_record = irq_dic[cpu].pop()
    irq_record.setdefault('event_list', []).append(
        {'time': time, 'event': 'netif_rx',
         'skbaddr': skbaddr, 'skblen': skblen, 'dev_name': dev_name})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a packet delivered to the stack inside a NET_RX softirq."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        # the same dict object is shared with rx_skb_list, so a later
        # skb_copy_datagram_iovec/kfree_skb match annotates the hunk too
        rx_skb_list.insert(0, rec_data)
        # enforce the buffer budget: drop the oldest unmatched packet
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Remember a packet entering the qdisc layer (dev_queue_xmit)."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    # Enforce the buffer budget: drop the oldest unmatched packet.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """Move a packet from the qdisc list to the xmit list on NETDEV_TX_OK."""
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # enforce the buffer budget on the xmit list as well
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """An skb was dropped: remove it from whichever tracking list holds it."""
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    # queued but never transmitted: forget it silently
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # transmitted and now freed: the tx lifetime is complete
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # received but freed before reaching userspace: annotate the rx
    # record (shared with the hunk) with who dropped it and where
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """A transmitted skb was consumed: close out its tx lifetime."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for idx, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[idx]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """Match a copy-to-userspace with a previously received packet."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for idx, rec_data in enumerate(rx_skb_list):
        if skbaddr == rec_data['skbaddr']:
            # annotate the shared record with the consuming process
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[idx]
            return
| gpl-2.0 |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/networkx/algorithms/centrality/betweenness_subset.py | 10 | 8520 | """
Betweenness centrality measures for subsets of nodes.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['betweenness_centrality_subset',
'edge_betweenness_centrality_subset',
'betweenness_centrality_source']
import networkx as nx
from networkx.algorithms.centrality.betweenness import\
_single_source_dijkstra_path_basic as dijkstra
from networkx.algorithms.centrality.betweenness import\
_single_source_shortest_path_basic as shortest_path
def betweenness_centrality_subset(G, sources, targets,
                                  normalized=False, weight=None):
    r"""Compute betweenness centrality for a subset of nodes.

    .. math::

       c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)}

    where `S` is the set of sources, `T` is the set of targets,
    `\sigma(s, t)` is the number of shortest `(s, t)`-paths and
    `\sigma(s, t|v)` is the number of those paths passing through some
    node `v` other than `s, t`.  If `s = t`, `\sigma(s, t) = 1`, and if
    `v \in {s, t}`, `\sigma(s, t|v) = 0` [2]_.

    Parameters
    ----------
    G : graph
    sources : list of nodes
        Nodes to use as sources for shortest paths in betweenness.
    targets : list of nodes
        Nodes to use as targets for shortest paths in betweenness.
    normalized : bool, optional
        If True the betweenness values are normalized by
        ``2/((n-1)(n-2))`` for graphs and ``1/((n-1)(n-2))`` for
        directed graphs, where ``n`` is the number of nodes in G.
    weight : None or string, optional
        If None, all edge weights are considered equal.  Otherwise the
        name of the edge attribute used as weight.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    edge_betweenness_centrality
    load_centrality

    Notes
    -----
    Basic algorithm from [1]_.  For weighted graphs the edge weights
    must be greater than zero.  The normalization matches
    betweenness_centrality(), so using all nodes as both sources and
    targets reproduces it.

    References
    ----------
    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
    """
    node_betweenness = dict.fromkeys(G, 0.0)
    for source in sources:
        # Shortest-path DAG rooted at this source: BFS for unweighted
        # graphs, Dijkstra otherwise.
        if weight is None:
            stack, preds, sigma = shortest_path(G, source)
        else:
            stack, preds, sigma = dijkstra(G, source, weight)
        node_betweenness = _accumulate_subset(node_betweenness, stack,
                                              preds, sigma, source, targets)
    return _rescale(node_betweenness, len(G),
                    normalized=normalized, directed=G.is_directed())
def edge_betweenness_centrality_subset(G, sources, targets,
                                       normalized=False, weight=None):
    r"""Compute betweenness centrality for edges for a subset of nodes.

    .. math::

       c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}

    where `S` is the set of sources, `T` is the set of targets,
    `\sigma(s, t)` is the number of shortest `(s, t)`-paths and
    `\sigma(s, t|e)` is the number of those paths passing through edge
    `e` [2]_.

    Parameters
    ----------
    G : graph
        A networkx graph.
    sources : list of nodes
        Nodes to use as sources for shortest paths in betweenness.
    targets : list of nodes
        Nodes to use as targets for shortest paths in betweenness.
    normalized : bool, optional
        If True the betweenness values are normalized by ``2/(n(n-1))``
        for graphs and ``1/(n(n-1))`` for directed graphs, where ``n``
        is the number of nodes in G.
    weight : None or string, optional
        If None, all edge weights are considered equal.  Otherwise the
        name of the edge attribute used as weight.

    Returns
    -------
    edges : dictionary
        Dictionary of edges with betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_load

    Notes
    -----
    Basic algorithm from [1]_.  The normalization matches
    edge_betweenness_centrality(), so using all nodes as both sources
    and targets reproduces it.

    References
    ----------
    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
       Journal of Mathematical Sociology 25(2):163-177, 2001.
    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
       Centrality and their Generic Computation.
       Social Networks 30(2):136-145, 2008.
    """
    edge_betweenness = dict.fromkeys(G, 0.0)
    edge_betweenness.update(dict.fromkeys(G.edges(), 0.0))
    for source in sources:
        if weight is None:
            stack, preds, sigma = shortest_path(G, source)
        else:
            stack, preds, sigma = dijkstra(G, source, weight)
        edge_betweenness = _accumulate_edges_subset(edge_betweenness, stack,
                                                    preds, sigma, source,
                                                    targets)
    for node in G:
        # _accumulate_edges_subset also tallies per-node totals; only
        # the edge entries belong in this function's result.
        del edge_betweenness[node]
    return _rescale_e(edge_betweenness, len(G),
                      normalized=normalized, directed=G.is_directed())
def betweenness_centrality_source(G, normalized=True, weight=None,
                                  sources=None):
    """Obsolete name kept for backward compatibility.

    Equivalent to betweenness_centrality_subset() with every node as a
    target (and every node as a source when *sources* is None).
    """
    if sources is None:
        sources = G.nodes()
    return betweenness_centrality_subset(G, sources, G.nodes(),
                                         normalized, weight)
def _accumulate_subset(betweenness,S,P,sigma,s,targets):
delta=dict.fromkeys(S,0)
target_set=set(targets)
while S:
w=S.pop()
for v in P[w]:
if w in target_set:
delta[v]+=(sigma[v]/sigma[w])*(1.0+delta[w])
else:
delta[v]+=delta[w]/len(P[w])
if w != s:
betweenness[w]+=delta[w]
return betweenness
def _accumulate_edges_subset(betweenness,S,P,sigma,s,targets):
delta=dict.fromkeys(S,0)
target_set=set(targets)
while S:
w=S.pop()
for v in P[w]:
if w in target_set:
c=(sigma[v]/sigma[w])*(1.0+delta[w])
else:
c=delta[w]/len(P[w])
if (v,w) not in betweenness:
betweenness[(w,v)]+=c
else:
betweenness[(v,w)]+=c
delta[v]+=c
if w != s:
betweenness[w]+=delta[w]
return betweenness
def _rescale(betweenness, n, normalized, directed=False):
    """Rescale node betweenness values in place and return the dict.

    Normalized scores are divided by the number of ordered node pairs
    (n-1)(n-2); with n <= 2 there is nothing to normalize. Unnormalized
    undirected scores are halved because each path was counted twice.
    """
    scale = None
    if normalized is True:
        if n > 2:
            scale = 1.0 / ((n - 1) * (n - 2))
    elif not directed:
        scale = 1.0 / 2.0
    if scale is not None:
        for key in betweenness:
            betweenness[key] *= scale
    return betweenness
def _rescale_e(betweenness, n, normalized, directed=False):
    """Rescale edge betweenness values in place and return the dict.

    Normalized scores are divided by the number of ordered node pairs
    n(n-1); unnormalized undirected scores are halved because each path
    was counted in both directions.
    """
    scale = None
    if normalized is True:
        if n > 1:
            scale = 1.0 / (n * (n - 1))
    elif not directed:
        scale = 1.0 / 2.0
    if scale is not None:
        for key in betweenness:
            betweenness[key] *= scale
    return betweenness
| bsd-2-clause |
Spoken-tutorial/spoken-website | cron/spoken_search/whoosh/lang/snowball/german.py | 96 | 5347 | from .bases import _StandardStemmer
from whoosh.compat import u
class GermanStemmer(_StandardStemmer):
    """
    The German Snowball stemmer.

    :cvar __vowels: The German vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __st_ending: Letter that may directly appear before a word final 'st'.
    :type __st_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the German
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/german/stemmer.html
    """

    # Character classes used by the suffix-stripping rules below.
    __vowels = u("aeiouy\xE4\xF6\xFC")
    __s_ending = "bdfghklmnrt"
    __st_ending = "bdfghklmnt"
    # Suffix tuples are ordered longest-first so the first endswith() match
    # is the longest applicable suffix.
    __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s")
    __step2_suffixes = ("est", "en", "er", "st")
    __step3_suffixes = ("isch", "lich", "heit", "keit",
                        "end", "ung", "ig", "ik")

    def stem(self, word):
        """
        Stem a German word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # For stemming purposes the ligature 'ß' is treated as "ss".
        word = word.replace(u("\xDF"), "ss")

        # Every occurrence of 'u' and 'y'
        # between vowels is put into upper case.
        # (Upper-casing marks them as consonants so they are not treated
        # as vowels by the region computation; they are lowered again at
        # the end of the algorithm.)
        for i in range(1, len(word) - 1):
            if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i + 1:]))
                elif word[i] == "y":
                    word = "".join((word[:i], "Y", word[i + 1:]))

        # r1/r2 are the standard Snowball regions used to decide whether a
        # suffix may be removed.
        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i - 1] in self.__vowels:
                if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
                    r1 = word[3:]
                elif len(word[:i + 1]) == 0:
                    return word
                break

        # STEP 1: strip declensional endings found in R1.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if (suffix in ("en", "es", "e") and
                    word[-len(suffix) - 4:-len(suffix)] == "niss"):
                    # "niss" + e/en/es: drop the suffix plus the extra 's'
                    # (e.g. "verhaeltnisse" -> "verhaeltnis").
                    word = word[:-len(suffix) - 1]
                    r1 = r1[:-len(suffix) - 1]
                    r2 = r2[:-len(suffix) - 1]
                elif suffix == "s":
                    # final 's' is removed only after a valid s-ending letter
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2: strip verbal endings found in R1.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "st":
                    # 'st' is removed only after a valid st-ending letter and
                    # when at least 3 letters precede it.
                    if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 3: Derivational suffixes
        # (each case may additionally strip a preceding 'ig'/'er'/'en'/'lich'
        # when the Snowball side conditions hold).
        for suffix in self.__step3_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ung"):
                    if ("ig" in r2[-len(suffix) - 2:-len(suffix)] and
                        "e" not in r2[-len(suffix) - 3:-len(suffix) - 2]):
                        word = word[:-len(suffix) - 2]
                    else:
                        word = word[:-len(suffix)]
                elif (suffix in ("ig", "ik", "isch") and
                      "e" not in r2[-len(suffix) - 1:-len(suffix)]):
                    word = word[:-len(suffix)]
                elif suffix in ("lich", "heit"):
                    if ("er" in r1[-len(suffix) - 2:-len(suffix)] or
                        "en" in r1[-len(suffix) - 2:-len(suffix)]):
                        word = word[:-len(suffix) - 2]
                    else:
                        word = word[:-len(suffix)]
                elif suffix == "keit":
                    if "lich" in r2[-len(suffix) - 4:-len(suffix)]:
                        word = word[:-len(suffix) - 4]
                    elif "ig" in r2[-len(suffix) - 2:-len(suffix)]:
                        word = word[:-len(suffix) - 2]
                    else:
                        word = word[:-len(suffix)]
                break

        # Umlaut accents are removed and
        # 'u' and 'y' are put back into lower case.
        word = (word.replace(u("\xE4"), "a").replace(u("\xF6"), "o")
                    .replace(u("\xFC"), "u").replace("U", "u")
                    .replace("Y", "y"))
        return word
| gpl-3.0 |
chetan/ansible | plugins/inventory/vmware.py | 17 | 6210 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMWARE external inventory script
=================================
shamelessly copied from existing inventory scripts.
This script and it's ini can be used more than once,
i.e vmware.py/vmware_colo.ini vmware_idf.py/vmware_idf.ini
(script can be link)
so if you don't have clustered vcenter but multiple esx machines or
just diff clusters you can have a inventory per each and automatically
group hosts based on file name or specify a group in the ini.
'''
import os
import sys
import time
import ConfigParser
from psphere.client import Client
from psphere.managedobjects import HostSystem
try:
import json
except ImportError:
import simplejson as json
def save_cache(cache_item, data, config):
    ''' saves item to cache

    cache_item: file name of the cache entry (created under cache_dir)
    data:       JSON-serializable payload to persist
    config:     ConfigParser with a [defaults] cache_dir option

    Caching is best-effort: any I/O failure is silently ignored so a
    broken cache never breaks inventory generation.
    '''
    dpath = config.get('defaults', 'cache_dir')
    try:
        # "with" guarantees the handle is closed even if the dump fails
        # (the original leaked the handle on a failed write). The
        # "except E, e" Python-2-only syntax is also gone; this form works
        # on both Python 2 and 3.
        with open(os.path.join(dpath, cache_item), 'w') as cache:
            cache.write(json.dumps(data))
    except IOError:
        pass  # not really sure what to do here
def get_cache(cache_item, config):
    ''' returns cached item

    Returns the parsed JSON content of the cache file, or an empty dict
    when the file is missing/unreadable (callers then fall back to a live
    API query).
    '''
    dpath = config.get('defaults', 'cache_dir')
    inv = {}
    try:
        # "with" closes the handle even if json parsing raises; the
        # Python-2-only "except E, e" binding of an unused variable is
        # dropped so this works on both Python 2 and 3.
        with open(os.path.join(dpath, cache_item), 'r') as cache:
            inv = json.loads(cache.read())
    except IOError:
        pass  # no cache yet -- not an error
    return inv
def cache_available(cache_item, config):
    ''' checks if we have a 'fresh' cache available for item requested

    True only when [defaults] cache_dir and cache_max_age are configured,
    the cache file exists, and its age is within cache_max_age seconds.
    '''
    if config.has_option('defaults', 'cache_dir'):
        dpath = config.get('defaults', 'cache_dir')
        try:
            existing = os.stat(os.path.join(dpath, cache_item))
        except OSError:
            # cache doesn't exist or isn't accessible
            return False
        if config.has_option('defaults', 'cache_max_age'):
            # BUGFIX: ConfigParser.get() returns a string; the age check
            # needs an integer number of seconds.
            maxage = int(config.get('defaults', 'cache_max_age'))
            # BUGFIX: age is "now - mtime". The original computed
            # "mtime - now", which is always negative, so every cache
            # looked fresh forever.
            if (int(time.time()) - existing.st_mtime) <= maxage:
                return True
    return False
def get_host_info(host):
    ''' Get variables about a specific host

    Builds a dict of 'vmware_'-prefixed variables: the host's name, tag and
    parent name, plus every public attribute of its capability object
    (stringified). Attributes that cannot be read are skipped.
    '''
    details = {
        'vmware_name' : host.name,
        'vmware_tag' : host.tag,
        'vmware_parent': host.parent.name,
    }
    capability = host.capability
    for attr in capability.__dict__:
        if attr.startswith('_'):
            continue  # private/internal attribute
        try:
            details['vmware_' + attr] = str(capability[attr])
        except:
            continue  # unreadable capability value -- leave it out
    return details
def get_inventory(client, config):
    ''' Reads the inventory from cache or vmware api

    Returns the full Ansible dynamic-inventory structure as a JSON string:
    an 'all' group, per-script hw/vm groups, 'vmware_<tag>' groups, and
    per-host variables under '_meta'.
    '''
    if cache_available('inventory', config):
        inv = get_cache('inventory', config)
    else:
        inv = {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
        # BUGFIX: rstrip('.py') strips a *character set*, not the suffix
        # (e.g. "happy.py" -> "ha"); remove the extension explicitly.
        default_group = os.path.basename(sys.argv[0])
        if default_group.endswith('.py'):
            default_group = default_group[:-3]
        if config.has_option('defaults', 'guests_only'):
            # BUGFIX: get() returns a string and any non-empty string is
            # truthy (so "False" meant True); getboolean() parses
            # true/false/yes/no/1/0 correctly.
            guests_only = config.getboolean('defaults', 'guests_only')
        else:
            guests_only = True
        if not guests_only:
            if config.has_option('defaults', 'hw_group'):
                hw_group = config.get('defaults', 'hw_group')
            else:
                hw_group = default_group + '_hw'
            inv[hw_group] = []
        if config.has_option('defaults', 'vm_group'):
            vm_group = config.get('defaults', 'vm_group')
        else:
            vm_group = default_group + '_vm'
        inv[vm_group] = []
        # Loop through physical hosts:
        hosts = HostSystem.all(client)
        for host in hosts:
            if not guests_only:
                inv['all']['hosts'].append(host.name)
                inv[hw_group].append(host.name)
                if host.tag:
                    taggroup = 'vmware_' + host.tag
                    if taggroup in inv:
                        inv[taggroup].append(host.name)
                    else:
                        inv[taggroup] = [host.name]
                inv['_meta']['hostvars'][host.name] = get_host_info(host)
                # BUGFIX: this entry was cached under vm.name, which is not
                # defined yet at this point (NameError on the first host);
                # the physical host's info belongs under its own name.
                save_cache(host.name, inv['_meta']['hostvars'][host.name], config)
            # Loop through the guests on this host:
            for vm in host.vm:
                inv['all']['hosts'].append(vm.name)
                inv[vm_group].append(vm.name)
                if vm.tag:
                    taggroup = 'vmware_' + vm.tag
                    if taggroup in inv:
                        inv[taggroup].append(vm.name)
                    else:
                        inv[taggroup] = [vm.name]
                inv['_meta']['hostvars'][vm.name] = get_host_info(host)
                save_cache(vm.name, inv['_meta']['hostvars'][vm.name], config)
        save_cache('inventory', inv, config)
    return json.dumps(inv)
def get_single_host(client, config, hostname):
    ''' Return the inventory variables for one physical host or guest as a
    JSON string, served from the cache when a fresh entry exists.

    A guest match reports the info of its *physical host* (mirrors what
    get_inventory() stores for guests).
    '''
    if cache_available(hostname, config):
        return json.dumps(get_cache(hostname, config))

    inv = {}
    for host in HostSystem.all(client):  # TODO: figure out single host getter
        if hostname == host.name:
            inv = get_host_info(host)
            break
        for vm in host.vm:
            if hostname == vm.name:
                inv = get_host_info(host)
                break
    save_cache(hostname, inv, config)
    return json.dumps(inv)
if __name__ == '__main__':
    # Ansible dynamic-inventory protocol: no args -> full inventory JSON,
    # "--host <name>" -> variables for that single host.
    inventory = {}
    hostname = None

    if len(sys.argv) > 1:
        if sys.argv[1] == "--host":
            hostname = sys.argv[2]

    # Read config
    # First matching file wins: "<scriptname>.ini" next to the script, then
    # the generic "vmware.ini" (lets one script be symlinked per vCenter).
    # NOTE(review): rstrip('.py') strips a character set, not the suffix, so
    # script names ending in p/y/. chars look up the wrong ini -- confirm.
    config = ConfigParser.SafeConfigParser()
    for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']:
        if os.path.exists(configfilename):
            config.read(configfilename)
            break

    # Log in to vCenter/ESX; on failure fall back to serving cached data only.
    try:
        client = Client( config.get('auth','host'),
                         config.get('auth','user'),
                         config.get('auth','password'),
                        )
    except Exception, e:
        client = None
        #print >> STDERR "Unable to login (only cache avilable): %s", str(e)

    # acitually do the work
    if hostname is None:
        inventory = get_inventory(client, config)
    else:
        inventory = get_single_host(client, config, hostname)

    # return to ansible
    print inventory
| gpl-3.0 |
js0701/chromium-crosswalk | tools/perf/core/perf_benchmark.py | 8 | 2671 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from telemetry import benchmark
from telemetry.internal.browser import browser_finder
sys.path.append(os.path.join(os.path.dirname(__file__), '..',
'..', 'variations'))
import fieldtrial_util # pylint: disable=import-error
class PerfBenchmark(benchmark.Benchmark):
  """ Super class for all benchmarks in src/tools/perf/benchmarks directory.

  All the perf benchmarks must subclass from this one to to make sure that
  the field trial configs are activated for the browser during benchmark runs.
  For more info, see: https://goo.gl/4uvaVM
  """

  def SetExtraBrowserOptions(self, options):
    """Hook for subclasses that need to append extra browser options."""
    pass

  def CustomizeBrowserOptions(self, options):
    # Subclasses should override SetExtraBrowserOptions rather than this
    # method when all they need is to add browser options.
    super(PerfBenchmark, self).CustomizeBrowserOptions(options)

    # Always capture a screenshot when a page fails, for all perf benchmarks.
    options.take_screenshot_for_failed_page = True

    # Reference runs use an older binary; feeding it the *current* field
    # trial config could crash it, so field trials are only applied to
    # non-reference browsers.
    if options.browser_type != 'reference':
      options.AppendExtraBrowserArgs(
          self._GetVariationsBrowserArgs(options.finder_options))

    self.SetExtraBrowserOptions(options)

  @staticmethod
  def _FixupTargetOS(target_os):
    # Map telemetry OS names onto the names used by the field-trial
    # config files ("darwin" -> "mac", "win*" -> "win", "linux*" -> "linux").
    if target_os == 'darwin':
      return 'mac'
    for canonical in ('win', 'linux'):
      if target_os.startswith(canonical):
        return canonical
    return target_os

  def _GetVariationsBrowserArgs(self, finder_options):
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
      return []
    variations_dir = os.path.join(os.path.dirname(__file__), '..',
                                  '..', '..', 'testing', 'variations')
    config_name = ('fieldtrial_testing_config_%s.json' %
                   self._FixupTargetOS(possible_browser.target_os))
    return fieldtrial_util.GenerateArgs(
        os.path.join(variations_dir, config_name))

  @staticmethod
  def IsSvelte(possible_browser):
    """Returns whether a possible_browser is on a svelte Android build."""
    return (possible_browser.target_os == 'android'
            and possible_browser.platform.IsSvelte())
| bsd-3-clause |
JCA-Developpement/Odoo | addons/im_chat/im_chat.py | 10 | 20595 | # -*- coding: utf-8 -*-
import base64
import datetime
import logging
import time
import uuid
import random
import simplejson
import openerp
from openerp.http import request
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.addons.bus.bus import TIMEOUT
_logger = logging.getLogger(__name__)
DISCONNECTION_TIMER = TIMEOUT + 5
AWAY_TIMER = 600 # 10 minutes
#----------------------------------------------------------
# Models
#----------------------------------------------------------
class im_chat_conversation_state(osv.Model):
    """ Adds a state on the m2m between user and session.

    The state is the fold state of the chat window for that user:
    'open', 'folded' or 'closed'. The model is mapped directly onto the
    many2many relation table of im_chat.session <-> res.users.
    """
    _name = 'im_chat.conversation_state'
    _table = "im_chat_session_res_users_rel"
    _columns = {
        "state" : fields.selection([('open', 'Open'), ('folded', 'Folded'), ('closed', 'Closed')]),
        "session_id" : fields.many2one('im_chat.session', 'Session', required=True, ondelete="cascade"),
        "user_id" : fields.many2one('res.users', 'Users', required=True, ondelete="cascade"),
    }
    _defaults = {
        # new conversation windows start open
        "state" : 'open'
    }
class im_chat_session(osv.Model):
    """ Conversations.

    A session groups users (m2m) and messages (o2m); it is identified by a
    uuid so anonymous visitors can address it without knowing database ids.
    """
    _order = 'id desc'
    _name = 'im_chat.session'
    _rec_name = 'uuid'
    _columns = {
        'uuid': fields.char('UUID', size=50, select=True),
        'message_ids': fields.one2many('im_chat.message', 'to_id', 'Messages'),
        'user_ids': fields.many2many('res.users', 'im_chat_session_res_users_rel', 'session_id', 'user_id', "Session Users"),
        'session_res_users_rel': fields.one2many('im_chat.conversation_state', 'session_id', 'Relation Session Users'),
    }
    _defaults = {
        'uuid': lambda *args: '%s' % uuid.uuid4(),
    }

    def users_infos(self, cr, uid, ids, context=None):
        """ get the user infos for all the user in the session """
        # NOTE(review): returns inside the loop, so only the first id in
        # `ids` is considered -- callers always pass a single session.
        for session in self.pool["im_chat.session"].browse(cr, uid, ids, context=context):
            users_infos = self.pool["res.users"].read(cr, uid, [u.id for u in session.user_ids], ['id','name', 'im_status'], context=context)
            return users_infos

    def is_private(self, cr, uid, ids, context=None):
        # NOTE(review): same single-id pattern as users_infos above.
        for session_id in ids:
            """ return true if the session is private between users no external messages """
            # a session is "private" when it has no anonymous messages
            # (anonymous messages have no from_id)
            mess_ids = self.pool["im_chat.message"].search(cr, uid, [('to_id','=',session_id),('from_id','=',None)], context=context)
            return len(mess_ids) == 0

    def session_info(self, cr, uid, ids, context=None):
        """ get the session info/header of a given session """
        for session in self.browse(cr, uid, ids, context=context):
            info = {
                'uuid': session.uuid,
                'users': session.users_infos(),
                'state': 'open',
            }
            # add uid_state if available (fold state of the window for
            # this particular user; anonymous users have no state row)
            if uid:
                domain = [('user_id','=',uid), ('session_id','=',session.id)]
                uid_state = self.pool['im_chat.conversation_state'].search_read(cr, uid, domain, ['state'], context=context)
                if uid_state:
                    info['state'] = uid_state[0]['state']
            return info

    def session_get(self, cr, uid, user_to, context=None):
        """ returns the canonical session between 2 users, create it if needed """
        session_id = False
        if user_to:
            sids = self.search(cr, uid, [('user_ids','in', user_to),('user_ids', 'in', [uid])], context=context, limit=1)
            for sess in self.browse(cr, uid, sids, context=context):
                # reuse the session only if it is strictly the 2 users and
                # was never visited by an anonymous party
                if len(sess.user_ids) == 2 and sess.is_private():
                    session_id = sess.id
                    break
            else:
                # for-else: no suitable existing session -> create one
                session_id = self.create(cr, uid, { 'user_ids': [(6,0, (user_to, uid))] }, context=context)
        return self.session_info(cr, uid, [session_id], context=context)

    def update_state(self, cr, uid, uuid, state=None, context=None):
        """ modify the fold_state of the given session, and broadcast to himself (e.i. : to sync multiple tabs) """
        domain = [('user_id','=',uid), ('session_id.uuid','=',uuid)]
        ids = self.pool['im_chat.conversation_state'].search(cr, uid, domain, context=context)
        for sr in self.pool['im_chat.conversation_state'].browse(cr, uid, ids, context=context):
            # without an explicit state, toggle open <-> folded
            if not state:
                state = sr.state
                if sr.state == 'open':
                    state = 'folded'
                else:
                    state = 'open'
            self.pool['im_chat.conversation_state'].write(cr, uid, ids, {'state': state}, context=context)
            self.pool['bus.bus'].sendone(cr, uid, (cr.dbname, 'im_chat.session', uid), sr.session_id.session_info())

    def add_user(self, cr, uid, uuid, user_id, context=None):
        """ add the given user to the given session """
        sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
        for session in self.browse(cr, uid, sids, context=context):
            if user_id not in [u.id for u in session.user_ids]:
                self.write(cr, uid, [session.id], {'user_ids': [(4, user_id)]}, context=context)
                # notify the all the channel users and anonymous channel
                notifications = []
                for channel_user_id in session.user_ids:
                    info = self.session_info(cr, channel_user_id.id, [session.id], context=context)
                    notifications.append([(cr.dbname, 'im_chat.session', channel_user_id.id), info])
                # Anonymous are not notified when a new user is added : cannot exec session_info as uid = None
                info = self.session_info(cr, openerp.SUPERUSER_ID, [session.id], context=context)
                notifications.append([session.uuid, info])
                self.pool['bus.bus'].sendmany(cr, uid, notifications)
                # send a message to the conversation
                user = self.pool['res.users'].read(cr, uid, user_id, ['name'], context=context)
                self.pool["im_chat.message"].post(cr, uid, uid, session.uuid, "meta", user['name'] + " joined the conversation.", context=context)

    def get_image(self, cr, uid, uuid, user_id, context=None):
        """ get the avatar of a user in the given session """
        #default image (1x1 transparent GIF, base64)
        image_b64 = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        # get the session -- the user's avatar is only exposed if that user
        # actually belongs to the requested session
        if user_id:
            session_id = self.pool["im_chat.session"].search(cr, uid, [('uuid','=',uuid), ('user_ids','in', user_id)])
            if session_id:
                # get the image of the user
                res = self.pool["res.users"].read(cr, uid, [user_id], ["image_small"])[0]
                if res["image_small"]:
                    image_b64 = res["image_small"]
        return image_b64
class im_chat_message(osv.Model):
    """ Sessions messsages type can be 'message' or 'meta'.
        For anonymous message, the from_id is False.
        Messages are sent to a session not to users.
    """
    _name = 'im_chat.message'
    _order = "id desc"
    _columns = {
        'create_date': fields.datetime('Create Date', required=True, select=True),
        'from_id': fields.many2one('res.users', 'Author'),
        'to_id': fields.many2one('im_chat.session', 'Session To', required=True, select=True, ondelete='cascade'),
        'type': fields.selection([('message','Message'), ('meta','Meta')], 'Type'),
        'message': fields.char('Message'),
    }
    _defaults = {
        'type' : 'message',
    }

    def init_messages(self, cr, uid, context=None):
        """ get unread messages and old messages received less than AWAY_TIMER
            ago and the session_info for open or folded window
        """
        # get the message since the AWAY_TIMER
        threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
        threshold = threshold.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        domain = [('to_id.user_ids', 'in', [uid]), ('create_date','>',threshold)]
        # get the message since the last poll of the user
        # (if a presence row exists, its last_poll is a tighter bound than
        # the AWAY_TIMER window)
        presence_ids = self.pool['im_chat.presence'].search(cr, uid, [('user_id', '=', uid)], context=context)
        if presence_ids:
            presence = self.pool['im_chat.presence'].browse(cr, uid, presence_ids, context=context)[0]
            threshold = presence.last_poll
            domain.append(('create_date','>',threshold))
        messages = self.search_read(cr, uid, domain, ['from_id','to_id','create_date','type','message'], order='id asc', context=context)
        # get the session of the messages and the not-closed ones
        session_ids = map(lambda m: m['to_id'][0], messages)
        domain = [('user_id','=',uid), '|', ('state','!=','closed'), ('session_id', 'in', session_ids)]
        session_rels_ids = self.pool['im_chat.conversation_state'].search(cr, uid, domain, context=context)
        # re-open the session where a message have been recieve recently
        session_rels = self.pool['im_chat.conversation_state'].browse(cr, uid, session_rels_ids, context=context)
        reopening_session = []
        notifications = []
        for sr in session_rels:
            si = sr.session_id.session_info()
            si['state'] = sr.state
            if sr.state == 'closed':
                # closed windows with fresh messages come back as folded
                si['state'] = 'folded'
                reopening_session.append(sr.id)
            notifications.append([(cr.dbname,'im_chat.session', uid), si])
        for m in messages:
            notifications.append([(cr.dbname,'im_chat.session', uid), m])
        self.pool['im_chat.conversation_state'].write(cr, uid, reopening_session, {'state': 'folded'}, context=context)
        return notifications

    def post(self, cr, uid, from_uid, uuid, message_type, message_content, context=None):
        """ post and broadcast a message, return the message id """
        message_id = False
        Session = self.pool['im_chat.session']
        session_ids = Session.search(cr, uid, [('uuid','=',uuid)], context=context)
        notifications = []
        for session in Session.browse(cr, uid, session_ids, context=context):
            # build the new message
            vals = {
                "from_id": from_uid,
                "to_id": session.id,
                "type": message_type,
                "message": message_content,
            }
            # save it
            message_id = self.create(cr, uid, vals, context=context)
            # broadcast it to channel (anonymous users) and users_ids
            data = self.read(cr, uid, [message_id], ['from_id','to_id','create_date','type','message'], context=context)[0]
            notifications.append([uuid, data])
            for user in session.user_ids:
                notifications.append([(cr.dbname, 'im_chat.session', user.id), data])
        self.pool['bus.bus'].sendmany(cr, uid, notifications)
        return message_id
class im_chat_presence(osv.Model):
    """ im_chat_presence status can be : online, away or offline.
        This model is a one2one, but is not attached to res_users to avoid database concurrence errors
    """
    _name = 'im_chat.presence'
    _columns = {
        'user_id' : fields.many2one('res.users', 'Users', required=True, select=True),
        # last time the browser long-polled for this user
        'last_poll': fields.datetime('Last Poll'),
        # last time the user actually interacted (drives the away state)
        'last_presence': fields.datetime('Last Presence'),
        'status' : fields.selection([('online','Online'), ('away','Away'), ('offline','Offline')], 'IM Status'),
    }
    _defaults = {
        'last_poll' : fields.datetime.now,
        'last_presence' : fields.datetime.now,
        'status' : 'offline'
    }
    _sql_constraints = [('im_chat_user_status_unique','unique(user_id)', 'A user can only have one IM status.')]

    def update(self, cr, uid, presence=True, context=None):
        """ register the poll, and change its im status if necessary. It also notify the Bus if the status has changed. """
        presence_ids = self.search(cr, uid, [('user_id', '=', uid)], context=context)
        presences = self.browse(cr, uid, presence_ids, context=context)
        # set the default values
        send_notification = True
        vals = {
            'last_poll': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'status' : presences and presences[0].status or 'offline'
        }
        # update the user or a create a new one
        if not presences:
            vals['status'] = 'online'
            vals['user_id'] = uid
            self.create(cr, uid, vals, context=context)
        else:
            if presence:
                # the browser reported activity -> refresh presence timestamp
                vals['last_presence'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                vals['status'] = 'online'
            else:
                # no activity: flip to away once last_presence is older
                # than AWAY_TIMER seconds
                threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
                if datetime.datetime.strptime(presences[0].last_presence, DEFAULT_SERVER_DATETIME_FORMAT) < threshold:
                    vals['status'] = 'away'
            send_notification = presences[0].status != vals['status']
            # write only if the last_poll is passed TIMEOUT, or if the status has changed
            delta = datetime.datetime.now() - datetime.datetime.strptime(presences[0].last_poll, DEFAULT_SERVER_DATETIME_FORMAT)
            if (delta > datetime.timedelta(seconds=TIMEOUT) or send_notification):
                self.write(cr, uid, presence_ids, vals, context=context)
        # avoid TransactionRollbackError
        cr.commit()
        # notify if the status has changed
        if send_notification:
            self.pool['bus.bus'].sendone(cr, uid, (cr.dbname,'im_chat.presence'), {'id': uid, 'im_status': vals['status']})
        # gc : disconnect the users having a too old last_poll. 1 on 100 chance to do it.
        if random.random() < 0.01:
            self.check_users_disconnection(cr, uid, context=context)
        return True

    def check_users_disconnection(self, cr, uid, context=None):
        """ disconnect the users having a too old last_poll """
        dt = (datetime.datetime.now() - datetime.timedelta(0, DISCONNECTION_TIMER)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        presence_ids = self.search(cr, uid, [('last_poll', '<', dt), ('status' , '!=', 'offline')], context=context)
        self.write(cr, uid, presence_ids, {'status': 'offline'}, context=context)
        # broadcast the new (offline) status of every disconnected user
        presences = self.browse(cr, uid, presence_ids, context=context)
        notifications = []
        for presence in presences:
            notifications.append([(cr.dbname,'im_chat.presence'), {'id': presence.user_id.id, 'im_status': presence.status}])
        self.pool['bus.bus'].sendmany(cr, uid, notifications)
        return True
class res_users(osv.Model):
    _inherit = "res.users"

    def _get_im_status(self, cr, uid, ids, fields, arg, context=None):
        """ function computing the im_status field of the users """
        # default everyone to offline; users with a presence row get its status
        r = dict((i, 'offline') for i in ids)
        status_ids = self.pool['im_chat.presence'].search(cr, uid, [('user_id', 'in', ids)], context=context)
        status = self.pool['im_chat.presence'].browse(cr, uid, status_ids, context=context)
        for s in status:
            r[s.user_id.id] = s.status
        return r

    _columns = {
        'im_status' : fields.function(_get_im_status, type="char", string="IM Status"),
    }

    def im_search(self, cr, uid, name, limit=20, context=None):
        """ search users with a name and return its id, name and im_status

        Results are ranked in three passes until `limit` is reached:
        online employees first, then other online users, then everyone else.
        """
        result = [];
        # find the employee group
        group_employee = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_user')[1]

        where_clause_base = " U.active = 't' "
        query_params = ()
        if name:
            where_clause_base += " AND P.name ILIKE %s "
            query_params = query_params + ('%'+name+'%',)

        # first query to find online employee
        cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                FROM im_chat_presence S
                JOIN res_users U ON S.user_id = U.id
                JOIN res_partner P ON P.id = U.partner_id
                WHERE  '''+where_clause_base+'''
                    AND U.id != %s
                    AND EXISTS (SELECT 1 FROM res_groups_users_rel G WHERE G.gid = %s AND G.uid = U.id)
                    AND S.status = 'online'
                ORDER BY P.name
                LIMIT %s
            ''', query_params + (uid, group_employee, limit))
        result = result + cr.dictfetchall()

        # second query to find other online people
        # (exclude everyone already found; the current uid rides along in the
        # NOT IN tuple so it is excluded here too)
        if(len(result) < limit):
            cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                    FROM im_chat_presence S
                    JOIN res_users U ON S.user_id = U.id
                    JOIN res_partner P ON P.id = U.partner_id
                    WHERE  '''+where_clause_base+'''
                        AND U.id NOT IN %s
                        AND S.status = 'online'
                    ORDER BY P.name
                    LIMIT %s
                ''', query_params + (tuple([u["id"] for u in result]) + (uid,), limit-len(result)))
            result = result + cr.dictfetchall()

        # third query to find all other people (LEFT JOIN: users without a
        # presence row are reported as 'offline' via COALESCE)
        if(len(result) < limit):
            cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                    FROM res_users U
                    LEFT JOIN im_chat_presence S ON S.user_id = U.id
                    LEFT JOIN res_partner P ON P.id = U.partner_id
                    WHERE  '''+where_clause_base+'''
                        AND U.id NOT IN %s
                    ORDER BY P.name
                    LIMIT %s
                ''', query_params + (tuple([u["id"] for u in result]) + (uid,), limit-len(result)))
            result = result + cr.dictfetchall()
        return result
#----------------------------------------------------------
# Controllers
#----------------------------------------------------------
class Controller(openerp.addons.bus.bus.Controller):
    # Extends the bus long-polling controller with im_chat channels and
    # HTTP/JSON endpoints for the chat widget.

    def _poll(self, dbname, channels, last, options):
        # logged-in users refresh their presence on every poll and listen
        # on their private im_chat.session channel
        if request.session.uid:
            registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
            registry.get('im_chat.presence').update(cr, uid, options.get('im_presence', False), context=context)
            ## For performance issue, the real time status notification is disabled. This means a change of status are still braoadcasted
            ## but not received by anyone. Otherwise, all listening user restart their longpolling at the same time and cause a 'ConnectionPool Full Error'
            ## since there is not enought cursors for everyone. Now, when a user open his list of users, an RPC call is made to update his user status list.
            ##channels.append((request.db,'im_chat.presence'))
            # channel to receive message
            channels.append((request.db,'im_chat.session', request.uid))
        return super(Controller, self)._poll(dbname, channels, last, options)

    @openerp.http.route('/im_chat/init', type="json", auth="none")
    def init(self):
        # returns the pending notifications (unread/recent messages and
        # window states) used to restore the chat UI on page load
        registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
        notifications = registry['im_chat.message'].init_messages(cr, uid, context=context)
        return notifications

    @openerp.http.route('/im_chat/post', type="json", auth="none")
    def post(self, uuid, message_type, message_content):
        registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
        # execute the post method as SUPERUSER_ID
        message_id = registry["im_chat.message"].post(cr, openerp.SUPERUSER_ID, uid, uuid, message_type, message_content, context=context)
        return message_id

    @openerp.http.route(['/im_chat/image/<string:uuid>/<string:user_id>'], type='http', auth="none")
    def image(self, uuid, user_id):
        registry, cr, context, uid = request.registry, request.cr, request.context, request.session.uid
        # get the image
        Session = registry.get("im_chat.session")
        image_b64 = Session.get_image(cr, openerp.SUPERUSER_ID, uuid, simplejson.loads(user_id), context)
        # built the response
        image_data = base64.b64decode(image_b64)
        headers = [('Content-Type', 'image/png')]
        headers.append(('Content-Length', len(image_data)))
        return request.make_response(image_data, headers)
# vim:et:
| agpl-3.0 |
pjg101/SickRage | sickbeard/metadata/mediabrowser.py | 5 | 22802 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
import os
import re
import six
import sickbeard
from sickbeard import helpers, logger
from sickbeard.metadata import generic
from sickrage.helper.common import dateFormat, replace_extension
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex, ShowNotFoundException
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class MediaBrowserMetadata(generic.GenericMetadata):
"""
Metadata generation class for Media Browser 2.x/3.x - Standard Mode.
The following file structure is used:
show_root/series.xml (show metadata)
show_root/folder.jpg (poster)
show_root/backdrop.jpg (fanart)
show_root/Season ##/folder.jpg (season thumb)
show_root/Season ##/filename.ext (*)
show_root/Season ##/metadata/filename.xml (episode metadata)
show_root/Season ##/metadata/filename.jpg (episode thumb)
"""
    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        """Configure the MediaBrowser metadata provider.

        Each boolean flag enables generation of the corresponding
        metadata artifact; all are forwarded unchanged to the generic
        base class.
        """
        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)

        # MediaBrowser-specific file naming conventions
        self.name = 'MediaBrowser'

        self._ep_nfo_extension = 'xml'
        self._show_metadata_filename = 'series.xml'

        self.fanart_name = "backdrop.jpg"
        self.poster_name = "folder.jpg"

        # web-ui metadata template (display strings only, not real paths)
        self.eg_show_metadata = "series.xml"
        self.eg_episode_metadata = "Season##\\metadata\\<i>filename</i>.xml"
        self.eg_fanart = "backdrop.jpg"
        self.eg_poster = "folder.jpg"
        self.eg_banner = "banner.jpg"
        self.eg_episode_thumbnails = "Season##\\metadata\\<i>filename</i>.jpg"
        self.eg_season_posters = "Season##\\folder.jpg"
        self.eg_season_banners = "Season##\\banner.jpg"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"
    # Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        """Always returns (None, None, None): MediaBrowser metadata is not
        used for show identification lookups."""
        # while show metadata is generated, it is not supported for our lookup
        return None, None, None
def create_season_all_poster(self, show_obj):
pass
def create_season_all_banner(self, show_obj):
pass
    def get_episode_file_path(self, ep_obj):
        """
        Returns a full show dir/metadata/episode.xml path for MediaBrowser
        episode metadata files

        ep_obj: a TVEpisode object to get the path for

        Returns '' (empty string) when the episode's media file does not
        exist, because the XML filename is derived from the media filename.
        """
        if ek(os.path.isfile, ep_obj.location):
            # Same basename as the media file, with the extension swapped to .xml
            xml_file_name = replace_extension(ek(os.path.basename, ep_obj.location), self._ep_nfo_extension)
            metadata_dir_name = ek(os.path.join, ek(os.path.dirname, ep_obj.location), 'metadata')
            xml_file_path = ek(os.path.join, metadata_dir_name, xml_file_name)
        else:
            logger.log("Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
            return ''
        return xml_file_path
    @staticmethod
    def get_episode_thumb_path(ep_obj):
        """
        Returns a full show dir/metadata/episode.jpg path for MediaBrowser
        episode thumbs.

        ep_obj: a TVEpisode object to get the path from

        NOTE(review): returns None on a missing media file, whereas the
        sibling get_episode_file_path returns '' — callers must handle both
        falsy values; confirm before unifying.
        """
        if ek(os.path.isfile, ep_obj.location):
            tbn_file_name = replace_extension(ek(os.path.basename, ep_obj.location), 'jpg')
            metadata_dir_name = ek(os.path.join, ek(os.path.dirname, ep_obj.location), 'metadata')
            tbn_file_path = ek(os.path.join, metadata_dir_name, tbn_file_name)
        else:
            return None
        return tbn_file_path
    @staticmethod
    def get_season_poster_path(show_obj, season):
        """
        Season thumbs for MediaBrowser go in Show Dir/Season X/folder.jpg

        If no season folder exists, None is returned
        """
        # Only consider subdirectories of the show folder.
        dir_list = [x for x in ek(os.listdir, show_obj.location) if
                    ek(os.path.isdir, ek(os.path.join, show_obj.location, x))]
        # Matched case-insensitively below; group(1) is the season number.
        season_dir_regex = r'^Season\s+(\d+)$'
        season_dir = None
        for cur_dir in dir_list:
            # MediaBrowser 1.x only supports 'Specials'
            # MediaBrowser 2.x looks to only support 'Season 0'
            # MediaBrowser 3.x looks to mimic KODI/Plex support
            if season == 0 and cur_dir == "Specials":
                season_dir = cur_dir
                break
            match = re.match(season_dir_regex, cur_dir, re.I)
            if not match:
                continue
            cur_season = int(match.group(1))
            if cur_season == season:
                season_dir = cur_dir
                break
        if not season_dir:
            logger.log("Unable to find a season dir for season " + str(season), logger.DEBUG)
            return None
        logger.log("Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)
        return ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')
    @staticmethod
    def get_season_banner_path(show_obj, season):
        """
        Season thumbs for MediaBrowser go in Show Dir/Season X/banner.jpg

        If no season folder exists, None is returned

        NOTE(review): this duplicates get_season_poster_path except for the
        final filename; a shared helper would remove the duplication.
        """
        dir_list = [x for x in ek(os.listdir, show_obj.location) if
                    ek(os.path.isdir, ek(os.path.join, show_obj.location, x))]
        # Matched case-insensitively below; group(1) is the season number.
        season_dir_regex = r'^Season\s+(\d+)$'
        season_dir = None
        for cur_dir in dir_list:
            # MediaBrowser 1.x only supports 'Specials'
            # MediaBrowser 2.x looks to only support 'Season 0'
            # MediaBrowser 3.x looks to mimic KODI/Plex support
            if season == 0 and cur_dir == "Specials":
                season_dir = cur_dir
                break
            match = re.match(season_dir_regex, cur_dir, re.I)
            if not match:
                continue
            cur_season = int(match.group(1))
            if cur_season == season:
                season_dir = cur_dir
                break
        if not season_dir:
            logger.log("Unable to find a season dir for season " + str(season), logger.DEBUG)
            return None
        logger.log("Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG)
        return ek(os.path.join, show_obj.location, season_dir, 'banner.jpg')
    def _show_data(self, show_obj):
        """
        Creates an elementTree XML structure for a MediaBrowser-style series.xml
        returns the resulting data object.

        show_obj: a TVShow instance to create the NFO for

        Returns an etree.ElementTree on success, False when the indexer has
        incomplete info for the show, and re-raises indexer lookup errors.
        """
        indexer_lang = show_obj.lang
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
        lINDEXER_API_PARMS['actors'] = True
        lINDEXER_API_PARMS['language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
        if show_obj.dvdorder:
            lINDEXER_API_PARMS['dvdorder'] = True
        t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
        tv_node = etree.Element("Series")
        try:
            myShow = t[int(show_obj.indexerid)]
        except sickbeard.indexer_shownotfound:
            logger.log("Unable to find show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi(
                show_obj.indexer).name + ", skipping it", logger.ERROR)
            raise
        except sickbeard.indexer_error:
            logger.log(
                "" + sickbeard.indexerApi(show_obj.indexer).name + " is down, can't use its data to make the NFO",
                logger.ERROR)
            raise
        # check for title and id
        if not (getattr(myShow, 'seriesname', None) and getattr(myShow, 'id', None)):
            logger.log("Incomplete info for show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi(
                show_obj.indexer).name + ", skipping it")
            return False
        if getattr(myShow, 'id', None):
            indexerid = etree.SubElement(tv_node, "id")
            indexerid.text = str(myShow['id'])
        if getattr(myShow, 'seriesname', None):
            SeriesName = etree.SubElement(tv_node, "SeriesName")
            SeriesName.text = myShow['seriesname']
        if getattr(myShow, 'status', None):
            Status = etree.SubElement(tv_node, "Status")
            Status.text = myShow['status']
        if getattr(myShow, 'network', None):
            Network = etree.SubElement(tv_node, "Network")
            Network.text = myShow['network']
        if getattr(myShow, 'airs_time', None):
            Airs_Time = etree.SubElement(tv_node, "Airs_Time")
            Airs_Time.text = myShow['airs_time']
        if getattr(myShow, 'airs_dayofweek', None):
            Airs_DayOfWeek = etree.SubElement(tv_node, "Airs_DayOfWeek")
            Airs_DayOfWeek.text = myShow['airs_dayofweek']
        # NOTE(review): FirstAired is always emitted (possibly empty), unlike
        # the other optional tags — presumably intentional for MediaBrowser;
        # confirm before changing.
        FirstAired = etree.SubElement(tv_node, "FirstAired")
        if getattr(myShow, 'firstaired', None):
            FirstAired.text = myShow['firstaired']
        if getattr(myShow, 'contentrating', None):
            # The same rating is written under three tag spellings —
            # presumably different MediaBrowser versions read different tags.
            ContentRating = etree.SubElement(tv_node, "ContentRating")
            ContentRating.text = myShow['contentrating']
            MPAARating = etree.SubElement(tv_node, "MPAARating")
            MPAARating.text = myShow['contentrating']
            certification = etree.SubElement(tv_node, "certification")
            certification.text = myShow['contentrating']
        MetadataType = etree.SubElement(tv_node, "Type")
        MetadataType.text = "Series"
        if getattr(myShow, 'overview', None):
            Overview = etree.SubElement(tv_node, "Overview")
            Overview.text = myShow['overview']
        if getattr(myShow, 'firstaired', None):
            PremiereDate = etree.SubElement(tv_node, "PremiereDate")
            PremiereDate.text = myShow['firstaired']
        if getattr(myShow, 'rating', None):
            Rating = etree.SubElement(tv_node, "Rating")
            Rating.text = myShow['rating']
        if getattr(myShow, 'firstaired', None):
            try:
                # Derive ProductionYear from the first-aired date; a malformed
                # date simply omits the tag.
                year_text = str(datetime.datetime.strptime(myShow['firstaired'], dateFormat).year)
                if year_text:
                    ProductionYear = etree.SubElement(tv_node, "ProductionYear")
                    ProductionYear.text = year_text
            except Exception:
                pass
        if getattr(myShow, 'runtime', None):
            RunningTime = etree.SubElement(tv_node, "RunningTime")
            RunningTime.text = myShow['runtime']
            Runtime = etree.SubElement(tv_node, "Runtime")
            Runtime.text = myShow['runtime']
        if getattr(myShow, 'imdb_id', None):
            # Three IMDB tag spellings for the same id, as with ContentRating.
            imdb_id = etree.SubElement(tv_node, "IMDB_ID")
            imdb_id.text = myShow['imdb_id']
            imdb_id = etree.SubElement(tv_node, "IMDB")
            imdb_id.text = myShow['imdb_id']
            imdb_id = etree.SubElement(tv_node, "IMDbId")
            imdb_id.text = myShow['imdb_id']
        if getattr(myShow, 'zap2it_id', None):
            Zap2ItId = etree.SubElement(tv_node, "Zap2ItId")
            Zap2ItId.text = myShow['zap2it_id']
        # Genres are pipe-delimited in the indexer payload; emit both a list
        # of <Genre> children and a single flattened <Genre> tag.
        if getattr(myShow, 'genre', None) and isinstance(myShow["genre"], six.string_types):
            Genres = etree.SubElement(tv_node, "Genres")
            for genre in myShow['genre'].split('|'):
                if genre.strip():
                    cur_genre = etree.SubElement(Genres, "Genre")
                    cur_genre.text = genre.strip()
            Genre = etree.SubElement(tv_node, "Genre")
            Genre.text = "|".join([x.strip() for x in myShow["genre"].split('|') if x.strip()])
        if getattr(myShow, 'network', None):
            Studios = etree.SubElement(tv_node, "Studios")
            Studio = etree.SubElement(Studios, "Studio")
            Studio.text = myShow['network']
        if getattr(myShow, '_actors', None):
            Persons = etree.SubElement(tv_node, "Persons")
            for actor in myShow['_actors']:
                # Skip actors without a usable name.
                if not ('name' in actor and actor['name'].strip()):
                    continue
                cur_actor = etree.SubElement(Persons, "Person")
                cur_actor_name = etree.SubElement(cur_actor, "Name")
                cur_actor_name.text = actor['name'].strip()
                cur_actor_type = etree.SubElement(cur_actor, "Type")
                cur_actor_type.text = "Actor"
                if 'role' in actor and actor['role'].strip():
                    cur_actor_role = etree.SubElement(cur_actor, "Role")
                    cur_actor_role.text = actor['role'].strip()
        helpers.indentXML(tv_node)
        data = etree.ElementTree(tv_node)
        return data
    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for a MediaBrowser style episode.xml
        and returns the resulting data object.

        ep_obj: a TVEpisode instance (plus its relatedEps) to create the NFO for

        Returns an etree.ElementTree on success, None when indexer data for an
        episode is missing, and False when the indexer cannot be reached.
        """
        # The root episode is written first; related (multi-part) episodes are
        # merged into the same <Item> afterwards.
        eps_to_write = [ep_obj] + ep_obj.relatedEps
        persons_dict = {
            'Director': [],
            'GuestStar': [],
            'Writer': []
        }
        indexer_lang = ep_obj.show.lang
        try:
            lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()
            lINDEXER_API_PARMS['actors'] = True
            lINDEXER_API_PARMS['language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
            if ep_obj.show.dvdorder:
                lINDEXER_API_PARMS['dvdorder'] = True
            t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
            myShow = t[ep_obj.show.indexerid]
        except sickbeard.indexer_shownotfound as e:
            raise ShowNotFoundException(e.message)
        except sickbeard.indexer_error as e:
            logger.log("Unable to connect to " + sickbeard.indexerApi(
                ep_obj.show.indexer).name + " while creating meta files - skipping - " + ex(e), logger.ERROR)
            return False
        rootNode = etree.Element("Item")
        # write an MediaBrowser XML containing info for all matching episodes
        for curEpToWrite in eps_to_write:
            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
                logger.log("Metadata writer is unable to find episode {0:d}x{1:d} of {2} on {3}..."
                           "has it been removed? Should I delete from db?".format(
                               curEpToWrite.season, curEpToWrite.episode, curEpToWrite.show.name,
                               sickbeard.indexerApi(ep_obj.show.indexer).name))
                return None
            if curEpToWrite == ep_obj:
                # root (or single) episode
                # Specials with no firstaired date get the placeholder date
                # 0001-01-01 (ordinal 1) so the completeness check below passes.
                if ep_obj.season == 0 and not getattr(myEp, 'firstaired', None):
                    myEp['firstaired'] = str(datetime.date.fromordinal(1))
                # Bail out if the indexer lacks the essentials for an NFO.
                if not (getattr(myEp, 'episodename', None) and getattr(myEp, 'firstaired', None)):
                    return None
                episode = rootNode
                # NOTE(review): EpisodeName/Overview/EpisodeNumberEnd are bound
                # here and reused in the related-episodes branch below; if the
                # root episode has no name/description but a related one does,
                # that branch would hit an unbound local — confirm and guard.
                if curEpToWrite.name:
                    EpisodeName = etree.SubElement(episode, "EpisodeName")
                    EpisodeName.text = curEpToWrite.name
                EpisodeNumber = etree.SubElement(episode, "EpisodeNumber")
                EpisodeNumber.text = str(ep_obj.episode)
                if ep_obj.relatedEps:
                    EpisodeNumberEnd = etree.SubElement(episode, "EpisodeNumberEnd")
                    EpisodeNumberEnd.text = str(curEpToWrite.episode)
                SeasonNumber = etree.SubElement(episode, "SeasonNumber")
                SeasonNumber.text = str(curEpToWrite.season)
                if not ep_obj.relatedEps and getattr(myEp, 'absolute_number', None):
                    absolute_number = etree.SubElement(episode, "absolute_number")
                    absolute_number.text = str(myEp['absolute_number'])
                # Ordinal 1 is the sentinel for "no airdate".
                if curEpToWrite.airdate != datetime.date.fromordinal(1):
                    FirstAired = etree.SubElement(episode, "FirstAired")
                    FirstAired.text = str(curEpToWrite.airdate)
                MetadataType = etree.SubElement(episode, "Type")
                MetadataType.text = "Episode"
                if curEpToWrite.description:
                    Overview = etree.SubElement(episode, "Overview")
                    Overview.text = curEpToWrite.description
                if not ep_obj.relatedEps:
                    if getattr(myEp, 'rating', None):
                        Rating = etree.SubElement(episode, "Rating")
                        Rating.text = myEp['rating']
                    if getattr(myShow, 'imdb_id', None):
                        # Three IMDB tag spellings for compatibility.
                        IMDB_ID = etree.SubElement(episode, "IMDB_ID")
                        IMDB_ID.text = myShow['imdb_id']
                        IMDB = etree.SubElement(episode, "IMDB")
                        IMDB.text = myShow['imdb_id']
                        IMDbId = etree.SubElement(episode, "IMDbId")
                        IMDbId.text = myShow['imdb_id']
                indexerid = etree.SubElement(episode, "id")
                indexerid.text = str(curEpToWrite.indexerid)
                # Persons is filled with show actors here; directors/guest
                # stars/writers are appended after the loop.
                Persons = etree.SubElement(episode, "Persons")
                if getattr(myShow, '_actors', None):
                    for actor in myShow['_actors']:
                        if not ('name' in actor and actor['name'].strip()):
                            continue
                        cur_actor = etree.SubElement(Persons, "Person")
                        cur_actor_name = etree.SubElement(cur_actor, "Name")
                        cur_actor_name.text = actor['name'].strip()
                        cur_actor_type = etree.SubElement(cur_actor, "Type")
                        cur_actor_type.text = "Actor"
                        if 'role' in actor and actor['role'].strip():
                            cur_actor_role = etree.SubElement(cur_actor, "Role")
                            cur_actor_role.text = actor['role'].strip()
                Language = etree.SubElement(episode, "Language")
                try:
                    Language.text = myEp['language']
                except Exception:
                    Language.text = sickbeard.INDEXER_DEFAULT_LANGUAGE  # tvrage api doesn't provide language so we must assume a value here
                thumb = etree.SubElement(episode, "filename")
                # TODO: See what this is needed for.. if its still needed
                # just write this to the NFO regardless of whether it actually exists or not
                # note: renaming files after nfo generation will break this, tough luck
                thumb_text = self.get_episode_thumb_path(ep_obj)
                if thumb_text:
                    thumb.text = thumb_text
            else:
                # append data from (if any) related episodes
                EpisodeNumberEnd.text = str(curEpToWrite.episode)
                if curEpToWrite.name:
                    if not EpisodeName.text:
                        EpisodeName.text = curEpToWrite.name
                    else:
                        EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name
                if curEpToWrite.description:
                    if not Overview.text:
                        Overview.text = curEpToWrite.description
                    else:
                        Overview.text = Overview.text + "\r" + curEpToWrite.description
            # collect all directors, guest stars and writers
            if getattr(myEp, 'director', None):
                persons_dict['Director'] += [x.strip() for x in myEp['director'].split('|') if x.strip()]
            if getattr(myEp, 'gueststars', None):
                persons_dict['GuestStar'] += [x.strip() for x in myEp['gueststars'].split('|') if x.strip()]
            if getattr(myEp, 'writer', None):
                persons_dict['Writer'] += [x.strip() for x in myEp['writer'].split('|') if x.strip()]
        # fill in Persons section with collected directors, guest starts and writers
        for person_type, names in six.iteritems(persons_dict):
            # remove doubles (note: set() also loses the original order)
            names = list(set(names))
            for cur_name in names:
                Person = etree.SubElement(Persons, "Person")
                cur_person_name = etree.SubElement(Person, "Name")
                cur_person_name.text = cur_name
                cur_person_type = etree.SubElement(Person, "Type")
                cur_person_type.text = person_type
        helpers.indentXML(rootNode)
        data = etree.ElementTree(rootNode)
        return data
# present a standard "interface" from the module: consumers import the module
# and instantiate `metadata_class` without caring which provider backs it.
metadata_class = MediaBrowserMetadata
| gpl-3.0 |
kvar/ansible | lib/ansible/module_utils/network/nxos/facts/lacp_interfaces/lacp_interfaces.py | 21 | 3978 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The nxos lacp_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.nxos.argspec.lacp_interfaces.lacp_interfaces import Lacp_interfacesArgs
from ansible.module_utils.network.nxos.utils.utils import get_interface_type
class Lacp_interfacesFacts(object):
""" The nxos lacp_interfaces fact class
"""
    def __init__(self, module, subspec='config', options='options'):
        """Build the empty facts skeleton from the module argspec.

        :param module: the AnsibleModule instance
        :param subspec: top-level argspec key holding the resource spec
        :param options: nested key under `subspec` holding the suboptions
        """
        self._module = module
        self.argument_spec = Lacp_interfacesArgs.argument_spec
        # Deep-copy so navigating/trimming the spec never mutates the argspec.
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec
        # Dict with every spec key present and set to None, filled in later.
        self.generated_spec = utils.generate_dict(facts_argument_spec)
    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lacp_interfaces
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        objs = []
        if not data:
            data = connection.get('show running-config | section ^interface')
        # Each element is one interface section; the leading keyword was
        # consumed by the split, so sections start with the interface name.
        resources = data.split('interface ')
        for resource in resources:
            # Only interfaces with at least one lacp-related line are relevant.
            if resource and re.search(r'lacp', resource):
                obj = self.render_config(self.generated_spec, resource)
                # len > 1 means something beyond the mandatory 'name' key was set.
                if obj and len(obj.keys()) > 1:
                    objs.append(obj)
        # Drop any stale entry before re-populating.
        ansible_facts['ansible_network_resources'].pop('lacp_interfaces', None)
        facts = {}
        if objs:
            facts['lacp_interfaces'] = []
            # Validate against the argspec, then strip empty/None values.
            params = utils.validate_config(self.argument_spec, {'config': objs})
            for cfg in params['config']:
                facts['lacp_interfaces'].append(utils.remove_empties(cfg))
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
match = re.search(r'^(\S+)', conf)
intf = match.group(1)
if get_interface_type(intf) == 'unknown':
return {}
config['name'] = intf
config['port_priority'] = utils.parse_conf_arg(conf, 'lacp port-priority')
config['rate'] = utils.parse_conf_arg(conf, 'lacp rate')
config['mode'] = utils.parse_conf_arg(conf, 'mode')
suspend_individual = re.search(r'no lacp suspend-individual', conf)
if suspend_individual:
config['suspend_individual'] = False
max_links = utils.parse_conf_arg(conf, 'lacp max-bundle')
if max_links:
config['links']['max'] = max_links
min_links = utils.parse_conf_arg(conf, 'lacp min-links')
if min_links:
config['links']['min'] = min_links
graceful = re.search(r'no lacp graceful-convergence', conf)
if graceful:
config['convergence']['gracefule'] = False
vpc = re.search(r'lacp vpc-convergence', conf)
if vpc:
config['convergence']['vpc'] = True
return utils.remove_empties(config)
| gpl-3.0 |
andreabrambilla/libres | python/tests/res/enkf/test_es_update.py | 1 | 4384 | from tests import ResTest
from res.test import ErtTestContext
from ecl.util.util import BoolVector
from res.enkf import NodeId
from res.enkf import ESUpdate
from res.enkf import ErtRunContext
from res.enkf import EnkfNode
class ESUpdateTest(ResTest):
    def test_create(self):
        """ESUpdate module lookup: unknown module names are rejected and the
        built-in STD_ENKF module resolves."""
        config = self.createTestPath("local/custom_kw/mini_config")
        with ErtTestContext("python/enkf/data/custom_kw_simulated", config) as context:
            ert = context.getErt()
            es_update = ESUpdate( ert )

            self.assertFalse( es_update.hasModule( "NO_NOT_THIS_MODULE" ))
            # Unknown module name must raise, not return a dummy module.
            with self.assertRaises(KeyError):
                m = es_update.getModule( "STD_ENKF_XXX" )

            module = es_update.getModule( "STD_ENKF" )
    def test_update(self):
        """Smoother update from default_0 into a target filesystem must change
        every SNAKE_OIL_PARAM gen_kw value for realization 0."""
        config = self.createTestPath("local/snake_oil/snake_oil.ert")
        with ErtTestContext("update_test", config) as context:
            ert = context.getErt()
            es_update = ESUpdate( ert )
            fsm = ert.getEnkfFsManager()
            sim_fs = fsm.getFileSystem("default_0")
            target_fs = fsm.getFileSystem("target")
            # All realizations active.
            mask = BoolVector( initial_size = ert.getEnsembleSize(), default_value = True)
            run_context = ErtRunContext.ensemble_smoother_update( sim_fs, target_fs )
            es_update.smootherUpdate( run_context )

            conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
            sim_node = EnkfNode( conf )
            target_node = EnkfNode( conf )

            # Compare realization 0, report step 0 before/after the update.
            node_id = NodeId(0,0)
            sim_node.load(sim_fs, node_id)
            target_node.load(target_fs, node_id)

            sim_gen_kw = sim_node.asGenKw()
            target_gen_kw = target_node.asGenKw()

            # Test that an update has actually taken place
            for index in range(len(sim_gen_kw)):
                self.assertNotEqual(sim_gen_kw[index], target_gen_kw[index])
    def test_localization(self):
        """With a local config restricting the update to two parameter indices,
        only those indices may change; all others must be untouched."""
        config = self.createTestPath("local/snake_oil/snake_oil.ert")
        with ErtTestContext("localization_test", config) as context:
            ert = context.getErt()
            es_update = ESUpdate(ert)
            fsm = ert.getEnkfFsManager()
            sim_fs = fsm.getFileSystem("default_0")
            target_fs = fsm.getFileSystem("target")

            # perform localization: only indices 1 and 2 of SNAKE_OIL_PARAM
            # are active, driven by the single WOPR_OP1_72 observation.
            localized_idxs = (1, 2)
            local_config = ert.getLocalConfig()
            local_config.clear()
            dataset = local_config.createDataset('DATASET_SCALAR_LOCA')
            dataset.addNode('SNAKE_OIL_PARAM')
            active_list = dataset.getActiveList('SNAKE_OIL_PARAM')
            for i in localized_idxs:
                active_list.addActiveIndex(i)
            obs = local_config.createObsdata('OBSSET_LOCA')
            obs.addNode('WOPR_OP1_72')
            ministep = local_config.createMinistep('MINISTEP_LOCA')
            ministep.attachDataset(dataset)
            ministep.attachObsset(obs)
            updatestep = local_config.getUpdatestep()
            updatestep.attachMinistep(ministep)

            # Run enseble smoother
            mask = BoolVector(initial_size=ert.getEnsembleSize(), default_value=True)
            model_config = ert.getModelConfig()
            path_fmt = model_config.getRunpathFormat()
            jobname_fmt = model_config.getJobnameFormat()
            subst_list = None
            run_context = ErtRunContext.ensemble_smoother(sim_fs, target_fs, mask, path_fmt, jobname_fmt, subst_list, 0)
            es_update.smootherUpdate(run_context)

            conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
            sim_node = EnkfNode(conf)
            target_node = EnkfNode(conf)

            node_id = NodeId(0, 0)
            sim_node.load(sim_fs, node_id)
            target_node.load(target_fs, node_id)

            sim_gen_kw = sim_node.asGenKw()
            target_gen_kw = target_node.asGenKw()

            # Test that the localized values has been updated
            for i in localized_idxs:
                self.assertNotEqual(sim_gen_kw[i], target_gen_kw[i])

            # test that all the other values are left unchanged
            non_localized_idxs = (
                x for x in range(len(sim_gen_kw)) if x not in localized_idxs)
            for i in non_localized_idxs:
                self.assertEqual(sim_gen_kw[i], target_gen_kw[i])
| gpl-3.0 |
BackupGGCode/python-for-android | python3-alpha/python3-src/Lib/importlib/test/test_abc.py | 51 | 2426 | from importlib import abc
from importlib import machinery
import inspect
import unittest
class InheritanceTests:

    """Test that the specified class is a subclass/superclass of the expected
    classes.

    Mixin: subclasses also inherit unittest.TestCase and set `subclasses`
    and/or `superclasses`. The class under test is looked up in importlib.abc
    by the *name* of the concrete test class (e.g. class Finder tests
    abc.Finder).
    """

    subclasses = []
    superclasses = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A test class that declares neither list is a configuration mistake.
        assert self.subclasses or self.superclasses, self.__class__
        # Name-mangled to _InheritanceTests__test, so it is shared machinery
        # invisible to the concrete subclasses.
        self.__test = getattr(abc, self.__class__.__name__)

    def test_subclasses(self):
        # Test that the expected subclasses inherit.
        for subclass in self.subclasses:
            self.assertTrue(issubclass(subclass, self.__test),
                "{0} is not a subclass of {1}".format(subclass, self.__test))

    def test_superclasses(self):
        # Test that the class inherits from the expected superclasses.
        for superclass in self.superclasses:
            self.assertTrue(issubclass(self.__test, superclass),
                "{0} is not a superclass of {1}".format(superclass, self.__test))
class Finder(InheritanceTests, unittest.TestCase):
    # The concrete import-machinery finders must all derive from abc.Finder.
    subclasses = [machinery.BuiltinImporter, machinery.FrozenImporter,
                  machinery.PathFinder]
class Loader(InheritanceTests, unittest.TestCase):
    # abc.PyLoader is expected to be a subclass of abc.Loader.
    subclasses = [abc.PyLoader]
class ResourceLoader(InheritanceTests, unittest.TestCase):
    # abc.ResourceLoader must itself inherit from abc.Loader.
    superclasses = [abc.Loader]
class InspectLoader(InheritanceTests, unittest.TestCase):
    # abc.InspectLoader sits under abc.Loader and above the listed loaders.
    superclasses = [abc.Loader]
    subclasses = [abc.PyLoader, machinery.BuiltinImporter,
                  machinery.FrozenImporter]
class ExecutionLoader(InheritanceTests, unittest.TestCase):
    # abc.ExecutionLoader extends abc.InspectLoader; abc.PyLoader extends it.
    superclasses = [abc.InspectLoader]
    subclasses = [abc.PyLoader]
class SourceLoader(InheritanceTests, unittest.TestCase):
    # abc.SourceLoader combines the resource and execution loader ABCs.
    superclasses = [abc.ResourceLoader, abc.ExecutionLoader]
class PyLoader(InheritanceTests, unittest.TestCase):
    # Deprecated PEP 302 helper ABC; checks its full superclass chain.
    superclasses = [abc.Loader, abc.ResourceLoader, abc.ExecutionLoader]
class PyPycLoader(InheritanceTests, unittest.TestCase):
    # Deprecated bytecode-aware variant of abc.PyLoader.
    superclasses = [abc.PyLoader]
def test_main():
    """Run every InheritanceTests-derived TestCase defined in this module.

    Collects the test classes by scanning module globals so new checks only
    need a class definition, not registration.
    """
    from test.support import run_unittest
    classes = []
    for class_ in globals().values():
        if (inspect.isclass(class_) and
                issubclass(class_, unittest.TestCase) and
                issubclass(class_, InheritanceTests)):
            classes.append(class_)
    run_unittest(*classes)
if __name__ == '__main__':
test_main()
| apache-2.0 |
cyberphox/MissionPlanner | Lib/site-packages/numpy/lib/tests/test_twodim_base.py | 54 | 11192 | """ Test functions for matrix module
"""
from numpy.testing import *
from numpy import ( arange, rot90, add, fliplr, flipud, zeros, ones, eye,
array, diag, histogram2d, tri, mask_indices, triu_indices,
triu_indices_from, tril_indices, tril_indices_from )
import numpy as np
from numpy.compat import asbytes, asbytes_nested
def get_mat(n):
    """Return the n x n integer matrix whose (i, j) entry is i + j."""
    idx = arange(n)
    return add.outer(idx, idx)
class TestEye(TestCase):
    """Tests for eye(): identity construction, diagonal offsets k,
    rectangular shapes, dtype handling and edge clipping."""
    def test_basic(self):
        assert_equal(eye(4),array([[1,0,0,0],
                                   [0,1,0,0],
                                   [0,0,1,0],
                                   [0,0,0,1]]))
        assert_equal(eye(4,dtype='f'),array([[1,0,0,0],
                                             [0,1,0,0],
                                             [0,0,1,0],
                                             [0,0,0,1]],'f'))
        assert_equal(eye(3) == 1, eye(3,dtype=bool))
    def test_diag(self):
        # Positive k shifts the ones above the main diagonal, negative below.
        assert_equal(eye(4,k=1),array([[0,1,0,0],
                                       [0,0,1,0],
                                       [0,0,0,1],
                                       [0,0,0,0]]))
        assert_equal(eye(4,k=-1),array([[0,0,0,0],
                                        [1,0,0,0],
                                        [0,1,0,0],
                                        [0,0,1,0]]))
    def test_2d(self):
        assert_equal(eye(4,3),array([[1,0,0],
                                     [0,1,0],
                                     [0,0,1],
                                     [0,0,0]]))
        assert_equal(eye(3,4),array([[1,0,0,0],
                                     [0,1,0,0],
                                     [0,0,1,0]]))
    def test_diag2d(self):
        assert_equal(eye(3,4,k=2),array([[0,0,1,0],
                                         [0,0,0,1],
                                         [0,0,0,0]]))
        assert_equal(eye(4,3,k=-2),array([[0,0,0],
                                          [0,0,0],
                                          [1,0,0],
                                          [0,1,0]]))
    def test_eye_bounds(self):
        # Offsets at or beyond the matrix edge must clip to all-zero output.
        assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
        assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
        assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
        assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
        assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
        assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
        assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
        assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
        assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
    def test_strings(self):
        # String dtype: diagonal is '1', off-diagonal the empty string.
        assert_equal(eye(2, 2, dtype='S3'),
                     asbytes_nested([['1', ''], ['', '1']]))
    def test_bool(self):
        assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
class TestDiag(TestCase):
    """Tests for diag(): vector -> matrix, matrix -> diagonal, offsets,
    memory order, and input-rank validation."""
    def test_vector(self):
        # diag(vector) builds a square matrix with the vector on diagonal k.
        vals = (100 * arange(5)).astype('l')
        b = zeros((5, 5))
        for k in range(5):
            b[k, k] = vals[k]
        assert_equal(diag(vals), b)
        b = zeros((7, 7))
        c = b.copy()
        for k in range(5):
            b[k, k + 2] = vals[k]
            c[k + 2, k] = vals[k]
        assert_equal(diag(vals, k=2), b)
        assert_equal(diag(vals, k=-2), c)
    def test_matrix(self, vals=None):
        # diag(matrix) extracts diagonal k; also reused by test_fortran_order.
        if vals is None:
            vals = (100 * get_mat(5) + 1).astype('l')
        b = zeros((5,))
        for k in range(5):
            b[k] = vals[k,k]
        assert_equal(diag(vals), b)
        b = b * 0
        for k in range(3):
            b[k] = vals[k, k + 2]
        assert_equal(diag(vals, 2), b[:3])
        for k in range(3):
            b[k] = vals[k + 2, k]
        assert_equal(diag(vals, -2), b[:3])
    def test_fortran_order(self):
        # Extraction must be layout-independent (Fortran-ordered input).
        vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
        self.test_matrix(vals)
    def test_diag_bounds(self):
        # Offsets outside a rectangular matrix yield empty diagonals.
        A = [[1, 2], [3, 4], [5, 6]]
        assert_equal(diag(A, k=2), [])
        assert_equal(diag(A, k=1), [2])
        assert_equal(diag(A, k=0), [1, 4])
        assert_equal(diag(A, k=-1), [3, 6])
        assert_equal(diag(A, k=-2), [5])
        assert_equal(diag(A, k=-3), [])
    def test_failure(self):
        # Inputs of rank > 2 are rejected.
        self.assertRaises(ValueError, diag, [[[1]]])
class TestFliplr(TestCase):
    """Tests for fliplr(): column reversal; 1-D input must be rejected."""
    def test_basic(self):
        self.assertRaises(ValueError, fliplr, ones(4))
        a = get_mat(4)
        b = a[:,::-1]
        assert_equal(fliplr(a),b)
        a = [[0,1,2],
             [3,4,5]]
        b = [[2,1,0],
             [5,4,3]]
        assert_equal(fliplr(a),b)
class TestFlipud(TestCase):
    """Tests for flipud(): row reversal."""
    def test_basic(self):
        a = get_mat(4)
        b = a[::-1,:]
        assert_equal(flipud(a),b)
        a = [[0,1,2],
             [3,4,5]]
        b = [[3,4,5],
             [0,1,2]]
        assert_equal(flipud(a),b)
class TestRot90(TestCase):
    """Tests for rot90(): k rotations with period 4, and >2-D shapes."""
    def test_basic(self):
        self.assertRaises(ValueError, rot90, ones(4))
        a = [[0,1,2],
             [3,4,5]]
        # b1..b4 are the four distinct rotations; rot90 is periodic in k
        # with period 4, so each is checked over a range of equivalent k.
        b1 = [[2,5],
              [1,4],
              [0,3]]
        b2 = [[5,4,3],
              [2,1,0]]
        b3 = [[3,0],
              [4,1],
              [5,2]]
        b4 = [[0,1,2],
              [3,4,5]]
        for k in range(-3,13,4):
            assert_equal(rot90(a,k=k),b1)
        for k in range(-2,13,4):
            assert_equal(rot90(a,k=k),b2)
        for k in range(-1,13,4):
            assert_equal(rot90(a,k=k),b3)
        for k in range(0,13,4):
            assert_equal(rot90(a,k=k),b4)
    def test_axes(self):
        # Only the first two axes rotate; trailing axes are untouched.
        a = ones((50,40,3))
        assert_equal(rot90(a).shape,(40,50,3))
class TestHistogram2d(TestCase):
    """Tests for histogram2d(): explicit edges, bin counts + ranges,
    normalization, and out-of-range data."""
    def test_simple(self):
        x = array([ 0.41702200, 0.72032449, 0.00011437481, 0.302332573, 0.146755891])
        y = array([ 0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
        xedges = np.linspace(0,1,10)
        yedges = np.linspace(0,1,10)
        H = histogram2d(x, y, (xedges, yedges))[0]
        # answer is laid out [y][x], hence the H.T comparisons below.
        answer = array([[0, 0, 0, 1, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [1, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        assert_array_equal(H.T, answer)
        # A single edge array is reused for both dimensions.
        H = histogram2d(x, y, xedges)[0]
        assert_array_equal(H.T, answer)
        H,xedges,yedges = histogram2d(range(10),range(10))
        assert_array_equal(H, eye(10,10))
        assert_array_equal(xedges, np.linspace(0,9,11))
        assert_array_equal(yedges, np.linspace(0,9,11))
    def test_asym(self):
        # Asymmetric bin counts plus explicit range, normalized.
        # NOTE(review): the 'normed' keyword is the historical spelling and
        # was later removed from numpy in favor of 'density'.
        x = array([1, 1, 2, 3, 4, 4, 4, 5])
        y = array([1, 3, 2, 0, 1, 2, 3, 4])
        H, xed, yed = histogram2d(x,y, (6, 5), range = [[0,6],[0,5]], normed=True)
        answer = array([[0.,0,0,0,0],
                        [0,1,0,1,0],
                        [0,0,1,0,0],
                        [1,0,0,0,0],
                        [0,1,1,1,0],
                        [0,0,0,0,1]])
        assert_array_almost_equal(H, answer/8., 3)
        assert_array_equal(xed, np.linspace(0,6,7))
        assert_array_equal(yed, np.linspace(0,5,6))
    def test_norm(self):
        # Normalization with unequal bin widths.
        x = array([1,2,3,1,2,3,1,2,3])
        y = array([1,1,1,2,2,2,3,3,3])
        H, xed, yed = histogram2d(x,y,[[1,2,3,5], [1,2,3,5]], normed=True)
        answer=array([[1,1,.5],
                      [1,1,.5],
                      [.5,.5,.25]])/9.
        assert_array_almost_equal(H, answer, 3)
    def test_all_outliers(self):
        # Every sample outside the given range: all bins stay zero.
        r = rand(100)+1.
        H, xed, yed = histogram2d(r, r, (4, 5), range=([0,1], [0,1]))
        assert_array_equal(H, 0)
class TestTri(TestCase):
    """Tests for tri(): lower-triangular matrix of ones, dtype handling."""
    def test_dtype(self):
        out = array([[1,0,0],
                     [1,1,0],
                     [1,1,1]])
        assert_array_equal(tri(3),out)
        assert_array_equal(tri(3,dtype=bool),out.astype(bool))
class TestMaskIndices(TestCase):
    """Tests for mask_indices() with np.triu, with and without offset.

    NOTE(review): uses nose-style yield tests, which pytest no longer runs.
    """
    def test_mask_indices(self):
        # simple test without offset
        iu = mask_indices(3, np.triu)
        a = np.arange(9).reshape(3, 3)
        yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
        # Now with an offset
        iu1 = mask_indices(3, np.triu, 1)
        yield (assert_array_equal, a[iu1], array([1, 2, 5]))
class TestTrilIndices(TestCase):
    """Tests for tril_indices(): reading and fancy-assigning through the
    lower-triangular index arrays, with and without offset.

    NOTE(review): uses nose-style yield tests, which pytest no longer runs.
    """
    def test_tril_indices(self):
        # indices without and with offset
        il1 = tril_indices(4)
        il2 = tril_indices(4, 2)
        a = np.array([[1, 2, 3, 4],
                      [5, 6, 7, 8],
                      [9, 10, 11, 12],
                      [13, 14, 15, 16]])
        # indexing:
        yield (assert_array_equal, a[il1],
               array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16]) )
        # And for assigning values:
        a[il1] = -1
        yield (assert_array_equal, a,
               array([[-1, 2, 3, 4],
                      [-1, -1, 7, 8],
                      [-1, -1, -1, 12],
                      [-1, -1, -1, -1]]) )
        # These cover almost the whole array (two diagonals right of the main one):
        a[il2] = -10
        yield (assert_array_equal, a,
               array([[-10, -10, -10, 4],
                      [-10, -10, -10, -10],
                      [-10, -10, -10, -10],
                      [-10, -10, -10, -10]]) )
class TestTrilIndicesFrom(TestCase):
    """tril_indices_from() must reject non-2-D and non-square arrays."""
    def test_exceptions(self):
        yield assert_raises(ValueError, tril_indices_from, np.ones((2,)))
        yield assert_raises(ValueError, tril_indices_from, np.ones((2,2,2)))
        yield assert_raises(ValueError, tril_indices_from, np.ones((2,3)))
class TestTriuIndices(TestCase):
    """Tests for triu_indices(): upper-triangular counterpart of the
    tril_indices tests above.

    NOTE(review): uses nose-style yield tests, which pytest no longer runs.
    """
    def test_triu_indices(self):
        iu1 = triu_indices(4)
        iu2 = triu_indices(4, 2)
        a = np.array([[1, 2, 3, 4],
                      [5, 6, 7, 8],
                      [9, 10, 11, 12],
                      [13, 14, 15, 16]])
        # Both for indexing:
        yield (assert_array_equal, a[iu1],
               array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
        # And for assigning values:
        a[iu1] = -1
        yield (assert_array_equal, a,
               array([[-1, -1, -1, -1],
                      [ 5, -1, -1, -1],
                      [ 9, 10, -1, -1],
                      [13, 14, 15, -1]]) )
        # These cover almost the whole array (two diagonals right of the main one):
        a[iu2] = -10
        yield ( assert_array_equal, a,
                array([[ -1, -1, -10, -10],
                       [ 5, -1, -1, -10],
                       [ 9, 10, -1, -1],
                       [ 13, 14, 15, -1]]) )
class TestTriuIndicesFrom(TestCase):
    """triu_indices_from() must reject non-2-D and non-square arrays."""
    def test_exceptions(self):
        yield assert_raises(ValueError, triu_indices_from, np.ones((2,)))
        yield assert_raises(ValueError, triu_indices_from, np.ones((2,2,2)))
        yield assert_raises(ValueError, triu_indices_from, np.ones((2,3)))
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
sudosurootdev/external_chromium_org | chrome/common/extensions/docs/server2/docs_server_utils.py | 86 | 1712 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from base64 import b64encode
from hashlib import sha1
import os
def FormatKey(key):
  '''Normalize a key by making sure it has a .html extension, and convert any
  '.'s to '_'s.
  '''
  # Strip a trailing '.html' (if present) before replacing dots, so the
  # extension itself survives intact.
  base = key[:-len('.html')] if key.endswith('.html') else key
  return '%s.html' % base.replace('.', '_')
def SanitizeAPIName(name):
  '''Return |name| minus its extension, with path separators turned to '_'s.

  Any name mentioning 'experimental' is rewritten so that a single
  'experimental_' prefix comes first (e.g. 'sub/experimental_tabs.json'
  becomes 'experimental_sub_tabs').
  '''
  stem = os.path.splitext(name)[0]
  sanitized = stem.replace(os.sep, '_')
  if 'experimental' not in sanitized:
    return sanitized
  return 'experimental_' + sanitized.replace('experimental_', '')
def StringIdentity(first, *more):
  '''Return a short (8 character) identity hash of the given string(s).

  Additional strings are folded into the running digest one at a time, so
  the result depends on both the values and their order.
  '''
  def _digest(string):
    # base64 keeps the identity printable; sha1 is used for uniqueness,
    # not security.
    return b64encode(sha1(string).digest())

  identity = _digest(first)
  for extra in more:
    identity = _digest(identity + extra)
  return identity[:8]
def MarkFirst(dicts):
  '''Adds a property 'first' == True to the first element in a list of dicts.

  No-op for an empty list.
  '''
  # Idiomatic truthiness test instead of len(dicts) > 0.
  if dicts:
    dicts[0]['first'] = True
def MarkLast(dicts):
  '''Adds a property 'last' == True to the last element in a list of dicts.

  No-op for an empty list.
  '''
  # Idiomatic truthiness test instead of len(dicts) > 0.
  if dicts:
    dicts[-1]['last'] = True
def MarkFirstAndLast(dicts):
  '''Marks the first and last element in a list of dicts.

  A single-element list gets both the 'first' and 'last' properties.
  '''
  for mark in (MarkFirst, MarkLast):
    mark(dicts)
def ToUnicode(data):
  '''Returns the str |data| as a unicode object. It's expected to be utf8, but
  there are also latin-1 encodings in there for some reason. Fall back to that.
  '''
  try:
    return unicode(data, 'utf-8')
  except UnicodeDecodeError:
    # Was a bare `except:` which also hid TypeError/KeyboardInterrupt; only a
    # decode failure should trigger the fallback. latin-1 can decode any byte
    # sequence, so this cannot raise for str input.
    return unicode(data, 'latin-1')
| bsd-3-clause |
thaim/ansible | test/units/modules/network/fortios/test_fortios_wireless_controller_hotspot20_h2qp_osu_provider.py | 21 | 11048 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
    from ansible.modules.network.fortios import fortios_wireless_controller_hotspot20_h2qp_osu_provider
except ImportError:
    # The module under test pulls in optional FortiOS dependencies; skip the
    # whole file rather than erroring out when they are unavailable.
    pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Automatically stub out the FortiOS Connection class for every test."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wireless_controller_hotspot20_h2qp_osu_provider.Connection')
    return connection_class_mock


# NOTE(review): this passes the fixture *function* itself to the handler, not
# a mock instance; the handler argument is only forwarded by these tests, so
# its value is never inspected — confirm if FortiOSHandler ever dereferences
# the connection here.
fos_instance = FortiOSHandler(connection_mock)
def test_wireless_controller_hotspot20_h2qp_osu_provider_creation(mocker):
    """state=present issues set() with underscore keys mapped to hyphens."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'wireless_controller_hotspot20_h2qp_osu_provider': {'icon': 'test_value_3',
                                                            'name': 'default_name_4',
                                                            'osu_method': 'oma-dm',
                                                            'osu_nai': 'test_value_6',
                                                            'server_uri': 'test_value_7',
                                                            },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_h2qp_osu_provider.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    expected_data = {'icon': 'test_value_3',
                     'name': 'default_name_4',
                     'osu-method': 'oma-dm',
                     'osu-nai': 'test_value_6',
                     'server-uri': 'test_value_7',
                     }

    set_method_mock.assert_called_with('wireless-controller.hotspot20', 'h2qp-osu-provider', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_wireless_controller_hotspot20_h2qp_osu_provider_creation_fails(mocker):
    """A failing set() response surfaces as an error with no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'wireless_controller_hotspot20_h2qp_osu_provider': {'icon': 'test_value_3',
                                                            'name': 'default_name_4',
                                                            'osu_method': 'oma-dm',
                                                            'osu_nai': 'test_value_6',
                                                            'server_uri': 'test_value_7',
                                                            },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_h2qp_osu_provider.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    expected_data = {'icon': 'test_value_3',
                     'name': 'default_name_4',
                     'osu-method': 'oma-dm',
                     'osu-nai': 'test_value_6',
                     'server-uri': 'test_value_7',
                     }

    set_method_mock.assert_called_with('wireless-controller.hotspot20', 'h2qp-osu-provider', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_wireless_controller_hotspot20_h2qp_osu_provider_removal(mocker):
    """state=absent issues delete() keyed on the object's mkey."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'wireless_controller_hotspot20_h2qp_osu_provider': {'icon': 'test_value_3',
                                                            'name': 'default_name_4',
                                                            'osu_method': 'oma-dm',
                                                            'osu_nai': 'test_value_6',
                                                            'server_uri': 'test_value_7',
                                                            },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_h2qp_osu_provider.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    delete_method_mock.assert_called_with('wireless-controller.hotspot20', 'h2qp-osu-provider', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_wireless_controller_hotspot20_h2qp_osu_provider_deletion_fails(mocker):
    """A failing delete() response surfaces as an error with no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'wireless_controller_hotspot20_h2qp_osu_provider': {'icon': 'test_value_3',
                                                            'name': 'default_name_4',
                                                            'osu_method': 'oma-dm',
                                                            'osu_nai': 'test_value_6',
                                                            'server_uri': 'test_value_7',
                                                            },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_h2qp_osu_provider.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    delete_method_mock.assert_called_with('wireless-controller.hotspot20', 'h2qp-osu-provider', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_wireless_controller_hotspot20_h2qp_osu_provider_idempotent(mocker):
    """A 404 from set() means the object already matches: error-free, unchanged."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'wireless_controller_hotspot20_h2qp_osu_provider': {'icon': 'test_value_3',
                                                            'name': 'default_name_4',
                                                            'osu_method': 'oma-dm',
                                                            'osu_nai': 'test_value_6',
                                                            'server_uri': 'test_value_7',
                                                            },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_h2qp_osu_provider.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    expected_data = {'icon': 'test_value_3',
                     'name': 'default_name_4',
                     'osu-method': 'oma-dm',
                     'osu-nai': 'test_value_6',
                     'server-uri': 'test_value_7',
                     }

    set_method_mock.assert_called_with('wireless-controller.hotspot20', 'h2qp-osu-provider', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_wireless_controller_hotspot20_h2qp_osu_provider_filter_foreign_attributes(mocker):
    """Attributes not in the module schema are dropped from the payload."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'wireless_controller_hotspot20_h2qp_osu_provider': {
            'random_attribute_not_valid': 'tag', 'icon': 'test_value_3',
            'name': 'default_name_4',
            'osu_method': 'oma-dm',
            'osu_nai': 'test_value_6',
            'server_uri': 'test_value_7',
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_h2qp_osu_provider.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    # 'random_attribute_not_valid' must not appear in the data sent to set().
    expected_data = {'icon': 'test_value_3',
                     'name': 'default_name_4',
                     'osu-method': 'oma-dm',
                     'osu-nai': 'test_value_6',
                     'server-uri': 'test_value_7',
                     }

    set_method_mock.assert_called_with('wireless-controller.hotspot20', 'h2qp-osu-provider', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| mit |
ardoi/datajuicer | lsjuicer/ui/widgets/smallwidgets.py | 1 | 16929 | import time
from PyQt5 import QtGui as QG
from PyQt5 import QtWidgets as QW
from PyQt5 import QtCore as QC
import numpy as n
from lsjuicer.inout.db.sqlbase import dbmaster
from lsjuicer.resources import cm
class FramePlayer(QW.QWidget):
    """Play/Stop control that steps an externally-owned frame counter.

    The widget holds no frames itself; playback is driven entirely through
    the three callbacks supplied by the owner (current-frame getter, setter,
    and max-frame getter).
    """

    def __init__(self, frame_get_func, frame_set_func, frame_max_func, parent = None):
        super(FramePlayer, self).__init__(parent)
        # Callbacks into the owner of the frames.
        self.frame_get_func = frame_get_func
        self.frame_set_func = frame_set_func
        self.frame_max_func = frame_max_func
        layout = QW.QHBoxLayout()
        self.setLayout(layout)
        play_pb = QW.QPushButton("Play")
        play_pb.setCheckable(True)
        self.play_pb = play_pb
        # Stop is only enabled while playing.
        stop_pb = QW.QPushButton("Stop")
        stop_pb.setEnabled(False)
        self.stop_pb = stop_pb
        layout.addWidget(play_pb)
        layout.addWidget(stop_pb)
        play_pb.clicked.connect(self.play_frames)
        stop_pb.clicked.connect(self.stop_play)
        hlayout = QW.QHBoxLayout()
        hlayout.addWidget(QW.QLabel("FPS"))
        # Editable combo so the user can type an arbitrary FPS value.
        fps_selector = QW.QComboBox(self)
        fps_selector.setInputMethodHints(QC.Qt.ImhDigitsOnly)
        fps_selector.setEditable(True)
        self.fps_selector=fps_selector
        fpss = [5, 10, 25, 50, 100, 150]
        for fps in fpss:
            fps_selector.addItem(str(fps))
        fps_selector.setCurrentIndex(2)
        fps_selector.currentIndexChanged.connect(self.change_fps)
        hlayout.addWidget(fps_selector)
        layout.addLayout(hlayout)
        # Timestamp of the last rendered frame; None means "not started yet".
        self.last_frame_time = None
        self.skipped_frames = 0

    @property
    def fps(self):
        """Requested playback rate, parsed from the combo box text."""
        return int(self.fps_selector.currentText())

    @property
    def playing(self):
        """True while playback is running (Play button is disabled then)."""
        return not self.play_pb.isEnabled()

    def play_frames(self):
        """Start (or restart) playback using a QTimer at the requested FPS."""
        # Restart from the beginning if we are already at the end.
        if self.frame_get_func() == self.frame_max_func():
            self.frame_set_func(0)
        self.last_frame_time = None
        self.timer = QC.QTimer(self)
        self.timer.timeout.connect(self.increase_frame)
        self.timer.start(1./self.fps*1000) #in msec
        self.stop_pb.setEnabled(True)
        self.play_pb.setEnabled(False)

    def change_fps(self, new_fps):
        """Restart playback so a new FPS value takes effect immediately."""
        if self.playing:
            # Stop/Play cycle recreates the timer with the new interval.
            self.stop_pb.click()
            self.play_pb.click()
        else:
            return

    def increase_frame(self):
        """Timer callback: advance the frame, skipping to keep real time.

        Rendering is capped at max_real_fps; when the requested FPS is
        higher, intermediate frames are skipped so wall-clock speed is
        preserved.
        """
        max_real_fps = 25.0
        #min_frame_dt = 1./max_real_fps
        if self.frame_get_func() == self.frame_max_func():
            self.stop_play()
            self.play_pb.setChecked(False)
        if self.last_frame_time is None or \
                time.time() - self.last_frame_time > 1./max_real_fps:
            if self.last_frame_time is None:
                skipped = 1
            else:
                # Number of frames that should have elapsed since the last
                # render at the requested FPS (at least one).
                skipped = max(1,round((time.time()-self.last_frame_time)*self.fps))
            self.frame_set_func(self.frame_get_func() + int(skipped))
            self.last_frame_time = time.time()
        else:
            self.skipped_frames += 1

    def stop_play(self):
        """Halt the timer and restore the buttons to their idle state."""
        self.timer.stop()
        self.stop_pb.setEnabled(False)
        self.play_pb.setEnabled(True)
        self.play_pb.setChecked(False)
class Tasker(QW.QWidget):
    """Row of three mutually-exclusive task buttons (Files/Analysis/Config).

    The buttons are checkable and grouped in a QButtonGroup so only one can
    be active at a time; 'Files' starts checked and 'Analysis' disabled.
    """

    def __init__(self, parent = None):
        super(Tasker, self).__init__(parent)
        self.setLayout( QW.QHBoxLayout())
        self.filesButton = QW.QPushButton('Files')
        self.filesButton.setCheckable(True)
        self.filesButton.setChecked(True)
        self.analysisButton = QW.QPushButton('Analysis')
        self.analysisButton.setCheckable(True)
        self.analysisButton.setEnabled(False)
        self.confButton = QW.QPushButton('Configuration')
        self.confButton.setCheckable(True)
        # Exclusive group: checking one button unchecks the others.
        bg = QW.QButtonGroup(self)
        bg.addButton(self.filesButton)
        bg.addButton(self.analysisButton)
        bg.addButton(self.confButton)
        self.layout().addWidget(self.filesButton)
        self.layout().addWidget(self.analysisButton)
        self.layout().addWidget(self.confButton)
class SparkResultWidget(QW.QFrame):
    """Read-only grid of labels showing statistics for a single spark.

    Column 0 holds the bold stat names, column 1 the value labels (all
    initialised to '0'); row 0 spans both columns with the spark name.
    The widget starts hidden until results are available.
    """

    def __init__(self, parent = None):
        super(SparkResultWidget, self).__init__(parent)
        self.setFrameShape(QW.QFrame.StyledPanel)
        stats_layout = QW.QGridLayout()
        stats_layout.addWidget(QW.QLabel('<b>Amplitude:</b>'),1,0,QC.Qt.AlignRight)
        #stats_layout.addWidget(QG.QLabel('<b>dF/F0:</b>'),1,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>FWHM:</b>'),3,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>FDHM:</b>'),4,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>Decay rate:</b>'),5,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>Rise time:</b>'),6,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>Time @ max:</b>'),7,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>Location @ max:</b>'),8,0,QC.Qt.AlignRight)
        stats_layout.addWidget(QW.QLabel('<b>Baseline:</b>'),9,0,QC.Qt.AlignRight)
        stats_layout.setSpacing(0)
        self.setLayout(stats_layout)
        # Value labels; callers update their text when results arrive.
        self.amp_label = QW.QLabel('0')
        self.risetime_label = QW.QLabel('0')
        self.FWHM_label = QW.QLabel('0')
        self.FDHM_label = QW.QLabel('0')
        self.decay_label = QW.QLabel('0')
        self.time_at_max_label = QW.QLabel('0')
        self.location_at_max_label = QW.QLabel('0')
        self.baseline_label = QW.QLabel('0')
        self.spark_name_label = QW.QLabel('')
        self.spark_name_label.setStyleSheet("""
                QLabel{
                background-color:black;
                color:white;
                font-weight:bold;
                }
                """)
        self.spark_name_label.setAlignment(QC.Qt.AlignCenter)
        stats_layout.addWidget(self.spark_name_label,0,0,1,2)
        stats_layout.addWidget(self.amp_label, 1,1)
        #stats_layout.addWidget(self.dFF0_label, 1,1)
        stats_layout.addWidget(self.FWHM_label, 3,1)
        stats_layout.addWidget(self.FDHM_label, 4,1)
        stats_layout.addWidget(self.decay_label, 5,1)
        stats_layout.addWidget(self.risetime_label, 6,1)
        stats_layout.addWidget(self.time_at_max_label, 7,1)
        stats_layout.addWidget(self.location_at_max_label, 8,1)
        stats_layout.addWidget(self.baseline_label, 9,1)
        self.setStyleSheet("""SparkResultWidget { background-color: white; }""")
        self.setVisible(False)
class VisualizationOptionsWidget(QW.QWidget):
    """Panel of image-display settings (blur, saturation, colormap).

    Settings are persisted via dbmaster under the
    'visualization_options_reference' key and broadcast to listeners
    through the settings_changed signal.
    """
    settings_changed = QC.pyqtSignal(dict)
    # NOTE(review): this signal shadows QWidget.close(); it appears
    # intentional (the Close button click is forwarded to it) — confirm.
    close = QC.pyqtSignal()

    def __init__(self, pipechain, parent = None, channel=0):
        super(VisualizationOptionsWidget, self).__init__(parent)
        #data from shelf
        vis_key = 'visualization_options_reference'
        vis_conf = dbmaster.get_config_setting_value(vis_key)
        main_layout = QW.QVBoxLayout()
        layout = QW.QFormLayout()
        self.setLayout(main_layout)
        self.channel = channel
        main_layout.addLayout(layout)
        # Gaussian blur kernel width, in micrometers.
        self.blur_spinbox = QW.QDoubleSpinBox(self)
        self.blur_spinbox.setMaximum(5)
        self.blur_spinbox.setSingleStep(.05)
        self.blur_spinbox.setMinimum(0)
        self.blur_spinbox.setValue(vis_conf['blur'])
        self.blur_spinbox.valueChanged.connect(
                self.visualization_controls_moved)
        # Only emit once editing is finished, not per keystroke.
        self.blur_spinbox.setKeyboardTracking(False)
        layout.addRow(QW.QLabel('<html>Blur kernel σ [μm]</html>:'),self.blur_spinbox)
        self.saturation_spinbox = QW.QDoubleSpinBox(self)
        self.saturation_spinbox.setMaximum(99)
        self.saturation_spinbox.setSingleStep(.1)
        self.saturation_spinbox.setMinimum(0)
        self.saturation_spinbox.setValue(vis_conf['saturation'])
        self.saturation_spinbox.valueChanged.connect(
                self.visualization_controls_moved)
        self.saturation_spinbox.setKeyboardTracking(False)
        layout.addRow('Saturation:',self.saturation_spinbox)
        # Colormap picker with icon previews; reversed ('_r') maps are
        # handled by the separate checkbox instead of separate entries.
        self.colormap_combobox = QW.QComboBox(self)
        self.colormaps = [name for name in cm.datad if not name.endswith("_r")]
        self.colormaps.sort()
        self.colormap_combobox.setIconSize(QC.QSize(100,20))
        for cm_name in self.colormaps:
            icon = QG.QIcon(QG.QPixmap(":/colormap_%s.png"%cm_name))
            self.colormap_combobox.addItem(icon, cm_name)
        self.colormap_combobox.setCurrentIndex(\
                self.colormaps.index(vis_conf['colormap']))
        self.colormap_combobox.currentIndexChanged.connect(
                self.visualization_controls_moved)
        layout.addRow('Colormap:',self.colormap_combobox)
        self.colormap_reverse_checkbox = QW.QCheckBox(self)
        self.colormap_reverse_checkbox.setChecked(vis_conf['colormap_reverse'])
        self.colormap_reverse_checkbox.stateChanged.connect(
                self.visualization_controls_moved)
        layout.addRow('Reverse colormap', self.colormap_reverse_checkbox)
        # Histogram preview that can feed back a saturation value on click.
        self.hist_plot = HistogramPlot( self)
        self.hist_plot.saturation_changed.connect(self.saturation_spinbox.setValue)
        main_layout.addWidget(self.hist_plot)
        close_pb =QW.QPushButton("Close")
        #close_pb.setSizePolicy(QG.QSizePolicy.Maximum, QG.QSizePolicy.Maximum)
        main_layout.addWidget(close_pb, QC.Qt.AlignRight)
        close_pb.clicked.connect(self.close)
        #self.pipechain = pipechain
        self.update_pipechain(pipechain)
        # self.do_histogram()

    def update_pipechain(self, pipechain):
        """Attach to a new pipechain and refresh the histogram from it."""
        self.pipechain = pipechain
        pipechain.new_histogram.connect(self.update_hdata)
        self.update_hdata()

    def update_hdata(self):
        """Pull fresh histogram data from the pipechain and redraw."""
        self.hist_plot.update_hdata(self.pipechain)
        self.do_histogram()

    #@helpers.timeIt
    def do_histogram(self):
        """Redraw the histogram using the current saturation setting."""
        self.hist_plot.set_histogram(self.saturation_spinbox.value())

    def visualization_controls_moved(self):
        """Collect all control values, persist them, and notify listeners."""
        saturate = self.saturation_spinbox.value()
        cmap_name = str(self.colormap_combobox.currentText())
        cmap_r= self.colormap_reverse_checkbox.isChecked()
        blur = self.blur_spinbox.value()
        settings = {'saturation':saturate, 'colormap':cmap_name,
                'colormap_reverse':cmap_r, 'blur':blur}
        self.settings_changed.emit(settings)
        # Persist so the same settings are restored next session.
        dbmaster.set_config_setting(
                "visualization_options_reference", settings)
        self.do_histogram()
class HistogramPlot(QW.QWidget):
    """Intensity-histogram view with an interactive saturation cutoff line.

    Draws the histogram as a path in a QGraphicsScene, fills the area left
    of the saturation cutoff, and emits saturation_changed when the user
    clicks a new cutoff position.
    """
    saturation_changed = QC.pyqtSignal(float)

    @property
    def log_scale(self):
        # Whether the y axis should use the log transform (see scale_y).
        return self.log_checkbox.isChecked()

    def __init__(self, parent = None):
        super(HistogramPlot, self).__init__(parent)
        layout = QW.QVBoxLayout()
        # Channel index is inherited from the owning options widget.
        self.channel = parent.channel
        self.setLayout(layout)
        self.scene = HistogramScene(self)
        self.scene.clicked.connect(self.reverse_saturation)
        self.scene.setBackgroundBrush(QG.QBrush(QG.QColor('#1D3A3B')))
        #gr = self.scene.addRect(QC.QRectF(0,0,200,100))
        self.view = RefitView(self)
        self.view.setScene(self.scene)
        self.log_checkbox = QW.QCheckBox("Log scale histogram")
        #self.log_checkbox.stateChanged.connect(self.set_histogram)
        self.log_checkbox.stateChanged.connect(self.checkbox_changed)
        #self.setFixedSize(200, 150)
        # Graphics items are replaced on every redraw; None until first draw.
        self.gpath = None
        self.gpath_fill = None
        self.saturation_gline = None
        self.saturation_value = 0
        self.pipechain=None
        # Cache of scene points per y-scaling mode; invalidated on new data.
        self._points = {'Normal':None, 'Log':None}
        layout.addWidget(self.view)
        layout.addWidget(self.log_checkbox)
        #self.g_pixmap = None
        #self.g_roiline = None
        #gt = self.scene.addText("No file selected")
        #self.view.centerOn(gr)
        self.view.setFrameStyle(QW.QFrame.NoFrame)
        self.view.setRenderHint(QG.QPainter.Antialiasing)
        self.view.setHorizontalScrollBarPolicy(QC.Qt.ScrollBarAlwaysOff)
        self.view.setVerticalScrollBarPolicy(QC.Qt.ScrollBarAlwaysOff)
        # Cosmetic pens keep 1px width regardless of the view transform.
        self.saturation_pen = QG.QPen(QG.QColor('orange'))
        #self.saturation_pen.setWidth(3)
        self.saturation_pen.setCosmetic(True)
        self.hist_line_pen = QG.QPen(QG.QColor('white'))
        #self.hist_line_pen.setWidth(2)
        self.hist_line_pen.setCosmetic(True)

    def update_hdata(self, pipechain):
        """Fetch new histogram data for this channel from the pipechain."""
        self.hdata = pipechain.histogram(self.channel)
        #reset precalculated histogram points
        self._points = {'Normal':None, 'Log':None}
        self.pipechain = pipechain

    def checkbox_changed(self, state):
        """Redraw when the log-scale checkbox is toggled."""
        self.set_histogram()

    def scale_y(self, val, logmin=0):
        """Map a histogram count to scene-y (negated so larger is higher)."""
        if self.log_scale:
            # 1e-12 guards against log(0); logmin shifts the curve to zero.
            return 1./n.log(val+1e-12) - logmin
        else:
            return -val

    def sizeHint(self):
        return QC.QSize(200,150)

    @property
    def points(self):
        """Scene points of the histogram, lazily built per scaling mode."""
        if self.log_scale:
            if not self._points["Log"]:
                self._points["Log"] = self.make_points(self.hdata)
            return self._points["Log"]
        else:
            if not self._points["Normal"]:
                self._points["Normal"] = self.make_points(self.hdata)
            return self._points["Normal"]

    #@helpers.timeIt
    def reverse_saturation(self, loc):
        #method to determine saturation value based on user click on scene
        saturation_value = self.pipechain.value_percentage(loc, self.channel)
        #self.set_histogram(100-saturation_value)
        self.saturation_changed.emit(100 - saturation_value)
        #print 'emitted', 100 - saturation_value, saturation_value

    #@helpers.timeIt
    def set_histogram(self, saturation_value=None ):
        """Redraw the histogram path, the cutoff line, and the filled area.

        NOTE(review): `if saturation_value:` also rejects an explicit 0.0,
        so a zero saturation cannot overwrite a previous value — confirm
        whether this is intended.
        """
        #print '\nsat val',self.saturation_value,saturation_value
        if saturation_value:
            self.saturation_value = saturation_value
        #print 'new sat val',self.saturation_value
        if self.hdata is not None:
            # Intensity value at which the saturation percentage is reached.
            cut_max = self.pipechain.percentage_value(self.saturation_value,
                    self.channel)
            points = self.points
            start = points[0]
            path = QG.QPainterPath(start)
            for p in points[1:]:
                path.lineTo(p)
            if self.gpath:
                self.scene.removeItem(self.gpath)
            self.gpath = self.scene.addPath(path,self.hist_line_pen)
            self.gpath.setZValue(2)
            #draw line of saturation percentage
            if self.saturation_gline:
                self.scene.removeItem(self.saturation_gline)
            self.saturation_gline = self.scene.addLine(cut_max, 0,
                    cut_max, self.scale_y(max(self.hdata[0][1:])),
                    self.saturation_pen)
            #make fill path till cut_max
            path_fill = QG.QPainterPath(start)
            for p in points[1:]:
                if p.x()<=cut_max:
                    path_fill.lineTo(p)
                else:
                    # Close the fill exactly where the histogram crosses the
                    # cutoff line, if an intersection can be found.
                    sl = self.saturation_gline.shape()
                    #hl = self.gpath.shape()
                    inters = path.intersected(sl)
                    if inters.elementCount() > 1:
                        el=inters.elementAt(0)
                        y_last = el.y
                    else:
                        y_last = p.y()
                    path_fill.lineTo(cut_max, y_last)
                    path_fill.lineTo(cut_max, start.y())
                    break
            path_fill.closeSubpath()
            if self.gpath_fill:
                self.scene.removeItem(self.gpath_fill)
            self.gpath_fill = self.scene.addPath(path_fill,pen=QG.QPen(QC.Qt.NoPen),
                    brush=QG.QBrush(QG.QColor('cornflowerblue')))
            # z-order: fill (1) below histogram line (2) below cutoff (3).
            self.gpath_fill.setZValue(1)
            self.saturation_gline.setZValue(3)
            self.fit_view()

    def fit_view(self):
        """Zoom the view so the drawn histogram fills it."""
        if self.gpath:
            #rect has to be adjusted because QT adds .5 pixels to each side
            rect = self.scene.itemsBoundingRect().adjusted(0.5,0.5,-0.5,-0.5)
            self.view.fitInView(rect)

    #@helpers.timeIt
    def make_points(self, data):
        """Convert (counts, bin_edges) histogram data into scene points."""
        points = []
        #skip the first point of the histogram because it will be all black values and
        #will make the histogram unreadable unless in log scale.
        points.append(QC.QPointF(data[1][1], 0))
        # Offset so the log-scaled curve starts from zero (see scale_y).
        logmin = max(1/n.log(data[0][1:]+1e-12))
        for x,y in zip(data[1][1:],data[0][1:]):
            point = QC.QPointF(x, self.scale_y(y,logmin = logmin))
            points.append(point)
        # Drop back to the baseline so the fill path can be closed.
        points.append(QC.QPointF(data[1][-2], 0))
        return points
class HistogramScene(QW.QGraphicsScene):
    """Graphics scene that reports the x coordinate of mouse presses."""
    clicked = QC.pyqtSignal(float)

    def mousePressEvent(self, event):
        # Emit only the horizontal position; the histogram cutoff is 1-d.
        self.clicked.emit(event.scenePos().x())
class RefitView(QW.QGraphicsView):
    """GraphicsView extension that fits in view at every resize event"""

    def resizeEvent(self, event):
        super(RefitView, self).resizeEvent(event)
        # Trim the 0.5px border Qt adds on each side of the bounding rect.
        bounds = self.scene().itemsBoundingRect().adjusted(0.5, 0.5, -0.5, -0.5)
        self.fitInView(bounds)
| gpl-3.0 |
guorendong/iridium-browser-ubuntu | third_party/chromite/lib/paygen/paygen_build_lib_unittest.py | 1 | 59480 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for paygen_build_lib."""
from __future__ import print_function
import itertools
import mox
import os
import shutil
import tempfile
import unittest
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import commands
from chromite.cbuildbot import constants
from chromite.cbuildbot import failures_lib
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import parallel
from chromite.lib.paygen import download_cache
from chromite.lib.paygen import gslock
from chromite.lib.paygen import gslib
from chromite.lib.paygen import gspaths
from chromite.lib.paygen import urilib
from chromite.lib.paygen import paygen_build_lib
from chromite.lib.paygen import paygen_payload_lib
from chromite.lib.paygen import utils
# We access a lot of protected members during testing.
# pylint: disable=protected-access
class PaygenBuildLibTest(cros_test_lib.MoxTempDirTestCase):
"""Test PaygenBuildLib class."""
  def setUp(self):
    """Create the shared build/image fixtures used by all test methods."""
    self.work_dir = '/work/foo'

    # Two older builds of the same channel/board, used as update sources.
    self.prev_image = gspaths.Image(channel='foo-channel',
                                    board='foo-board',
                                    version='1.0.0',
                                    key='mp')
    self.prev2_image = gspaths.Image(channel='foo-channel',
                                     board='foo-board',
                                     version='1.1.0',
                                     key='mp')

    self.foo_build = gspaths.Build(bucket='crt',
                                   channel='foo-channel',
                                   board='foo-board',
                                   version='1.2.3')

    # Create an additional 'special' image like NPO that isn't NPO,
    # and keyed with a weird key. It should match none of the filters.
    self.special_image = gspaths.Image(bucket='crt',
                                       channel='foo-channel',
                                       board='foo-board',
                                       version='1.2.3',
                                       key='foo-key',
                                       image_channel='special-channel')

    self.images = self._GetBuildImages(self.foo_build)
    (self.basic_image, self.premp_image,
     self.npo_image, self.premp_npo_image) = self.images

    self.test_image = self._GetBuildTestImage(self.foo_build)
def _GetPaygenBuildInstance(self, skip_test_payloads=False,
disable_tests=False):
"""Helper method to create a standard Paygen instance."""
control_dir = None if disable_tests else '/tmp/foo'
return paygen_build_lib._PaygenBuild(self.foo_build, self.work_dir,
control_dir=control_dir,
skip_test_payloads=skip_test_payloads)
def _GetBuildImages(self, build):
"""Create basic_image, npo_image, premp_image, premp_npo_image.
Args:
build: gspaths.Build object describing the build to create fake images
for.
"""
# NPOs should have image_version incremented, but it doesn't matter for our
# testing.
basic_image = gspaths.Image(key='mp-v2', **build)
npo_image = gspaths.Image(key='mp-v2',
image_channel='nplusone-channel',
image_version=build.version,
**build)
premp_image = gspaths.Image(key='premp', **build)
premp_npo_image = gspaths.Image(key='premp',
image_channel='nplusone-channel',
image_version=build.version,
**build)
# Code in several places depends on the order.
return [basic_image, premp_image, npo_image, premp_npo_image]
def _GetBuildTestImage(self, build):
"""Returns a test image object for the build.
Args:
build: gspaths.Build object describing the build to create fake images
for.
"""
return gspaths.UnsignedImageArchive(bucket=build.bucket,
channel=build.channel,
board=build.board,
version=build.version,
milestone='R12',
image_type='test')
def testGetFlagURI(self):
"""Validate the helper method to create flag URIs for our current build."""
paygen = self._GetPaygenBuildInstance()
self.assertEqual(
paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK),
'gs://crt/foo-channel/foo-board/1.2.3/payloads/LOCK_flag')
self.assertEqual(
paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP),
'gs://crt/foo-channel/foo-board/1.2.3/payloads/SKIP_flag')
self.assertEqual(
paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED),
'gs://crt/foo-channel/foo-board/1.2.3/payloads/FINISHED_flag')
  def testFilterHelpers(self):
    """Test the _FilterFor{Mp,Premp,Basic,Npo} helper methods."""
    # All of the filter helpers should handle empty list.
    self.assertEqual(paygen_build_lib._FilterForMp([]), [])
    self.assertEqual(paygen_build_lib._FilterForPremp([]), [])
    self.assertEqual(paygen_build_lib._FilterForBasic([]), [])
    self.assertEqual(paygen_build_lib._FilterForNpo([]), [])

    # prev_image lets us test with an 'mp' key, instead of an 'mp-v2' key.
    images = list(self.images) + [self.special_image, self.prev_image]

    # special_image must match none of the filters below.
    self.assertEqual(paygen_build_lib._FilterForMp(images),
                     [self.basic_image, self.npo_image, self.prev_image])

    self.assertEqual(paygen_build_lib._FilterForPremp(images),
                     [self.premp_image, self.premp_npo_image])

    self.assertEqual(paygen_build_lib._FilterForBasic(images),
                     [self.basic_image, self.premp_image, self.prev_image])

    self.assertEqual(paygen_build_lib._FilterForNpo(images),
                     [self.npo_image, self.premp_npo_image])
  def testValidateExpectedBuildImages(self):
    """Test a function that validates expected images are found on a build."""
    paygen = self._GetPaygenBuildInstance()

    # Test with basic mp image only.
    paygen._ValidateExpectedBuildImages(self.foo_build, (self.basic_image,))

    # Test with basic mp and mp npo images.
    paygen._ValidateExpectedBuildImages(self.foo_build, (self.basic_image,
                                                         self.npo_image))

    # Test with basic mp and premp images.
    paygen._ValidateExpectedBuildImages(self.foo_build, (self.basic_image,
                                                         self.premp_image))

    # Test with basic mp and premp images.
    paygen._ValidateExpectedBuildImages(self.foo_build, (self.basic_image,
                                                         self.premp_image,
                                                         self.npo_image))

    # Test with 4 different images.
    paygen._ValidateExpectedBuildImages(self.foo_build, (self.basic_image,
                                                         self.premp_image,
                                                         self.npo_image,
                                                         self.premp_npo_image))

    # No images isn't valid.
    with self.assertRaises(paygen_build_lib.ImageMissing):
      paygen._ValidateExpectedBuildImages(self.foo_build, [])

    # NPO image only isn't valid.
    with self.assertRaises(paygen_build_lib.ImageMissing):
      paygen._ValidateExpectedBuildImages(self.foo_build, (self.npo_image,))

    # NPO without matching basic isn't valid.
    with self.assertRaises(paygen_build_lib.ImageMissing):
      paygen._ValidateExpectedBuildImages(self.foo_build,
                                          (self.premp_image,
                                           self.npo_image,
                                           self.premp_npo_image))

    # More than one of the same type of image should trigger BuildCorrupt
    with self.assertRaises(paygen_build_lib.BuildCorrupt):
      paygen._ValidateExpectedBuildImages(self.foo_build, (self.basic_image,
                                                           self.basic_image))

    # Unexpected images should trigger BuildCorrupt
    with self.assertRaises(paygen_build_lib.BuildCorrupt):
      paygen._ValidateExpectedBuildImages(self.foo_build,
                                          (self.basic_image,
                                           self.npo_image,
                                           self.special_image))
  def _TestDiscoverArtifacts(self, list_files_uri, list_files_result,
                             test_func, test_args, should_succeed,
                             expected_result):
    """Test artifact discovery using mocked gsutil results.

    Args:
      list_files_uri: URI the discovery code is expected to list.
      list_files_result: Canned result the mocked ListFiles call returns.
      test_func: Discovery function under test.
      test_args: Positional arguments to pass to test_func.
      should_succeed: If True, compare test_func's return value against
        expected_result; otherwise expect it to raise.
      expected_result: Expected return value, or the expected exception
        class when should_succeed is False.
    """
    self.mox.StubOutWithMock(urilib, 'ListFiles')
    urilib.ListFiles(list_files_uri).AndReturn(list_files_result)
    self.mox.ReplayAll()

    if should_succeed:
      self.assertEqual(test_func(*test_args), expected_result)
    else:
      self.assertRaises(expected_result, test_func, *test_args)
  def testDiscoverImages(self):
    """Test _DiscoverImages."""
    paygen = self._GetPaygenBuildInstance()

    uri_base = 'gs://crt/foo-channel/foo-board/1.2.3'

    uri_basic = os.path.join(
        uri_base, 'chromeos_1.2.3_foo-board_recovery_foo-channel_mp-v3.bin')
    uri_premp = os.path.join(
        uri_base, 'chromeos_1.2.3_foo-board_recovery_foo-channel_premp.bin')
    # NPO image: note the bumped version and the nplusone channel in the name.
    uri_npo = os.path.join(
        uri_base,
        'chromeos_1.2.4_foo-board_recovery_nplusone-channel_mp-v3.bin')
    file_list_result = [uri_basic, uri_premp, uri_npo]

    base_image_params = {'channel': 'foo-channel',
                         'board': 'foo-board',
                         'version': '1.2.3',
                         'bucket': 'crt'}
    expected_basic = gspaths.Image(key='mp-v3', uri=uri_basic,
                                   **base_image_params)
    expected_premp = gspaths.Image(key='premp', uri=uri_premp,
                                   **base_image_params)
    expected_npo = gspaths.Image(key='mp-v3', image_channel='nplusone-channel',
                                 image_version='1.2.4', uri=uri_npo,
                                 **base_image_params)
    expected_result = [expected_basic, expected_premp, expected_npo]

    self._TestDiscoverArtifacts(
        os.path.join(uri_base, 'chromeos_*_foo-board_recovery_*_*.bin'),
        file_list_result,
        paygen._DiscoverImages,
        [self.foo_build],
        True,
        expected_result)
  def testDiscoverTestImageArchives(self):
    """Test _DiscoverTestImageArchives (success)."""
    paygen = self._GetPaygenBuildInstance()
    uri_base = 'gs://crt/foo-channel/foo-board/1.2.3'
    uri_test_archive = os.path.join(
        uri_base, 'ChromeOS-test-R12-1.2.3-foo-board.tar.xz')
    file_list_result = [uri_test_archive]
    # A single matching archive should be parsed into one
    # UnsignedImageArchive with the milestone extracted from the file name.
    expected_test_archive = gspaths.UnsignedImageArchive(
        channel='foo-channel',
        board='foo-board',
        version='1.2.3',
        bucket='crt',
        uri=uri_test_archive,
        milestone='R12',
        image_type='test')
    expected_result = [expected_test_archive]
    self._TestDiscoverArtifacts(
        os.path.join(uri_base, 'ChromeOS-test-*-1.2.3-foo-board.tar.xz'),
        file_list_result,
        paygen._DiscoverTestImageArchives,
        [self.foo_build],
        True,
        expected_result)
  def testDiscoverTestImageArchivesMultipleResults(self):
    """Test _DiscoverTestImageArchives (fails due to multiple results)."""
    paygen = self._GetPaygenBuildInstance()
    uri_base = 'gs://crt/foo-channel/foo-board/1.2.3'
    # Two archives matching the same glob should be treated as corruption.
    uri_test_archive1 = os.path.join(
        uri_base, 'ChromeOS-test-R12-1.2.3-foo-board.tar.xz')
    uri_test_archive2 = os.path.join(
        uri_base, 'ChromeOS-test-R13-1.2.3-foo-board.tar.xz')
    file_list_result = [uri_test_archive1, uri_test_archive2]
    self._TestDiscoverArtifacts(
        os.path.join(uri_base, 'ChromeOS-test-*-1.2.3-foo-board.tar.xz'),
        file_list_result,
        paygen._DiscoverTestImageArchives,
        [self.foo_build],
        False,
        paygen_build_lib.BuildCorrupt)
  def testDiscoverTestImageArchivesMissing(self):
    """Test _DiscoverTestImageArchives (fails due to missing images)."""
    paygen = self._GetPaygenBuildInstance()
    uri_base = 'gs://crt/foo-channel/foo-board/1.2.3'
    # An empty listing should raise ImageMissing rather than return [].
    self._TestDiscoverArtifacts(
        os.path.join(uri_base, 'ChromeOS-test-*-1.2.3-foo-board.tar.xz'),
        [],
        paygen._DiscoverTestImageArchives,
        [self.foo_build],
        False,
        paygen_build_lib.ImageMissing)
  @unittest.skipIf(not paygen_build_lib.config, 'Internal crostools required.')
  def testDiscoverActiveFsiBuilds(self):
    """Using test release.conf values, test _DiscoverActiveFsiBuilds."""
    # Inject a minimal release config: one board with FSI versions, one
    # without. Restored via LoadGlobalConfig at the end of the test.
    test_config = """
[valid-board]
fsi_images: 2913.331.0,2465.105.0
[no-fsi-board]
"""
    paygen_build_lib.config.LoadTestConfig(test_config)
    # Test a board with FSI values on stable-channel.
    paygen = paygen_build_lib._PaygenBuild(
        gspaths.Build(channel='stable-channel', board='valid-board',
                      version='1.2.3'),
        self.work_dir)
    self.assertEqual(
        sorted(paygen._DiscoverActiveFsiBuilds()),
        [gspaths.Build(board='valid-board',
                       channel='stable-channel',
                       version='2465.105.0'),
         gspaths.Build(board='valid-board',
                       channel='stable-channel',
                       version='2913.331.0')])
    # Test a board without FSI values on stable-channel.
    paygen = paygen_build_lib._PaygenBuild(
        gspaths.Build(channel='stable-channel', board='no-fsi-board',
                      version='1.2.3'),
        self.work_dir)
    self.assertEqual(paygen._DiscoverActiveFsiBuilds(), [])
    # Test a board with FSI values on non-stable-channel.
    # FSI deltas are only generated for stable-channel, so this is empty.
    paygen = paygen_build_lib._PaygenBuild(
        gspaths.Build(channel='beta-channel', board='valid-board',
                      version='1.2.3'),
        self.work_dir)
    self.assertEqual(paygen._DiscoverActiveFsiBuilds(), [])
    paygen_build_lib.config.LoadGlobalConfig()
  @cros_test_lib.NetworkTest()
  @unittest.skipIf(not paygen_build_lib.config, 'Internal crostools required.')
  def testDiscoverAllFsiBuilds(self):
    """Using live production data, test _DiscoverAllFsiBuilds."""
    paygen = paygen_build_lib._PaygenBuild(
        gspaths.Build(channel='stable-channel', board='x86-alex-he',
                      version='1.2.3'),
        self.work_dir)
    # Search for real FSIs for an older/live board.
    # NOTE(review): depends on live data for x86-alex-he staying frozen;
    # will need updating if production FSI definitions change.
    self.assertEqual(paygen._DiscoverAllFsiBuilds(),
                     ['0.12.433.257', '0.14.811.132', '1412.205.0'])
  @unittest.skipIf(not paygen_build_lib.query, 'Internal crostools required.')
  def testDiscoverNmoBuild(self):
    """Test _DiscoverNmoBuild (N minus One)."""
    paygen = self._GetPaygenBuildInstance()
    self.mox.StubOutWithMock(paygen_build_lib.query, 'FindLatestPublished')
    # Set up the test replay script.
    # First call finds a published version; second finds none.
    paygen_build_lib.query.FindLatestPublished(
        'foo-channel', 'foo-board').AndReturn('1.0.0')
    paygen_build_lib.query.FindLatestPublished(
        'foo-channel', 'foo-board').AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    # A published version yields a single-element build list...
    self.assertEqual(paygen._DiscoverNmoBuild(),
                     [gspaths.Build(bucket='crt',
                                    channel='foo-channel',
                                    board='foo-board',
                                    version='1.0.0')])
    # ...no published version yields an empty list.
    self.assertEqual(paygen._DiscoverNmoBuild(), [])
  def testDiscoverRequiredFullPayloads(self):
    """Test _DiscoverRequiredFullPayloads."""
    paygen = self._GetPaygenBuildInstance()
    # No images -> no full payloads.
    self.assertEqual(paygen._DiscoverRequiredFullPayloads([]), [])
    # Every image (signed and test) gets exactly one full payload.
    self.assertItemsEqual(
        paygen._DiscoverRequiredFullPayloads(self.images + [self.test_image]),
        [gspaths.Payload(tgt_image=self.basic_image),
         gspaths.Payload(tgt_image=self.npo_image),
         gspaths.Payload(tgt_image=self.premp_image),
         gspaths.Payload(tgt_image=self.premp_npo_image),
         gspaths.Payload(tgt_image=self.test_image)])
  def testDiscoverRequiredNpoDeltas(self):
    """Test _DiscoverRequiredNpoDeltas."""
    paygen = self._GetPaygenBuildInstance()
    # An NPO delta requires both the base and the N+1 image; any subset
    # produces no deltas.
    self.assertEqual(paygen._DiscoverRequiredNpoDeltas([]), [])
    self.assertEqual(paygen._DiscoverRequiredNpoDeltas([self.basic_image]), [])
    self.assertEqual(paygen._DiscoverRequiredNpoDeltas([self.npo_image]), [])
    expected = [gspaths.Payload(tgt_image=self.npo_image,
                                src_image=self.basic_image)]
    # Pairing is independent of input order.
    self.assertEqual(paygen._DiscoverRequiredNpoDeltas([self.basic_image,
                                                        self.npo_image]),
                     expected)
    self.assertEqual(paygen._DiscoverRequiredNpoDeltas([self.npo_image,
                                                        self.basic_image]),
                     expected)
    # PreMP images pair with their own PreMP N+1 counterpart.
    self.assertEqual(paygen._DiscoverRequiredNpoDeltas([self.premp_image,
                                                        self.premp_npo_image]),
                     [gspaths.Payload(tgt_image=self.premp_npo_image,
                                      src_image=self.premp_image)])
  def testDiscoverRequiredTestNpoDeltas(self):
    """Test _DiscoverRequiredTestNpoDeltas."""
    paygen = self._GetPaygenBuildInstance()
    self.assertEqual(
        paygen._DiscoverRequiredTestNpoDeltas([]), [])
    # The test NPO delta is a self-delta: the test image updates to itself.
    self.assertItemsEqual(
        paygen._DiscoverRequiredTestNpoDeltas([self.test_image]),
        [gspaths.Payload(tgt_image=self.test_image,
                         src_image=self.test_image)])
  def testDiscoverRequiredFromPreviousDeltas(self):
    """Test _DiscoverRequiredFromPreviousDeltas."""
    paygen = self._GetPaygenBuildInstance()
    images = [self.basic_image]
    prevs = [self.prev_image, self.prev2_image]
    # Empty lists.
    results = paygen._DiscoverRequiredFromPreviousDeltas([], [])
    expected = []
    self.assertEqual(results, expected)
    # Empty previous list.
    results = paygen._DiscoverRequiredFromPreviousDeltas(images, [])
    expected = []
    self.assertEqual(results, expected)
    # Empty target list.
    results = paygen._DiscoverRequiredFromPreviousDeltas([], prevs)
    expected = []
    self.assertEqual(results, expected)
    # Basic list: one delta per (target, previous) pair.
    results = paygen._DiscoverRequiredFromPreviousDeltas(images, prevs)
    expected = [gspaths.Payload(tgt_image=self.basic_image,
                                src_image=self.prev_image),
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=self.prev2_image)]
    self.assertEqual(results, expected)
    # Inverted order (should return nothing) — deltas only go forward,
    # from older source to newer target.
    results = paygen._DiscoverRequiredFromPreviousDeltas(
        [self.prev_image], images)
    expected = []
    self.assertEqual(results, expected)
  def testDiscoverRequiredPayloadsIncompleteBuild(self):
    """Test _DiscoverRequiredPayloads when the build's images are missing."""
    paygen = self._GetPaygenBuildInstance()
    self.mox.StubOutWithMock(paygen, '_DiscoverImages')
    self.mox.StubOutWithMock(paygen, '_DiscoverNmoBuild')
    self.mox.StubOutWithMock(paygen, '_DiscoverActiveFsiBuilds')
    # Avoid real sleeps between discovery retries.
    paygen.BUILD_DISCOVER_RETRY_SLEEP = 0
    # Check that we retry 3 times.
    # Four expectations: the initial attempt plus three retries, all failing.
    paygen._DiscoverImages(paygen._build).AndRaise(
        paygen_build_lib.ImageMissing())
    paygen._DiscoverImages(paygen._build).AndRaise(
        paygen_build_lib.ImageMissing())
    paygen._DiscoverImages(paygen._build).AndRaise(
        paygen_build_lib.ImageMissing())
    paygen._DiscoverImages(paygen._build).AndRaise(
        paygen_build_lib.ImageMissing())
    # Run the test verification.
    self.mox.ReplayAll()
    # Persistent ImageMissing is surfaced to the caller as BuildNotReady.
    with self.assertRaises(paygen_build_lib.BuildNotReady):
      paygen._DiscoverRequiredPayloads()
  def testDiscoverRequiredPayloads(self):
    """Test _DiscoverRequiredPayloads for the full happy path.

    Covers fulls, NPO deltas, NMO deltas and FSI deltas for both signed
    and test images. Mox expectations below are order-sensitive.
    """
    paygen = self._GetPaygenBuildInstance()
    output_uri = 'gs://foo'
    self.mox.StubOutWithMock(paygen, '_DiscoverImages')
    self.mox.StubOutWithMock(paygen, '_DiscoverTestImageArchives')
    self.mox.StubOutWithMock(paygen, '_DiscoverNmoBuild')
    self.mox.StubOutWithMock(paygen, '_DiscoverActiveFsiBuilds')
    self.mox.StubOutWithMock(paygen_payload_lib, 'DefaultPayloadUri')
    # One N-minus-one build and two FSI builds feed the delta discovery.
    nmo_build = gspaths.Build(bucket='crt',
                              channel='foo-channel',
                              board='foo-board',
                              version='1.2.2')
    fsi1_build = gspaths.Build(bucket='crt',
                               channel='foo-channel',
                               board='foo-board',
                               version='1.0.0')
    fsi2_build = gspaths.Build(bucket='crt',
                               channel='foo-channel',
                               board='foo-board',
                               version='1.1.0')
    nmo_images = self._GetBuildImages(nmo_build)
    nmo_test_image = self._GetBuildTestImage(nmo_build)
    fsi1_images = self._GetBuildImages(fsi1_build)
    fsi1_test_image = self._GetBuildTestImage(fsi1_build)
    fsi2_images = self._GetBuildImages(fsi2_build)
    fsi2_test_image = self._GetBuildTestImage(fsi2_build)
    # Record phase: discovery of current build, then NMO, then each FSI.
    paygen._DiscoverImages(paygen._build).AndReturn(self.images)
    paygen._DiscoverTestImageArchives(paygen._build).AndReturn(
        [self.test_image])
    paygen._DiscoverNmoBuild().AndReturn([nmo_build])
    paygen._DiscoverActiveFsiBuilds().AndReturn([fsi1_build, fsi2_build])
    paygen._DiscoverImages(nmo_build).AndReturn(nmo_images)
    paygen._DiscoverTestImageArchives(nmo_build).AndReturn([nmo_test_image])
    paygen._DiscoverImages(fsi1_build).AndReturn(fsi1_images)
    paygen._DiscoverTestImageArchives(fsi1_build).AndReturn([fsi1_test_image])
    paygen._DiscoverImages(fsi2_build).AndReturn(fsi2_images)
    paygen._DiscoverTestImageArchives(fsi2_build).AndReturn([fsi2_test_image])
    # Simplify the output URIs, so it's easy to check them below.
    paygen_payload_lib.DefaultPayloadUri(
        mox.IsA(gspaths.Payload), None).MultipleTimes().AndReturn(output_uri)
    # Run the test verification.
    self.mox.ReplayAll()
    results = paygen._DiscoverRequiredPayloads()
    expected = [gspaths.Payload(tgt_image=self.basic_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.npo_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_npo_image, uri=output_uri),
                # NPO Deltas
                gspaths.Payload(tgt_image=self.npo_image,
                                src_image=self.basic_image,
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_npo_image,
                                src_image=self.premp_image,
                                uri=output_uri),
                # NMO Delta
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=nmo_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=nmo_images[1],
                                uri=output_uri),
                # FSI Deltas
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=fsi1_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=fsi1_images[1],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=fsi2_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=fsi2_images[1],
                                uri=output_uri),
                # Test full payload.
                gspaths.Payload(tgt_image=self.test_image,
                                uri=output_uri),
                # Test NPO delta.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=self.test_image,
                                uri=output_uri),
                # Test NMO delta.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=nmo_test_image,
                                uri=output_uri),
                # Test FSI deltas.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=fsi1_test_image,
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=fsi2_test_image,
                                uri=output_uri)]
    # Results are (payload, skip) pairs; none of these should be skipped.
    expected = zip(expected, itertools.repeat(False))
    self.assertItemsEqual(sorted(results), sorted(expected))
  def testDiscoverRequiredPayloadsPreviousSkipped(self):
    """Test _DiscoverRequiredPayloads.

    Ensures that no test delta payload is generated if generation of a
    signed delta from the same build was skipped.
    """
    paygen = self._GetPaygenBuildInstance()
    output_uri = 'gs://foo'
    self.mox.StubOutWithMock(paygen, '_DiscoverImages')
    self.mox.StubOutWithMock(paygen, '_DiscoverTestImageArchives')
    self.mox.StubOutWithMock(paygen, '_DiscoverNmoBuild')
    self.mox.StubOutWithMock(paygen, '_DiscoverActiveFsiBuilds')
    self.mox.StubOutWithMock(paygen_payload_lib, 'DefaultPayloadUri')
    nmo_build = gspaths.Build(bucket='crt',
                              channel='foo-channel',
                              board='foo-board',
                              version='1.2.2')
    fsi1_build = gspaths.Build(bucket='crt',
                               channel='foo-channel',
                               board='foo-board',
                               version='1.0.0')
    fsi2_build = gspaths.Build(bucket='crt',
                               channel='foo-channel',
                               board='foo-board',
                               version='1.1.0')
    fsi1_images = self._GetBuildImages(fsi1_build)
    fsi1_test_image = self._GetBuildTestImage(fsi1_build)
    fsi2_images = self._GetBuildImages(fsi2_build)
    fsi2_test_image = self._GetBuildTestImage(fsi2_build)
    paygen._DiscoverImages(paygen._build).AndReturn(self.images)
    paygen._DiscoverTestImageArchives(paygen._build).AndReturn(
        [self.test_image])
    paygen._DiscoverNmoBuild().AndReturn([nmo_build])
    paygen._DiscoverActiveFsiBuilds().AndReturn([fsi1_build, fsi2_build])
    # The NMO build's signed images are incomplete, so its deltas (signed
    # AND test) must be dropped entirely.
    paygen._DiscoverImages(nmo_build).AndRaise(
        paygen_build_lib.ImageMissing('nmo build is missing some image'))
    # _DiscoverTestImageArchives(nmo_build) should NOT be called.
    paygen._DiscoverImages(fsi1_build).AndReturn(fsi1_images)
    paygen._DiscoverTestImageArchives(fsi1_build).AndReturn([fsi1_test_image])
    paygen._DiscoverImages(fsi2_build).AndReturn(fsi2_images)
    paygen._DiscoverTestImageArchives(fsi2_build).AndReturn([fsi2_test_image])
    # Simplify the output URIs, so it's easy to check them below.
    paygen_payload_lib.DefaultPayloadUri(
        mox.IsA(gspaths.Payload), None).MultipleTimes().AndReturn(output_uri)
    # Run the test verification.
    self.mox.ReplayAll()
    results = paygen._DiscoverRequiredPayloads()
    # IMPORTANT: we intentionally omit the NMO payload from the expected list
    # of payloads as it is a duplicate of one of the FSIs.
    expected = [gspaths.Payload(tgt_image=self.basic_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.npo_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_npo_image, uri=output_uri),
                # NPO Deltas
                gspaths.Payload(tgt_image=self.npo_image,
                                src_image=self.basic_image,
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_npo_image,
                                src_image=self.premp_image,
                                uri=output_uri),
                # FSI Deltas
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=fsi1_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=fsi1_images[1],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=fsi2_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=fsi2_images[1],
                                uri=output_uri),
                # Test full payload.
                gspaths.Payload(tgt_image=self.test_image,
                                uri=output_uri),
                # Test NPO delta.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=self.test_image,
                                uri=output_uri),
                # Test FSI deltas.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=fsi1_test_image,
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=fsi2_test_image,
                                uri=output_uri)]
    expected = zip(expected, itertools.repeat(False))
    self.assertItemsEqual(sorted(results), sorted(expected))
  def testDiscoverRequiredPayloadsNmoIsAlsoFsi(self):
    """Test _DiscoverRequiredPayloads when the NMO build is also an FSI.

    fsi2 shares the NMO build's version (1.2.2), so the NMO deltas are
    deduplicated against the FSI deltas and only discovered once.
    """
    paygen = self._GetPaygenBuildInstance()
    output_uri = 'gs://foo'
    self.mox.StubOutWithMock(paygen, '_DiscoverImages')
    self.mox.StubOutWithMock(paygen, '_DiscoverTestImageArchives')
    self.mox.StubOutWithMock(paygen, '_DiscoverNmoBuild')
    self.mox.StubOutWithMock(paygen, '_DiscoverActiveFsiBuilds')
    self.mox.StubOutWithMock(paygen_payload_lib, 'DefaultPayloadUri')
    nmo_build = gspaths.Build(bucket='crt',
                              channel='foo-channel',
                              board='foo-board',
                              version='1.2.2')
    fsi1_build = gspaths.Build(bucket='crt',
                               channel='foo-channel',
                               board='foo-board',
                               version='1.0.0')
    # Same version as nmo_build — this is the overlap under test.
    fsi2_build = gspaths.Build(bucket='crt',
                               channel='foo-channel',
                               board='foo-board',
                               version='1.2.2')
    fsi1_images = self._GetBuildImages(fsi1_build)
    fsi1_test_image = self._GetBuildTestImage(fsi1_build)
    fsi2_images = self._GetBuildImages(fsi2_build)
    fsi2_test_image = self._GetBuildTestImage(fsi2_build)
    paygen._DiscoverImages(paygen._build).AndReturn(self.images)
    paygen._DiscoverTestImageArchives(paygen._build).AndReturn(
        [self.test_image])
    paygen._DiscoverActiveFsiBuilds().AndReturn([fsi1_build, fsi2_build])
    paygen._DiscoverNmoBuild().AndReturn([nmo_build])
    # Note: no separate _DiscoverImages(nmo_build) expectation — the NMO
    # build's artifacts are covered by the fsi2 discovery.
    paygen._DiscoverImages(fsi1_build).AndReturn(fsi1_images)
    paygen._DiscoverImages(fsi2_build).AndReturn(fsi2_images)
    paygen._DiscoverTestImageArchives(fsi1_build).AndReturn([fsi1_test_image])
    paygen._DiscoverTestImageArchives(fsi2_build).AndReturn([fsi2_test_image])
    # Simplify the output URIs, so it's easy to check them below.
    paygen_payload_lib.DefaultPayloadUri(
        mox.IsA(gspaths.Payload), None).MultipleTimes().AndReturn(output_uri)
    # Run the test verification.
    self.mox.ReplayAll()
    results = paygen._DiscoverRequiredPayloads()
    expected = [gspaths.Payload(tgt_image=self.basic_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.npo_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image, uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_npo_image, uri=output_uri),
                # NPO Deltas
                gspaths.Payload(tgt_image=self.npo_image,
                                src_image=self.basic_image,
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_npo_image,
                                src_image=self.premp_image,
                                uri=output_uri),
                # FSI Deltas
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=fsi1_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=fsi1_images[1],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.basic_image,
                                src_image=fsi2_images[0],
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.premp_image,
                                src_image=fsi2_images[1],
                                uri=output_uri),
                # Test full payload.
                gspaths.Payload(tgt_image=self.test_image,
                                uri=output_uri),
                # Test NPO delta.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=self.test_image,
                                uri=output_uri),
                # Test FSI deltas.
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=fsi1_test_image,
                                uri=output_uri),
                gspaths.Payload(tgt_image=self.test_image,
                                src_image=fsi2_test_image,
                                uri=output_uri)]
    expected = zip(expected, itertools.repeat(False))
    self.assertItemsEqual(sorted(results), sorted(expected))
  def testFindFullTestPayloads(self):
    """Test _FindFullTestPayloads: filtering, lookup keys, and caching."""
    paygen = self._GetPaygenBuildInstance()
    self.mox.StubOutWithMock(urilib, 'ListFiles')
    urilib.ListFiles(
        'gs://crt/find_channel/foo-board/find_full_version/payloads/'
        'chromeos_find_full_version_foo-board_find_channel_full_test.bin-*'
        ).AndReturn(['foo', 'foo.json', 'foo.log', 'bar'])
    urilib.ListFiles(
        'gs://crt/diff_channel/foo-board/find_full_version/payloads/'
        'chromeos_find_full_version_foo-board_diff_channel_full_test.bin-*'
        ).AndReturn(['foo'])
    # Run the test verification.
    self.mox.ReplayAll()
    # Call once and use mocked look up. Make sure we filter properly.
    # (.json and .log sidecar files must be excluded from the result.)
    self.assertEqual(
        paygen._FindFullTestPayloads('find_channel', 'find_full_version'),
        ['foo', 'bar'])
    # Call with different channel, which does a different lookup.
    self.assertEqual(
        paygen._FindFullTestPayloads('diff_channel', 'find_full_version'),
        ['foo'])
    # Call a second time to verify we get cached results (no lookup).
    self.assertEqual(
        paygen._FindFullTestPayloads('find_channel', 'find_full_version'),
        ['foo', 'bar'])
  def DoGeneratePayloadsTest(self, run_parallel, test_dry_run):
    """Test paygen_build_lib._GeneratePayloads.

    Shared scenario driver, invoked once per flag combination by
    testGeneratePayloads.

    Args:
      run_parallel: Whether payload generation should be dispatched through
        parallel.RunTasksInProcessPool or called sequentially.
      test_dry_run: dry_run flag to pass through to the builder.
    """
    paygen = paygen_build_lib._PaygenBuild(self.foo_build, self.tempdir,
                                           dry_run=test_dry_run,
                                           run_parallel=run_parallel)
    basic_payload = gspaths.Payload(tgt_image=self.npo_image,
                                    src_image=self.basic_image)
    premp_payload = gspaths.Payload(tgt_image=self.premp_npo_image,
                                    src_image=self.premp_image)
    self.mox.StubOutWithMock(parallel, 'RunTasksInProcessPool')
    self.mox.StubOutWithMock(paygen_build_lib, '_GenerateSinglePayload')
    expected_payload_args = [
        (basic_payload, mox.IsA(str), True, None, test_dry_run),
        (premp_payload, mox.IsA(str), True, None, test_dry_run)
    ]
    if run_parallel:
      parallel.RunTasksInProcessPool(paygen_build_lib._GenerateSinglePayload,
                                     expected_payload_args)
    else:
      paygen_build_lib._GenerateSinglePayload(basic_payload, mox.IsA(str),
                                              True, None, test_dry_run)
      paygen_build_lib._GenerateSinglePayload(premp_payload, mox.IsA(str),
                                              True, None, test_dry_run)
    # Run the test verification.
    self.mox.ReplayAll()
    paygen._GeneratePayloads((basic_payload, premp_payload), lock=None)
    # Unstub so this helper can be called repeatedly within one test method.
    self.mox.UnsetStubs()
def testGeneratePayloads(self):
"""Test paygen_build_lib._GeneratePayloads, no dry_run."""
# Test every combination of the boolean arguments.
for run_parallel in (True, False):
for test_dry_run in (True, False):
self.DoGeneratePayloadsTest(run_parallel, test_dry_run)
  def testGeneratePayloadInProcess(self):
    """Make sure the _GenerateSinglePayload calls into paygen_payload_lib."""
    basic_payload = gspaths.Payload(tgt_image=self.npo_image,
                                    src_image=self.basic_image)
    self.mox.StubOutWithMock(paygen_payload_lib, 'CreateAndUploadPayload')
    # Verify that we actually generate the payload.
    # sign=False / dry_run=True mirror the positional args passed below.
    paygen_payload_lib.CreateAndUploadPayload(
        basic_payload,
        mox.IsA(download_cache.DownloadCache),
        work_dir=self.tempdir,
        sign=False,
        dry_run=True,
        au_generator_uri='foo.zip')
    # Run the test verification.
    self.mox.ReplayAll()
    paygen_build_lib._GenerateSinglePayload(basic_payload, self.tempdir,
                                            False, 'foo.zip', True)
  def testCleanupBuild(self):
    """Test _PaygenBuild._CleanupBuild."""
    paygen = self._GetPaygenBuildInstance()
    self.mox.StubOutWithMock(gslib, 'Remove')
    # Cleanup must recursively delete the signing scratch dir and tolerate
    # it already being gone (ignore_no_match=True).
    gslib.Remove('gs://crt/foo-channel/foo-board/1.2.3/payloads/signing',
                 recurse=True, ignore_no_match=True)
    self.mox.ReplayAll()
    paygen._CleanupBuild()
  def _CreatePayloadsSetup(self, skip_test_payloads=False, disable_tests=False):
    """Helper method for related CreatePayloads tests.

    Stubs out every collaborator CreatePayloads touches so each test only
    records the expectations it cares about.

    Args:
      skip_test_payloads: Passed through to the _PaygenBuild constructor.
      disable_tests: Passed through to the _PaygenBuild constructor.

    Returns:
      A _PaygenBuild instance with gslock/gslib and its own discovery,
      generation, and test-scheduling methods mocked out.
    """
    paygen = self._GetPaygenBuildInstance(skip_test_payloads=skip_test_payloads,
                                          disable_tests=disable_tests)
    self.mox.StubOutWithMock(gslock, 'Lock')
    self.mox.StubOutWithMock(gslib, 'CreateWithContents')
    self.mox.StubOutWithMock(gslib, 'Exists')
    self.mox.StubOutWithMock(gslib, 'Remove')
    self.mox.StubOutWithMock(paygen, '_DiscoverRequiredPayloads')
    self.mox.StubOutWithMock(paygen, '_MapToArchive')
    self.mox.StubOutWithMock(paygen, '_GeneratePayloads')
    self.mox.StubOutWithMock(paygen, '_AutotestPayloads')
    self.mox.StubOutWithMock(paygen, '_CreatePayloadTests')
    self.mox.StubOutWithMock(paygen, '_CleanupBuild')
    return paygen
  def testCreatePayloadsLockedBuild(self):
    """Test CreatePayloads if the build lock is already held."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    # A failed lock acquisition must bubble up as BuildLocked.
    gslock.Lock(lock_uri, dry_run=False).AndRaise(gslock.LockNotAcquired())
    # Run the test verification.
    self.mox.ReplayAll()
    with self.assertRaises(paygen_build_lib.BuildLocked):
      paygen.CreatePayloads()
  def testCreatePayloadsSkipBuild(self):
    """Test CreatePayloads if the build is marked skip."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    # Mock the lock context manager: enter, see the SKIP flag, exit.
    lock = self.mox.CreateMockAnything()
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    gslib.Exists(skip_uri).AndReturn(True)
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    with self.assertRaises(paygen_build_lib.BuildSkip):
      paygen.CreatePayloads()
  def testCreatePayloadsFinishedBuild(self):
    """Test CreatePayloads if the build is marked finished."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    finished_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED)
    lock = self.mox.CreateMockAnything()
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    # SKIP is checked before FINISHED; only the latter is set here.
    gslib.Exists(skip_uri).AndReturn(False)
    gslib.Exists(finished_uri).AndReturn(True)
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    with self.assertRaises(paygen_build_lib.BuildFinished):
      paygen.CreatePayloads()
  def testCreatePayloadsBuildNotReady(self):
    """Test CreatePayloads if not all images are there."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    finished_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED)
    lock = self.mox.CreateMockAnything()
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    gslib.Exists(skip_uri).AndReturn(False)
    gslib.Exists(finished_uri).AndReturn(False)
    # Discovery failure propagates out of CreatePayloads unchanged, and the
    # lock is still released.
    paygen._DiscoverRequiredPayloads(
        ).AndRaise(paygen_build_lib.BuildNotReady())
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    with self.assertRaises(paygen_build_lib.BuildNotReady):
      paygen.CreatePayloads()
  def testCreatePayloadsCreateFailed(self):
    """Test CreatePayloads if payload generation failed."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    finished_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED)
    lock = self.mox.CreateMockAnything()
    # Discovery returns (payload, skip_test) pairs.
    payload = 'foo'
    payload_list = [payload]
    payload_skip_list = [(payload, False)]
    mock_exception = Exception()
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    gslib.Exists(skip_uri).AndReturn(False)
    gslib.Exists(finished_uri).AndReturn(False)
    paygen._DiscoverRequiredPayloads(
        ).AndReturn(payload_skip_list)
    self.mox.StubOutWithMock(paygen_payload_lib, 'FindExistingPayloads')
    paygen_payload_lib.FindExistingPayloads(payload).AndReturn([])
    # Generation blows up; the lock must still be released and the error
    # must propagate (no FINISHED flag expectation is recorded).
    paygen._GeneratePayloads(payload_list, lock).AndRaise(mock_exception)
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    with self.assertRaises(Exception):
      paygen.CreatePayloads()
  def testCreatePayloadsSuccess(self):
    """Test the full CreatePayloads success path."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    finished_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED)
    lock = self.mox.CreateMockAnything()
    payload = 'foo'
    payload_list = [payload]
    payload_skip_list = [(payload, False)]
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    gslib.Exists(skip_uri).AndReturn(False)
    gslib.Exists(finished_uri).AndReturn(False)
    paygen._DiscoverRequiredPayloads(
        ).AndReturn(payload_skip_list)
    self.mox.StubOutWithMock(paygen_payload_lib, 'FindExistingPayloads')
    paygen_payload_lib.FindExistingPayloads(payload).AndReturn([])
    paygen._GeneratePayloads(payload_list, lock)
    # After generation: map to the archive build, schedule autotests,
    # clean up, and finally write the FINISHED flag.
    paygen._MapToArchive('foo-board', '1.2.3').AndReturn(
        ('archive_board', 'archive_build', 'archive_build_uri'))
    paygen._CreatePayloadTests(['foo']).AndReturn(['Test Payloads'])
    paygen._AutotestPayloads(['Test Payloads'])
    paygen._CleanupBuild()
    gslib.CreateWithContents(finished_uri, mox.IgnoreArg())
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    paygen.CreatePayloads()
  def testCreatePayloadsAlreadyExists(self):
    """Test CreatePayloads when some payloads already exist in GS."""
    paygen = self._CreatePayloadsSetup()
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    finished_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED)
    lock = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(paygen_payload_lib, 'FindExistingPayloads')
    self.mox.StubOutWithMock(paygen_payload_lib, 'SetPayloadUri')
    payload_existing = 'foo'
    payload_new = 'bar'
    payload_list = [(payload_existing, False), (payload_new, False)]
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    gslib.Exists(skip_uri).AndReturn(False)
    gslib.Exists(finished_uri).AndReturn(False)
    paygen._DiscoverRequiredPayloads(
        ).AndReturn(payload_list)
    # 'foo' already exists, so its URI is reused and only 'bar' is
    # regenerated.
    paygen_payload_lib.FindExistingPayloads(payload_existing).AndReturn(
        [payload_existing])
    paygen_payload_lib.FindExistingPayloads(payload_new).AndReturn([])
    paygen_payload_lib.SetPayloadUri(payload_existing, payload_existing)
    paygen._GeneratePayloads([payload_new], lock)
    # Tests are still scheduled for BOTH payloads, existing and new.
    paygen._MapToArchive('foo-board', '1.2.3').AndReturn(
        ('archive_board', 'archive_build', 'archive_build_uri'))
    paygen._CreatePayloadTests(['foo', 'bar']).AndReturn(['Test Payloads'])
    paygen._AutotestPayloads(['Test Payloads'])
    gslib.CreateWithContents(finished_uri, mox.IgnoreArg())
    paygen._CleanupBuild()
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    paygen.CreatePayloads()
  def testCreatePayloadsSkipTests(self):
    """Test CreatePayloads when tests are disabled."""
    paygen = self._CreatePayloadsSetup(skip_test_payloads=True,
                                       disable_tests=True)
    lock_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.LOCK)
    skip_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.SKIP)
    finished_uri = paygen._GetFlagURI(gspaths.ChromeosReleases.FINISHED)
    lock = self.mox.CreateMockAnything()
    payload = 'foo'
    payload_list = [payload]
    payload_skip_list = [(payload, False)]
    gslock.Lock(lock_uri, dry_run=False).AndReturn(lock)
    lock.__enter__().AndReturn(lock)
    gslib.Exists(skip_uri).AndReturn(False)
    gslib.Exists(finished_uri).AndReturn(False)
    paygen._DiscoverRequiredPayloads(
        ).AndReturn(payload_skip_list)
    self.mox.StubOutWithMock(paygen_payload_lib, 'FindExistingPayloads')
    paygen_payload_lib.FindExistingPayloads(payload).AndReturn([])
    paygen._GeneratePayloads(payload_list, lock)
    # With tests disabled, no _MapToArchive/_CreatePayloadTests/
    # _AutotestPayloads expectations are recorded — calling them would
    # fail replay verification.
    paygen._CleanupBuild()
    gslib.CreateWithContents(finished_uri, mox.IgnoreArg())
    lock.__exit__(
        mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    # Run the test verification.
    self.mox.ReplayAll()
    paygen.CreatePayloads()
  def testFindControlFileDir(self):
    """Test that we find control files in the proper directory."""
    # Test default dir in /tmp.
    result = paygen_build_lib._FindControlFileDir(None)
    self.assertTrue(os.path.isdir(result))
    tempdir = tempfile.tempdir or '/tmp'
    self.assertTrue(result.startswith(tempdir + '/'))
    # Remove the real directory the call created in the system temp dir.
    shutil.rmtree(result)
    # Test in specified dir.
    # (Presumably cleaned up with self.tempdir by the test fixture.)
    result = paygen_build_lib._FindControlFileDir(self.tempdir)
    self.assertTrue(os.path.isdir(result))
    self.assertTrue(result.startswith(
        os.path.join(self.tempdir, 'paygen_build-control_files')))
  @unittest.skipIf(not paygen_build_lib.config,
                   'Internal crostools repository needed.')
  @unittest.skipIf(not paygen_build_lib.test_control,
                   'Autotest repository needed.')
  def testEmitControlFile(self):
    """Test that we emit control files correctly."""
    payload = gspaths.Payload(tgt_image=self.npo_image,
                              src_image=self.basic_image)
    suite_name = 'paygen_foo'
    control_dir = tempfile.mkdtemp(prefix='control_dir-')
    paygen = paygen_build_lib._PaygenBuild(
        self.foo_build, self.tempdir, control_dir=control_dir)
    # Write a minimal autotest control file template for _EmitControlFile
    # to consume (delete=False so it survives the 'with' block).
    with tempfile.NamedTemporaryFile(prefix='control_file-', delete=False) as f:
      control_file_name = f.name
      f.write("""
        AUTHOR = "Chromium OS"
        NAME = "autoupdate_EndToEndTest"
        TIME = "MEDIUM"
        TEST_CATEGORY = "Functional"
        TEST_CLASS = "platform"
        TEST_TYPE = "server"
        DOC = "Faux doc"
        """)
    self.mox.StubOutWithMock(cbuildbot_config, 'FindFullConfigsForBoard')
    cbuildbot_config.FindFullConfigsForBoard().AndReturn(
        ([{'boards': ['foo_board']}], []))
    self.mox.StubOutWithMock(urilib, 'ListFiles')
    # First lookup: the payload artifact; second: the source archive build.
    urilib.ListFiles(
        gspaths.ChromeosReleases.PayloadUri(
            self.basic_image.channel, self.basic_image.board,
            self.basic_image.version,
            '*', bucket=self.basic_image.bucket)).AndReturn(
                ['gs://foo/bar.tar.bz2'])
    urilib.ListFiles(
        gspaths.ChromeosImageArchive.BuildUri(
            'foo_board', '*', self.basic_image.version)).AndReturn(
                ['gs://foo-archive/src-build'])
    self.mox.StubOutWithMock(
        paygen_build_lib.test_control, 'get_control_file_name')
    paygen_build_lib.test_control.get_control_file_name().AndReturn(
        control_file_name)
    self.mox.ReplayAll()
    payload_test = paygen_build_lib._PaygenBuild.PayloadTest(payload)
    paygen._EmitControlFile(payload_test, suite_name, control_dir)
    # Clean up the real filesystem artifacts created above.
    shutil.rmtree(control_dir)
    os.remove(control_file_name)
def testAutotestPayloads(self):
    """Test the process of scheduling HWLab tests.

    Verifies that control files are emitted per payload, tarred up,
    uploaded to GS, and that run_suite.py is invoked to create and then
    wait on the suite job.
    """
    control_dir = '/tmp/control_dir'
    paygen = paygen_build_lib._PaygenBuild(
        self.foo_build, self.tempdir, control_dir=control_dir)
    control_dump_dir = os.path.join(control_dir, paygen.CONTROL_FILE_SUBDIR)
    payloads = ['foo', 'bar']
    # Suite/tarball names derive from the channel minus its '-channel' suffix.
    test_channel = self.foo_build.channel.rpartition('-')[0]
    suite_name = paygen.PAYGEN_AU_SUITE_TEMPLATE % test_channel
    tarball_name = paygen.CONTROL_TARBALL_TEMPLATE % test_channel
    tarball_path = os.path.join(control_dir, tarball_name)
    test_archive_build = '%s-release/R99-%s' % (self.foo_build.board,
                                                self.foo_build.version)
    test_archive_build_uri = ('gs://chromeos-image-archive/%s' %
                              test_archive_build)
    test_upload_path = os.path.join(test_archive_build_uri, tarball_name)
    # Expect the control-file dump directory to be created...
    self.mox.StubOutWithMock(os, 'makedirs')
    os.makedirs(os.path.join(control_dir, paygen.CONTROL_FILE_SUBDIR))
    # ...one control file emitted per payload...
    self.mox.StubOutWithMock(paygen, '_EmitControlFile')
    paygen._EmitControlFile('foo', suite_name, control_dump_dir)
    paygen._EmitControlFile('bar', suite_name, control_dump_dir)
    # ...and the control files tarred up for upload.
    self.mox.StubOutWithMock(cros_build_lib, 'CreateTarball')
    cros_build_lib.CreateTarball(
        tarball_path, control_dir,
        compression=cros_build_lib.COMP_BZIP2,
        inputs=[paygen.CONTROL_FILE_SUBDIR]).AndReturn(
            cros_build_lib.CommandResult(returncode=0))
    # Setup preliminary values needed for running autotests.
    paygen._archive_board = self.foo_build.board
    paygen._archive_build = test_archive_build
    paygen._archive_build_uri = test_archive_build_uri
    # The tarball is uploaded world-readable next to the image archive.
    self.mox.StubOutWithMock(gslib, 'Copy')
    gslib.Copy(tarball_path, test_upload_path, acl='public-read')
    # Both utils and cros_build_lib versions of RunCommand exist. For now, stub
    # them both out just to be safe (don't want unit tests running actual
    # commands).
    # TODO(garnold) remove the dryrun argument.
    self.mox.StubOutWithMock(utils, 'RunCommand')
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
    timeout_mins = cbuildbot_config.HWTestConfig.DEFAULT_HW_TEST_TIMEOUT / 60
    expected_command = [
        mox.StrContains('site_utils/run_suite.py'),
        '--board', 'foo-board',
        '--build', 'foo-board-release/R99-1.2.3',
        '--suite_name', 'paygen_au_foo',
        '--file_bugs', 'True',
        '--pool', 'bvt',
        '--retry', 'True',
        '--timeout_mins', str(timeout_mins),
        '--no_wait', 'False',
        '--suite_min_duts', '2']
    # Sample run_suite.py output; the job id (26960110) is parsed out of it.
    job_id_output = '''
Autotest instance: cautotest
02-23-2015 [06:26:51] Submitted create_suite_job rpc
02-23-2015 [06:26:53] Created suite job: http://cautotest.corp.google.com/afe/#tab_id=view_job&object_id=26960110
@@@STEP_LINK@Suite created@http://cautotest.corp.google.com/afe/#tab_id=view_job&object_id=26960110@@@
The suite job has another 3:09:50.012887 till timeout.
The suite job has another 2:39:39.789250 till timeout.
'''
    # First invocation creates the suite (-c); second waits on it (-m <id>).
    cros_build_lib.RunCommand(
        expected_command + ['-c'], capture_output=True,
        combine_stdout_stderr=True).AndReturn(
            utils.CommandResult(returncode=0, output=job_id_output))
    cros_build_lib.RunCommand(
        expected_command + ['-m', '26960110']).AndReturn(utils.CommandResult(
            returncode=0,
            output=job_id_output))
    self.mox.ReplayAll()
    paygen._AutotestPayloads(payloads)
def testScheduleAutotestTestsNormal(self):
    """Test scheduling autotest tests with run_suite.py."""
    paygen = paygen_build_lib._PaygenBuild(
        self.foo_build, self.tempdir)
    # Stub out every command-running path so no real process is spawned.
    self.mox.StubOutWithMock(commands, 'RunHWTestSuite')
    self.mox.StubOutWithMock(utils, 'RunCommand')
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
    timeout_mins = cbuildbot_config.HWTestConfig.DEFAULT_HW_TEST_TIMEOUT / 60
    # Expected run_suite.py invocation for creating the suite.
    expected_command = [
        mox.StrContains('site_utils/run_suite.py'),
        '--board', 'foo-board',
        '--build', 'foo-board-release/R99-1.2.3',
        '--suite_name', 'paygen_au_foo',
        '--file_bugs', 'True',
        '--pool', 'bvt',
        '--retry', 'True',
        '--timeout_mins', str(timeout_mins),
        '--no_wait', 'False',
        '--suite_min_duts', '2']
    cros_build_lib.RunCommand(
        expected_command + ['-c'], capture_output=True,
        combine_stdout_stderr=True).AndReturn(
            utils.CommandResult(returncode=0, output=''))
    self.mox.ReplayAll()
    # Setup preliminary values needed for scheduling autotests.
    paygen._archive_board = 'foo-board'
    paygen._archive_build = 'foo-board-release/R99-1.2.3'
    paygen._ScheduleAutotestTests('paygen_au_foo')
def testScheduleAutotestTestsBuilderEnvironment(self):
    """Test scheduling autotest tests with build autotest proxy."""
    # run_on_builder=True routes scheduling through commands.RunHWTestSuite
    # instead of invoking run_suite.py directly.
    paygen = paygen_build_lib._PaygenBuild(
        self.foo_build, self.tempdir, run_on_builder=True)
    self.mox.StubOutWithMock(commands, 'RunHWTestSuite')
    self.mox.StubOutWithMock(utils, 'RunCommand')
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
    timeout_mins = cbuildbot_config.HWTestConfig.DEFAULT_HW_TEST_TIMEOUT / 60
    paygen_build_lib.commands.RunHWTestSuite(
        board='foo-board', build='foo-board-release/R99-1.2.3', file_bugs=True,
        pool='bvt', priority=constants.HWTEST_BUILD_PRIORITY,
        suite='paygen_au_foo', timeout_mins=timeout_mins,
        retry=True, wait_for_results=True, suite_min_duts=2, debug=False)
    self.mox.ReplayAll()
    # Setup preliminary values needed for scheduling autotests.
    paygen._archive_board = 'foo-board'
    paygen._archive_build = 'foo-board-release/R99-1.2.3'
    paygen._ScheduleAutotestTests('paygen_au_foo')
def testScheduleAutotestTestsBuilderEnvironmentWarn(self):
    """Test that a TestWarning from the autotest proxy is not fatal."""
    paygen = paygen_build_lib._PaygenBuild(
        self.foo_build, self.tempdir, run_on_builder=True)
    self.mox.StubOutWithMock(commands, 'RunHWTestSuite')
    self.mox.StubOutWithMock(utils, 'RunCommand')
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
    timeout_mins = cbuildbot_config.HWTestConfig.DEFAULT_HW_TEST_TIMEOUT / 60
    # The proxy raises TestWarning; scheduling should tolerate it rather
    # than propagate a failure.
    paygen_build_lib.commands.RunHWTestSuite(
        board='foo-board', build='foo-board-release/R99-1.2.3', file_bugs=True,
        pool='bvt', priority=constants.HWTEST_BUILD_PRIORITY,
        suite='paygen_au_foo', timeout_mins=timeout_mins,
        retry=True, wait_for_results=True, suite_min_duts=2,
        debug=False).AndRaise(
            failures_lib.TestWarning('** Suite passed with a warning code **'))
    self.mox.ReplayAll()
    # Setup preliminary values needed for scheduling autotests.
    paygen._archive_board = 'foo-board'
    paygen._archive_build = 'foo-board-release/R99-1.2.3'
    paygen._ScheduleAutotestTests('paygen_au_foo')
def testMapToArchive(self):
    """Test that mapping to images archive names/locations works."""
    self.mox.StubOutWithMock(cbuildbot_config, 'FindFullConfigsForBoard')
    # 'bar' matches two build boards below, to exercise the ambiguity error.
    cbuildbot_config.FindFullConfigsForBoard().MultipleTimes().AndReturn(
        ([{'boards': ['foo_board', 'bar_board', 'bar-board']}], []))
    self.mox.StubOutWithMock(urilib, 'ListFiles')
    urilib.ListFiles(
        gspaths.ChromeosImageArchive.BuildUri(
            'foo_board', '*', '1.2.3')).AndReturn(
                ['gs://foo-archive/foo_board/R11-1.2.3/somefile'])
    self.mox.ReplayAll()
    # Case 1: mapping successful.
    self.assertEqual(
        paygen_build_lib._PaygenBuild._MapToArchive('foo-board', '1.2.3'),
        ('foo_board', 'foo_board/R11-1.2.3',
         'gs://foo-archive/foo_board/R11-1.2.3'))
    # Case 2: failure, too many build board names found.
    with self.assertRaises(paygen_build_lib.ArchiveError):
        paygen_build_lib._PaygenBuild._MapToArchive('bar-board', '1.2.3')
    # Case 3: failure, build board name not found.
    with self.assertRaises(paygen_build_lib.ArchiveError):
        paygen_build_lib._PaygenBuild._MapToArchive('baz-board', '1.2.3')
def testValidateBoardConfig(self):
    """Exercise ValidateBoardConfig for known and unknown boards."""
    # If we are running on an external builder, we can't see the config.
    # Without the config, we can't validate anything.
    if not paygen_build_lib.config:
        return
    # A configured board validates without raising.
    paygen_build_lib.ValidateBoardConfig('x86-mario')
    # An unconfigured board raises BoardNotConfigured.
    with self.assertRaises(paygen_build_lib.BoardNotConfigured):
        paygen_build_lib.ValidateBoardConfig('goofy-board')
| bsd-3-clause |
fnouama/intellij-community | python/lib/Lib/site-packages/django/contrib/contenttypes/models.py | 307 | 4052 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
class ContentTypeManager(models.Manager):
    """Manager for ContentType that memoizes lookups aggressively.

    ContentType rows are looked up constantly (generic relations, admin,
    permissions), so results are stored in a class-level cache shared by
    all the get_for_* methods, keyed first by database alias and then by
    either an (app_label, model) tuple or a primary key.
    """

    # Cache to avoid re-looking up ContentType objects all over the place.
    # This cache is shared by all the get_for_* methods.
    _cache = {}

    def get_by_natural_key(self, app_label, model):
        """Return the ContentType for the natural key (app_label, model)."""
        try:
            ct = self.__class__._cache[self.db][(app_label, model)]
        except KeyError:
            ct = self.get(app_label=app_label, model=model)
            # Populate the shared cache so subsequent natural-key,
            # get_for_model, or get_for_id lookups skip the database.
            # Previously this result was never cached here, unlike the
            # other lookup methods in this manager.
            self._add_to_cache(self.db, ct)
        return ct

    def get_for_model(self, model):
        """
        Returns the ContentType object for a given model, creating the
        ContentType if necessary. Lookups are cached so that subsequent lookups
        for the same model don't hit the database.
        """
        opts = model._meta
        # Resolve proxy models to the concrete model they stand in for.
        while opts.proxy:
            model = opts.proxy_for_model
            opts = model._meta
        key = (opts.app_label, opts.object_name.lower())
        try:
            ct = self.__class__._cache[self.db][key]
        except KeyError:
            # Load or create the ContentType entry. The smart_unicode() is
            # needed around opts.verbose_name_raw because name_raw might be a
            # django.utils.functional.__proxy__ object.
            ct, created = self.get_or_create(
                app_label = opts.app_label,
                model = opts.object_name.lower(),
                defaults = {'name': smart_unicode(opts.verbose_name_raw)},
            )
            self._add_to_cache(self.db, ct)
        return ct

    def get_for_id(self, id):
        """
        Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_by_id).
        """
        try:
            ct = self.__class__._cache[self.db][id]
        except KeyError:
            # This could raise a DoesNotExist; that's correct behavior and will
            # make sure that only correct ctypes get stored in the cache dict.
            ct = self.get(pk=id)
            self._add_to_cache(self.db, ct)
        return ct

    def clear_cache(self):
        """
        Clear out the content-type cache. This needs to happen during database
        flushes to prevent caching of "stale" content type IDs (see
        django.contrib.contenttypes.management.update_contenttypes for where
        this gets called).
        """
        self.__class__._cache.clear()

    def _add_to_cache(self, using, ct):
        """Insert a ContentType into the cache."""
        # Index under both the (app_label, model) natural key and the pk so
        # every lookup style can hit the cache.
        model = ct.model_class()
        key = (model._meta.app_label, model._meta.object_name.lower())
        self.__class__._cache.setdefault(using, {})[key] = ct
        self.__class__._cache.setdefault(using, {})[ct.id] = ct
class ContentType(models.Model):
    # Human-readable name of the content type (from verbose_name_raw).
    name = models.CharField(max_length=100)
    app_label = models.CharField(max_length=100)
    # Lower-cased model class name, e.g. "contenttype".
    model = models.CharField(_('python model class name'), max_length=100)
    objects = ContentTypeManager()

    class Meta:
        verbose_name = _('content type')
        verbose_name_plural = _('content types')
        db_table = 'django_content_type'
        ordering = ('name',)
        # One ContentType row per (app_label, model) pair.
        unique_together = (('app_label', 'model'),)

    def __unicode__(self):
        return self.name

    def model_class(self):
        "Returns the Python model class for this type of content."
        from django.db import models
        return models.get_model(self.app_label, self.model)

    def get_object_for_this_type(self, **kwargs):
        """
        Returns an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get_object() model
        method. The ObjectNotExist exception, if thrown, will not be caught,
        so code that calls this method should catch it.
        """
        # Query against the same database this ContentType row came from.
        return self.model_class()._default_manager.using(self._state.db).get(**kwargs)

    def natural_key(self):
        # Used by serialization to reference content types portably
        # (pairs with ContentTypeManager.get_by_natural_key).
        return (self.app_label, self.model)
| apache-2.0 |
brandonium21/snowflake | snowflakeEnv/lib/python2.7/site-packages/mako/cache.py | 60 | 7730 | # mako/cache.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import compat, util
# Registry of named CacheImpl plugins; Cache._load_impl resolves backend
# names through this loader.
_cache_plugins = util.PluginLoader("mako.cache")

# Expose registration so third parties can add cache backends; the bundled
# Beaker backend is registered by default.
register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class Cache(object):
    """Represents a data content cache made available to the module
    space of a specific :class:`.Template` object.

    .. versionadded:: 0.6
       :class:`.Cache` by itself is mostly a
       container for a :class:`.CacheImpl` object, which implements
       a fixed API to provide caching services; specific subclasses exist to
       implement different
       caching strategies.   Mako includes a backend that works with
       the Beaker caching system.   Beaker itself then supports
       a number of backends (i.e. file, memory, memcached, etc.)

    The construction of a :class:`.Cache` is part of the mechanics
    of a :class:`.Template`, and programmatic access to this
    cache is typically via the :attr:`.Template.cache` attribute.

    """

    impl = None
    """Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.

    This accessor allows a :class:`.CacheImpl` with additional
    methods beyond that of :class:`.Cache` to be used programmatically.

    """

    id = None
    """Return the 'id' that identifies this cache.

    This is a value that should be globally unique to the
    :class:`.Template` associated with this cache, and can
    be used by a caching system to name a local container
    for data specific to this template.

    """

    starttime = None
    """Epochal time value for when the owning :class:`.Template` was
    first compiled.

    A cache implementation may wish to invalidate data earlier than
    this timestamp; this has the effect of the cache for a specific
    :class:`.Template` starting clean any time the :class:`.Template`
    is recompiled, such as when the original template file changed on
    the filesystem.

    """

    def __init__(self, template, *args):
        # check for a stale template calling the
        # constructor: older Mako passed string positional args here;
        # such a call cannot be initialized meaningfully, so bail out.
        if isinstance(template, compat.string_types) and args:
            return
        self.template = template
        self.id = template.module.__name__
        self.starttime = template.module._modified_time
        # Per-<%def> cache configuration, memoized by def name.
        self._def_regions = {}
        self.impl = self._load_impl(self.template.cache_impl)

    def _load_impl(self, name):
        # Instantiate the named CacheImpl plugin, bound to this Cache.
        return _cache_plugins.load(name)(self)

    def get_or_create(self, key, creation_function, **kw):
        """Retrieve a value from the cache, using the given creation function
        to generate a new value."""
        return self._ctx_get_or_create(key, creation_function, None, **kw)

    def _ctx_get_or_create(self, key, creation_function, context, **kw):
        """Retrieve a value from the cache, using the given creation function
        to generate a new value."""
        if not self.template.cache_enabled:
            # Caching disabled on the template: always regenerate, never store.
            return creation_function()

        return self.impl.get_or_create(
            key,
            creation_function,
            **self._get_cache_kw(kw, context))

    def set(self, key, value, **kw):
        """Place a value in the cache.

        :param key: the value's key.
        :param value: the value.
        :param ``**kw``: cache configuration arguments.

        """
        self.impl.set(key, value, **self._get_cache_kw(kw, None))

    put = set
    """A synonym for :meth:`.Cache.set`.

    This is here for backwards compatibility.

    """

    def get(self, key, **kw):
        """Retrieve a value from the cache.

        :param key: the value's key.
        :param ``**kw``: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.

        """
        return self.impl.get(key, **self._get_cache_kw(kw, None))

    def invalidate(self, key, **kw):
        """Invalidate a value in the cache.

        :param key: the value's key.
        :param ``**kw``: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.

        """
        self.impl.invalidate(key, **self._get_cache_kw(kw, None))

    def invalidate_body(self):
        """Invalidate the cached content of the "body" method for this
        template.

        """
        self.invalidate('render_body', __M_defname='render_body')

    def invalidate_def(self, name):
        """Invalidate the cached content of a particular ``<%def>`` within this
        template.

        """
        self.invalidate('render_%s' % name, __M_defname='render_%s' % name)

    def invalidate_closure(self, name):
        """Invalidate a nested ``<%def>`` within this template.

        Caching of nested defs is a blunt tool as there is no
        management of scope -- nested defs that use cache tags
        need to have names unique of all other nested defs in the
        template, else their content will be overwritten by
        each other.

        """
        self.invalidate(name, __M_defname=name)

    def _get_cache_kw(self, kw, context):
        # Merge per-call kw over the template-wide cache_args; when a
        # __M_defname is given, memoize the merged dict per def name so the
        # same <%def> reuses its configuration on later calls.
        defname = kw.pop('__M_defname', None)
        if not defname:
            tmpl_kw = self.template.cache_args.copy()
            tmpl_kw.update(kw)
        elif defname in self._def_regions:
            tmpl_kw = self._def_regions[defname]
        else:
            tmpl_kw = self.template.cache_args.copy()
            tmpl_kw.update(kw)
            self._def_regions[defname] = tmpl_kw
        if context and self.impl.pass_context:
            # Hand the runtime Context to impls that ask for it, copying
            # first so the memoized dict is not mutated.
            tmpl_kw = tmpl_kw.copy()
            tmpl_kw.setdefault('context', context)
        return tmpl_kw
class CacheImpl(object):
    """Provide a cache implementation for use by :class:`.Cache`.

    Subclasses implement the four storage operations below against a
    concrete backend (e.g. the bundled Beaker plugin).
    """

    def __init__(self, cache):
        # The owning Cache facade; impls may read cache.id / cache.starttime.
        self.cache = cache

    pass_context = False
    """If ``True``, the :class:`.Context` will be passed to
    :meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
    """

    def get_or_create(self, key, creation_function, **kw):
        """Retrieve a value from the cache, using the given creation function
        to generate a new value.

        This function *must* return a value, either from
        the cache, or via the given creation function.
        If the creation function is called, the newly
        created value should be populated into the cache
        under the given key before being returned.

        :param key: the value's key.
        :param creation_function: function that when called generates
         a new value.
        :param ``**kw``: cache configuration arguments.

        """
        raise NotImplementedError()

    def set(self, key, value, **kw):
        """Place a value in the cache.

        :param key: the value's key.
        :param value: the value.
        :param ``**kw``: cache configuration arguments.

        """
        raise NotImplementedError()

    def get(self, key, **kw):
        """Retrieve a value from the cache.

        :param key: the value's key.
        :param ``**kw``: cache configuration arguments.

        """
        raise NotImplementedError()

    def invalidate(self, key, **kw):
        """Invalidate a value in the cache.

        :param key: the value's key.
        :param ``**kw``: cache configuration arguments.

        """
        raise NotImplementedError()
| bsd-2-clause |
zamont/core-setup | tools-local/setuptools/dotnet-deb-tool/tool/scripts/manpage_generator.py | 32 | 9419 | #!/usr/bin/python
#
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
#
# manpage_generator
# Converts top level docs.json format command info to
# nroff manpage format. Done in python for easy json parsing.
#
# Usage: argv[1] = path to docs.json; argv[2] = output path
import sys
import os
import json
import datetime
# nroff paragraph break inserted between generated man page sections.
SECTION_SEPARATOR = "\n.P \n"
# Man pages in section 1 carry a ".1" filename extension.
MANPAGE_EXTENSION = ".1"

# For now this is a magic number
# See https://www.debian.org/doc/manuals/maint-guide/dother.en.html#manpage
SECTION_NUMBER = 1
def generate_man_pages(doc_path, output_dir):
    """Parse the docs.json at doc_path and write one man page per tool
    into output_dir.

    Raises Exception if the json cannot be parsed or lacks a 'tools' key.
    """
    with open(doc_path) as doc_file:
        doc_json = None
        try:
            doc_json = json.load(doc_file)
        except:
            # NOTE(review): bare except discards the original parse error;
            # narrowing to ValueError would avoid masking unrelated failures.
            raise Exception("Failed to load json file. Check formatting.")
    tools = doc_json.get("tools", None)
    if tools is None:
        raise Exception("No tool sections in doc.json")
    for tool_name in tools:
        tool_data = tools[tool_name]
        man_page_content = generate_man_page(tool_name, tool_data)
        man_page_path = get_output_path(tool_name, output_dir)
        write_man_page(man_page_path, man_page_content)
def get_output_path(toolname, output_dir):
    """Return the destination path for toolname's man page file."""
    filename = "{0}{1}".format(toolname, MANPAGE_EXTENSION)
    return os.path.join(output_dir, filename)
def write_man_page(path, content):
    """Write man page *content* to *path*, ensuring a trailing newline."""
    with open(path, 'w') as out:
        out.write(content)
        # The Debian package build fails without a final newline.
        out.write('\n')
def generate_man_page(tool_name, tool_data):
    """Assemble the full nroff man page text for a single tool."""
    section_builders = (
        generate_header_section,
        generate_name_section,
        generate_synopsis_section,
        generate_description_section,
        generate_options_section,
        generate_author_section,
        generate_copyright_section,
    )
    return SECTION_SEPARATOR.join(
        build(tool_name, tool_data) for build in section_builders)
def generate_header_section(tool_name, tool_data):
    """Generate the .TH title-header line of the man page.

    The footer/header text fields are deliberately left empty; only the
    program name and manual section number are filled in. tool_data is
    unused but kept for signature parity with the other section builders.

    (The previous version computed today's date here but never used it;
    that dead code has been removed.)
    """
    header_format = (".TH {program_name} {section_number} {center_footer} "
                     "{left_footer} {center_header}")
    format_args = {
        "program_name": tool_name,
        "section_number": SECTION_NUMBER,
        "center_footer": "",  # Omitted
        "left_footer": "",  # Omitted
        "center_header": ""  # Omitted
    }
    return header_format.format(**format_args)
def generate_name_section(tool_name, tool_data):
    """Generate the .SH NAME section: program name and short description."""
    short_description = tool_data.get("short_description", "")
    name_line = ".B {program_name} - {short_description}".format(
        program_name=tool_name,
        short_description=short_description)
    return SECTION_SEPARATOR.join([".SH NAME", name_line])
def generate_synopsis_section(tool_name, tool_data):
    """Generate the .SH SYNOPSIS section: one usage line per command.

    Each line shows the program and command name, the command's bracketed
    options, and the name of its trailing argument list (if any).
    """
    roff_text_builder = []
    roff_text_builder.append(".SH SYNOPSIS")
    synopsis_format = '.B {program_name} {command_name} \n.RI {options} " "\n.I "{argument_list_name}"'
    tool_commands = tool_data.get("commands", [])
    for command_name in tool_commands:
        command_data = tool_commands[command_name]
        # Default options to empty list so the loop doesn't blow up
        options = command_data.get("options", [])
        argument_list = command_data.get("argumentlist", None)
        # Construct Option Strings (bracketed, e.g. [ "-v" | "--verbose" ])
        option_string_list = []
        argument_list_name = ""
        for option_name in options:
            option_data = options[option_name]
            specifier_short = option_data.get("short", None)
            specifier_long = option_data.get("long", None)
            parameter = option_data.get("parameter", None)
            option_string = _option_string_helper(specifier_short, specifier_long, parameter)
            option_string_list.append(option_string)
        # Populate Argument List Name
        if argument_list:
            argument_list_name = argument_list.get("name", "")
        cmd_format_args = {
            'program_name': tool_name,
            'command_name': command_name,
            'options': '" "'.join(option_string_list),
            'argument_list_name': argument_list_name
        }
        cmd_string = synopsis_format.format(**cmd_format_args)
        roff_text_builder.append(cmd_string)
    return SECTION_SEPARATOR.join(roff_text_builder)
def generate_description_section(tool_name, tool_data):
    """Generate the .SH DESCRIPTION section: the tool's long description
    followed by one short paragraph per command."""
    sections = [
        ".SH DESCRIPTION",
        ".PP {0}".format(tool_data.get("long_description", "")),
    ]
    commands = tool_data.get("commands", [])
    for cmd_name in commands:
        cmd_description = commands[cmd_name].get("description", "")
        sections.append(
            ".B {program_name} {command_name}\n{command_description}".format(
                program_name=tool_name,
                command_name=cmd_name,
                command_description=cmd_description))
    return SECTION_SEPARATOR.join(sections)
def generate_options_section(tool_name, tool_data):
    """Generate the .SH OPTIONS section: one .TP entry per option of every
    command, listing its specifiers, parameter, and description.
    """
    roff_text_builder = []
    roff_text_builder.append(".SH OPTIONS")
    options_format = '.TP\n.B {option_specifiers}\n{option_description}'
    tool_commands = tool_data.get("commands", [])
    for command_name in tool_commands:
        command_data = tool_commands[command_name]
        # Default to empty list so the loop doesn't blow up
        options = command_data.get("options", [])
        for option_name in options:
            option_data = options[option_name]
            specifier_short = option_data.get("short", None)
            specifier_long = option_data.get("long", None)
            parameter = option_data.get("parameter", None)
            description = option_data.get("description", "")
            # Comma-separated specifiers with no surrounding brackets,
            # e.g.: "-v" ", " "--verbose" VALUE
            option_specifiers_string = _option_string_helper(specifier_short,
                                                             specifier_long,
                                                             parameter,
                                                             include_brackets=False,
                                                             delimiter=' ", " ')
            format_args = {
                "option_specifiers": option_specifiers_string,
                "option_description": description
            }
            roff_text_builder.append(options_format.format(**format_args))
    return SECTION_SEPARATOR.join(roff_text_builder)
def generate_author_section(tool_name, tool_data):
    """Generate the .SH AUTHOR section from author/author_email fields."""
    author_line = '.B "{author_name}" " " \n.RI ( "{author_email}" )'.format(
        author_name=tool_data.get("author", ""),
        author_email=tool_data.get("author_email", ""))
    return SECTION_SEPARATOR.join([".SH AUTHOR", author_line])
def generate_copyright_section(tool_name, tool_data):
    """Generate the .SH COPYRIGHT section.

    Missing copyright data now defaults to an empty string instead of
    rendering the literal text "None" (tool_data.get("copyright") with no
    default returned None). tool_name is unused but kept for signature
    parity with the other section builders.
    """
    copyright_data = tool_data.get("copyright", "")
    return SECTION_SEPARATOR.join(
        [".SH COPYRIGHT", '.B "{0}"'.format(copyright_data)])
def _option_string_helper(specifier_short, specifier_long, parameter, include_brackets = True, delimiter = " | "):
option_string = ""
if include_brackets:
option_string = " [ "
if specifier_short:
option_string += ' "{0}" '.format(specifier_short)
if specifier_short and specifier_long:
option_string += delimiter
if specifier_long:
option_string += ' "{0}" '.format(specifier_long)
if parameter:
option_string += ' " " '
option_string += ' "{0}" '.format(parameter)
if include_brackets:
option_string += " ] "
return option_string
def print_usage():
    """Print command-line usage help for this script."""
    # Parenthesized single-argument prints behave identically under
    # Python 2 and Python 3 (unlike the bare print statement used before).
    print("Usage: argv[1] = path to docs.json; argv[2] = output path")
    print("Example: manpage_generator.py ../docs.json ./dotnet-1.0/debian")
def parse_args():
    """Return (docs.json path, output directory) from the command line."""
    return sys.argv[1], sys.argv[2]
def validate_args(doc_path, output_dir):
    """Validate CLI arguments, raising Exception on the first bad path."""
    checks = (
        (os.path.isfile(doc_path), "Docs.json path is not valid."),
        (os.path.isdir(output_dir), "Output Directory Path is not valid."),
    )
    for ok, message in checks:
        if not ok:
            raise Exception(message)
def execute_command_line():
    """CLI driver: parse args, validate them, and generate the man pages.

    Any failure is reported to stdout along with usage help rather than
    propagating a traceback.
    """
    try:
        doc_path, output_dir = parse_args()
        validate_args(doc_path, output_dir)
        generate_man_pages(doc_path, output_dir)
    except Exception as exc:
        # print() with two args reproduces the old "Error:  <msg>" output
        # under Python 3 (the bare print statement used before was py2-only).
        print("Error: ", exc)
        print_usage()
# Script entry point: run the CLI driver only when invoked directly.
if __name__ == "__main__":
    execute_command_line()
| mit |
cg31/tensorflow | tensorflow/examples/learn/text_classification.py | 6 | 4438 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
# Populated in __main__ with the parsed command-line flags; read by main().
FLAGS = None

# Maximum number of word tokens kept per document (longer docs truncated).
MAX_DOCUMENT_LENGTH = 10
# Dimensionality of the learned word embeddings.
EMBEDDING_SIZE = 50
# Vocabulary size; set in main() after fitting the VocabularyProcessor.
n_words = 0
def bag_of_words_model(x, y):
    """A bag-of-words model. Note it disregards the word order in the text.

    Returns (predictions dict, loss, train_op) in the Estimator model_fn
    tuple format used by tf.contrib.learn.
    """
    # 15 target classes, one-hot encoded (DBpedia has 14 categories plus
    # the 0 index -- presumably; verify against the dataset labels).
    target = tf.one_hot(y, 15, 1, 0)
    # Embedding lookup: word ids -> [batch, seq_len, EMBEDDING_SIZE].
    word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
                                                  embedding_size=EMBEDDING_SIZE, name='words')
    # Max-pool across the word axis: an order-insensitive document feature.
    features = tf.reduce_max(word_vectors, reduction_indices=1)
    prediction, loss = learn.models.logistic_regression(features, target)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer='Adam', learning_rate=0.01)
    return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def rnn_model(x, y):
    """Recurrent neural network model to predict from sequence of words
    to a class.

    Returns (predictions dict, loss, train_op) in the Estimator model_fn
    tuple format; interchangeable with bag_of_words_model in main().
    """
    # Convert indexes of words into embeddings.
    # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
                                                  embedding_size=EMBEDDING_SIZE, name='words')
    # Split into list of embedding per word, while removing doc length dim.
    # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
    word_list = tf.unpack(word_vectors, axis=1)
    # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
    cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
    # Create an unrolled Recurrent Neural Networks to length of
    # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
    _, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
    # Given encoding of RNN, take encoding of last step (e.g hidden size of the
    # neural network of last step) and pass it as features for logistic
    # regression over output classes.
    target = tf.one_hot(y, 15, 1, 0)
    prediction, loss = learn.models.logistic_regression(encoding, target)
    # Create a training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer='Adam', learning_rate=0.01)
    return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
    """Load DBpedia, train the bag-of-words classifier, print accuracy."""
    global n_words
    # Prepare training and testing data
    dbpedia = learn.datasets.load_dataset(
        'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
    # Column 1 is used as the document text -- presumably column 0 is a
    # title/id field; confirm against the dbpedia dataset layout.
    x_train = pandas.DataFrame(dbpedia.train.data)[1]
    y_train = pandas.Series(dbpedia.train.target)
    x_test = pandas.DataFrame(dbpedia.test.data)[1]
    y_test = pandas.Series(dbpedia.test.target)
    # Process vocabulary: map each document to a fixed-length sequence of
    # word ids (padded/truncated to MAX_DOCUMENT_LENGTH).
    vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
    x_train = np.array(list(vocab_processor.fit_transform(x_train)))
    x_test = np.array(list(vocab_processor.transform(x_test)))
    n_words = len(vocab_processor.vocabulary_)
    print('Total words: %d' % n_words)
    # Build model
    # NOTE(review): rnn_model above is unused; swap it in as model_fn here
    # to try the RNN variant.
    classifier = learn.Estimator(model_fn=bag_of_words_model)
    # Train and predict
    classifier.fit(x_train, y_train, steps=100)
    y_predicted = [
        p['class'] for p in classifier.predict(x_test, as_iterable=True)]
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--test_with_fake_data',
        default=False,
        help='Test the example code with fake data.',
        action='store_true'
    )
    # Parsed flags are published through the module-level FLAGS global,
    # which main() reads; tf.app.run() then invokes main().
    FLAGS = parser.parse_args()
    tf.app.run()
| apache-2.0 |
jiwang576/incubator-airflow | dags/test_dag.py | 14 | 1336 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import utils
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
now = datetime.now()
# timedelta positional args are (days, seconds, microseconds, milliseconds,
# minutes, hours): this is "three hours ago", truncated to the whole hour.
now_to_the_hour = (now - timedelta(0, 0, 0, 0, 0, 3)).replace(minute=0, second=0, microsecond=0)
# NOTE(review): START_DATE is computed but never used below -- default_args
# uses utils.dates.days_ago(2) instead; confirm which start date is intended.
START_DATE = now_to_the_hour
DAG_NAME = 'test_dag_v1'

default_args = {
    'owner': 'airflow',
    'depends_on_past': True,
    'start_date': utils.dates.days_ago(2)
}
# Run every 10 minutes; three dummy tasks form a linear chain 1 -> 2 -> 3.
dag = DAG(DAG_NAME, schedule_interval='*/10 * * * *', default_args=default_args)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag)
run_this_3.set_upstream(run_this_2)
| apache-2.0 |
GREO/gnuradio-git | gnuradio-examples/python/usrp/usrp_am_mw_rcv.py | 9 | 13134 | #!/usr/bin/env python
#
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, eng_notation, optfir
from gnuradio import audio
from gnuradio import usrp
from gnuradio import blks2
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import slider, powermate
from gnuradio.wxgui import stdgui2, fftsink2, form
from optparse import OptionParser
from usrpm import usrp_dbid
import sys
import math
import wx
def pick_subdevice(u):
    """
    Pick a default USRP Rx daughterboard when the user didn't specify a
    subdevice on the command line.

    Preference order: BASIC_RX, LF_RX, then the TV_RX variants; the
    helper falls back to whatever is on side A.

    @param u: usrp source whose daughterboards are inspected
    @return a subdev_spec
    """
    return usrp.pick_subdev(u, (usrp_dbid.BASIC_RX,
                                usrp_dbid.LF_RX,
                                usrp_dbid.TV_RX,
                                usrp_dbid.TV_RX_REV_2,
                                usrp_dbid.TV_RX_REV_3,
                                usrp_dbid.TV_RX_MIMO,
                                usrp_dbid.TV_RX_REV_2_MIMO,
                                usrp_dbid.TV_RX_REV_3_MIMO))
class wfm_rx_block (stdgui2.std_top_block):
    """wxPython GUI flow graph for a broadcast AM (medium-wave) receiver.

    Signal chain: USRP complex source -> channel filter (a
    frequency-translating filter when an IF is used, to dodge DC offset
    problems) -> complex magnitude AM demod -> audio low-pass ->
    volume multiplier -> sound card sink.  FFT displays and slider
    controls (frequency / volume / gain) are added around it, plus
    optional Powermate knob support.
    """
    def __init__(self,frame,panel,vbox,argv):
        stdgui2.std_top_block.__init__ (self,frame,panel,vbox,argv)
        parser=OptionParser(option_class=eng_option)
        parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,
                          help="select USRP Rx side A or B (default=A)")
        parser.add_option("-f", "--freq", type="eng_float", default=1008.0e3,
                          help="set frequency to FREQ", metavar="FREQ")
        parser.add_option("-I", "--use-if-freq", action="store_true", default=False,
                          help="use intermediate freq (compensates DC problems in quadrature boards)" )
        parser.add_option("-g", "--gain", type="eng_float", default=None,
                          help="set gain in dB (default is maximum)")
        parser.add_option("-V", "--volume", type="eng_float", default=None,
                          help="set volume (default is midpoint)")
        parser.add_option("-O", "--audio-output", type="string", default="",
                          help="pcm device name. E.g., hw:0,0 or surround51 or /dev/dsp")
        (options, args) = parser.parse_args()
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)
        self.frame = frame
        self.panel = panel
        self.use_IF=options.use_if_freq
        if self.use_IF:
            self.IF_freq=64000.0
        else:
            self.IF_freq=0.0
        self.vol = 0
        self.state = "FREQ"  # knob/GUI mode: adjusting "FREQ" or "VOL"
        self.freq = 0
        # build graph
        #TODO: add an AGC after the channel filter and before the AM_demod
        self.u = usrp.source_c()                    # usrp is data source
        adc_rate = self.u.adc_rate()                # 64 MS/s
        usrp_decim = 250
        self.u.set_decim_rate(usrp_decim)
        usrp_rate = adc_rate / usrp_decim           # 256 kS/s
        chanfilt_decim = 4
        demod_rate = usrp_rate / chanfilt_decim     # 64 kHz
        audio_decimation = 2
        audio_rate = demod_rate / audio_decimation  # 32 kHz
        if options.rx_subdev_spec is None:
            options.rx_subdev_spec = pick_subdevice(self.u)
        self.u.set_mux(usrp.determine_rx_mux_value(self.u, options.rx_subdev_spec))
        self.subdev = usrp.selected_subdev(self.u, options.rx_subdev_spec)
        print "Using RX d'board %s" % (self.subdev.side_and_name(),)
        chan_filt_coeffs = optfir.low_pass (1,           # gain
                                            usrp_rate,   # sampling rate
                                            8e3,         # passband cutoff
                                            12e3,        # stopband cutoff
                                            1.0,         # passband ripple
                                            60)          # stopband attenuation
        #print len(chan_filt_coeffs)
        # NOTE(review): this assignment is immediately overwritten by
        # one of the two branches below — it looks redundant.
        self.chan_filt = gr.fir_filter_ccf (chanfilt_decim, chan_filt_coeffs)
        if self.use_IF:
            # Turn If to baseband and filter.
            self.chan_filt = gr.freq_xlating_fir_filter_ccf (chanfilt_decim, chan_filt_coeffs, self.IF_freq, usrp_rate)
        else:
            self.chan_filt = gr.fir_filter_ccf (chanfilt_decim, chan_filt_coeffs)
        # AM envelope detector: magnitude of the complex baseband.
        self.am_demod = gr.complex_to_mag()
        self.volume_control = gr.multiply_const_ff(self.vol)
        audio_filt_coeffs = optfir.low_pass (1,           # gain
                                             demod_rate,  # sampling rate
                                             8e3,         # passband cutoff
                                             10e3,        # stopband cutoff
                                             0.1,         # passband ripple
                                             60)          # stopband attenuation
        self.audio_filt=gr.fir_filter_fff(audio_decimation,audio_filt_coeffs)
        # sound card as final sink
        audio_sink = audio.sink (int (audio_rate),
                                 options.audio_output,
                                 False)  # ok_to_block
        # now wire it all together
        self.connect (self.u, self.chan_filt, self.am_demod, self.audio_filt, self.volume_control, audio_sink)
        self._build_gui(vbox, usrp_rate, demod_rate, audio_rate)
        if options.gain is None:
            g = self.subdev.gain_range()
            if True:
                # if no gain was specified, use the maximum gain available
                # (usefull for Basic_RX which is relatively deaf and the most probable board to be used for AM)
                # TODO: check db type to decide on default gain.
                options.gain = float(g[1])
            else:
                # if no gain was specified, use the mid-point in dB
                options.gain = float(g[0]+g[1])/2
        if options.volume is None:
            # default volume: 3/4 of the way down the range
            g = self.volume_range()
            options.volume = float(g[0]*3+g[1])/4
        if abs(options.freq) < 1e3:
            # convenience: interpret small numbers as kHz
            options.freq *= 1e3
        # set initial values
        self.set_gain(options.gain)
        self.set_vol(options.volume)
        if not(self.set_freq(options.freq)):
            self._set_status_msg("Failed to set initial frequency")
    def _set_status_msg(self, msg, which=0):
        # Write msg into the given status-bar field.
        self.frame.GetStatusBar().SetStatusText(msg, which)
    def _build_gui(self, vbox, usrp_rate, demod_rate, audio_rate):
        # Assemble the FFT displays and the freq/volume/gain control form.
        def _form_set_freq(kv):
            # Adapter: form callbacks receive a dict of field values.
            return self.set_freq(kv['freq'])
        if 1:
            self.src_fft = fftsink2.fft_sink_c(self.panel, title="Data from USRP",
                                               fft_size=512, sample_rate=usrp_rate,
                                               ref_scale=32768.0, ref_level=0.0, y_divs=12)
            self.connect (self.u, self.src_fft)
            vbox.Add (self.src_fft.win, 4, wx.EXPAND)
        if 0:  # disabled debug display
            self.post_filt_fft = fftsink2.fft_sink_c(self.panel, title="Post Channel filter",
                                                     fft_size=512, sample_rate=demod_rate)
            self.connect (self.chan_filt, self.post_filt_fft)
            vbox.Add (self.post_filt_fft.win, 4, wx.EXPAND)
        if 0:  # disabled debug display
            post_demod_fft = fftsink2.fft_sink_f(self.panel, title="Post Demod",
                                                 fft_size=1024, sample_rate=demod_rate,
                                                 y_per_div=10, ref_level=0)
            self.connect (self.am_demod, post_demod_fft)
            vbox.Add (post_demod_fft.win, 4, wx.EXPAND)
        if 1:
            audio_fft = fftsink2.fft_sink_f(self.panel, title="Audio",
                                            fft_size=512, sample_rate=audio_rate,
                                            y_per_div=10, ref_level=20)
            self.connect (self.audio_filt, audio_fft)
            vbox.Add (audio_fft.win, 4, wx.EXPAND)
        # control area form at bottom
        self.myform = myform = form.form()
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['freq'] = form.float_field(
            parent=self.panel, sizer=hbox, label="Freq", weight=1,
            callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
        hbox.Add((5,0), 0)
        # Slider covers the MW broadcast band (520 kHz - 1611 kHz, 1 kHz steps).
        myform['freq_slider'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, weight=3,
                                        range=(520.0e3, 1611.0e3, 1.0e3),
                                        callback=self.set_freq)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['volume'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
                                        weight=3, range=self.volume_range(),
                                        callback=self.set_vol)
        hbox.Add((5,0), 1)
        myform['gain'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Gain",
                                        weight=3, range=self.subdev.gain_range(),
                                        callback=self.set_gain)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)
        try:
            # Optional Griffin Powermate / Contour knob support.
            self.knob = powermate.powermate(self.frame)
            self.rot = 0
            powermate.EVT_POWERMATE_ROTATE (self.frame, self.on_rotate)
            powermate.EVT_POWERMATE_BUTTON (self.frame, self.on_button)
        except:
            print "FYI: No Powermate or Contour Knob found"
    def on_rotate (self, event):
        # Knob rotation: every 3 detents step the frequency or the
        # volume, depending on the current mode in self.state.
        self.rot += event.delta
        if (self.state == "FREQ"):
            if self.rot >= 3:
                self.set_freq(self.freq + .1e6)
                self.rot -= 3
            elif self.rot <=-3:
                self.set_freq(self.freq - .1e6)
                self.rot += 3
        else:
            step = self.volume_range()[2]
            if self.rot >= 3:
                self.set_vol(self.vol + step)
                self.rot -= 3
            elif self.rot <=-3:
                self.set_vol(self.vol - step)
                self.rot += 3
    def on_button (self, event):
        # Knob press toggles between frequency and volume adjustment.
        if event.value == 0:        # button up
            return
        self.rot = 0
        if self.state == "FREQ":
            self.state = "VOL"
        else:
            self.state = "FREQ"
        self.update_status_bar ()
    def set_vol (self, vol):
        # Clamp to the allowed range and apply as a linear multiplier
        # (vol is treated as dB: k = 10**(vol/10)).
        g = self.volume_range()
        self.vol = max(g[0], min(g[1], vol))
        self.volume_control.set_k(10**(self.vol/10))
        self.myform['volume'].set_value(self.vol)
        self.update_status_bar ()
    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.

        @param target_freq: frequency in Hz
        @rtype: bool

        Tuning is a two step process. First we ask the front-end to
        tune as close to the desired frequency as it can. Then we use
        the result of that operation and our target_frequency to
        determine the value for the digital down converter.
        """
        r = usrp.tune(self.u, 0, self.subdev, target_freq + self.IF_freq)
        #TODO: check if db is inverting the spectrum or not to decide if we should do + self.IF_freq or - self.IF_freq
        if r:
            self.freq = target_freq
            self.myform['freq'].set_value(target_freq) # update displayed value
            self.myform['freq_slider'].set_value(target_freq) # update displayed value
            self.update_status_bar()
            self._set_status_msg("OK", 0)
            return True
        self._set_status_msg("Failed", 0)
        return False
    def set_gain(self, gain):
        # Apply RF gain on the daughterboard and mirror it in the GUI.
        self.myform['gain'].set_value(gain) # update displayed value
        self.subdev.set_gain(gain)
    def update_status_bar (self):
        msg = "Volume:%r Setting:%s" % (self.vol, self.state)
        self._set_status_msg(msg, 1)
        try:
            self.src_fft.set_baseband_freq(self.freq)
        except:
            # NOTE(review): src_fft may not exist yet during startup; the
            # bare except silently ignores that (and any other error).
            None
    def volume_range(self):
        # (minimum, maximum, step) for the volume control, in dB.
        return (-40.0, 0.0, 0.5)
if __name__ == '__main__':
    # Launch the wxPython application wrapping the receiver flow graph.
    app = stdgui2.stdapp (wfm_rx_block, "USRP Broadcast AM MW RX")
    app.MainLoop ()
| gpl-3.0 |
QInfer/python-qinfer | src/qinfer/_lib/__init__.py | 3 | 1933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# _lib/: Copies of external libraries used by QInfer, such as docopt.
# Note that all other files in this directory are copyrighted and
# licensed as described within each file, or by the corresponding
# LICENSE and/or COPYING files.
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
# Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
| bsd-3-clause |
harmy/kbengine | kbe/src/lib/python/Lib/test/json_tests/test_fail.py | 52 | 3233 | from test.json_tests import PyTest, CTest
# Fri Dec 30 18:57:26 2005
# Invalid JSON documents taken from the JSON_checker suite; each entry is
# expected to raise ValueError when parsed, except the indices listed in
# SKIPS, which this implementation deliberately accepts.
JSONDOCS = [
    # http://json.org/JSON_checker/test/fail1.json
    '"A JSON payload should be an object or array, not a string."',
    # http://json.org/JSON_checker/test/fail2.json
    '["Unclosed array"',
    # http://json.org/JSON_checker/test/fail3.json
    '{unquoted_key: "keys must be quoted}',
    # http://json.org/JSON_checker/test/fail4.json
    '["extra comma",]',
    # http://json.org/JSON_checker/test/fail5.json
    '["double extra comma",,]',
    # http://json.org/JSON_checker/test/fail6.json
    '[ , "<-- missing value"]',
    # http://json.org/JSON_checker/test/fail7.json
    '["Comma after the close"],',
    # http://json.org/JSON_checker/test/fail8.json
    '["Extra close"]]',
    # http://json.org/JSON_checker/test/fail9.json
    '{"Extra comma": true,}',
    # http://json.org/JSON_checker/test/fail10.json
    '{"Extra value after close": true} "misplaced quoted value"',
    # http://json.org/JSON_checker/test/fail11.json
    '{"Illegal expression": 1 + 2}',
    # http://json.org/JSON_checker/test/fail12.json
    '{"Illegal invocation": alert()}',
    # http://json.org/JSON_checker/test/fail13.json
    '{"Numbers cannot have leading zeroes": 013}',
    # http://json.org/JSON_checker/test/fail14.json
    '{"Numbers cannot be hex": 0x14}',
    # http://json.org/JSON_checker/test/fail15.json
    '["Illegal backslash escape: \\x15"]',
    # http://json.org/JSON_checker/test/fail16.json
    '["Illegal backslash escape: \\\'"]',
    # http://json.org/JSON_checker/test/fail17.json
    '["Illegal backslash escape: \\017"]',
    # http://json.org/JSON_checker/test/fail18.json
    '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
    # http://json.org/JSON_checker/test/fail19.json
    '{"Missing colon" null}',
    # http://json.org/JSON_checker/test/fail20.json
    '{"Double colon":: null}',
    # http://json.org/JSON_checker/test/fail21.json
    '{"Comma instead of colon", null}',
    # http://json.org/JSON_checker/test/fail22.json
    '["Colon instead of comma": false]',
    # http://json.org/JSON_checker/test/fail23.json
    '["Bad value", truth]',
    # http://json.org/JSON_checker/test/fail24.json
    "['single quote']",
    # http://code.google.com/p/simplejson/issues/detail?id=3
    '["A\u001FZ control characters in string"]',
]
# Maps 1-based fail-file numbers to the reason the document is accepted.
SKIPS = {
    1: "why not have a string payload?",
    18: "spec doesn't specify any nesting limitations",
}
class TestFail:
    """Shared test logic; self.loads/self.dumps and assertion helpers are
    provided by the PyTest/CTest mixins in the concrete subclasses below."""
    def test_failures(self):
        # Every JSONDOCS entry must fail to parse, unless whitelisted in SKIPS.
        for idx, doc in enumerate(JSONDOCS):
            idx = idx + 1  # JSON_checker fail files are numbered from 1
            if idx in SKIPS:
                self.loads(doc)  # must parse cleanly
                continue
            try:
                self.loads(doc)
            except ValueError:
                pass
            else:
                self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))
    def test_non_string_keys_dict(self):
        # Tuples are not serializable as JSON object keys in either encoder.
        data = {'a' : 1, (1, 2) : 2}
        #This is for c encoder
        self.assertRaises(TypeError, self.dumps, data)
        #This is for python encoder
        self.assertRaises(TypeError, self.dumps, data, indent=True)
# Concrete parametrizations: run the shared tests against the pure-Python
# and the C-accelerated json implementations respectively.
class TestPyFail(TestFail, PyTest): pass
class TestCFail(TestFail, CTest): pass
| lgpl-3.0 |
srm912/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/assertion/util.py | 175 | 11730 | """Utilities for assertion debugging"""
import pprint
import _pytest._code
import py
# collections.Sequence is unavailable on some interpreters; fall back to
# treating only real lists as sequences.
try:
    from collections import Sequence
except ImportError:
    Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
# u() converts to text (unicode on py2, str on py3).
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
def ecu(s):
    """Decode *s* to text with replacement of undecodable bytes; objects
    that are not byte strings are returned unchanged.

    The re-encoding is needed because python2 repr() of non-ascii data
    yields bytes (see issues 877 and 1379).
    """
    try:
        decoded = u(s, 'utf-8', 'replace')
    except TypeError:
        # Already text (or not a byte string at all): pass through.
        return s
    return decoded
def format_explanation(explanation):
    """Render an assertion explanation for display.

    Normally all embedded newlines are escaped, however there are
    three exceptions: \n{, \n} and \n~.  The first two delimit nested
    explanations (see .visit_Call() / visit_Attribute() for examples);
    the last one lets a single explanation span multiple lines, e.g.
    when displaying diffs.
    """
    text = _collapse_false(ecu(explanation))
    formatted = _format_lines(_split_explanation(text))
    return u('\n').join(formatted)
def _collapse_false(explanation):
    """Collapse expansions of False

    So this strips out any "assert False\n{where False = ...\n}"
    blocks.
    """
    where = 0
    while True:
        # Find the next "False\n{False = " marker (15 chars before the payload).
        start = where = explanation.find("False\n{False = ", where)
        if where == -1:
            break
        level = 0
        prev_c = explanation[start]
        # Walk character pairs to find the matching "\n}" for the opening "\n{".
        for i, c in enumerate(explanation[start:]):
            if prev_c + c == "\n{":
                level += 1
            elif prev_c + c == "\n}":
                level -= 1
                if not level:
                    break
            prev_c = c
        else:
            # Loop ran off the end without closing all braces.
            raise AssertionError("unbalanced braces: %r" % (explanation,))
        end = start + i
        where = end
        if explanation[end - 1] == '\n':
            # Splice out the "False\n{False = " prefix (15 chars) and the
            # closing "\n}" so only the inner expansion remains.
            explanation = (explanation[:start] + explanation[start+15:end-1] +
                           explanation[end+1:])
            # 17 = 15 removed prefix chars + 2 removed closing chars.
            where -= 17
    return explanation
def _split_explanation(explanation):
    """Split an explanation into its logical display lines.

    Lines are split on '\n{', '\n}' and '\n~' markers; every other
    newline is escaped and appears in the preceding line as the
    literal '\n' characters.
    """
    markers = ('{', '}', '~', '>')
    raw = (explanation or u('')).split('\n')
    result = raw[:1]
    for chunk in raw[1:]:
        if chunk.startswith(markers):
            # A structural line: starts a nested/continued explanation.
            result.append(chunk)
        else:
            # Plain continuation: fold back with an escaped newline.
            result[-1] = result[-1] + '\\n' + chunk
    return result
def _format_lines(lines):
    """Format the individual lines

    This will replace the '{', '}' and '~' characters of our mini
    formatting language with the proper 'where ...', 'and ...' and ' +
    ...' text, taking care of indentation along the way.

    Return a list of formatted lines.
    """
    result = lines[:1]
    # stack holds the result-index of each currently-open '{' block;
    # stackcnt counts how many sub-explanations each level has emitted
    # (first one reads 'where', subsequent ones read 'and').
    stack = [0]
    stackcnt = [0]
    for line in lines[1:]:
        if line.startswith('{'):
            if stackcnt[-1]:
                s = u('and ')
            else:
                s = u('where ')
            stack.append(len(result))
            stackcnt[-1] += 1
            stackcnt.append(0)
            result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
        elif line.startswith('}'):
            # Close the block: append the trailer to the line that opened it.
            stack.pop()
            stackcnt.pop()
            result[stack[-1]] += line[1:]
        else:
            assert line[0] in ['~', '>']
            stack[-1] += 1
            # '~' lines are indented one level deeper than '>' lines.
            indent = len(stack) if line.startswith('~') else len(stack) - 1
            result.append(u(' ')*indent + line[1:])
    # All '{' blocks must have been closed by matching '}' lines.
    assert len(stack) == 1
    return result
# Provide basestring in python3: on py2 the builtin exists, on py3 the
# NameError fallback aliases it to str.
try:
    basestring = basestring
except NameError:
    basestring = str
def assertrepr_compare(config, op, left, right):
    """Return specialised explanations for some operators/operands"""
    # Budget the summary line to ~80 columns, shared between both reprs.
    width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
    left_repr = py.io.saferepr(left, maxsize=int(width/2))
    right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
    summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
    # Type predicates used to pick the most specific comparison helper.
    issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
                            not isinstance(x, basestring))
    istext = lambda x: isinstance(x, basestring)
    isdict = lambda x: isinstance(x, dict)
    isset = lambda x: isinstance(x, (set, frozenset))
    def isiterable(obj):
        try:
            iter(obj)
            return not istext(obj)
        except TypeError:
            return False
    verbose = config.getoption('verbose')
    explanation = None
    try:
        if op == '==':
            if istext(left) and istext(right):
                explanation = _diff_text(left, right, verbose)
            else:
                if issequence(left) and issequence(right):
                    explanation = _compare_eq_sequence(left, right, verbose)
                elif isset(left) and isset(right):
                    explanation = _compare_eq_set(left, right, verbose)
                elif isdict(left) and isdict(right):
                    explanation = _compare_eq_dict(left, right, verbose)
                # For any iterable pair, additionally append a full diff.
                if isiterable(left) and isiterable(right):
                    expl = _compare_eq_iterable(left, right, verbose)
                    if explanation is not None:
                        explanation.extend(expl)
                    else:
                        explanation = expl
        elif op == 'not in':
            if istext(left) and istext(right):
                explanation = _notin_text(left, right, verbose)
    except Exception:
        # A faulty __repr__ must not break assertion reporting.
        explanation = [
            u('(pytest_assertion plugin: representation of details failed. '
              'Probably an object has a faulty __repr__.)'),
            u(_pytest._code.ExceptionInfo())]
    if not explanation:
        return None
    return [summary] + explanation
def _diff_text(left, right, verbose=False):
    """Return the explanation for the diff between text or bytes

    Unless --verbose is used this will skip leading and trailing
    characters which are identical to keep the diff minimal.

    If the input are bytes they will be safely converted to text.
    """
    from difflib import ndiff
    explanation = []
    # Convert bytes to their escaped repr form so the diff stays printable.
    if isinstance(left, py.builtin.bytes):
        left = u(repr(left)[1:-1]).replace(r'\n', '\n')
    if isinstance(right, py.builtin.bytes):
        right = u(repr(right)[1:-1]).replace(r'\n', '\n')
    if not verbose:
        i = 0  # just in case left or right has zero length
        # Count identical leading characters (i = index of first mismatch).
        for i in range(min(len(left), len(right))):
            if left[i] != right[i]:
                break
        if i > 42:
            i -= 10  # Provide some context
            explanation = [u('Skipping %s identical leading '
                             'characters in diff, use -v to show') % i]
            left = left[i:]
            right = right[i:]
        if len(left) == len(right):
            # Count identical trailing characters.
            # BUG FIX: the original compared left[-i] != right[-i]; with
            # i == 0 that is left[0] vs right[0] (Python's -0 is 0), so the
            # loop broke immediately whenever the heads differed and the
            # trailing skip never triggered.  Index from the end instead.
            for i in range(len(left)):
                if left[-i - 1] != right[-i - 1]:
                    break
            if i > 42:
                i -= 10  # Provide some context
                explanation += [u('Skipping %s identical trailing '
                                  'characters in diff, use -v to show') % i]
                left = left[:-i]
                right = right[:-i]
    explanation += [line.strip('\n')
                    for line in ndiff(left.splitlines(),
                                      right.splitlines())]
    return explanation
def _compare_eq_iterable(left, right, verbose=False):
    """Return a full pretty-printed ndiff of two iterables (verbose only)."""
    if not verbose:
        return [u('Use -v to get the full diff')]
    # dynamic import to speedup pytest
    import difflib
    try:
        left_formatting = pprint.pformat(left).splitlines()
        right_formatting = pprint.pformat(right).splitlines()
        explanation = [u('Full diff:')]
    except Exception:
        # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
        # sorted() on a list would raise. See issue #718.
        # As a workaround, the full diff is generated by using the repr() string of each item of each container.
        left_formatting = sorted(repr(x) for x in left)
        right_formatting = sorted(repr(x) for x in right)
        explanation = [u('Full diff (fallback to calling repr on each item):')]
    explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
    return explanation
def _compare_eq_sequence(left, right, verbose=False):
    """Explain the first index mismatch and any length difference
    between two sequences."""
    lines = []
    # Report only the first position at which the sequences disagree.
    for idx, (lv, rv) in enumerate(zip(left, right)):
        if lv != rv:
            lines.append(u('At index %s diff: %r != %r') % (idx, lv, rv))
            break
    shorter = min(len(left), len(right))
    if len(left) > len(right):
        lines.append(u('Left contains more items, first extra item: %s')
                     % py.io.saferepr(left[shorter]))
    elif len(right) > len(left):
        lines.append(u('Right contains more items, first extra item: %s')
                     % py.io.saferepr(right[shorter]))
    return lines
def _compare_eq_set(left, right, verbose=False):
    """List the elements unique to each of two sets, left side first."""
    lines = []
    for label, extra in ((u('Extra items in the left set:'), left - right),
                         (u('Extra items in the right set:'), right - left)):
        if extra:
            lines.append(label)
            lines.extend(py.io.saferepr(item) for item in extra)
    return lines
def _compare_eq_dict(left, right, verbose=False):
    """Explain dict differences: common-but-equal items (summarised unless
    verbose), differing values for shared keys, and keys unique to a side."""
    explanation = []
    common = set(left).intersection(set(right))
    # Items present in both dicts with equal values.
    same = dict((k, left[k]) for k in common if left[k] == right[k])
    if same and not verbose:
        explanation += [u('Omitting %s identical items, use -v to show') %
                        len(same)]
    elif same:
        explanation += [u('Common items:')]
        explanation += pprint.pformat(same).splitlines()
    # Shared keys whose values differ.
    diff = set(k for k in common if left[k] != right[k])
    if diff:
        explanation += [u('Differing items:')]
        for k in diff:
            explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
                            py.io.saferepr({k: right[k]})]
    extra_left = set(left) - set(right)
    if extra_left:
        explanation.append(u('Left contains more items:'))
        explanation.extend(pprint.pformat(
            dict((k, left[k]) for k in extra_left)).splitlines())
    extra_right = set(right) - set(left)
    if extra_right:
        explanation.append(u('Right contains more items:'))
        explanation.extend(pprint.pformat(
            dict((k, right[k]) for k in extra_right)).splitlines())
    return explanation
def _notin_text(term, text, verbose=False):
    """Explain a failed 'term not in text' by diffing text against itself
    with the (unexpectedly present) term removed, highlighting where the
    term occurs."""
    index = text.find(term)
    head = text[:index]
    tail = text[index+len(term):]
    correct_text = head + tail
    diff = _diff_text(correct_text, text, verbose)
    newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
    for line in diff:
        # Drop the skip notices and the removed-side lines; keep only the
        # additions (which show the term) re-indented as context.
        if line.startswith(u('Skipping')):
            continue
        if line.startswith(u('- ')):
            continue
        if line.startswith(u('+ ')):
            newdiff.append(u(' ') + line[2:])
        else:
            newdiff.append(line)
    return newdiff
| mpl-2.0 |
roshchupkin/VBM | scripts/tools/summary.py | 1 | 3768 | import nipy
import pandas as pd
import numpy as np
import argparse
import os
from collections import OrderedDict
import sys
sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) )
from config import *
# Command-line interface for the VBM summary script.
# NOTE(review): parse_args() runs at import time (before the __main__ guard
# below), so importing this module requires valid CLI arguments.
parser = argparse.ArgumentParser(description='Script to create summary of VBM experiment.'
                                             'Summary table:'
                                             'Brain Region,Region size (#voxels),min p-value,# negative voxels,# positive voxels ')
parser.add_argument("-p",required=True, type=str, help="p-map image (with real p-values, not (1-p_value)!)")
parser.add_argument("-t",required=True, type=str, help="t-map or b-map image")
parser.add_argument("-a", type=str,default='Hammer',choices=['Hammer','FreeSurfer','Tracts'], help="Atlas name")
parser.add_argument("-o",required=True, type=str, help="output folder")
parser.add_argument("-n",required=True, type=str, help="result table name")
parser.add_argument("-th",required=True, type=float, help="p-value threshold")
parser.add_argument("-mask",default=None, type=str, help="path mask image")
parser.add_argument("-atlas",default=None, type=str, help="path atlas image")
parser.add_argument("-tract_th", type=float, default=0.0, help='tracts threshold for minimum probability to include voxel')
args = parser.parse_args()
print args
def get_atlas(atlas_name,atlas_path):
    """Load an atlas image and its region-name table.

    For label atlases ('Hammer'/'FreeSurfer') returns
    {'regions': {label_id: name}, 'mask': {label_id: voxel-index tuple}}.
    For 'Tracts' returns {'mask': the loaded (4D probabilistic) image,
    'regions': list of tract names} — callers must handle both shapes.

    ATLAS and INFO_TABLE come from the project-level config module.
    """
    Atlas={}
    Atlas['mask']={}
    Atlas['regions']={}
    if atlas_path is None:
        # Fall back to the configured default path for this atlas.
        Atlas_path=ATLAS[atlas_name]
    else:
        Atlas_path = atlas_path
    Atlas_table=INFO_TABLE[atlas_name]
    A=nipy.load_image(Atlas_path)
    # Two-column CSV: label id, region name (no header row).
    Table=pd.read_csv(Atlas_table, sep=',', header=None)
    if atlas_name!='Tracts':
        # Only keep table entries whose label actually occurs in the image.
        u=np.unique(A._data)
        for j,i in enumerate(Table[0]):
            if i in u:
                Atlas['regions'][i]=Table[1][j]
                Atlas['mask'][i]=np.where(A._data==i)
        return Atlas
    else:
        Atlas['mask']=A
        Atlas['regions']=Table[1].tolist()
        return Atlas
if __name__=="__main__":
    # Validate the p-value threshold before doing any I/O.
    if args.th<=0 or args.th>=1:
        raise ValueError('Threshold should be 0 < threshold < 1, not {}'.format(args.th))
    results=OrderedDict()
    Atlas=get_atlas(args.a, args.atlas)
    P=nipy.load_image(args.p)
    if args.mask is not None:
        # Outside the analysis mask, force p to 1 (not significant).
        M=nipy.load_image(args.mask)
        P._data[M._data==0]=1
    results['Brain Region']=[]
    results['Region size (#voxels)']=[]
    results['min p-value']=[]
    results['# negative voxels']=[]
    results['# positive voxels']=[]
    # Voxels with p above the threshold are treated as non-significant.
    mask=np.where(P._data>args.th)
    P._data[mask]=1
    P._data[P._data==0]=1 #TODO change:check with mask, if zero outside, then it is significant
    # Split the effect map into positive and negative significant parts.
    T_neg=nipy.load_image(args.t)
    T_pos=nipy.load_image(args.t)
    T_neg._data[mask]=0
    T_pos._data[mask]=0
    # BUG FIX: the original indexed the nipy Image objects directly
    # (T_pos[T_pos._data<0]=0), inconsistent with the ._data assignments
    # above and on the arrays the counts below are computed from; assign
    # on the underlying data arrays instead.
    T_pos._data[T_pos._data<0]=0
    T_neg._data[T_neg._data>0]=0
    if args.a!='Tracts':
        # Label atlas: Atlas['mask'][k] is a voxel-index tuple per region.
        for k in Atlas['mask']:
            results['Brain Region'].append(Atlas['regions'][k])
            results['Region size (#voxels)'].append( len(Atlas['mask'][k][0]) )
            results['min p-value'].append(np.min( P._data[Atlas['mask'][k]] ))
            results['# negative voxels'].append(len( np.where(T_neg._data[Atlas['mask'][k]]!=0)[0] ))
            results['# positive voxels'].append(len( np.where(T_pos._data[Atlas['mask'][k]]!=0)[0] ))
    else:
        # Probabilistic tract atlas: threshold each tract's probability map.
        for j,i in enumerate(Atlas['regions']):
            results['Brain Region'].append(i)
            tract=Atlas['mask'][:,:,:,j+1] #Tract atlas starts from 0 dim with no info
            #print i, tract.shape, args.tract_th
            tract_mask=np.where(tract._data>args.tract_th)
            #print tract_mask[0]
            results['Region size (#voxels)'].append( len(tract_mask[0]) )
            results['min p-value'].append(np.min(P._data[tract_mask]))
            results['# negative voxels'].append(len(np.where(T_neg._data[tract_mask] != 0)[0]))
            results['# positive voxels'].append(len(np.where(T_pos._data[tract_mask] != 0)[0]))
    # One row per region, columns in insertion order thanks to OrderedDict.
    df=pd.DataFrame.from_dict(results)
    df.to_csv(os.path.join(args.o,args.n), sep=',', index=False)
| gpl-2.0 |
doordash/auto_ml | tests/regressors.py | 1 | 12027 | import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from quantile_ml import Predictor
from quantile_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_regression(model_name=None):
    # Train on the Boston housing dataset with hyperparameter optimization
    # enabled, then check the held-out score lands in the expected band
    # (scores are negative error metrics: closer to 0 is better).
    np.random.seed(0)
    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
    column_descriptions = {
        'MEDV': 'output'
        , 'CHAS': 'categorical'
    }
    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)
    test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
    print('test_score')
    print(test_score)
    # the random seed gets a score of -3.21 on python 3.5
    # There's a ton of noise here, due to small sample sizes
    # Per-model lower bounds reflect each estimator's typical variance.
    lower_bound = -3.4
    if model_name == 'DeepLearningRegressor':
        lower_bound = -20
    if model_name == 'LGBMRegressor':
        lower_bound = -5.5
    if model_name == 'GradientBoostingRegressor':
        lower_bound = -3.5
    assert lower_bound < test_score < -2.8
def categorical_ensembling_regression(model_name=None):
    # Train one sub-model per value of the CHAS categorical column
    # (categorical ensembling) and check the held-out score band.
    np.random.seed(0)
    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
    column_descriptions = {
        'MEDV': 'output'
        , 'CHAS': 'categorical'
    }
    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train_categorical_ensemble(df_boston_train, perform_feature_selection=True, model_names=model_name, categorical_column='CHAS')
    test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
    print('test_score')
    print(test_score)
    # Bumping this up since without these features our score drops
    lower_bound = -4.0
    if model_name == 'DeepLearningRegressor':
        lower_bound = -19
    if model_name == 'LGBMRegressor':
        lower_bound = -4.95
    assert lower_bound < test_score < -2.8
def getting_single_predictions_regression(model_name=None):
    # End-to-end check of the save/load round trip plus row-by-row
    # prediction: accuracy band, per-row latency, and that predicting
    # does not mutate the input dictionaries.
    np.random.seed(0)
    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
    column_descriptions = {
        'MEDV': 'output'
        , 'CHAS': 'categorical'
    }
    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train(df_boston_train, perform_feature_scaling=False, model_names=model_name)
    file_name = ml_predictor.save(str(random.random()))
    # if model_name == 'DeepLearningRegressor':
    #     from quantile_ml.utils_models import load_keras_model
    #     saved_ml_pipeline = load_keras_model(file_name)
    # else:
    #     with open(file_name, 'rb') as read_file:
    #         saved_ml_pipeline = dill.load(read_file)
    saved_ml_pipeline = load_ml_model(file_name)
    os.remove(file_name)
    # Keras models are saved as an extra sidecar file; remove it if present.
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass
    df_boston_test_dictionaries = df_boston_test.to_dict('records')
    # 1. make sure the accuracy is the same
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))
    print('predictions')
    print(predictions)
    print('predictions[0]')
    print(predictions[0])
    print('type(predictions)')
    print(type(predictions))
    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -3.2
    if model_name == 'DeepLearningRegressor':
        lower_bound = -8.8
    if model_name == 'LGBMRegressor':
        lower_bound = -4.95
    if model_name == 'XGBRegressor':
        lower_bound = -3.4
    assert lower_bound < first_score < -2.8
    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time
    print('duration.total_seconds()')
    print(duration.total_seconds())
    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.1 < duration.total_seconds() / 1.0 < 15
    # 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))
    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.8
def feature_learning_getting_single_predictions_regression(model_name=None):
    """Train a regressor with feature_learning enabled, save and reload the
    pipeline, then verify single-row predictions for accuracy, speed, and
    that predicting does not mutate the input dictionaries.

    :param model_name: optional model name string (e.g. 'XGBRegressor') used
        to select the estimator and its accuracy bounds.
    """
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    column_descriptions = {
        'MEDV': 'output',
        'CHAS': 'categorical'
    }

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)

    # NOTE: this is bad practice to pass in our same training set as our
    # fl_data set, but we don't have enough data to do it any other way
    df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
    ml_predictor.train(df_boston_train, model_names=model_name, feature_learning=True, fl_data=fl_data)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    # Clean up the serialized pipeline, plus the companion Keras weights
    # file when a deep-learning model wrote one.
    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except OSError:
        # Non-Keras models never create this file; nothing to clean up.
        # (Was a bare `except: pass`, which also swallowed KeyboardInterrupt.)
        pass

    df_boston_test_dictionaries = df_boston_test.to_dict('records')

    # 1. make sure the accuracy is the same
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -3.2
    if model_name == 'DeepLearningRegressor':
        lower_bound = -23
    if model_name == 'LGBMRegressor':
        lower_bound = -4.95
    if model_name == 'XGBRegressor':
        lower_bound = -3.3

    assert lower_bound < first_score < -2.8

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time
    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() / 1.0 < 15

    # 3. make sure we're not modifying the dictionaries (the score is the
    # same after running a few experiments as it is the first time)
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.8
def feature_learning_categorical_ensembling_getting_single_predictions_regression(model_name=None):
    """Train a categorical-ensemble regressor (split on CHAS), save and
    reload the pipeline, then verify single-row predictions for accuracy,
    speed, and that predicting does not mutate the input dictionaries.

    :param model_name: optional model name string (e.g. 'XGBRegressor') used
        to select the estimator and its accuracy bounds.
    """
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    column_descriptions = {
        'MEDV': 'output',
        'CHAS': 'categorical'
    }

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)

    # NOTE: this is bad practice to pass in our same training set as our
    # fl_data set, but we don't have enough data to do it any other way
    df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
    ml_predictor.train_categorical_ensemble(df_boston_train, model_names=model_name, feature_learning=False, fl_data=fl_data, categorical_column='CHAS')

    file_name = ml_predictor.save(str(random.random()))

    from quantile_ml.utils_models import load_ml_model
    saved_ml_pipeline = load_ml_model(file_name)

    # Clean up the serialized pipeline, plus the companion Keras weights
    # file when a deep-learning model wrote one.
    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except OSError:
        # Non-Keras models never create this file; nothing to clean up.
        # (Was a bare `except: pass`, which also swallowed KeyboardInterrupt.)
        pass

    df_boston_test_dictionaries = df_boston_test.to_dict('records')

    # 1. make sure the accuracy is the same
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -3.2
    if model_name == 'DeepLearningRegressor':
        lower_bound = -21.5
    if model_name == 'LGBMRegressor':
        lower_bound = -5.1
    if model_name == 'XGBRegressor':
        lower_bound = -3.6
    if model_name == 'GradientBoostingRegressor':
        lower_bound = -3.6

    assert lower_bound < first_score < -2.8

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time
    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() / 1.0 < 15

    # 3. make sure we're not modifying the dictionaries (the score is the
    # same after running a few experiments as it is the first time)
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.8
| mit |
HackerEarth/cassandra-python-driver | tests/unit/test_metadata.py | 3 | 14551 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock
import cassandra
from cassandra.cqltypes import IntegerType, AsciiType, TupleType
from cassandra.metadata import (Murmur3Token, MD5Token,
BytesToken, ReplicationStrategy,
NetworkTopologyStrategy, SimpleStrategy,
LocalStrategy, NoMurmur3, protect_name,
protect_names, protect_value, is_valid_name,
UserType, KeyspaceMetadata, Metadata,
_UnknownStrategy)
from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host
class StrategiesTest(unittest.TestCase):
    """Tests for ReplicationStrategy creation and token->replica mapping."""

    @classmethod
    def setUpClass(cls):
        "Hook method for setting up class fixture before running tests in the class."
        # Python 3's unittest renamed assertItemsEqual to assertCountEqual;
        # alias it so the assertions below work on both 2.x and 3.x.
        if not hasattr(cls, 'assertItemsEqual'):
            cls.assertItemsEqual = cls.assertCountEqual

    def test_replication_strategy(self):
        """
        Basic code coverage testing that ensures different ReplicationStrategies
        can be initiated using parameters correctly.
        """
        rs = ReplicationStrategy()

        # Unknown/legacy strategy names fall back to _UnknownStrategy,
        # whose replica map is always empty.
        self.assertEqual(rs.create('OldNetworkTopologyStrategy', None), _UnknownStrategy('OldNetworkTopologyStrategy', None))
        fake_options_map = {'options': 'map'}
        uks = rs.create('OldNetworkTopologyStrategy', fake_options_map)
        self.assertEqual(uks, _UnknownStrategy('OldNetworkTopologyStrategy', fake_options_map))
        self.assertEqual(uks.make_token_replica_map({}, []), {})

        fake_options_map = {'dc1': '3'}
        self.assertIsInstance(rs.create('NetworkTopologyStrategy', fake_options_map), NetworkTopologyStrategy)
        self.assertEqual(rs.create('NetworkTopologyStrategy', fake_options_map).dc_replication_factors,
                         NetworkTopologyStrategy(fake_options_map).dc_replication_factors)

        # SimpleStrategy without a replication_factor option yields None.
        fake_options_map = {'options': 'map'}
        self.assertIsNone(rs.create('SimpleStrategy', fake_options_map))

        fake_options_map = {'options': 'map'}
        self.assertIsInstance(rs.create('LocalStrategy', fake_options_map), LocalStrategy)

        fake_options_map = {'options': 'map', 'replication_factor': 3}
        self.assertIsInstance(rs.create('SimpleStrategy', fake_options_map), SimpleStrategy)
        self.assertEqual(rs.create('SimpleStrategy', fake_options_map).replication_factor,
                         SimpleStrategy(fake_options_map).replication_factor)

        self.assertEqual(rs.create('xxxxxxxx', fake_options_map), _UnknownStrategy('xxxxxxxx', fake_options_map))

        # The abstract base leaves these two methods unimplemented.
        self.assertRaises(NotImplementedError, rs.make_token_replica_map, None, None)
        self.assertRaises(NotImplementedError, rs.export_for_schema)

    def test_nts_make_token_replica_map(self):
        # Three DCs with uneven host counts; the replicas chosen for token 0
        # must honour the per-DC replication factors (2, 2, 1).
        token_to_host_owner = {}

        dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
        dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
        dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
        for host in (dc1_1, dc1_2, dc1_3):
            host.set_location_info('dc1', 'rack1')
        token_to_host_owner[MD5Token(0)] = dc1_1
        token_to_host_owner[MD5Token(100)] = dc1_2
        token_to_host_owner[MD5Token(200)] = dc1_3

        dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
        dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
        dc2_1.set_location_info('dc2', 'rack1')
        dc2_2.set_location_info('dc2', 'rack1')
        token_to_host_owner[MD5Token(1)] = dc2_1
        token_to_host_owner[MD5Token(101)] = dc2_2

        dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
        dc3_1.set_location_info('dc3', 'rack3')
        token_to_host_owner[MD5Token(2)] = dc3_1

        ring = [MD5Token(0),
                MD5Token(1),
                MD5Token(2),
                MD5Token(100),
                MD5Token(101),
                MD5Token(200)]

        nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)

        self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))

    def test_nts_make_token_replica_map_empty_dc(self):
        # A DC configured with replication factor 0 contributes no replicas.
        host = Host('1', SimpleConvictionPolicy)
        host.set_location_info('dc1', 'rack1')
        token_to_host_owner = {MD5Token(0): host}
        ring = [MD5Token(0)]
        nts = NetworkTopologyStrategy({'dc1': 1, 'dc2': 0})

        replica_map = nts.make_token_replica_map(token_to_host_owner, ring)
        self.assertEqual(set(replica_map[MD5Token(0)]), set([host]))

    def test_nts_export_for_schema(self):
        # Export renders the strategy as a CQL replication options map.
        strategy = NetworkTopologyStrategy({'dc1': '1', 'dc2': '2'})
        self.assertEqual("{'class': 'NetworkTopologyStrategy', 'dc1': '1', 'dc2': '2'}",
                         strategy.export_for_schema())

    def test_simple_strategy_make_token_replica_map(self):
        host1 = Host('1', SimpleConvictionPolicy)
        host2 = Host('2', SimpleConvictionPolicy)
        host3 = Host('3', SimpleConvictionPolicy)
        token_to_host_owner = {
            MD5Token(0): host1,
            MD5Token(100): host2,
            MD5Token(200): host3
        }
        ring = [MD5Token(0), MD5Token(100), MD5Token(200)]

        # SimpleStrategy replicas: the owning host plus the next rf-1 hosts
        # walking the ring (wrapping at the end).
        rf1_replicas = SimpleStrategy({'replication_factor': '1'}).make_token_replica_map(token_to_host_owner, ring)
        self.assertItemsEqual(rf1_replicas[MD5Token(0)], [host1])
        self.assertItemsEqual(rf1_replicas[MD5Token(100)], [host2])
        self.assertItemsEqual(rf1_replicas[MD5Token(200)], [host3])

        rf2_replicas = SimpleStrategy({'replication_factor': '2'}).make_token_replica_map(token_to_host_owner, ring)
        self.assertItemsEqual(rf2_replicas[MD5Token(0)], [host1, host2])
        self.assertItemsEqual(rf2_replicas[MD5Token(100)], [host2, host3])
        self.assertItemsEqual(rf2_replicas[MD5Token(200)], [host3, host1])

        rf3_replicas = SimpleStrategy({'replication_factor': '3'}).make_token_replica_map(token_to_host_owner, ring)
        self.assertItemsEqual(rf3_replicas[MD5Token(0)], [host1, host2, host3])
        self.assertItemsEqual(rf3_replicas[MD5Token(100)], [host2, host3, host1])
        self.assertItemsEqual(rf3_replicas[MD5Token(200)], [host3, host1, host2])

    def test_ss_equals(self):
        # Different strategy classes never compare equal.
        self.assertNotEqual(SimpleStrategy({'replication_factor': '1'}), NetworkTopologyStrategy({'dc1': 2}))
class NameEscapingTest(unittest.TestCase):
    """Tests for the CQL identifier/value quoting helpers."""

    def test_protect_name(self):
        """
        Test cassandra.metadata.protect_name output
        """
        # Plain lowercase identifiers pass through; anything else is
        # wrapped in double quotes.
        self.assertEqual(protect_name('tests'), 'tests')
        self.assertEqual(protect_name('test\'s'), '"test\'s"')
        self.assertEqual(protect_name('test\'s'), "\"test's\"")
        self.assertEqual(protect_name('tests ?!@#$%^&*()'), '"tests ?!@#$%^&*()"')
        self.assertEqual(protect_name('1'), '"1"')
        self.assertEqual(protect_name('1test'), '"1test"')

    def test_protect_names(self):
        """
        Test cassandra.metadata.protect_names output
        """
        # List form applies protect_name element-wise.
        self.assertEqual(protect_names(['tests']), ['tests'])
        self.assertEqual(protect_names(
            [
                'tests',
                'test\'s',
                'tests ?!@#$%^&*()',
                '1'
            ]),
            [
                'tests',
                "\"test's\"",
                '"tests ?!@#$%^&*()"',
                '"1"'
            ])

    def test_protect_value(self):
        """
        Test cassandra.metadata.protect_value output
        """
        # Non-string values render as bare CQL literals; strings are
        # single-quoted with embedded quotes doubled.
        self.assertEqual(protect_value(True), "true")
        self.assertEqual(protect_value(False), "false")
        self.assertEqual(protect_value(3.14), '3.14')
        self.assertEqual(protect_value(3), '3')
        self.assertEqual(protect_value('test'), "'test'")
        self.assertEqual(protect_value('test\'s'), "'test''s'")
        self.assertEqual(protect_value(None), 'NULL')

    def test_is_valid_name(self):
        """
        Test cassandra.metadata.is_valid_name output
        """
        self.assertEqual(is_valid_name(None), False)
        self.assertEqual(is_valid_name('test'), True)
        self.assertEqual(is_valid_name('Test'), False)
        self.assertEqual(is_valid_name('t_____1'), True)
        self.assertEqual(is_valid_name('test1'), True)
        self.assertEqual(is_valid_name('1test1'), False)

        # Reserved keywords (everything not explicitly unreserved) are
        # never valid unquoted names.
        non_valid_keywords = cassandra.metadata._keywords - cassandra.metadata._unreserved_keywords
        for keyword in non_valid_keywords:
            self.assertEqual(is_valid_name(keyword), False)
class TokensTest(unittest.TestCase):
    """Spot-checks for the three partitioner token implementations."""

    def test_murmur3_tokens(self):
        try:
            murmur3_token = Murmur3Token(cassandra.metadata.MIN_LONG - 1)
            # Known-answer vectors for the murmur3 hash.
            self.assertEqual(murmur3_token.hash_fn('123'), -7468325962851647638)
            self.assertEqual(murmur3_token.hash_fn(b'\x00\xff\x10\xfa\x99' * 10), 5837342703291459765)
            self.assertEqual(murmur3_token.hash_fn(b'\xfe' * 8), -8927430733708461935)
            self.assertEqual(murmur3_token.hash_fn(b'\x10' * 8), 1446172840243228796)
            self.assertEqual(murmur3_token.hash_fn(str(cassandra.metadata.MAX_LONG)), 7162290910810015547)
            self.assertEqual(str(murmur3_token), '<Murmur3Token: -9223372036854775809>')
        except NoMurmur3:
            # The compiled murmur3 extension is optional at build time.
            raise unittest.SkipTest('The murmur3 extension is not available')

    def test_md5_tokens(self):
        # Known-answer vectors for the RandomPartitioner (MD5) hash.
        md5_token = MD5Token(cassandra.metadata.MIN_LONG - 1)
        self.assertEqual(md5_token.hash_fn('123'), 42767516990368493138776584305024125808)
        self.assertEqual(md5_token.hash_fn(str(cassandra.metadata.MAX_LONG)), 28528976619278518853815276204542453639)
        self.assertEqual(str(md5_token), '<MD5Token: %s>' % -9223372036854775809)

    def test_bytes_tokens(self):
        # ByteOrderedPartitioner tokens hash to themselves (identity).
        bytes_token = BytesToken(str(cassandra.metadata.MIN_LONG - 1))
        self.assertEqual(bytes_token.hash_fn('123'), '123')
        self.assertEqual(bytes_token.hash_fn(123), 123)
        self.assertEqual(bytes_token.hash_fn(str(cassandra.metadata.MAX_LONG)), str(cassandra.metadata.MAX_LONG))
        self.assertEqual(str(bytes_token), "<BytesToken: -9223372036854775809>")

        try:
            # Constructing from a non-string must be rejected.
            bytes_token = BytesToken(cassandra.metadata.MIN_LONG - 1)
            self.fail('Tokens for ByteOrderedPartitioner should be only strings')
        except TypeError:
            pass
class KeyspaceMetadataTest(unittest.TestCase):
    """Tests CQL export of a keyspace whose user types depend on each other."""

    def test_export_as_string_user_types(self):
        # Types reference one another (a uses c, b uses d and a, d uses c);
        # the export must emit them in dependency order: c, a, d, b.
        keyspace_name = 'test'
        keyspace = KeyspaceMetadata(keyspace_name, True, 'SimpleStrategy', dict(replication_factor=3))
        keyspace.user_types['a'] = UserType(keyspace_name, 'a', ['one', 'two'],
                                            [self.mock_user_type('UserType', 'c'),
                                             self.mock_user_type('IntType', 'int')])
        keyspace.user_types['b'] = UserType(keyspace_name, 'b', ['one', 'two', 'three'],
                                            [self.mock_user_type('UserType', 'd'),
                                             self.mock_user_type('IntType', 'int'),
                                             self.mock_user_type('UserType', 'a')])
        keyspace.user_types['c'] = UserType(keyspace_name, 'c', ['one'],
                                            [self.mock_user_type('IntType', 'int')])
        keyspace.user_types['d'] = UserType(keyspace_name, 'd', ['one'],
                                            [self.mock_user_type('UserType', 'c')])
        self.assertEqual("""CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} AND durable_writes = true;
CREATE TYPE test.c (
one int
);
CREATE TYPE test.a (
one c,
two int
);
CREATE TYPE test.d (
one c
);
CREATE TYPE test.b (
one d,
two int,
three a
);""", keyspace.export_as_string())

    def mock_user_type(self, cassname, typename):
        # Stand-in for a cqltypes class: only the attributes the CQL
        # exporter actually touches.
        return Mock(**{'cassname': cassname, 'typename': typename, 'cql_parameterized_type.return_value': typename})
class UserTypesTest(unittest.TestCase):
    """Tests CQL generation for user-defined types."""

    def test_as_cql_query(self):
        # Field types include a frozen tuple to exercise parameterized types.
        field_types = [IntegerType, AsciiType, TupleType.apply_parameters([IntegerType, AsciiType])]
        udt = UserType("ks1", "mytype", ["a", "b", "c"], field_types)
        self.assertEqual("CREATE TYPE ks1.mytype (a varint, b ascii, c frozen<tuple<varint, ascii>>);", udt.as_cql_query(formatted=False))

        self.assertEqual("""CREATE TYPE ks1.mytype (
a varint,
b ascii,
c frozen<tuple<varint, ascii>>
);""", udt.as_cql_query(formatted=True))

    def test_as_cql_query_name_escaping(self):
        # Mixed-case identifiers and reserved words must come back quoted.
        udt = UserType("MyKeyspace", "MyType", ["AbA", "keyspace"], [AsciiType, AsciiType])
        self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii);', udt.as_cql_query(formatted=False))
class IndexTest(unittest.TestCase):
    """Tests rebuilding CREATE INDEX statements from system-table rows."""

    def test_build_index_as_cql(self):
        # Mock just the column/table/keyspace names the builder reads.
        column_meta = Mock()
        column_meta.name = 'column_name_here'
        column_meta.table.name = 'table_name_here'
        column_meta.table.keyspace.name = 'keyspace_name_here'
        meta_model = Metadata()
        row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'}
        index_meta = meta_model._build_index_metadata(column_meta, row)
        self.assertEqual(index_meta.as_cql_query(),
                         'CREATE INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here)')

        # CUSTOM indexes additionally carry the implementing class name.
        row['index_options'] = '{ "class_name": "class_name_here" }'
        row['index_type'] = 'CUSTOM'
        index_meta = meta_model._build_index_metadata(column_meta, row)
        self.assertEqual(index_meta.as_cql_query(),
                         "CREATE CUSTOM INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here) USING 'class_name_here'")
| apache-2.0 |
termoshtt/DataProcessor | lib/dataprocessor/pipes/addcomment.py | 3 | 1275 | # coding=utf-8
from ..nodes import get
from ..utility import abspath
from ..exception import DataProcessorError
def add(node_list, comment, node_path):
    """
    Add comment to node specified by path.

    Parameters
    ----------
    node_list : list of dict
        Node dictionaries to search.
    comment : str
        comment.
    node_path : str
        This path specify the unique node.

    Raises
    ------
    DataProcessorError
        If no node in `node_list` has the given path.

    Examples
    --------
    >>> node_list = [{"path": "/path/to/hoge"},
    ...              {"path": "/path/to/hogehoge"}]
    >>> add(node_list, "some comments", "/path/to/hoge") == [
    ...     {"path": "/path/to/hoge", "comment": "some comments"},
    ...     {"path": "/path/to/hogehoge"}]
    True
    >>> add(node_list, "some comments aho", "/path/to/hogehom")
    Traceback (most recent call last):
    ...
    DataProcessorError: 'There is no node with specified path: /path/to/hogehom'
    """
    # Normalize the path before lookup so equivalent spellings match.
    path = abspath(node_path)
    node = get(node_list, path)
    if node:
        # Mutates the matched node in place; the whole list is returned.
        node["comment"] = comment
    else:
        raise DataProcessorError("There is no node with specified path: %s"
                                 % path)
    return node_list
def register(pipe_dics):
    """Register the ``add_comment`` pipe in *pipe_dics*."""
    entry = {
        "func": add,
        "args": ["comment", "path"],
        "desc": "add comment to node with path",
    }
    pipe_dics["add_comment"] = entry
| gpl-3.0 |
openmicroscopy/omero-marshal | omero_marshal/encode/encoders/experimenter.py | 1 | 1420 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle.
# If the file is missing please request a copy by contacting
# jason@glencoesoftware.com.
#
from ... import SCHEMA_VERSION
from .. import Encoder
from omero.model import ExperimenterI
class Experimenter201501Encoder(Encoder):
    """Encodes an ExperimenterI into an OME 2015-01 'Experimenter' dict."""

    TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2015-01#Experimenter'

    def encode(self, obj):
        # Let the base Encoder populate the common fields first.
        v = super(Experimenter201501Encoder, self).encode(obj)
        if not obj.isLoaded():
            # Unloaded proxy: only the base fields are available.
            return v

        # Optional detail fields are added only when present on the object.
        self.set_if_not_none(v, 'FirstName', obj.firstName)
        self.set_if_not_none(v, 'MiddleName', obj.middleName)
        self.set_if_not_none(v, 'LastName', obj.lastName)
        self.set_if_not_none(v, 'Email', obj.email)
        self.set_if_not_none(v, 'Institution', obj.institution)
        self.set_if_not_none(v, 'UserName', obj.omeName)
        return v
class Experimenter201606Encoder(Experimenter201501Encoder):
    """Same encoding as 2015-01, tagged with the 2016-06 schema URI."""

    TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2016-06#Experimenter'


# Select the encoder matching the schema version this build targets.
if SCHEMA_VERSION == '2015-01':
    encoder = (ExperimenterI, Experimenter201501Encoder)
elif SCHEMA_VERSION == '2016-06':
    encoder = (ExperimenterI, Experimenter201606Encoder)
ExperimenterEncoder = encoder[1]
| gpl-2.0 |
KevinScottt/kernel-nk1-negalite-lt02ltespr | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

# Parse optional [comm] and/or [interval] command-line arguments.
if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        # A single numeric argument is the refresh interval; any other
        # single argument is treated as the command-name filter.
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id hit counters, shared with the display thread.
syscalls = autodict()
def trace_begin():
    # Spawn the periodic display on its own thread; the main thread keeps
    # receiving perf events. (Python 2 'thread' module.)
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # perf callback: count one hit per syscall id, optionally filtered by
    # command name.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict slot is not yet a number.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display-thread loop: clear the terminal, dump counts sorted by
    # frequency, reset, and sleep. Python 2 print statements throughout.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # Unknown syscall id for this arch mapping; skip it.
                pass
        # Counts are per-interval, not cumulative.
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
iij/TracPortalPlugin | setup.py | 1 | 2427 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Python setup script for TracPortalPlugin
#
# (C) 2013-2016 Internet Initiative Japan Inc.
# All rights reserved.
#
# Created on 2013/06/27
# @author: yosinobu@iij.ad.jp
from setuptools import setup, find_packages
# Optional i18n support: only wired in when Babel (and Trac's dist
# helpers) are importable in the build environment.
extra = {}
try:
    import babel  # feature probe only; the name itself is unused

    extractors = [
        ('**.py', 'trac.dist:extract_python', None),
        ('**/templates/**.html', 'genshi', None),
        ('**/templates/**.txt', 'genshi',
         {'template_class': 'genshi.template:NewTextTemplate'}),
    ]
    extra['message_extractors'] = {
        'tracportal': extractors,
        'tracportalopt': extractors,
    }

    from trac.dist import get_l10n_js_cmdclass
    extra['cmdclass'] = get_l10n_js_cmdclass()
except ImportError:
    # Build without translation catalogs.
    pass

setup(
    name='TracPortalPlugin',
    version='0.2',
    author='yosinobu',
    author_email='yosinobu@iij.ad.jp',
    description='Provide trac portal pages for multiple projects.',
    url='https://github.com/iij/TracPortalPlugin',
    license='MIT',
    packages=find_packages(exclude=['*.tests']),
    package_data={
        '': ['templates/*', 'screenshot/*.png'],
        'tracportal': ['htdocs/*.*', 'htdocs/README', 'htdocs/js/*.*',
                       'htdocs/js/messages/*.*', 'htdocs/css/*.*', 'htdocs/guide/*',
                       'locale/*.pot', 'locale/*/LC_MESSAGES/*.po', 'locale/*/LC_MESSAGES/*.mo',
                       'htdocs/css/smoothness/*.css', 'htdocs/css/smoothness/images/*.*'],
        'tracportalopt': []
    },
    zip_safe=True,
    setup_requires=[
        'Trac>=0.12',
    ],
    install_requires=[
        'Trac>=0.12',
    ],
    extras_require={
        'xmlrpc': ['TracXMLRPC>=1.1.5'],
    },
    # Entry points register each component with Trac's plugin system.
    entry_points="""
    [trac.plugins]
    tracportal.api = tracportal.api
    tracportal.core = tracportal.core
    tracportal.project_list.api = tracportal.project_list.api
    tracportal.project_list.web_ui = tracportal.project_list.web_ui
    tracportal.upgrade = tracportal.upgrade
    tracportal.search.web_ui = tracportal.search.web_ui
    tracportal.dashboard.web_ui = tracportal.dashboard.web_ui
    tracportal.dashboard.web_api = tracportal.dashboard.web_api
    tracportal.project.web_ui = tracportal.project.web_ui
    tracportal.i18n = tracportal.i18n
    tracportalopt.project.notification = tracportalopt.project.notification
    """,
    **extra
)
| mit |
mverrilli/kombu | kombu/utils/limits.py | 16 | 2195 | """
kombu.utils.limits
==================
Token bucket implementation for rate limiting.
"""
from __future__ import absolute_import
from kombu.five import monotonic
__all__ = ['TokenBucket']
class TokenBucket(object):
    """Token Bucket Algorithm.

    See http://en.wikipedia.org/wiki/Token_Bucket
    Most of this code was stolen from an entry in the ASPN Python Cookbook:
    http://code.activestate.com/recipes/511490/

    .. admonition:: Thread safety

        This implementation is not thread safe. Access to a `TokenBucket`
        instance should occur within the critical section of any multithreaded
        code.

    """

    #: The rate in tokens/second that the bucket will be refilled.
    fill_rate = None

    #: Maximum number of tokens in the bucket.
    capacity = 1

    #: Timestamp of the last time a token was taken out of the bucket.
    timestamp = None

    def __init__(self, fill_rate, capacity=1):
        self.capacity = float(capacity)
        self._tokens = capacity
        self.fill_rate = float(fill_rate)
        self.timestamp = monotonic()

    def can_consume(self, tokens=1):
        """Check whether `tokens` tokens are currently available; if so,
        consume them. Consumption is all-or-nothing: either exactly
        `tokens` are removed and :const:`True` is returned, or the bucket
        is left untouched and :const:`False` is returned."""
        available = self._get_tokens()
        if tokens > available:
            return False
        self._tokens -= tokens
        return True

    def expected_time(self, tokens=1):
        """Return the time (in seconds) until `tokens` tokens are expected
        to be available. Never consumes; returns 0.0 when enough tokens
        are already present."""
        available = self._get_tokens()
        needed = max(tokens, available)
        return (needed - available) / self.fill_rate

    def _get_tokens(self):
        # Lazily refill based on the time elapsed since the last refill,
        # clamped to capacity. The timestamp only advances while the
        # bucket is below capacity, mirroring the original behavior.
        if self._tokens < self.capacity:
            now = monotonic()
            gained = self.fill_rate * (now - self.timestamp)
            self._tokens = min(self.capacity, self._tokens + gained)
            self.timestamp = now
        return self._tokens
| bsd-3-clause |
lpetrov-pivotal/gpdb | gpMgmt/bin/gppylib/test/behave/mgmt_utils/steps/minirepro_mgmt_utils.py | 18 | 3771 | import os, mmap
from gppylib.test.behave_utils.utils import drop_database_if_exists, drop_table_if_exists
@given('database "{dbname}" does not exist')
def impl(context, dbname):
    # Precondition step: drop the database if a previous run left it behind.
    drop_database_if_exists(context, dbname)
@given('the file "{file_name}" does not exist')
def impl(context, file_name):
    # Precondition step: remove a stale file left by a previous run.
    if os.path.isfile(file_name):
        os.remove(file_name)
@given('the file "{file_name}" exists and contains "{sql_query}"')
def impl(context, file_name, sql_query):
    # Recreate the file from scratch with the given SQL text, creating
    # parent directories as needed.
    if os.path.isfile(file_name):
        os.remove(file_name)
    file_dir = os.path.dirname(file_name)
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    with open(file_name, 'w') as query_f:
        query_f.writelines(sql_query)
@given('the table "{rel_name}" does not exist in database "{db_name}"')
def impl(context, rel_name, db_name):
    # Precondition step: drop the table if a previous run created it.
    drop_table_if_exists(context, rel_name, db_name)
@then('minirepro error should contain {output}')
def impl(context, output):
    # Assert that the captured stderr matches the given regex pattern.
    # Fix: 're' is not imported in this module's header (only os and mmap
    # are), so the step raised NameError when executed; import it locally.
    import re
    pat = re.compile(output)
    if not pat.search(context.error_message):
        err_str = "Expected stderr string '%s', but found:\n'%s'" % (output, context.error_message)
        raise Exception(err_str)
@then('the output file "{output_file}" should exist')
def impl(context, output_file):
    # Assert the minirepro output file was written.
    if not os.path.isfile(output_file):
        err_str = "The output file '%s' does not exist.\n" % output_file
        raise Exception(err_str)
@then('the output file "{output_file}" should contain "{str_before}" before "{str_after}"')
def impl(context, output_file, str_before, str_after):
    # Memory-map the file and assert both strings occur, in that order.
    # NOTE(review): the mmap object is never closed; tolerable in a
    # short-lived test step, but worth confirming.
    with open(output_file, 'r') as output_f:
        s = mmap.mmap(output_f.fileno(), 0, access=mmap.ACCESS_READ)
        pos_before = s.find(str_before)
        pos_after = s.find(str_after)
        if pos_before == -1:
            raise Exception('%s not found.' % str_before)
        if pos_after == -1:
            raise Exception('%s not found.' % str_after)
        if pos_before >= pos_after:
            raise Exception('%s not before %s.' % (str_before, str_after))
@then('the output file "{output_file}" should contain "{search_str}"')
def impl(context, output_file, search_str):
    # Assert the string occurs somewhere in the (memory-mapped) file.
    with open(output_file, 'r') as output_f:
        s = mmap.mmap(output_f.fileno(), 0, access=mmap.ACCESS_READ)
        if s.find(search_str) == -1:
            raise Exception('%s not found.' % search_str)
@then('the output file "{output_file}" should not contain "{search_str}"')
def impl(context, output_file, search_str):
    # Assert the string does NOT occur anywhere in the file.
    with open(output_file, 'r') as output_f:
        s = mmap.mmap(output_f.fileno(), 0, access=mmap.ACCESS_READ)
        if s.find(search_str) != -1:
            raise Exception('%s should not exist.' % search_str)
@then('the output file "{output_file}" should be loaded to database "{db_name}" without error')
def impl(context, output_file, db_name):
    # Recreate the target database, comment out any \connect directives so
    # psql stays connected to db_name, then replay the file and check the
    # captured output for errors.
    # NOTE(review): create_database_if_not_exists and run_gpcommand are not
    # imported in the visible header -- confirm they come from
    # gppylib.test.behave_utils.utils like the drop_* helpers.
    drop_database_if_exists(context, db_name)
    create_database_if_not_exists(context, db_name)
    with open(output_file, "r") as fin:
        sql_command = fin.read().replace('\\connect ', '--\\connect ')
    with open(output_file, "w") as fout:
        fout.writelines(sql_command)
    run_gpcommand(context, 'psql -d %s -f %s' % (db_name, output_file))
    if 'ERROR:' in context.error_message:
        raise Exception('Database %s failed to run %s, error message: %s' % (db_name, output_file, context.error_message))
@then('the file "{query_file}" should be executed in database "{db_name}" without error')
def impl(context, query_file, db_name):
    # Run the query file via psql and fail on any reported error, then
    # drop the database to leave a clean state.
    # NOTE(review): run_gpcommand is not imported in the visible header --
    # confirm it comes from gppylib.test.behave_utils.utils.
    run_gpcommand(context, 'psql -d %s -f %s' % (db_name, query_file))
    if 'ERROR:' in context.error_message:
        raise Exception('Database %s failed to run %s, error message: %s' % (db_name, query_file, context.error_message))
    drop_database_if_exists(context, db_name)
| apache-2.0 |
shuggiefisher/potato | django/conf/locale/sk/formats.py | 232 | 1288 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Slovak (sk) locale formats. Format characters follow Django's date
# template syntax for the *_FORMAT values and strftime for *_INPUT_FORMATS.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# Input formats are tried in order when parsing user-entered values.
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d',  # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',  # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
)

# Number formatting: comma decimal mark, space-grouped thousands.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| bsd-3-clause |
glue-viz/glue-qt | glue/core/tests/test_registry.py | 2 | 1676 | #pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from ..registry import Registry
def setup_function(function):
    # Registry is a singleton; wipe it so every test starts from empty.
    Registry().clear()
def test_singleton():
    # Two constructions must yield the very same instance.
    assert Registry() is Registry()
def test_unique():
    # Distinct labels for distinct objects pass through unchanged.
    r = Registry()
    assert r.register(3, "test") == "test"
    assert r.register(4, "test2") == "test2"
def test_disambiguate():
    # A clashing label for a different object gets a numeric suffix.
    r = Registry()
    assert r.register(3, "test") == "test"
    assert r.register(4, "test") == "test_01"
def test_rename():
    # Re-registering the same object with its existing label is a no-op.
    r = Registry()
    assert r.register(3, "test") == "test"
    assert r.register(4, "test2") == "test2"
    assert r.register(3, "test") == "test"
def test_rename_then_new():
r = Registry()
assert r.register(3, "test") == "test"
assert r.register(3, "test2") == "test2"
assert r.register(4, "test") == "test"
def test_cross_class():
r = Registry()
assert r.register(3, "test") == "test"
assert r.register(3.5, "test") == "test"
assert r.register(4.5, "test") == "test_01"
def test_group_override():
r = Registry()
assert r.register(3, "test") == "test"
assert r.register(3.5, "test", group=int) == "test_01"
assert r.register(4, "test", group=float) == "test"
def test_unregister():
r = Registry()
assert r.register(3, "test") == "test"
r.unregister(3)
assert r.register(4, "test") == "test"
def test_relabel_to_self():
r = Registry()
assert r.register(3, "test") == "test"
assert r.register(3, "test") == "test"
def test_lowest_disambiguation():
r = Registry()
assert r.register(3, "test") == "test"
assert r.register(4, "test") == "test_01"
assert r.register(4, "test") == "test_01"
| bsd-3-clause |
egoid/baytree | lib/python2.7/site-packages/django/contrib/sessions/middleware.py | 129 | 3423 | import time
from importlib import import_module
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.core.exceptions import SuspiciousOperation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import cookie_date
class SessionMiddleware(MiddlewareMixin):
    """Attach a lazy session to each request and persist it (or delete its
    cookie) while building the response."""

    def __init__(self, get_response=None):
        self.get_response = get_response
        # Resolve the backend configured via SESSION_ENGINE once at startup.
        engine = import_module(settings.SESSION_ENGINE)
        self.SessionStore = engine.SessionStore

    def process_request(self, request):
        # session_key may be None; the store is lazy, so no backend access
        # happens until the session is actually read or written.
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
        request.session = self.SessionStore(session_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie or delete
        the session cookie if the session has been emptied.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
            empty = request.session.is_empty()
        except AttributeError:
            # request.session was removed/replaced by other middleware;
            # nothing for us to persist.
            pass
        else:
            # First check if we need to delete this cookie.
            # The session should be deleted only if the session is entirely empty
            if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
                response.delete_cookie(
                    settings.SESSION_COOKIE_NAME,
                    path=settings.SESSION_COOKIE_PATH,
                    domain=settings.SESSION_COOKIE_DOMAIN,
                )
            else:
                if accessed:
                    # The response depends on the Cookie header whenever the
                    # session was read, so mark it for cache correctness.
                    patch_vary_headers(response, ('Cookie',))
                if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
                    if request.session.get_expire_at_browser_close():
                        # Browser-session cookie: no explicit expiry.
                        max_age = None
                        expires = None
                    else:
                        max_age = request.session.get_expiry_age()
                        expires_time = time.time() + max_age
                        expires = cookie_date(expires_time)
                    # Save the session data and refresh the client cookie.
                    # Skip session save for 500 responses, refs #3881.
                    if response.status_code != 500:
                        try:
                            request.session.save()
                        except UpdateError:
                            raise SuspiciousOperation(
                                "The request's session was deleted before the "
                                "request completed. The user may have logged "
                                "out in a concurrent request, for example."
                            )
                        response.set_cookie(
                            settings.SESSION_COOKIE_NAME,
                            request.session.session_key, max_age=max_age,
                            expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                            path=settings.SESSION_COOKIE_PATH,
                            secure=settings.SESSION_COOKIE_SECURE or None,
                            httponly=settings.SESSION_COOKIE_HTTPONLY or None,
                        )
        return response
| mit |
3dfxsoftware/cbss-addons | document_ftp/wizard/ftp_configuration.py | 54 | 2210 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import config
class document_ftp_configuration(osv.osv_memory):
    """Configuration wizard (res.config step) that records the externally
    visible host:port of the document FTP server and repoints the
    'Browse Files' action at it."""
    _name='document.ftp.configuration'
    _description = 'Auto Directory Configuration'
    _inherit = 'res.config'
    _rec_name = 'host'
    _columns = {
        'host': fields.char('Address', size=64,
                            help="Server address or IP and port to which users should connect to for DMS access",
                            required=True),
    }
    _defaults = {
        # Default to the FTP host/port taken from the server config file.
        'host': config.get('ftp_server_host', 'localhost') + ':' + config.get('ftp_server_port', '8021'),
    }

    def execute(self, cr, uid, ids, context=None):
        # Rewrite the URL of the 'document_ftp.action_document_browse'
        # ir.actions.act_url record to ftp://<host>/<dbname>/.
        conf = self.browse(cr, uid, ids[0], context=context)
        data_pool = self.pool.get('ir.model.data')
        # Update the action for FTP browse.
        aid = data_pool._get_id(cr, uid, 'document_ftp', 'action_document_browse')
        aid = data_pool.browse(cr, uid, aid, context=context).res_id
        self.pool.get('ir.actions.act_url').write(cr, uid, [aid],
                {'url': 'ftp://'+(conf.host or 'localhost:8021')+'/' + cr.dbname+'/'})
# Old-style OpenERP API: instantiating registers the model.
document_ftp_configuration()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
obnoxxx/samba | third_party/waf/wafadmin/Tools/gnome.py | 32 | 7744 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
"Gnome support"
import os, re
import TaskGen, Utils, Runner, Task, Build, Options, Logs
import cc
from Logs import error
from TaskGen import taskgen, before, after, feature
n1_regexp = re.compile('<refentrytitle>(.*)</refentrytitle>', re.M)
n2_regexp = re.compile('<manvolnum>(.*)</manvolnum>', re.M)
def postinstall_schemas(prog_name):
    # Register the application's GConf schema after 'waf install'.
    if Build.bld.is_install:
        dir = Build.bld.get_install_path('${PREFIX}/etc/gconf/schemas/%s.schemas' % prog_name)
        if not Options.options.destdir:
            # add the gconf schema
            Utils.pprint('YELLOW', 'Installing GConf schema')
            command = 'gconftool-2 --install-schema-file=%s 1> /dev/null' % dir
            ret = Utils.exec_command(command)
        else:
            # Staged install (DESTDIR set): registration must happen on the
            # target system, so only print the command to run.
            Utils.pprint('YELLOW', 'GConf schema not installed. After install, run this:')
            Utils.pprint('YELLOW', 'gconftool-2 --install-schema-file=%s' % dir)
def postinstall_icons():
    # Refresh the hicolor icon cache after 'waf install'.
    dir = Build.bld.get_install_path('${DATADIR}/icons/hicolor')
    if Build.bld.is_install:
        if not Options.options.destdir:
            # update the pixmap cache directory
            Utils.pprint('YELLOW', "Updating Gtk icon cache.")
            command = 'gtk-update-icon-cache -q -f -t %s' % dir
            ret = Utils.exec_command(command)
        else:
            # Staged install: only print the command for the target system.
            Utils.pprint('YELLOW', 'Icon cache not updated. After install, run this:')
            Utils.pprint('YELLOW', 'gtk-update-icon-cache -q -f -t %s' % dir)
def postinstall_scrollkeeper(prog_name):
    # Update the scrollkeeper (help/OMF) catalog after 'waf install'.
    if Build.bld.is_install:
        # now the scrollkeeper update if we can write to the log file
        if os.access('/var/log/scrollkeeper.log', os.W_OK):
            dir1 = Build.bld.get_install_path('${PREFIX}/var/scrollkeeper')
            dir2 = Build.bld.get_install_path('${DATADIR}/omf/%s' % prog_name)
            command = 'scrollkeeper-update -q -p %s -o %s' % (dir1, dir2)
            ret = Utils.exec_command(command)
def postinstall(prog_name='myapp', schemas=1, icons=1, scrollkeeper=1):
    """Run each post-install hook whose flag argument is truthy."""
    if schemas:
        postinstall_schemas(prog_name)
    if icons:
        postinstall_icons()
    if scrollkeeper:
        postinstall_scrollkeeper(prog_name)
# OBSOLETE
class gnome_doc_taskgen(TaskGen.task_gen):
    # Thin shim so 'gnome_doc' task generators can be instantiated; the
    # actual behavior lives in the @feature('gnome_doc') methods below.
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('gnome_doc')
def init_gnome_doc(self):
    # Default install prefix for gnome documentation files.
    self.default_install_path = '${PREFIX}/share'
@feature('gnome_doc')
@after('init_gnome_doc')
def apply_gnome_doc(self):
    """Create the doc-build tasks for each language in doc_linguas plus 'C'.

    For every translated language an xml2po task merges the .po file with
    the C master document; every language also gets an xsltproc2po task
    producing its scrollkeeper .omf file. During installation, documents,
    figures (falling back to the C figure when no localized one exists)
    and, for C, the extra includes/entities are installed.

    Fix: the figure fallback previously used a bare ``except:``, which
    also swallowed KeyboardInterrupt/SystemExit; it now catches only
    OSError, the exception os.stat raises for a missing path.
    """
    self.env['APPNAME'] = self.doc_module
    lst = self.to_list(self.doc_linguas)
    bld = self.bld
    lst.append('C')
    for x in lst:
        if not x == 'C':
            # Translated doc: merge <lang>/<lang>.po with the C master.
            tsk = self.create_task('xml2po')
            node = self.path.find_resource(x+'/'+x+'.po')
            src = self.path.find_resource('C/%s.xml' % self.doc_module)
            out = self.path.find_or_declare('%s/%s.xml' % (x, self.doc_module))
            tsk.set_inputs([node, src])
            tsk.set_outputs(out)
        else:
            out = self.path.find_resource('%s/%s.xml' % (x, self.doc_module))
        tsk2 = self.create_task('xsltproc2po')
        out2 = self.path.find_or_declare('%s/%s-%s.omf' % (x, self.doc_module, x))
        tsk2.set_outputs(out2)
        node = self.path.find_resource(self.doc_module+".omf.in")
        tsk2.inputs = [node, out]
        # NOTE(review): for x == 'C', 'tsk' is the previous language's task
        # (or unbound when doc_linguas is empty) -- kept as-is to preserve
        # the existing task ordering in this obsolete tool; confirm.
        tsk2.run_after.append(tsk)
        if bld.is_install:
            path = self.install_path + '/gnome/help/%s/%s' % (self.doc_module, x)
            bld.install_files(self.install_path + '/omf', out2, env=self.env)
            for y in self.to_list(self.doc_figures):
                try:
                    # Install the localized figure when it exists ...
                    os.stat(self.path.abspath() + '/' + x + '/' + y)
                    bld.install_as(path + '/' + y, self.path.abspath() + '/' + x + '/' + y)
                except OSError:
                    # ... otherwise fall back to the C (untranslated) one.
                    bld.install_as(path + '/' + y, self.path.abspath() + '/C/' + y)
            bld.install_as(path + '/%s.xml' % self.doc_module, out.abspath(self.env))
            if x == 'C':
                xmls = self.to_list(self.doc_includes)
                xmls.append(self.doc_entities)
                for z in xmls:
                    out = self.path.find_resource('%s/%s' % (x, z))
                    bld.install_as(path + '/%s' % z, out.abspath(self.env))
# OBSOLETE
class xml_to_taskgen(TaskGen.task_gen):
    # Thin shim for the 'xml_to' feature; behavior is in the feature
    # methods below.
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('xml_to')
def init_xml_to(self):
    # Provide defaults for the attributes the 'xml_to' feature reads.
    Utils.def_attrs(self,
        source = 'xmlfile',
        xslt = 'xlsltfile',
        target = 'hey',
        default_install_path = '${PREFIX}',
        task_created = None)
@feature('xml_to')
@after('init_xml_to')
def apply_xml_to(self):
    # Create one 'xmlto' task transforming the xml source to html using
    # the configured xslt stylesheet.
    xmlfile = self.path.find_resource(self.source)
    xsltfile = self.path.find_resource(self.xslt)
    tsk = self.create_task('xmlto', [xmlfile, xsltfile], xmlfile.change_ext('html'))
    tsk.install_path = self.install_path
def sgml_scan(self):
    # Waf scanner hook (bound to the sgml2man task class below): parse the
    # input document for <refentrytitle>/<manvolnum> to derive the man
    # page name "<title>.<volume>" and declare it as the task output.
    node = self.inputs[0]
    env = self.env
    # NOTE(review): 'variant' is computed but never used.
    variant = node.variant(env)
    fi = open(node.abspath(env), 'r')
    content = fi.read()
    fi.close()
    # we should use a sgml parser :-/
    name = n1_regexp.findall(content)[0]
    num = n2_regexp.findall(content)[0]
    doc_name = name+'.'+num
    if not self.outputs:
        self.outputs = [self.generator.path.find_or_declare(doc_name)]
    # Scanner protocol: (dependency nodes, names); we report only the name.
    return ([], [doc_name])
class gnome_sgml2man_taskgen(TaskGen.task_gen):
    # Thin shim for the 'gnome_sgml2man' feature; behavior is in
    # apply_gnome_sgml2man below.
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('gnome_sgml2man')
def apply_gnome_sgml2man(self):
    """
    we could make it more complicated, but for now we just scan the document each time
    """
    # 'appname' must have been set on the task generator.
    assert(getattr(self, 'appname', None))

    def install_result(task):
        # Post-run hook: install the generated page under man<section>/,
        # where the section is the last character of the output name.
        out = task.outputs[0]
        name = out.name
        ext = name[-1]
        env = task.env
        self.bld.install_files('${DATADIR}/man/man%s/' % ext, out, env)

    # Create one sgml2man task per .sgml file in this directory.
    self.bld.rescan(self.path)
    for name in self.bld.cache_dir_contents[self.path.id]:
        base, ext = os.path.splitext(name)
        if ext != '.sgml': continue
        task = self.create_task('sgml2man')
        task.set_inputs(self.path.find_resource(name))
        task.task_generator = self
        if self.bld.is_install: task.install = install_result
        # no outputs, the scanner does it
        # no caching for now, this is not a time-critical feature
        # in the future the scanner can be used to do more things (find dependencies, etc)
        task.scan()
# Register the task classes used by the feature methods above.
cls = Task.simple_task_type('sgml2man', '${SGML2MAN} -o ${TGT[0].bld_dir(env)} ${SRC} > /dev/null', color='BLUE')
cls.scan = sgml_scan
cls.quiet = 1
Task.simple_task_type('xmlto', '${XMLTO} html -m ${SRC[1].abspath(env)} ${SRC[0].abspath(env)}')
Task.simple_task_type('xml2po', '${XML2PO} ${XML2POFLAGS} ${SRC} > ${TGT}', color='BLUE')
# xsltproc command building the scrollkeeper .omf metadata; note that the
# language code is recovered from the target file name '<module>-<lang>.omf'
# by the ${TGT[0]...split('-')[-1]} expression.
xslt_magic = """${XSLTPROC2PO} -o ${TGT[0].abspath(env)} \
--stringparam db2omf.basename ${APPNAME} \
--stringparam db2omf.format docbook \
--stringparam db2omf.lang ${TGT[0].abspath(env)[:-4].split('-')[-1]} \
--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \
--stringparam db2omf.omf_dir ${PREFIX}/share/omf \
--stringparam db2omf.help_dir ${PREFIX}/share/gnome/help \
--stringparam db2omf.omf_in ${SRC[0].abspath(env)} \
--stringparam db2omf.scrollkeeper_cl ${SCROLLKEEPER_DATADIR}/Templates/C/scrollkeeper_cl.xml \
${DB2OMF} ${SRC[1].abspath(env)}"""
#--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \
Task.simple_task_type('xsltproc2po', xslt_magic, color='BLUE')
def detect(conf):
    # Configure-time checks: locate the external tools needed to build
    # gnome documentation and store them in the configuration env.
    conf.check_tool('gnu_dirs glib2 dbus')
    sgml2man = conf.find_program('docbook2man', var='SGML2MAN')

    def getstr(varname):
        # NOTE(review): this helper appears to be unused in this function.
        return getattr(Options.options, varname, '')

    # addefine also sets the variable to the env
    conf.define('GNOMELOCALEDIR', os.path.join(conf.env['DATADIR'], 'locale'))
    xml2po = conf.find_program('xml2po', var='XML2PO')
    xsltproc2po = conf.find_program('xsltproc', var='XSLTPROC2PO')
    conf.env['XML2POFLAGS'] = '-e -p'
    conf.env['SCROLLKEEPER_DATADIR'] = Utils.cmd_output("scrollkeeper-config --pkgdatadir", silent=1).strip()
    conf.env['DB2OMF'] = Utils.cmd_output("/usr/bin/pkg-config --variable db2omf gnome-doc-utils", silent=1).strip()
def set_options(opt):
    # Command-line option common to gnome projects.
    opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
| gpl-3.0 |
skyoo/jumpserver | apps/assets/filters.py | 2 | 4717 | # -*- coding: utf-8 -*-
#
from rest_framework.compat import coreapi, coreschema
from rest_framework import filters
from django.db.models import Q
from .models import Label
from assets.utils import is_query_node_all_assets, get_node
class AssetByNodeFilterBackend(filters.BaseFilterBackend):
    """Filter assets by the `node`/`all` query params taken directly from
    the request (via `get_node` / `is_query_node_all_assets`)."""
    fields = ['node', 'all']

    def get_schema_fields(self, view):
        # Advertise the supported query parameters to schema generation.
        return [
            coreapi.Field(
                name=field, location='query', required=False,
                type='string', example='', description='', schema=None,
            )
            for field in self.fields
        ]

    def filter_node_related_all(self, queryset, node):
        # Assets on the node itself or on any descendant node (descendant
        # keys share the "<key>:" prefix).
        return queryset.filter(
            Q(nodes__key__istartswith=f'{node.key}:') |
            Q(nodes__key=node.key)
        ).distinct()

    def filter_node_related_direct(self, queryset, node):
        # Assets attached directly to the node only.
        return queryset.filter(nodes__key=node.key).distinct()

    def filter_queryset(self, request, queryset, view):
        node = get_node(request)
        if node is None:
            # No node parameter: leave the queryset untouched.
            return queryset
        query_all = is_query_node_all_assets(request)
        if query_all:
            return self.filter_node_related_all(queryset, node)
        else:
            return self.filter_node_related_direct(queryset, node)
class FilterAssetByNodeFilterBackend(filters.BaseFilterBackend):
    """
    Must be used together with `assets.api.mixin.FilterAssetByNodeMixin`,
    which supplies the `node` and `is_query_node_all_assets` attributes
    read from the view.
    """
    fields = ['node', 'all']

    def get_schema_fields(self, view):
        # Advertise the supported query parameters to schema generation.
        return [
            coreapi.Field(
                name=field, location='query', required=False,
                type='string', example='', description='', schema=None,
            )
            for field in self.fields
        ]

    def filter_node_related_all(self, queryset, node):
        # Assets on the node itself or on any descendant node (descendant
        # keys share the "<key>:" prefix). Same structure as the sibling
        # AssetByNodeFilterBackend for consistency.
        return queryset.filter(
            Q(nodes__key__istartswith=f'{node.key}:') |
            Q(nodes__key=node.key)
        ).distinct()

    def filter_node_related_direct(self, queryset, node):
        # Assets attached directly to the node only.
        return queryset.filter(nodes__key=node.key).distinct()

    def filter_queryset(self, request, queryset, view):
        node = view.node
        if node is None:
            # No node selected on the view: leave the queryset untouched.
            return queryset
        query_all = view.is_query_node_all_assets
        if query_all:
            return self.filter_node_related_all(queryset, node)
        else:
            return self.filter_node_related_direct(queryset, node)
class LabelFilterBackend(filters.BaseFilterBackend):
    """Filter objects by one or more `label` query params of the form
    "name:value" (a legacy "name#value" form is also accepted)."""
    sep = ':'
    query_arg = 'label'

    def get_schema_fields(self, view):
        example = self.sep.join(['os', 'linux'])
        return [
            coreapi.Field(
                name=self.query_arg, location='query', required=False,
                type='string', example=example, description=''
            )
        ]

    def get_query_labels(self, request):
        # Returns None when no label param was supplied (no filtering),
        # [] when params were supplied but none parsed/matched, otherwise
        # the ids of matching active labels.
        labels_query = request.query_params.getlist(self.query_arg)
        if not labels_query:
            return None
        q = None
        for kv in labels_query:
            if '#' in kv:
                # Legacy '#' separator support.
                # NOTE(review): this mutates self.sep for all later pairs
                # of the loop -- confirm backend instances are not shared
                # across requests before relying on it.
                self.sep = '#'
            if self.sep not in kv:
                continue
            key, value = kv.strip().split(self.sep)[:2]
            if not all([key, value]):
                # Skip pairs with an empty name or value.
                continue
            if q:
                q |= Q(name=key, value=value)
            else:
                q = Q(name=key, value=value)
        if not q:
            return []
        labels = Label.objects.filter(q, is_active=True)\
            .values_list('id', flat=True)
        return labels

    def filter_queryset(self, request, queryset, view):
        labels = self.get_query_labels(request)
        if labels is None:
            return queryset
        if len(labels) == 0:
            return queryset.none()
        # AND semantics: the object must carry every requested label.
        for label in labels:
            queryset = queryset.filter(labels=label)
        return queryset
class AssetRelatedByNodeFilterBackend(AssetByNodeFilterBackend):
    """Same node filtering as the parent class, for models that reach
    Asset through an `asset` foreign key (hence the `asset__` prefix)."""

    def filter_node_related_all(self, queryset, node):
        return queryset.filter(
            Q(asset__nodes__key__istartswith=f'{node.key}:') |
            Q(asset__nodes__key=node.key)
        ).distinct()

    def filter_node_related_direct(self, queryset, node):
        return queryset.filter(asset__nodes__key=node.key).distinct()
class IpInFilterBackend(filters.BaseFilterBackend):
    """Filter a queryset by a comma-separated `ips` query parameter."""

    def filter_queryset(self, request, queryset, view):
        ips = request.query_params.get('ips')
        if not ips:
            # No parameter: leave the queryset unfiltered.
            return queryset
        ip_list = [i.strip() for i in ips.split(',')]
        queryset = queryset.filter(ip__in=ip_list)
        return queryset

    def get_schema_fields(self, view):
        return [
            coreapi.Field(
                name='ips', location='query', required=False, type='string',
                schema=coreschema.String(
                    title='ips',
                    description='ip in filter'
                )
            )
        ]
| gpl-2.0 |
huxh10/iSDX | flanc/ofdpa20.py | 2 | 8033 | # Author:
# Rick Porter (Applied Communication Sciences)
import os
import sys
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if np not in sys.path:
sys.path.append(np)
import util.log
LOG = True
class OFDPA20():
    # Borg pattern: all instances share __shared_state, so every OFDPA20()
    # object behaves as one shared driver instance.
    __shared_state = {}

    def __init__(self, config):
        self.__dict__ = self.__shared_state # makes this a singleton
        if hasattr(self,'config'):
            # shared instance already initialized
            return
        self.config = config
        # datapath.id -> OFDPA20_switch, created lazily per switch.
        self.switch_info = {}

    def get_table_id(self):
        # All flow mods go to table 60 -- presumably the OF-DPA ACL policy
        # table; TODO confirm against the OF-DPA 2.0 specification.
        return 60

    def make_instructions_and_group_mods(self, fm, datapath):
        # Lazily create the per-switch state on first use of a datapath.
        if not datapath.id in self.switch_info:
            self.switch_info[datapath.id] = OFDPA20_switch(self.config, datapath)
        return self.switch_info[datapath.id].make_instructions_and_group_mods(fm)

    def is_group_mod_installed_in_switch(self, datapath, group_mod):
        return self.switch_info[datapath.id].is_group_mod_installed_in_switch(group_mod)

    def mark_group_mod_as_installed(self, datapath, group_mod):
        self.switch_info[datapath.id].mark_group_mod_as_installed(group_mod)
class OFDPA20_switch():
    """Per-datapath OF-DPA 2.0 state: translates abstract flow-mod actions
    into OpenFlow instructions plus the group mods (L2 interface /
    multicast / rewrite groups) those instructions reference."""

    def __init__(self, config, datapath):
        self.config = config
        self.datapath = datapath
        # NOTE(review): duplicate assignment -- self.config was already set
        # two lines above; harmless, but could be removed.
        self.config = config
        self.logger = util.log.getLogger('OFDPA20')
        self.log_info = ' Switch: ' + self.config.dpid_2_name[datapath.id] + ' (' + str(datapath.id) + ')';
        self.logger.info('Init OFDPA_switch: ' + self.log_info)
        self.vlan = 1 # untagged inputs go on vlan 1
        self.l2_rewrite_to_gid = {} # mapping from src/dst rewrite string to unique ID
        self.l2_rewrite_uniq = 0
        self.l2_multicast_to_gid = {} # mapping from mcast tuple of port #'s to unique ID
        self.l2_multicast_uniq = 0
        # group id -> OFPGroupMod; guarantees one mod object per gid.
        self.gid_to_group_mod = {}
        # Group mods already pushed to the switch (see mark_* below).
        self.installed_group_mods = set()

    def make_instructions_and_group_mods(self, fm):
        # Translate fm.actions into (instructions, group_mods).
        fwd_ports = []
        eth_src = None
        eth_dst = None
        group_mods = []
        for action, value in fm.actions.iteritems():
            if action == "fwd":
                if self.config.isMultiTableMode():
                    self.logger.error('ERROR: OFDPA does not support multi table mode')
                elif self.config.isMultiSwitchMode():
                    # Ports may be given numerically or by datapath name
                    # (possibly via an alias).
                    for port in value:
                        if isinstance( port, int ) or port.isdigit():
                            fwd_ports.append(int(port))
                        else:
                            if port in self.config.dp_alias:
                                port = self.config.dp_alias[port]
                            fwd_ports.append(self.config.datapath_ports[fm.rule_type][port])
                elif self.config.isOneSwitchMode():
                    # Resolve names against loops, then main/arp port maps.
                    for port in value:
                        if isinstance( port, int ) or port.isdigit():
                            fwd_ports.append(int(port))
                        elif port in self.config.loops:
                            fwd_ports.append(self.config.loops[port][0])
                        elif port in self.config.datapath_ports["main"]:
                            fwd_ports.append(self.config.datapath_ports["main"][port])
                        elif port in self.config.datapath_ports["arp"]:
                            fwd_ports.append(self.config.datapath_ports["arp"][port])
            elif action == "set_eth_src":
                eth_src = value
            elif action == "set_eth_dst":
                eth_dst = value
            else:
                self.logger.error('Unhandled action: ' + action + self.log_info)
        if fwd_ports:
            # Each output port needs an L2 interface group in OF-DPA.
            for port in fwd_ports:
                self.logger.debug("making l2 ifc group" + self.log_info)
                group_mods.append(self.make_l2_interface_group_mod(fm, port))
        else:
            self.logger.warning('No forward action, so match will result in drop' + self.log_info)
        if eth_src or eth_dst:
            if len(fwd_ports) > 1:
                self.logger.error('Multicast not supported in combination with MAC rewrite - ignoring all but first port' + self.log_info)
            # NOTE(review): raises IndexError if a MAC rewrite is requested
            # with no forward port -- confirm callers never do this.
            group_mods.append(self.make_l2_rewrite_group_mod(fm, fwd_ports[0], eth_src,eth_dst))
            group_actions = [fm.parser.OFPActionGroup(group_id=self.l2_rewrite_group_id(fwd_ports[0],eth_src,eth_dst))]
        elif len(fwd_ports) == 1:
            group_actions = [fm.parser.OFPActionGroup(group_id=self.l2_interface_group_id(fwd_ports[0]))]
        elif len(fwd_ports) > 1:
            group_mods.append(self.make_l2_multicast_group_mod(fm, fwd_ports))
            group_actions = [fm.parser.OFPActionGroup(group_id=self.l2_multicast_group_id(fwd_ports))]
        elif len(fwd_ports) == 0:
            # drop
            group_actions = []
        else:
            self.logger.error("Unreachable code (I thought)!" + self.log_info)
        instructions = [fm.parser.OFPInstructionActions(self.config.ofproto.OFPIT_APPLY_ACTIONS, group_actions)]
        return (instructions, group_mods)

    def make_group_mod(self, fm, gid, actions):
        # Build (or reuse) the OFPGroupMod for gid.
        if gid in self.gid_to_group_mod:
            # only ever create one GroupMod object for a gid
            return self.gid_to_group_mod[gid]
        buckets = [fm.parser.OFPBucket(actions=actions)]
        group_mod = fm.parser.OFPGroupMod(datapath=self.datapath,
                                          command=self.config.ofproto.OFPGC_ADD,
                                          type_=self.config.ofproto.OFPGT_INDIRECT,
                                          group_id=gid,
                                          buckets=buckets)
        self.gid_to_group_mod[gid] = group_mod
        return group_mod

    # L2 Interface Group stuff
    def make_l2_interface_group_mod(self, fm, port):
        # Output on the port and strip the (internal) vlan tag.
        actions = [fm.parser.OFPActionOutput(port),
                   fm.parser.OFPActionPopVlan()]
        return self.make_group_mod(fm, self.l2_interface_group_id(port), actions)

    def l2_interface_group_id(self, port):
        # vlan in the upper 16 bits, port in the lower 16 -- presumably the
        # OF-DPA L2 Interface group naming scheme; confirm against spec.
        return (self.vlan << 16) + (port & 0xffff)

    # L2 Multicast Group stuff
    def make_l2_multicast_group_mod(self, fm, ports):
        # A multicast group chains one L2 interface group per output port.
        actions = []
        for port in ports:
            actions.append(fm.parser.OFPActionGroup(group_id=self.l2_interface_group_id(port)))
        return self.make_group_mod(fm, self.l2_multicast_group_id(ports), actions)

    def l2_multicast_group_id(self, ports):
        # One id is allocated per unique (sorted) set of ports.
        mcast_key = tuple(sorted(ports))
        if not mcast_key in self.l2_multicast_to_gid:
            self.l2_multicast_to_gid[mcast_key] = 0x30000000 | (self.vlan << 16) | (self.l2_multicast_uniq & 0xffff)
            self.l2_multicast_uniq += 1
        return self.l2_multicast_to_gid[mcast_key]

    # L2 Rewrite Group stuff
    def make_l2_rewrite_group_mod(self, fm, port, eth_src, eth_dst):
        # Rewrite the MAC(s), then hand off to the port's L2 interface group.
        actions = [fm.parser.OFPActionGroup(group_id=self.l2_interface_group_id(port))]
        if eth_src:
            actions.append(fm.parser.OFPActionSetField(eth_src=eth_src))
        if eth_dst:
            actions.append(fm.parser.OFPActionSetField(eth_dst=eth_dst))
        return self.make_group_mod(fm, self.l2_rewrite_group_id(port, eth_src, eth_dst), actions)

    def l2_rewrite_group_id(self, port, eth_src, eth_dst):
        # One id per unique (port, eth_src, eth_dst) combination.
        rewrite_key = "port: " + str(port) + ('' if not eth_src else " eth_src: " + eth_src) + ('' if not eth_dst else " eth_dst: " + eth_dst)
        if not rewrite_key in self.l2_rewrite_to_gid:
            self.l2_rewrite_to_gid[rewrite_key] = (1 << 28) | (self.l2_rewrite_uniq & 0xffff)
            self.l2_rewrite_uniq += 1
        return self.l2_rewrite_to_gid[rewrite_key]

    def is_group_mod_installed_in_switch(self, group_mod):
        # True once mark_group_mod_as_installed() has recorded the mod.
        return group_mod in self.installed_group_mods

    def mark_group_mod_as_installed(self, group_mod):
        self.logger.info('Group mod installed ' + str(group_mod) + self.log_info)
        self.installed_group_mods.add(group_mod)
| apache-2.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/twisted/python/failure.py | 11 | 23460 | # -*- test-case-name: twisted.test.test_failure -*-
# See also test suite twisted.test.test_pbfailure
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Asynchronous-friendly error mechanism.
See L{Failure}.
"""
from __future__ import division, absolute_import, print_function
# System Imports
import sys
import linecache
import inspect
import opcode
from inspect import getmro
from twisted.python.compat import _PY3, NativeStringIO as StringIO
from twisted.python import reflect
from twisted.python._oldstyle import _oldStyle, _shouldEnableNewStyle
count = 0
traceupLength = 4
class DefaultException(Exception):
    """Generic exception type used when no more specific type applies."""
    pass
def format_frames(frames, write, detail="default"):
    """Format *frames* and emit the result through *write*.

    @param frames: a list of frames as used by Failure.frames, each frame
        being a tuple of
        (funcName, fileName, lineNumber, locals.items(), globals.items())
    @type frames: list
    @param write: a callable invoked with each formatted string.
    @type write: callable
    @param detail: one of "default", "brief", "verbose" or
        "verbose-vars-not-captured"; the last is used by
        C{Failure.printDetailedTraceback} when verbose output is requested
        but no variables were captured, to show an explicit warning about
        the missing data.
    @type detail: string
    """
    allowed = ('default', 'brief', 'verbose', 'verbose-vars-not-captured')
    if detail not in allowed:
        raise ValueError(
            "Detail must be default, brief, verbose, or "
            "verbose-vars-not-captured. (not %r)" % (detail,))
    if detail == "brief":
        # One compact location line per frame.
        for fn, fname, line, locs, globs in frames:
            write('%s:%s:%s\n' % (fname, line, fn))
    elif detail == "default":
        # Traceback-style output, including the source line when available.
        for fn, fname, line, locs, globs in frames:
            write(' File "%s", line %s, in %s\n' % (fname, line, fn))
            write(' %s\n' % linecache.getline(fname, line).strip())
    elif detail == "verbose-vars-not-captured":
        for fn, fname, line, locs, globs in frames:
            write("%s:%d: %s(...)\n" % (fname, line, fn))
            write(' [Capture of Locals and Globals disabled (use captureVars=True)]\n')
    else:
        # "verbose": dump each frame with its captured locals and globals.
        for fn, fname, line, locs, globs in frames:
            write("%s:%d: %s(...)\n" % (fname, line, fn))
            write(' [ Locals ]\n')
            for name, val in locs:
                write(" %s : %s\n" % (name, repr(val)))
            write(' ( Globals )\n')
            for name, val in globs:
                write(" %s : %s\n" % (name, repr(val)))
# slyphon: i have a need to check for this value in trial
# so I made it a module-level constant
EXCEPTION_CAUGHT_HERE = "--- <exception caught here> ---"
class NoCurrentExceptionError(Exception):
    """
    Raised when trying to create a Failure from the current interpreter
    exception state and there is no current exception state (i.e.
    L{sys.exc_info} reports no active exception).
    """
class _Traceback(object):
"""
Fake traceback object which can be passed to functions in the standard
library L{traceback} module.
"""
def __init__(self, frames):
"""
Construct a fake traceback object using a list of frames. Note that
although frames generally include locals and globals, this information
is not kept by this object, since locals and globals are not used in
standard tracebacks.
@param frames: [(methodname, filename, lineno, locals, globals), ...]
"""
assert len(frames) > 0, "Must pass some frames"
head, frames = frames[0], frames[1:]
name, filename, lineno, localz, globalz = head
self.tb_frame = _Frame(name, filename)
self.tb_lineno = lineno
if len(frames) == 0:
self.tb_next = None
else:
self.tb_next = _Traceback(frames)
class _Frame(object):
"""
A fake frame object, used by L{_Traceback}.
@ivar f_code: fake L{code<types.CodeType>} object
@ivar f_globals: fake f_globals dictionary (usually empty)
@ivar f_locals: fake f_locals dictionary (usually empty)
"""
def __init__(self, name, filename):
"""
@param name: method/function name for this frame.
@type name: C{str}
@param filename: filename for this frame.
@type name: C{str}
"""
self.f_code = _Code(name, filename)
self.f_globals = {}
self.f_locals = {}
class _Code(object):
"""
A fake code object, used by L{_Traceback} via L{_Frame}.
"""
def __init__(self, name, filename):
self.co_name = name
self.co_filename = filename
@_oldStyle
class Failure:
"""
A basic abstraction for an error that has occurred.
This is necessary because Python's built-in error mechanisms are
inconvenient for asynchronous communication.
The C{stack} and C{frame} attributes contain frames. Each frame is a tuple
of (funcName, fileName, lineNumber, localsItems, globalsItems), where
localsItems and globalsItems are the contents of
C{locals().items()}/C{globals().items()} for that frame, or an empty tuple
if those details were not captured.
@ivar value: The exception instance responsible for this failure.
@ivar type: The exception's class.
@ivar stack: list of frames, innermost last, excluding C{Failure.__init__}.
@ivar frames: list of frames, innermost first.
"""
pickled = 0
stack = None
# The opcode of "yield" in Python bytecode. We need this in _findFailure in
# order to identify whether an exception was thrown by a
# throwExceptionIntoGenerator.
_yieldOpcode = chr(opcode.opmap["YIELD_VALUE"])
def __init__(self, exc_value=None, exc_type=None, exc_tb=None,
captureVars=False):
"""
Initialize me with an explanation of the error.
By default, this will use the current C{exception}
(L{sys.exc_info}()). However, if you want to specify a
particular kind of failure, you can pass an exception as an
argument.
If no C{exc_value} is passed, then an "original" C{Failure} will
be searched for. If the current exception handler that this
C{Failure} is being constructed in is handling an exception
raised by L{raiseException}, then this C{Failure} will act like
the original C{Failure}.
For C{exc_tb} only L{traceback} instances or L{None} are allowed.
If L{None} is supplied for C{exc_value}, the value of C{exc_tb} is
ignored, otherwise if C{exc_tb} is L{None}, it will be found from
execution context (ie, L{sys.exc_info}).
@param captureVars: if set, capture locals and globals of stack
frames. This is pretty slow, and makes no difference unless you
are going to use L{printDetailedTraceback}.
"""
global count
count = count + 1
self.count = count
self.type = self.value = tb = None
self.captureVars = captureVars
if isinstance(exc_value, str) and exc_type is None:
raise TypeError("Strings are not supported by Failure")
stackOffset = 0
if exc_value is None:
exc_value = self._findFailure()
if exc_value is None:
self.type, self.value, tb = sys.exc_info()
if self.type is None:
raise NoCurrentExceptionError()
stackOffset = 1
elif exc_type is None:
if isinstance(exc_value, Exception):
self.type = exc_value.__class__
else: #allow arbitrary objects.
self.type = type(exc_value)
self.value = exc_value
else:
self.type = exc_type
self.value = exc_value
if isinstance(self.value, Failure):
self.__dict__ = self.value.__dict__
return
if tb is None:
if exc_tb:
tb = exc_tb
elif _PY3:
if hasattr(self.value, "__traceback__"):
tb = self.value.__traceback__
frames = self.frames = []
stack = self.stack = []
# added 2003-06-23 by Chris Armstrong. Yes, I actually have a
# use case where I need this traceback object, and I've made
# sure that it'll be cleaned up.
self.tb = tb
if tb:
f = tb.tb_frame
elif not isinstance(self.value, Failure):
# we don't do frame introspection since it's expensive,
# and if we were passed a plain exception with no
# traceback, it's not useful anyway
f = stackOffset = None
while stackOffset and f:
# This excludes this Failure.__init__ frame from the
# stack, leaving it to start with our caller instead.
f = f.f_back
stackOffset -= 1
# Keeps the *full* stack. Formerly in spread.pb.print_excFullStack:
#
# The need for this function arises from the fact that several
# PB classes have the peculiar habit of discarding exceptions
# with bareword "except:"s. This premature exception
# catching means tracebacks generated here don't tend to show
# what called upon the PB object.
while f:
if captureVars:
localz = f.f_locals.copy()
if f.f_locals is f.f_globals:
globalz = {}
else:
globalz = f.f_globals.copy()
for d in globalz, localz:
if "__builtins__" in d:
del d["__builtins__"]
localz = localz.items()
globalz = globalz.items()
else:
localz = globalz = ()
stack.insert(0, (
f.f_code.co_name,
f.f_code.co_filename,
f.f_lineno,
localz,
globalz,
))
f = f.f_back
while tb is not None:
f = tb.tb_frame
if captureVars:
localz = f.f_locals.copy()
if f.f_locals is f.f_globals:
globalz = {}
else:
globalz = f.f_globals.copy()
for d in globalz, localz:
if "__builtins__" in d:
del d["__builtins__"]
localz = list(localz.items())
globalz = list(globalz.items())
else:
localz = globalz = ()
frames.append((
f.f_code.co_name,
f.f_code.co_filename,
tb.tb_lineno,
localz,
globalz,
))
tb = tb.tb_next
if inspect.isclass(self.type) and issubclass(self.type, Exception):
parentCs = getmro(self.type)
self.parents = list(map(reflect.qual, parentCs))
else:
self.parents = [self.type]
def trap(self, *errorTypes):
"""Trap this failure if its type is in a predetermined list.
This allows you to trap a Failure in an error callback. It will be
automatically re-raised if it is not a type that you expect.
The reason for having this particular API is because it's very useful
in Deferred errback chains::
def _ebFoo(self, failure):
r = failure.trap(Spam, Eggs)
print('The Failure is due to either Spam or Eggs!')
if r == Spam:
print('Spam did it!')
elif r == Eggs:
print('Eggs did it!')
If the failure is not a Spam or an Eggs, then the Failure will be
'passed on' to the next errback. In Python 2 the Failure will be
raised; in Python 3 the underlying exception will be re-raised.
@type errorTypes: L{Exception}
"""
error = self.check(*errorTypes)
if not error:
if _shouldEnableNewStyle:
self.raiseException()
else:
raise self
return error
def check(self, *errorTypes):
"""Check if this failure's type is in a predetermined list.
@type errorTypes: list of L{Exception} classes or
fully-qualified class names.
@returns: the matching L{Exception} type, or None if no match.
"""
for error in errorTypes:
err = error
if inspect.isclass(error) and issubclass(error, Exception):
err = reflect.qual(error)
if err in self.parents:
return error
return None
    # It would be nice to use twisted.python.compat.reraise, but that breaks
    # the stack exploration in _findFailure; possibly this can be fixed in
    # #5931.
    if _PY3:
        def raiseException(self):
            raise self.value.with_traceback(self.tb)
    else:
        # The Python 2 three-argument raise form is a SyntaxError on
        # Python 3, so it must be hidden inside exec() to keep this module
        # importable on both versions.
        exec("""def raiseException(self):
        raise self.type, self.value, self.tb""")
    # Attach the docstring after the fact since the py2 variant is built
    # via exec() and cannot carry one inline.
    raiseException.__doc__ = (
        """
        raise the original exception, preserving traceback
        information if available.
        """)
def throwExceptionIntoGenerator(self, g):
"""
Throw the original exception into the given generator,
preserving traceback information if available.
@return: The next value yielded from the generator.
@raise StopIteration: If there are no more values in the generator.
@raise anything else: Anything that the generator raises.
"""
return g.throw(self.type, self.value, self.tb)
    def _findFailure(cls):
        """
        Find the failure that represents the exception currently in context.

        Walks the active traceback looking for frames created by
        raiseException or throwExceptionIntoGenerator; those frames' locals
        hold the original Failure instance.
        """
        tb = sys.exc_info()[-1]
        if not tb:
            # No exception is being handled right now.
            return
        # Find the last and second-to-last traceback entries.
        secondLastTb = None
        lastTb = tb
        while lastTb.tb_next:
            secondLastTb = lastTb
            lastTb = lastTb.tb_next
        lastFrame = lastTb.tb_frame
        # NOTE: f_locals.get('self') is used rather than
        # f_locals['self'] because psyco frames do not contain
        # anything in their locals() dicts. psyco makes debugging
        # difficult anyhow, so losing the Failure objects (and thus
        # the tracebacks) here when it is used is not that big a deal.
        # handle raiseException-originated exceptions
        if lastFrame.f_code is cls.raiseException.__code__:
            return lastFrame.f_locals.get('self')
        # handle throwExceptionIntoGenerator-originated exceptions
        # this is tricky, and differs if the exception was caught
        # inside the generator, or above it:
        # it is only really originating from
        # throwExceptionIntoGenerator if the bottom of the traceback
        # is a yield.
        # Pyrex and Cython extensions create traceback frames
        # with no co_code, but they can't yield so we know it's okay to just return here.
        if ((not lastFrame.f_code.co_code) or
            lastFrame.f_code.co_code[lastTb.tb_lasti] != cls._yieldOpcode):
            return
        # if the exception was caught above the generator.throw
        # (outside the generator), it will appear in the tb (as the
        # second last item):
        if secondLastTb:
            frame = secondLastTb.tb_frame
            if frame.f_code is cls.throwExceptionIntoGenerator.__code__:
                return frame.f_locals.get('self')
        # if the exception was caught below the generator.throw
        # (inside the generator), it will appear in the frames' linked
        # list, above the top-level traceback item (which must be the
        # generator frame itself, thus its caller is
        # throwExceptionIntoGenerator).
        frame = tb.tb_frame.f_back
        if frame and frame.f_code is cls.throwExceptionIntoGenerator.__code__:
            return frame.f_locals.get('self')
    # Installed by hand in the pre-decorator style used by this module.
    _findFailure = classmethod(_findFailure)
    def __repr__(self):
        """Return e.g. C{<...Failure exceptions.ValueError: message>}."""
        return "<%s %s: %s>" % (reflect.qual(self.__class__),
                                reflect.qual(self.type),
                                self.getErrorMessage())
    def __str__(self):
        """Return the brief traceback rendering of this failure."""
        return "[Failure instance: %s]" % self.getBriefTraceback()
def __getstate__(self):
"""Avoid pickling objects in the traceback.
"""
if self.pickled:
return self.__dict__
c = self.__dict__.copy()
c['frames'] = [
[
v[0], v[1], v[2],
_safeReprVars(v[3]),
_safeReprVars(v[4]),
] for v in self.frames
]
# added 2003-06-23. See comment above in __init__
c['tb'] = None
if self.stack is not None:
# XXX: This is a band-aid. I can't figure out where these
# (failure.stack is None) instances are coming from.
c['stack'] = [
[
v[0], v[1], v[2],
_safeReprVars(v[3]),
_safeReprVars(v[4]),
] for v in self.stack
]
c['pickled'] = 1
return c
def cleanFailure(self):
"""
Remove references to other objects, replacing them with strings.
On Python 3, this will also set the C{__traceback__} attribute of the
exception instance to L{None}.
"""
self.__dict__ = self.__getstate__()
if _PY3:
self.value.__traceback__ = None
def getTracebackObject(self):
"""
Get an object that represents this Failure's stack that can be passed
to traceback.extract_tb.
If the original traceback object is still present, return that. If this
traceback object has been lost but we still have the information,
return a fake traceback object (see L{_Traceback}). If there is no
traceback information at all, return None.
"""
if self.tb is not None:
return self.tb
elif len(self.frames) > 0:
return _Traceback(self.frames)
else:
return None
def getErrorMessage(self):
"""Get a string of the exception which caused this Failure."""
if isinstance(self.value, Failure):
return self.value.getErrorMessage()
return reflect.safe_str(self.value)
def getBriefTraceback(self):
io = StringIO()
self.printBriefTraceback(file=io)
return io.getvalue()
def getTraceback(self, elideFrameworkCode=0, detail='default'):
io = StringIO()
self.printTraceback(file=io, elideFrameworkCode=elideFrameworkCode, detail=detail)
return io.getvalue()
    def printTraceback(self, file=None, elideFrameworkCode=False, detail='default'):
        """
        Emulate Python's standard error reporting mechanism.
        @param file: If specified, a file-like object to which to write the
            traceback.
        @param elideFrameworkCode: A flag indicating whether to attempt to
            remove uninteresting frames from within Twisted itself from the
            output.
        @param detail: A string indicating how much information to include
            in the traceback. Must be one of C{'brief'}, C{'default'}, or
            C{'verbose'}.
        """
        if file is None:
            # Default to Twisted's error log writer.
            from twisted.python import log
            file = log.logerr
        w = file.write
        if detail == 'verbose' and not self.captureVars:
            # We don't have any locals or globals, so rather than show them as
            # empty make the output explicitly say that we don't have them at
            # all.
            formatDetail = 'verbose-vars-not-captured'
        else:
            formatDetail = detail
        # Preamble
        if detail == 'verbose':
            w( '*--- Failure #%d%s---\n' %
               (self.count,
                (self.pickled and ' (pickled) ') or ' '))
        elif detail == 'brief':
            if self.frames:
                hasFrames = 'Traceback'
            else:
                hasFrames = 'Traceback (failure with no frames)'
            w("%s: %s: %s\n" % (
                hasFrames,
                reflect.safe_str(self.type),
                reflect.safe_str(self.value)))
        else:
            w( 'Traceback (most recent call last):\n')
        # Frames, formatted in appropriate style
        if self.frames:
            if not elideFrameworkCode:
                # Include the caller's stack above the catch point too.
                format_frames(self.stack[-traceupLength:], w, formatDetail)
                w("%s\n" % (EXCEPTION_CAUGHT_HERE,))
            format_frames(self.frames, w, formatDetail)
        elif not detail == 'brief':
            # Yeah, it's not really a traceback, despite looking like one...
            w("Failure: ")
        # postamble, if any
        if not detail == 'brief':
            w("%s: %s\n" % (reflect.qual(self.type),
                            reflect.safe_str(self.value)))
        # chaining
        if isinstance(self.value, Failure):
            # TODO: indentation for chained failures?
            file.write(" (chained Failure)\n")
            self.value.printTraceback(file, elideFrameworkCode, detail)
        if detail == 'verbose':
            w('*--- End of Failure #%d ---\n' % self.count)
def printBriefTraceback(self, file=None, elideFrameworkCode=0):
"""Print a traceback as densely as possible.
"""
self.printTraceback(file, elideFrameworkCode, detail='brief')
def printDetailedTraceback(self, file=None, elideFrameworkCode=0):
"""Print a traceback with detailed locals and globals information.
"""
self.printTraceback(file, elideFrameworkCode, detail='verbose')
def _safeReprVars(varsDictItems):
    """
    Render a sequence of (name, value) pairs as (name, repr) pairs.

    Reprs are computed with L{twisted.python.reflect.safe_repr}, so a
    faulty C{__repr__} on any value cannot raise out of this function.

    @param varsDictItems: pairs as produced by e.g. C{locals().items()}.
    @returns: a list of (name, repr-string) pairs.
    """
    return [(name, reflect.safe_repr(value)) for (name, value) in varsDictItems]
# slyphon: make post-morteming exceptions tweakable
DO_POST_MORTEM = True

def _debuginit(self, exc_value=None, exc_type=None, exc_tb=None,
               captureVars=False,
               Failure__init__=Failure.__init__):
    """
    Initialize failure object, possibly spawning pdb.

    Installed in place of L{Failure.__init__} by L{startDebugMode}. When
    constructed from an in-flight exception (all three exc args None) and
    the exception is not itself a Failure, a pdb post-mortem session is
    started on its traceback before normal initialization proceeds.

    @param Failure__init__: the original constructor, captured as a
        default argument so it survives the monkey-patch.
    """
    if (exc_value, exc_type, exc_tb) == (None, None, None):
        exc = sys.exc_info()
        if not exc[0] == self.__class__ and DO_POST_MORTEM:
            try:
                strrepr = str(exc[1])
            # Guard only against broken __str__ implementations; a bare
            # except here also swallowed KeyboardInterrupt/SystemExit.
            except Exception:
                strrepr = "broken str"
            print("Jumping into debugger for post-mortem of exception '%s':" % (strrepr,))
            import pdb
            pdb.post_mortem(exc[2])
    Failure__init__(self, exc_value, exc_type, exc_tb, captureVars)
def startDebugMode():
    """Enable debug hooks for Failures.

    Monkey-patches L{Failure.__init__} with L{_debuginit}, which drops
    into a pdb post-mortem whenever a Failure is built from an active
    exception.
    """
    Failure.__init__ = _debuginit
| gpl-2.0 |
oe-alliance/oe-alliance-enigma2 | lib/python/Screens/VideoWizard.py | 7 | 6733 | from boxbranding import getBoxType, getMachineName
from Screens.Wizard import WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from Components.AVSwitch import iAVSwitch
from Components.Pixmap import Pixmap
from Components.config import config, ConfigBoolean, configfile
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_ACTIVE_SKIN
from Tools.HardwareInfo import HardwareInfo
config.misc.showtestcard = ConfigBoolean(default = False)

boxtype = getBoxType()

# Boxes whose "Scart" connector is actually an RCA (composite) jack.
_RCA_BOXES = (
	'gbquad', 'gbquadplus', 'et5x00', 'et6000', 'e3hd', 'odinm6',
	'ebox7358', 'eboxlumi', 'tmnano', 'ultra', 'me', 'minime',
	'optimussos1', 'optimussos2', 'gb800seplus', 'gb800ueplus',
	'ini-1000ru', 'ini-1000sv', 'ixussone', 'ixusszero', 'enfinity',
	'force1')

# Boxes whose HDMI output is physically a DVI connector.
_DVI_BOXES = ('dm8000', 'dm800')

has_rca = boxtype in _RCA_BOXES or getMachineName() == 'AX-Odin'
has_dvi = boxtype in _DVI_BOXES
class VideoWizardSummary(WizardSummary):
	"""LCD summary screen shown on the front display while the wizard runs."""
	def __init__(self, session, parent):
		WizardSummary.__init__(self, session, parent)
	def setLCDPicCallback(self):
		self.parent.setLCDTextCallback(self.setText)
	def setLCDPic(self, file):
		# Mirror the picture of the currently highlighted port on the LCD.
		self["pic"].instance.setPixmapFromFile(file)
class VideoWizard(WizardLanguage, Rc):
skin = """
<screen position="fill" title="Welcome..." flags="wfNoBorder" >
<panel name="WizardMarginsTemplate"/>
<panel name="WizardPictureLangTemplate"/>
<panel name="RemoteControlTemplate"/>
<panel position="left" size="10,*" />
<panel position="right" size="10,*" />
<panel position="fill">
<widget name="text" position="top" size="*,270" font="Regular;23" valign="center" />
<panel position="fill">
<panel position="left" size="150,*">
<widget name="portpic" position="top" zPosition="10" size="150,150" transparent="1" alphatest="on"/>
</panel>
<panel position="fill" layout="stack">
<widget source="list" render="Listbox" position="fill" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<!--<widget name="config" position="fill" zPosition="1" scrollbarMode="showOnDemand" />-->
</panel>
</panel>
</panel>
</screen>"""
def __init__(self, session):
# FIXME anyone knows how to use relative paths from the plugin's directory?
self.xmlfile = resolveFilename(SCOPE_SKIN, "videowizard.xml")
self.hw = iAVSwitch
WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
Rc.__init__(self)
self["wizard"] = Pixmap()
self["portpic"] = Pixmap()
self.port = None
self.mode = None
self.rate = None
def createSummary(self):
return VideoWizardSummary
def markDone(self):
self.hw.saveMode(self.port, self.mode, self.rate)
config.misc.videowizardenabled.value = 0
config.misc.videowizardenabled.save()
configfile.save()
def listInputChannels(self):
hw_type = HardwareInfo().get_device_name()
has_hdmi = HardwareInfo().has_hdmi()
list = []
for port in self.hw.getPortList():
if self.hw.isPortUsed(port):
descr = port
if descr == 'HDMI' and has_dvi:
descr = 'DVI'
elif descr == 'Scart' and has_rca:
descr = 'RCA'
if port != "DVI-PC":
list.append((descr,port))
list.sort(key = lambda x: x[0])
print "listInputChannels:", list
return list
def inputSelectionMade(self, index):
print "inputSelectionMade:", index
self.port = index
self.inputSelect(index)
def inputSelectionMoved(self):
hw_type = HardwareInfo().get_device_name()
has_hdmi = HardwareInfo().has_hdmi()
print "input selection moved:", self.selection
self.inputSelect(self.selection)
if self["portpic"].instance is not None:
picname = self.selection
if picname == 'HDMI' and has_dvi:
picname = "DVI"
elif picname == 'Scart' and has_rca:
picname = "RCA"
self["portpic"].instance.setPixmapFromFile(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/" + picname + ".png"))
def inputSelect(self, port):
print "inputSelect:", port
modeList = self.hw.getModeList(self.selection)
print "modeList:", modeList
self.port = port
if len(modeList) > 0:
ratesList = self.listRates(modeList[0][0])
self.hw.setMode(port = port, mode = modeList[0][0], rate = ratesList[0][0])
def listModes(self):
list = []
print "modes for port", self.port
for mode in self.hw.getModeList(self.port):
#if mode[0] != "PC":
list.append((mode[0], mode[0]))
print "modeslist:", list
return list
def modeSelectionMade(self, index):
print "modeSelectionMade:", index
self.mode = index
self.modeSelect(index)
def modeSelectionMoved(self):
print "mode selection moved:", self.selection
self.modeSelect(self.selection)
def modeSelect(self, mode):
ratesList = self.listRates(mode)
print "ratesList:", ratesList
if self.port == "HDMI" and mode in ("720p", "1080i", "1080p"):
self.rate = "multi"
self.hw.setMode(port = self.port, mode = mode, rate = "multi")
else:
self.hw.setMode(port = self.port, mode = mode, rate = ratesList[0][0])
def listRates(self, querymode = None):
if querymode is None:
querymode = self.mode
list = []
print "modes for port", self.port, "and mode", querymode
for mode in self.hw.getModeList(self.port):
print mode
if mode[0] == querymode:
for rate in mode[1]:
if self.port == "DVI-PC":
print "rate:", rate
if rate == "640x480":
list.insert(0, (rate, rate))
continue
list.append((rate, rate))
return list
def rateSelectionMade(self, index):
print "rateSelectionMade:", index
self.rate = index
self.rateSelect(index)
def rateSelectionMoved(self):
print "rate selection moved:", self.selection
self.rateSelect(self.selection)
def rateSelect(self, rate):
self.hw.setMode(port = self.port, mode = self.mode, rate = rate)
def showTestCard(self, selection = None):
if selection is None:
selection = self.selection
print "set config.misc.showtestcard to", {'yes': True, 'no': False}[selection]
if selection == "yes":
config.misc.showtestcard.value = True
else:
config.misc.showtestcard.value = False
def keyNumberGlobal(self, number):
if number in (1,2,3):
if number == 1:
self.hw.saveMode("HDMI", "720p", "multi")
elif number == 2:
self.hw.saveMode("HDMI", "1080i", "multi")
elif number == 3:
self.hw.saveMode("Scart", "Multi", "multi")
self.hw.setConfiguredMode()
self.close()
WizardLanguage.keyNumberGlobal(self, number)
| gpl-2.0 |
shifter/grr | lib/flows/general/transfer.py | 4 | 30109 | #!/usr/bin/env python
"""These flows are designed for high performance transfers."""
import hashlib
import time
import zlib
import logging
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.aff4_objects import filestore
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
class GetFileArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the GetFile flow, backed by the GetFileArgs protobuf."""
  protobuf = flows_pb2.GetFileArgs
class GetFile(flow.GRRFlow):
  """An efficient file transfer mechanism (deprecated, use MultiGetFile).

  This flow is deprecated in favor of MultiGetFile, but kept for now for use by
  MemoryCollector since the buffer hashing performed by MultiGetFile is
  pointless for memory acquisition.

  GetFile can also retrieve content from device files that report a size of 0 in
  stat when read_length is specified.

  Returns to parent flow:
    A PathSpec.
  """

  category = "/Filesystem/"

  args_type = GetFileArgs

  class SchemaCls(flow.GRRFlow.SchemaCls):
    PROGRESS_GRAPH = aff4.Attribute(
        "aff4:progress", rdf_flows.ProgressGraph,
        "Show a button to generate a progress graph for this flow.",
        default="")

  # We have a maximum of this many chunk reads outstanding (about 10mb)
  WINDOW_SIZE = 200
  CHUNK_SIZE = 512 * 1024

  @classmethod
  def GetDefaultArgs(cls, token=None):
    """Return default args with an OS pathspec preselected."""
    _ = token
    result = cls.args_type()
    result.pathspec.pathtype = "OS"
    return result

  @flow.StateHandler(next_state=["Stat"])
  def Start(self):
    """Get information about the file from the client."""
    # NOTE: Python 2 integer division - max_chunk_number is a chunk count.
    self.state.Register("max_chunk_number",
                        max(2, self.args.read_length / self.CHUNK_SIZE))

    self.state.Register("current_chunk_number", 0)
    self.state.Register("file_size", 0)
    self.state.Register("fd", None)
    self.state.Register("stat", None)

    self.CallClient("StatFile", rdf_client.ListDirRequest(
        pathspec=self.args.pathspec), next_state="Stat")

  @flow.StateHandler(next_state=["ReadBuffer", "CheckHashes"])
  def Stat(self, responses):
    """Fix up the pathspec of the file."""
    response = responses.First()
    if responses.success and response:
      self.state.stat = response
      # TODO(user): This is a workaround for broken clients sending back
      # empty pathspecs for pathtype MEMORY. Not needed for clients > 3.0.0.5.
      if self.state.stat.pathspec.path:
        self.args.pathspec = self.state.stat.pathspec
    else:
      if not self.args.ignore_stat_failure:
        raise IOError("Error: %s" % responses.status)

      # Just fill up a bogus stat entry.
      self.state.stat = rdf_client.StatEntry(pathspec=self.args.pathspec)

    # Adjust the size from st_size if read length is not specified.
    if self.args.read_length == 0:
      self.state.file_size = self.state.stat.st_size
    else:
      self.state.file_size = self.args.read_length

    self.state.max_chunk_number = (self.state.file_size /
                                   self.CHUNK_SIZE) + 1

    self.CreateBlobImage()
    self.FetchWindow(min(
        self.WINDOW_SIZE,
        self.state.max_chunk_number - self.state.current_chunk_number))

  def FetchWindow(self, number_of_chunks_to_readahead):
    """Read ahead a number of buffers to fill the window."""
    for _ in range(number_of_chunks_to_readahead):

      # Do not read past the end of file
      if self.state.current_chunk_number > self.state.max_chunk_number:
        return

      request = rdf_client.BufferReference(
          pathspec=self.args.pathspec,
          offset=self.state.current_chunk_number * self.CHUNK_SIZE,
          length=self.CHUNK_SIZE)
      self.CallClient("TransferBuffer", request, next_state="ReadBuffer")
      self.state.current_chunk_number += 1

  def CreateBlobImage(self):
    """Force creation of the new AFF4 object.

    Note that this is pinned on the client id - i.e. the client can not change
    aff4 objects outside its tree.
    """
    urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
        self.args.pathspec, self.client_id)

    self.state.stat.aff4path = urn

    # Create a new BlobImage for the data. Note that this object is pickled
    # with this flow between states.
    self.state.fd = aff4.FACTORY.Create(urn, "VFSBlobImage", token=self.token)

    # The chunksize must be set to be the same as the transfer chunk size.
    self.state.fd.SetChunksize(self.CHUNK_SIZE)
    self.state.fd.Set(self.state.fd.Schema.STAT(self.state.stat))

  @flow.StateHandler(next_state=["ReadBuffer", "CheckHashes"])
  def ReadBuffer(self, responses):
    """Read the buffer and write to the file."""
    # Did it work?
    if responses.success:
      response = responses.First()
      if not response:
        # BUG FIX: the previous message formatted "%s" % response.offset,
        # which always raised AttributeError because response is None/empty
        # here, masking the real error. Report without dereferencing it.
        raise IOError("Missing hash response for TransferBuffer request")

      if response.offset <= self.state.max_chunk_number * self.CHUNK_SIZE:
        # Write the hash to the index. Note that response.data is the hash of
        # the block (32 bytes) and response.length is the length of the block.
        self.state.fd.AddBlob(response.data, response.length)
        self.Log("Received blob hash %s", response.data.encode("hex"))
        self.Status("Received %s bytes", self.state.fd.size)

        # Add one more chunk to the window.
        self.FetchWindow(1)

  @flow.StateHandler()
  def End(self):
    """Finalize reading the file."""
    fd = self.state.fd
    if fd is None:
      self.Notify("ViewObject", self.client_id, "File failed to be transferred")
    else:
      self.Notify("ViewObject", fd.urn, "File transferred successfully")
      self.Log("Finished reading %s", fd.urn)
      self.Log("Flow Completed in %s seconds",
               time.time() - self.state.context.create_time / 1e6)

      stat_response = self.state.fd.Get(self.state.fd.Schema.STAT)
      fd.size = min(fd.size, self.state.file_size)
      fd.Set(fd.Schema.CONTENT_LAST, rdfvalue.RDFDatetime().Now())
      fd.Close(sync=True)

      # Notify any parent flows the file is ready to be used now.
      self.SendReply(stat_response)

    super(GetFile, self).End()
class HashTracker(object):
  """Tracks one hashed block and the blob-store URN derived from its hash."""

  def __init__(self, hash_response, is_known=False):
    # hash_response.data carries the binary hash of the block; the blob
    # URN is its hex encoding under the aff4:/blobs namespace.
    self.hash_response = hash_response
    self.is_known = is_known
    self.blob_urn = rdfvalue.RDFURN("aff4:/blobs").Add(
        hash_response.data.encode("hex"))
class FileTracker(object):
  """A Class to track a single file download."""

  def __init__(self, stat_entry, client_id, request_data, index=None):
    self.fd = None
    self.stat_entry = stat_entry
    self.hash_obj = None
    self.hash_list = []
    self.pathspec = stat_entry.pathspec
    # Canonical location of this file in the client's VFS namespace.
    self.urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
        self.pathspec, client_id)
    self.stat_entry.aff4path = self.urn
    self.request_data = request_data
    self.index = index

    # The total number of bytes available in this file. This may be different
    # from the size as reported by stat() for special files (e.g. proc files).
    self.bytes_read = 0

  def __str__(self):
    sha256 = self.hash_obj and self.hash_obj.sha256
    if sha256:
      return "<Tracker: %s (sha256: %s)>" % (self.urn, sha256)
    else:
      return "<Tracker: %s >" % self.urn

  def CreateVFSFile(self, filetype, token=None, chunksize=None):
    """Create a VFSFile with stat_entry metadata.

    We don't do this in __init__ since we need to first need to determine the
    appropriate filetype.

    Args:
      filetype: string filetype
      token: ACL token
      chunksize: BlobImage chunksize
    Side-Effect:
      sets self.fd
    Returns:
      filehandle open for write
    """
    # We create the file in the client namespace and populate with metadata.
    self.fd = aff4.FACTORY.Create(self.urn, filetype, mode="w",
                                  token=token)
    self.fd.SetChunksize(chunksize)
    self.fd.Set(self.fd.Schema.STAT(self.stat_entry))
    self.fd.Set(self.fd.Schema.PATHSPEC(self.pathspec))
    self.fd.Set(self.fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now()))
    return self.fd
class MultiGetFileMixin(object):
  """A flow mixin to efficiently retrieve a number of files.

  The class extending this can provide a self.state with the following
  attributes:
  - file_size: int. Maximum number of bytes to download.
  - use_external_stores: boolean. If true, look in any defined external file
    stores for files before downloading them, and offer any new files to
    external stores. This should be true unless the external checks are
    misbehaving.
  """

  # Size of each client-side read/hash request; must match the BlobImage
  # chunksize used when the VFS files are created.
  CHUNK_SIZE = 512 * 1024

  # Batch calls to the filestore to at least to group this many items. This
  # allows us to amortize file store round trips and increases throughput.
  MIN_CALL_TO_FILE_STORE = 200
  def Start(self):
    """Initialize our state."""
    super(MultiGetFileMixin, self).Start()

    # Progress counters; these live in the flow state between requests.
    self.state.Register("files_hashed", 0)
    self.state.Register("use_external_stores", False)
    self.state.Register("file_size", 0)
    self.state.Register("files_to_fetch", 0)
    self.state.Register("files_fetched", 0)
    self.state.Register("files_skipped", 0)

    # Counter to batch up hash checking in the filestore
    self.state.Register("files_hashed_since_check", 0)

    # A dict of file trackers which are waiting to be checked by the file
    # store. Keys are vfs urns and values are FileTrack instances. Values are
    # copied to pending_files for download if not present in FileStore.
    self.state.Register("pending_hashes", {})

    # A dict of file trackers currently being fetched. Keys are vfs urns and
    # values are FileTracker instances.
    self.state.Register("pending_files", {})

    # A mapping of index values to the original pathspecs.
    self.state.Register("indexed_pathspecs", {})

    # Set of blobs we still need to fetch.
    self.state.Register("blobs_we_need", set())

    fd = aff4.FACTORY.Open(filestore.FileStore.PATH, "FileStore", mode="r",
                           token=self.token)
    self.state.Register("filestore", fd)
def GenerateIndex(self, pathspec):
h = hashlib.sha256()
h.update(pathspec.SerializeToString())
return h.hexdigest()
  def StartFileFetch(self, pathspec, request_data=None):
    """The entry point for this flow mixin - Schedules new file transfer."""
    # Create an index so we can find this pathspec later.
    index = self.GenerateIndex(pathspec)
    self.state.indexed_pathspecs[index] = pathspec

    request_data = request_data or {}
    request_data["index"] = index
    # Stat and hash requests run concurrently; their responses are joined
    # by the shared "index" key in request_data.
    self.CallClient("StatFile", pathspec=pathspec,
                    next_state="StoreStat",
                    request_data=request_data)

    request = rdf_client.FingerprintRequest(pathspec=pathspec,
                                            max_filesize=self.state.file_size)
    request.AddRequest(
        fp_type=rdf_client.FingerprintTuple.Type.FPT_GENERIC,
        hashers=[rdf_client.FingerprintTuple.HashType.MD5,
                 rdf_client.FingerprintTuple.HashType.SHA1,
                 rdf_client.FingerprintTuple.HashType.SHA256])

    self.CallClient("HashFile", request, next_state="ReceiveFileHash",
                    request_data=request_data)
  def ReceiveFetchedFile(self, stat_entry, file_hash, request_data=None):
    """This method will be called for each new file successfully fetched.

    Hook for subclasses; the base implementation intentionally does nothing.

    Args:
      stat_entry: rdf_client.StatEntry object describing the file.
      file_hash: rdf_crypto.Hash object with file hashes.
      request_data: Arbitrary dictionary that was passed to the corresponding
        StartFileFetch call.
    """
  @flow.StateHandler()
  def StoreStat(self, responses):
    """Record the StatFile result and queue the file for hash checking."""
    if not responses.success:
      self.Log("Failed to stat file: %s", responses.status)
      return

    stat_entry = responses.First()
    index = responses.request_data["index"]
    # The tracker waits in pending_hashes until the corresponding
    # ReceiveFileHash response arrives.
    self.state.pending_hashes[index] = FileTracker(
        stat_entry, self.client_id, responses.request_data, index)
  @flow.StateHandler(next_state="CheckHash")
  def ReceiveFileHash(self, responses):
    """Add hash digest to tracker and check with filestore."""
    # Support old clients which may not have the new client action in place yet.
    # TODO(user): Deprecate once all clients have the HashFile action.
    if not responses.success and responses.request.request.name == "HashFile":
      logging.debug(
          "HashFile action not available, falling back to FingerprintFile.")
      self.CallClient("FingerprintFile", responses.request.request.payload,
                      next_state="ReceiveFileHash",
                      request_data=responses.request_data)
      return

    index = responses.request_data["index"]
    if not responses.success:
      self.Log("Failed to hash file: %s", responses.status)
      self.state.pending_hashes.pop(index, None)
      return

    self.state.files_hashed += 1
    response = responses.First()
    if response.HasField("hash"):
      hash_obj = response.hash
    else:
      # Deprecate this method of returning hashes.
      hash_obj = rdf_crypto.Hash()

      if len(response.results) < 1 or response.results[0]["name"] != "generic":
        self.Log("Failed to hash file: %s", self.state.indexed_pathspecs[index])
        self.state.pending_hashes.pop(index, None)
        return

      result = response.results[0]

      try:
        for hash_type in ["md5", "sha1", "sha256"]:
          value = result.GetItem(hash_type)
          setattr(hash_obj, hash_type, value)
      except AttributeError:
        self.Log("Failed to hash file: %s", self.state.indexed_pathspecs[index])
        self.state.pending_hashes.pop(index, None)
        return

    tracker = self.state.pending_hashes[index]
    tracker.hash_obj = hash_obj
    tracker.bytes_read = response.bytes_read

    self.state.files_hashed_since_check += 1
    # Amortize file store round trips by checking hashes in batches.
    if self.state.files_hashed_since_check >= self.MIN_CALL_TO_FILE_STORE:
      self._CheckHashesWithFileStore()
  def _CheckHashesWithFileStore(self):
    """Check all queued up hashes for existence in file store.

    Hashes which do not exist in the file store will be downloaded. This
    function flushes the entire queue (self.state.pending_hashes) in order to
    minimize the round trips to the file store.

    If a file was found in the file store it is copied from there into the
    client's VFS namespace. Otherwise, we request the client to hash every block
    in the file, and add it to the file tracking queue
    (self.state.pending_files).
    """
    if not self.state.pending_hashes:
      return

    # This map represents all the hashes in the pending urns.
    file_hashes = {}

    # Store urns by hash to allow us to remove duplicates.
    # keys are hashdigest objects, values are arrays of tracker objects.
    hash_to_urn = {}
    for index, tracker in self.state.pending_hashes.iteritems():

      # We might not have gotten this hash yet
      if tracker.hash_obj is None:
        continue

      digest = tracker.hash_obj.sha256
      file_hashes[index] = tracker.hash_obj
      hash_to_urn.setdefault(digest, []).append(tracker)

    # First we get all the files which are present in the file store.
    files_in_filestore = set()

    for file_store_urn, hash_obj in self.state.filestore.CheckHashes(
        file_hashes.values(), external=self.state.use_external_stores):
      self.HeartBeat()

      # Since checkhashes only returns one digest per unique hash we need to
      # find any other files pending download with the same hash.
      for tracker in hash_to_urn[hash_obj.sha256]:
        self.state.files_skipped += 1
        file_hashes.pop(tracker.index)
        files_in_filestore.add(file_store_urn)
        # Remove this tracker from the pending_hashes store since we no longer
        # need to process it.
        self.state.pending_hashes.pop(tracker.index)

    # Now that the check is done, reset our counter
    self.state.files_hashed_since_check = 0

    # Now copy all existing files to the client aff4 space.
    for existing_blob in aff4.FACTORY.MultiOpen(files_in_filestore,
                                                mode="rw", token=self.token):

      hashset = existing_blob.Get(existing_blob.Schema.HASH)
      if hashset is None:
        self.Log("Filestore File %s has no hash.", existing_blob.urn)
        continue

      for file_tracker in hash_to_urn.get(hashset.sha256, []):
        # Due to potential filestore corruption, the existing_blob files can
        # have 0 size, make sure our size matches the actual size in that case.
        if existing_blob.size == 0:
          existing_blob.size = (file_tracker.bytes_read or
                                file_tracker.stat_entry.st_size)

        # Create a file in the client name space with the same classtype and
        # populate its attributes.
        file_tracker.CreateVFSFile(existing_blob.__class__.__name__,
                                   token=self.token,
                                   chunksize=self.CHUNK_SIZE)

        file_tracker.fd.FromBlobImage(existing_blob)
        file_tracker.fd.Set(hashset)

        # Add this file to the index at the canonical location
        existing_blob.AddIndex(file_tracker.urn)

        # It is not critical that this file be written immediately.
        file_tracker.fd.Close(sync=False)

        # Let the caller know we have this file already.
        self.ReceiveFetchedFile(file_tracker.stat_entry, file_tracker.hash_obj,
                                request_data=file_tracker.request_data)

    # Now we iterate over all the files which are not in the store and arrange
    # for them to be copied.
    for index in file_hashes:

      # Move the tracker from the pending hashes store to the pending files
      # store - it will now be downloaded.
      file_tracker = self.state.pending_hashes.pop(index)
      self.state.pending_files[index] = file_tracker

      # Create the VFS file for this file tracker.
      file_tracker.CreateVFSFile("VFSBlobImage", token=self.token,
                                 chunksize=self.CHUNK_SIZE)

      # If we already know how big the file is we use that, otherwise fall back
      # to the size reported by stat.
      if file_tracker.bytes_read > 0:
        size_to_download = file_tracker.bytes_read
      else:
        size_to_download = file_tracker.stat_entry.st_size

      # We do not have the file here yet - we need to retrieve it.
      # NOTE: Python 2 integer division, then +1 to cover the final partial
      # chunk.
      expected_number_of_hashes = size_to_download / self.CHUNK_SIZE + 1

      # We just hash ALL the chunks in the file now. NOTE: This maximizes client
      # VFS cache hit rate and is far more efficient than launching multiple
      # GetFile flows.
      self.state.files_to_fetch += 1
      for i in range(expected_number_of_hashes):
        self.CallClient("HashBuffer", pathspec=file_tracker.pathspec,
                        offset=i * self.CHUNK_SIZE,
                        length=self.CHUNK_SIZE, next_state="CheckHash",
                        request_data=dict(index=index))

    if self.state.files_hashed % 100 == 0:
      self.Log("Hashed %d files, skipped %s already stored.",
               self.state.files_hashed, self.state.files_skipped)
@flow.StateHandler(next_state="WriteBuffer")
def CheckHash(self, responses):
"""Adds the block hash to the file tracker responsible for this vfs URN."""
index = responses.request_data["index"]
if index not in self.state.pending_files:
# This is a blobhash for a file we already failed to read and logged as
# below, check here to avoid logging dups.
return
file_tracker = self.state.pending_files[index]
hash_response = responses.First()
if not responses.success or not hash_response:
self.Log("Failed to read %s: %s" % (file_tracker.urn, responses.status))
del self.state.pending_files[index]
return
hash_tracker = HashTracker(hash_response)
file_tracker.hash_list.append(hash_tracker)
self.state.blobs_we_need.add(hash_tracker.blob_urn)
if len(self.state.blobs_we_need) > self.MIN_CALL_TO_FILE_STORE:
self.FetchFileContent()
  def FetchFileContent(self):
    """Fetch as much as the file's content as possible.

    This drains the pending_files store by checking which blobs we already have
    in the store and issuing calls to the client to receive outstanding blobs.
    """
    if not self.state.pending_files:
      return

    # Check if we have all the blobs in the blob AFF4 namespace..
    stats = aff4.FACTORY.Stat(self.state.blobs_we_need, token=self.token)
    blobs_we_have = set([x["urn"] for x in stats])
    # Reset the accumulator; a new batch starts from here.
    self.state.blobs_we_need = set()

    # Now iterate over all the blobs and add them directly to the blob image.
    # NOTE: Python 2 iteritems(); nothing is added/removed from pending_files
    # inside this loop, only the trackers' hash_list fields are mutated.
    for index, file_tracker in self.state.pending_files.iteritems():
      for hash_tracker in file_tracker.hash_list:
        # Make sure we read the correct pathspec on the client.
        hash_tracker.hash_response.pathspec = file_tracker.pathspec

        if hash_tracker.blob_urn in blobs_we_have:
          # If we have the data we may call our state directly.
          self.CallState([hash_tracker.hash_response],
                         next_state="WriteBuffer",
                         request_data=dict(index=index))

        else:
          # We dont have this blob - ask the client to transmit it.
          self.CallClient("TransferBuffer", hash_tracker.hash_response,
                          next_state="WriteBuffer",
                          request_data=dict(index=index))

      # Clear the file tracker's hash list - every queued hash has now been
      # forwarded either to WriteBuffer or to the client.
      file_tracker.hash_list = []
  @flow.StateHandler(next_state="IterateFind")
  def WriteBuffer(self, responses):
    """Write the hash received to the blob image."""
    # Note that hashes must arrive at this state in the correct order since they
    # are sent in the correct order (either via CallState or CallClient).
    index = responses.request_data["index"]
    if index not in self.state.pending_files:
      return

    # Failed to read the file - ignore it.
    if not responses.success:
      return self.RemoveInFlightFile(index)

    response = responses.First()
    file_tracker = self.state.pending_files.get(index)

    if file_tracker:
      file_tracker.fd.AddBlob(response.data, response.length)

      # A short read (less than one chunk) or reaching the stat-reported size
      # marks end of file.
      if (response.length < file_tracker.fd.chunksize or
          response.offset + response.length >= file_tracker.stat_entry.st_size):

        # File done, remove from the store and close it.
        self.RemoveInFlightFile(index)

        # Close and write the file to the data store.
        file_tracker.fd.Close(sync=True)

        # Publish the new file event to cause the file to be added to the
        # filestore. This is not time critical so do it when we have spare
        # capacity.
        self.Publish("FileStore.AddFileToStore", file_tracker.fd.urn,
                     priority=rdf_flows.GrrMessage.Priority.LOW_PRIORITY)

        # Progress logging every 100 completed files.
        self.state.files_fetched += 1

        if not self.state.files_fetched % 100:
          self.Log("Fetched %d of %d files.", self.state.files_fetched,
                   self.state.files_to_fetch)
def RemoveInFlightFile(self, index):
file_tracker = self.state.pending_files.pop(index)
if file_tracker:
self.ReceiveFetchedFile(file_tracker.stat_entry, file_tracker.hash_obj,
request_data=file_tracker.request_data)
  @flow.StateHandler(next_state=["CheckHash", "WriteBuffer"])
  def End(self):
    """Flush any outstanding hash checks and downloads before finishing."""
    # There are some files still in flight.
    if self.state.pending_hashes or self.state.pending_files:
      # _CheckHashesWithFileStore is defined earlier in this mixin.
      self._CheckHashesWithFileStore()
      self.FetchFileContent()

    # Only really terminate once every outstanding client request completed.
    if not self.runner.OutstandingRequests():
      super(MultiGetFileMixin, self).End()
class MultiGetFileArgs(rdf_structs.RDFProtoStruct):
  """Proto-backed argument container for the MultiGetFile flow."""
  protobuf = flows_pb2.MultiGetFileArgs
class MultiGetFile(MultiGetFileMixin, flow.GRRFlow):
  """A flow to effectively retrieve a number of files."""

  args_type = MultiGetFileArgs

  @flow.StateHandler(next_state=["ReceiveFileHash", "StoreStat"])
  def Start(self):
    """Start state of the flow."""
    super(MultiGetFile, self).Start()

    self.state.use_external_stores = self.args.use_external_stores
    self.state.file_size = self.args.file_size

    # Input pathspecs may contain duplicates; Stat/Hash each path only once.
    seen_urns = set()
    for pathspec in self.args.pathspecs:
      vfs_urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
          pathspec, self.client_id)
      if vfs_urn in seen_urns:
        continue
      seen_urns.add(vfs_urn)
      self.StartFileFetch(pathspec)

  def ReceiveFetchedFile(self, stat_entry, unused_hash_obj,
                         request_data=None):
    """Reply to our caller with each successfully fetched file's stat entry."""
    _ = request_data
    self.SendReply(stat_entry)
class FileStoreCreateFile(flow.EventListener):
  """Receive an event about a new file and add it to the file store.

  The file store is a central place where files are managed in the data
  store. Files are deduplicated and stored centrally.

  This event listener will be fired when a new file is downloaded through
  e.g. the GetFile flow. We then recalculate the file's hashes and store it in
  the data store under a canonical URN.
  """

  EVENTS = ["FileStore.AddFileToStore"]

  well_known_session_id = rdfvalue.SessionID(
      flow_name="FileStoreCreateFile")

  # Chunk size used when re-reading files for hashing (512 KiB).
  CHUNK_SIZE = 512 * 1024

  def UpdateIndex(self, target_urn, src_urn):
    """Update the index from the source to the target."""
    idx = aff4.FACTORY.Create(src_urn, "AFF4Index", mode="w", token=self.token)
    idx.Add(target_urn, "", target_urn)

  @flow.EventHandler()
  def ProcessMessage(self, message=None, event=None):
    """Process the new file and add to the file store."""
    _ = event
    vfs_urn = message.payload

    # Open the freshly written VFS file and hand it to the central FileStore,
    # which performs the deduplication.
    vfs_fd = aff4.FACTORY.Open(vfs_urn, mode="rw", token=self.token)
    filestore_fd = aff4.FACTORY.Create(filestore.FileStore.PATH, "FileStore",
                                       mode="w", token=self.token)
    filestore_fd.AddFile(vfs_fd)
    # Not time critical, so flush asynchronously.
    vfs_fd.Flush(sync=False)
class GetMBRArgs(rdf_structs.RDFProtoStruct):
  """Proto-backed argument container for the GetMBR flow."""
  protobuf = flows_pb2.GetMBRArgs
class GetMBR(flow.GRRFlow):
  """A flow to retrieve the MBR.

  Returns to parent flow:
    The retrieved MBR.
  """

  category = "/Filesystem/"
  args_type = GetMBRArgs
  behaviours = flow.GRRFlow.behaviours + "BASIC"

  @flow.StateHandler(next_state=["StoreMBR"])
  def Start(self):
    """Schedules the ReadBuffer client action."""
    # \\.\PhysicalDrive0 is the Windows raw device path of the first disk;
    # CASE_LITERAL prevents the client from normalizing the path casing.
    pathspec = rdf_paths.PathSpec(
        path="\\\\.\\PhysicalDrive0\\",
        pathtype=rdf_paths.PathSpec.PathType.OS,
        path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)

    # Read args.length bytes starting at offset 0 of the raw device.
    request = rdf_client.BufferReference(pathspec=pathspec, offset=0,
                                         length=self.args.length)

    self.CallClient("ReadBuffer", request, next_state="StoreMBR")

  @flow.StateHandler()
  def StoreMBR(self, responses):
    """This method stores the MBR.

    Raises:
      flow.FlowError: If the client failed to read the buffer.
    """
    if not responses.success:
      msg = "Could not retrieve MBR: %s" % responses.status
      self.Log(msg)
      raise flow.FlowError(msg)

    response = responses.First()

    # Persist the raw bytes under <client>/mbr in the AFF4 namespace.
    mbr = aff4.FACTORY.Create(self.client_id.Add("mbr"), "VFSMemoryFile",
                              mode="rw", token=self.token)
    mbr.write(response.data)
    mbr.Close()
    self.Log("Successfully stored the MBR (%d bytes)." % len(response.data))
    self.SendReply(rdfvalue.RDFBytes(response.data))
class TransferStore(flow.WellKnownFlow):
  """Store a buffer into a determined location."""

  well_known_session_id = rdfvalue.SessionID(flow_name="TransferStore")

  def ProcessMessage(self, message):
    """Write the blob into the AFF4 blob storage area.

    Args:
      message: A GrrMessage whose payload is a DataBlob (possibly compressed).

    Raises:
      RuntimeError: If the blob uses an unknown compression type.
    """
    # Check that the message is authenticated
    if (message.auth_state !=
        rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
      logging.error("TransferStore request from %s is not authenticated.",
                    message.source)
      return

    read_buffer = rdf_protodict.DataBlob(message.payload)

    # Only store non empty buffers
    if read_buffer.data:
      data = read_buffer.data

      # Normalize so that `data` is always uncompressed and `cdata` is always
      # the zlib-compressed form that gets stored.
      if (read_buffer.compression ==
          rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION):
        cdata = data
        data = zlib.decompress(cdata)
      elif (read_buffer.compression ==
            rdf_protodict.DataBlob.CompressionType.UNCOMPRESSED):
        cdata = zlib.compress(data)
      else:
        raise RuntimeError("Unsupported compression")

      # The hash is done on the uncompressed data
      digest = hashlib.sha256(data).digest()
      # NOTE: str.encode("hex") is Python 2 only (py3 would be digest.hex()).
      urn = rdfvalue.RDFURN("aff4:/blobs").Add(digest.encode("hex"))

      # Content-addressed store: the compressed bytes live under the digest.
      fd = aff4.FACTORY.Create(urn, "AFF4MemoryStream", mode="w",
                               token=self.token)
      fd.OverwriteAndClose(cdata, len(data), sync=True)

      logging.debug("Got blob %s (length %s)", digest.encode("hex"),
                    len(cdata))
class SendFile(flow.GRRFlow):
  """This flow sends a file to remote listener.

  To use this flow, choose a key and an IV in hex format (if run from the GUI,
  there will be a pregenerated pair key and iv for you to use) and run a
  listener on the server you want to use like this:

  nc -l <port> | openssl aes-128-cbc -d -K <key> -iv <iv> > <filename>

  Returns to parent flow:
    A rdf_client.StatEntry of the sent file.
  """

  category = "/Filesystem/"
  args_type = rdf_client.SendFileRequest

  @flow.StateHandler(next_state="Done")
  def Start(self):
    """Issue the SendFile request to the client."""
    self.CallClient("SendFile", self.args, next_state="Done")

  @flow.StateHandler()
  def Done(self, responses):
    """Log and escalate any client-side failure."""
    if responses.success:
      return
    error_message = responses.status.error_message
    self.Log(error_message)
    raise flow.FlowError(error_message)
| apache-2.0 |
kyleterry/gatherings | gatherings/conference/migrations/0004_auto__del_field_session_speaker.py | 1 | 6537 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Replaces the single `Session.speaker` foreign key with a
    `Session.speakers` many-to-many relation backed by the
    `conference_session_speakers` join table.
    """

    def forwards(self, orm):
        # Deleting field 'Session.speaker'
        db.delete_column('conference_session', 'speaker_id')

        # Adding M2M table for field speakers on 'Session'
        db.create_table('conference_session_speakers', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('session', models.ForeignKey(orm['conference.session'], null=False)),
            ('speaker', models.ForeignKey(orm['conference.speaker'], null=False))
        ))
        db.create_unique('conference_session_speakers', ['session_id', 'speaker_id'])

    def backwards(self, orm):
        # Adding field 'Session.speaker'
        # NOTE(review): default=1 assumes a Speaker with pk=1 exists when
        # rolling back - confirm before reversing this migration on real data.
        db.add_column('conference_session', 'speaker',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['conference.Speaker']),
                      keep_default=False)

        # Removing M2M table for field speakers on 'Session'
        db.delete_table('conference_session_speakers')

    # Frozen ORM snapshot used by South; auto-generated, do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'conference.event': {
            'Meta': {'object_name': 'Event'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'start': ('django.db.models.fields.DateTimeField', [], {})
        },
        'conference.room': {
            'Meta': {'object_name': 'Room'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'conference.session': {
            'Meta': {'object_name': 'Session'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Event']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Room']", 'null': 'True', 'blank': 'True'}),
            'speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['conference.Speaker']", 'symmetrical': 'False'}),
            'start': ('django.db.models.fields.DateTimeField', [], {})
        },
        'conference.speaker': {
            'Meta': {'object_name': 'Speaker'},
            'bio': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'conference.track': {
            'Meta': {'object_name': 'Track'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['conference']
| mit |
friedrich420/N910G-AEL-Kernel-Lollipop-Sources | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script, in arrival order
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget (max length) of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (filled in by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert an interval between two nanosecond timestamps into milliseconds.
def diff_msec(src, dst):
    """Return (dst - src), converted from nanoseconds to milliseconds."""
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Honor the dev= filter if one was given on the command line.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, queue timestamp, qdisc latency, device latency.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format templates for displaying rx packet processing as an ASCII tree.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    # All timestamps below are printed relative to the first irq entry.
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (dev= filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # First: the hard irqs and any netif_rx events they triggered.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # Then: the NET_RX softirq and the per-packet events inside it.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' is set when the skb was copied to userspace,
            # 'handle' when it was freed/consumed in the kernel.
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
                print PF_JOINT
def trace_begin():
    """Parse script options (tx, rx, dev=<name>, debug) from sys.argv."""
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            # argv[0] is the script name, not an option.
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # With no explicit selection, show both tx and rx charts.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Replay all buffered events in time order, then print the charts."""
    # order all events in time
    # NOTE: cmp-style sort is Python 2 only (matches the py2 perf engine).
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events - dispatch each to its handle_* function
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # buffer diagnostics, only with the 'debug' option
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer only NET_RX softirq entries; other vectors are irrelevant here.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer only NET_RX softirq exits.
    # NOTE(review): reuses the "irq__softirq_entry" symbol table for the vec
    # field - presumably entry/exit share the same symbols; confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer only NET_RX softirq raises (symbol table shared with entry).
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    # Buffer every hard-irq entry for later matching with softirq activity.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    # Buffer hard-irq exits; matched with entries in handle_irq_handler_exit.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    # Buffer NAPI poll events (device-level rx processing inside NET_RX).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    # Buffer skb delivery into the protocol stack.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    # Buffer netif_rx events (skb queued from hard-irq context).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    # Buffer tx packets entering the qdisc (dev_queue_xmit).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    # Buffer tx packets handed to the device driver (dev_hard_start_xmit).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    # Buffer skb frees (drops or normal frees, with the free location).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    # Buffer skb consumptions (successful transmissions).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    # Buffer skb copies to userspace (identifies the consuming process).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new hard-irq record onto the per-cpu irq stack."""
    (_name, _context, cpu, entry_time, _pid, _comm, irq, irq_name) = event_info
    irq_record = {'irq': irq, 'name': irq_name,
                  'cpu': cpu, 'irq_ent_t': entry_time}
    # setdefault creates the per-cpu stack on first use.
    irq_dic.setdefault(cpu, []).append(irq_record)
def handle_irq_handler_exit(event_info):
    """Pop the matching irq record; keep it only if it raised NET_RX."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        # Mismatched entry/exit pair - discard.
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Append a softirq-raise marker to the current irq record's event list."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    # The presence of 'event_list' is what later marks this irq as NET_RX
    # related (see handle_irq_handler_exit).
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """Open a fresh NET_RX softirq context for this cpu."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """Close a NET_RX softirq: merge its irqs and rx events into one hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Nothing to report without both an irq and a softirq context.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a NAPI poll inside the currently open NET_RX softirq."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the hard-irq record currently on top."""
    (name, context, cpu, time, pid, comm,
    skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record an skb entering the stack and remember it for later matching."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
    skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        # Same dict is shared with rx_skb_list so a later
        # skb_copy_datagram_iovec/kfree_skb can annotate it in place.
        rx_skb_list.insert(0, rec_data)
        # Enforce the buffer budget by dropping the oldest entry.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a tx packet entering the qdisc (dev_queue_xmit) stage."""
    global of_count_tx_queue_list
    (_name, _context, _cpu, queue_time, _pid, _comm,
     skbaddr, skblen, dev_name) = event_info
    record = {'dev': dev_name, 'skbaddr': skbaddr,
              'len': skblen, 'queue_t': queue_time}
    tx_queue_list.insert(0, record)
    # Enforce the buffer budget by dropping the oldest entry.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """Move a packet from the qdisc list to the in-flight xmit list."""
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
    skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # Enforce the buffer budget by dropping the oldest entry.
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """Match a freed skb against the tx queue, tx in-flight and rx lists."""
    (name, context, cpu, time, pid, comm,
    skbaddr, protocol, location) = event_info
    # Freed while still queued in the qdisc: drop it from the chart entirely.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after being handed to the driver: transmission is complete.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Otherwise it was a received packet: annotate the shared rx record
    # (still referenced from the receive hunk's event_list).
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                    'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """Move a consumed tx packet from the in-flight list to the freed list."""
    (_name, _context, _cpu, free_time, _pid, _comm, skbaddr) = event_info
    for pos, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] != skbaddr:
            continue
        skb['free_t'] = free_time
        tx_free_list.append(skb)
        del tx_xmit_list[pos]
        return
def handle_skb_copy_datagram_iovec(event_info):
    """Attach the consuming process's info to the matching rx packet record."""
    (_name, _context, _cpu, copy_time, pid, comm, skbaddr, _skblen) = event_info
    for pos, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] != skbaddr:
            continue
        # The record is shared with the receive hunk, so the update is
        # visible there even after removal from rx_skb_list.
        rec_data.update({'handle':"skb_copy_datagram_iovec",
                         'comm':comm, 'pid':pid, 'comm_t':copy_time})
        del rx_skb_list[pos]
        return
| gpl-2.0 |
heeraj123/oh-mainline | vendor/packages/Django/tests/regressiontests/test_client_regress/urls.py | 85 | 2347 | from __future__ import absolute_import
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from . import views
# URL routes exercised by Django's test-client regression suite.
# NOTE(review): `patterns(...)` was deprecated in Django 1.8 and removed in
# 1.10; this file targets an older Django.
urlpatterns = patterns('',
    # Simple views and views echoing request data.
    (r'^no_template_view/$', views.no_template_view),
    (r'^staff_only/$', views.staff_only_view),
    (r'^get_view/$', views.get_view),
    (r'^request_data/$', views.request_data),
    (r'^request_data_extended/$', views.request_data, {'template':'extended.html', 'data':'bacon'}),
    url(r'^arg_view/(?P<name>.+)/$', views.view_with_argument, name='arg_view'),
    (r'^login_protected_redirect_view/$', views.login_protected_redirect_view),
    # Redirect chains, including deliberately broken and circular ones.
    (r'^redirects/$', RedirectView.as_view(url='/test_client_regress/redirects/further/')),
    (r'^redirects/further/$', RedirectView.as_view(url='/test_client_regress/redirects/further/more/')),
    (r'^redirects/further/more/$', RedirectView.as_view(url='/test_client_regress/no_template_view/')),
    (r'^redirect_to_non_existent_view/$', RedirectView.as_view(url='/test_client_regress/non_existent_view/')),
    (r'^redirect_to_non_existent_view2/$', RedirectView.as_view(url='/test_client_regress/redirect_to_non_existent_view/')),
    (r'^redirect_to_self/$', RedirectView.as_view(url='/test_client_regress/redirect_to_self/')),
    (r'^circular_redirect_1/$', RedirectView.as_view(url='/test_client_regress/circular_redirect_2/')),
    (r'^circular_redirect_2/$', RedirectView.as_view(url='/test_client_regress/circular_redirect_3/')),
    (r'^circular_redirect_3/$', RedirectView.as_view(url='/test_client_regress/circular_redirect_1/')),
    (r'^redirect_other_host/$', RedirectView.as_view(url='https://otherserver:8443/test_client_regress/no_template_view/')),
    # Session, request-method, encoding and header inspection endpoints.
    (r'^set_session/$', views.set_session_view),
    (r'^check_session/$', views.check_session_view),
    (r'^request_methods/$', views.request_methods_view),
    (r'^check_unicode/$', views.return_unicode),
    (r'^check_binary/$', views.return_undecodable_binary),
    (r'^parse_unicode_json/$', views.return_json_file),
    (r'^check_headers/$', views.check_headers),
    (r'^check_headers_redirect/$', RedirectView.as_view(url='/test_client_regress/check_headers/')),
    # Request-body reading endpoints.
    (r'^body/$', views.body),
    (r'^read_all/$', views.read_all),
    (r'^read_buffer/$', views.read_buffer),
    (r'^request_context_view/$', views.request_context_view),
)
| agpl-3.0 |
bsipocz/scikit-image | doc/examples/plot_view_as_blocks.py | 17 | 2002 | """
============================
Block views on images/arrays
============================
This example illustrates the use of `view_as_blocks` from
`skimage.util.shape`. Block views can be incredibly useful when one
wants to perform local operations on non-overlapping image patches.
We use `astronaut` from `skimage.data` and virtually 'slice' it into square
blocks. Then, on each block, we either pool the mean, the max or the
median value of that block. The results are displayed altogether, along
with a spline interpolation of order 3 rescaling of the original `astronaut`
image.
"""
import numpy as np
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from skimage import data
from skimage import color
from skimage.util.shape import view_as_blocks
# -- get `astronaut` from skimage.data and convert it to grayscale
l = color.rgb2gray(data.astronaut())
# -- size of the (non-overlapping) blocks the image is sliced into
block_shape = (4, 4)
# -- see `astronaut` as a matrix of blocks (of shape `block_shape`);
#    `view` has shape (rows, cols, 4, 4) and shares memory with `l`
view = view_as_blocks(l, block_shape)
# -- collapse the last two dimensions in one so each block becomes a
#    flat 16-element vector; pooling then reduces along that axis
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)
# -- resampling `astronaut` by taking either the `mean`,
#    the `max` or the `median` value of each block
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)
# -- display the three pooled images next to a spline-interpolated
#    rescaling of the original for comparison
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.set_title("Original rescaled with\n spline interpolation (order=3)")
# zoom factor 2 with cubic spline interpolation (order=3)
l_resized = ndi.zoom(l, 2, order=3)
ax0.imshow(l_resized, cmap=cm.Greys_r)
ax1.set_title("Block view with\n local mean pooling")
ax1.imshow(mean_view, cmap=cm.Greys_r)
ax2.set_title("Block view with\n local max pooling")
ax2.imshow(max_view, cmap=cm.Greys_r)
ax3.set_title("Block view with\n local median pooling")
ax3.imshow(median_view, cmap=cm.Greys_r)
fig.subplots_adjust(hspace=0.4, wspace=0.4)
plt.show()
| bsd-3-clause |
vrenaville/stock-logistics-workflow | stock_picking_reorder_lines/__openerp__.py | 5 | 1436 | # -*- coding: utf-8 -*-
#
#
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
#
# Author: Damien Crier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
    # Odoo/OpenERP module manifest: metadata read by the module loader.
    'name': 'Stock picking lines with sequence number',
    'version': '0.1',
    'category': 'Warehouse Management',
    'summary': '''
Provide a new field on stock moves, allowing to manage the orders of moves
in a picking.
''',
    'author': "Camptocamp,Odoo Community Association (OCA)",
    'website': 'http://www.camptocamp.com',
    # Modules that must be installed before this one.
    'depends': ['stock', 'sale', 'sale_stock'],
    # XML data files loaded on install/update.
    'data': ['stock_view.xml'],
    'demo': [],
    # YAML test scenarios run by the OpenERP test framework.
    'test': ['test/invoice_from_picking.yml'],
    'installable': True,
    'auto_install': False,
    'application': False,
    'license': "AGPL-3",
}
| agpl-3.0 |
sahmed95/sympy | examples/galgebra/manifold_check.py | 66 | 3450 | #!/usr/bin/env python
from __future__ import print_function
from sympy import symbols, log, simplify, diff, cos, sin
from sympy.galgebra.ga import MV, ReciprocalFrame
from sympy.galgebra.debug import oprint
from sympy.galgebra.printing import GA_Printer, enhance_print, Get_Program, Print_Function
from sympy.galgebra.manifold import Manifold
def Test_Reciprocal_Frame():
    """Check that ReciprocalFrame() produces frames satisfying
    e_i . e^j = delta_i^j for two different (eu, ev) bases in E3.

    Prints the frame, its reciprocal, and all four dot products; the
    diagonal products should be 1 and the off-diagonal ones 0.
    """
    Print_Function()
    coords = symbols('x y z')
    (ex, ey, ez, grad) = MV.setup('ex ey ez', metric='[1,1,1]', coords=coords)

    def _check_frame(eu, ev):
        # Compute and print the reciprocal frame together with the
        # pairwise dot products that verify the defining property.
        (eu_r, ev_r) = ReciprocalFrame([eu, ev])
        oprint('Frame', (eu, ev), 'Reciprocal Frame', (eu_r, ev_r))
        print('eu.eu_r =', eu | eu_r)
        print('eu.ev_r =', eu | ev_r)
        print('ev.eu_r =', ev | eu_r)
        print('ev.ev_r =', ev | ev_r)

    # Two test cases: a planar frame and a non-planar one.
    _check_frame(ex + ey, ex - ey)
    _check_frame(ex + ey + ez, ex - ey)
    return
def Plot_Mobius_Strip_Manifold():
    """Build a Mobius strip as a parametric Manifold in E3 and plot it.

    Disabled by default in main() since it opens a plotting window.
    """
    Print_Function()
    coords = symbols('x y z')
    (ex, ey, ez, grad) = MV.setup('ex ey ez', metric='[1,1,1]', coords=coords)
    mfvar = (u, v) = symbols('u v')
    # Standard Mobius-strip parametrization: u is the angle around the
    # strip, v the position across its width.
    X = (cos(u) + v*cos(u/2)*cos(u))*ex + (sin(u) + v*cos(u/2)*sin(u))*ey + v*sin(u/2)*ez
    MF = Manifold(X, mfvar, True, I=MV.I)
    # u in [0, 2*pi) with 48 samples, v in [-0.3, 0.3] with 12 samples;
    # draw the wireframe (surf=False) with short tangent vectors.
    MF.Plot2DSurface([0.0, 6.28, 48], [-0.3, 0.3, 12], surf=False, skip=[4, 4], tan=0.15)
    return
def Distorted_manifold_with_scalar_function():
    """Exercise Manifold.Grad and Manifold.Proj on the distorted surface
    X(u, v) = 2u*ex + 2v*ey + (u**3 + v**3/2)*ez embedded in E3,
    for scalar, vector and bivector fields defined on the surface.
    """
    Print_Function()
    coords = symbols('x y z')
    (ex, ey, ez, grad) = MV.setup('ex ey ez', metric='[1,1,1]', coords=coords)
    mfvar = (u, v) = symbols('u v')
    X = 2*u*ex + 2*v*ey + (u**3 + v**3/2)*ez
    MF = Manifold(X, mfvar, I=MV.I)
    (eu, ev) = MF.Basis()
    # Scalar field on the surface and its manifold gradient.
    g = (v + 1)*log(u)
    dg = MF.Grad(g)
    print('g =', g)
    print('dg =', dg)
    print('dg(1,0) =', dg.subs({u: 1, v: 0}))
    # Tangent vector field: gradient and tangential projection.
    G = u*eu + v*ev
    dG = MF.Grad(G)
    print('G =', G)
    print('P(G) =', MF.Proj(G))
    print('zcoef =', simplify(2*(u**2 + v**2)*(-4*u**2 - 4*v**2 - 1)))
    print('dG =', dG)
    print('P(dG) =', MF.Proj(dG))
    # Bivector field built from the surface basis.
    PS = u*v*eu ^ ev
    print('PS =', PS)
    print('dPS =', MF.Grad(PS))
    print('P(dPS) =', MF.Proj(MF.Grad(PS)))
    return
def Simple_manifold_with_scalar_function_derivative():
    """Compare two ways of computing the surface gradient of a scalar
    field on the paraboloid X = u*e1 + v*e2 + (u**2 + v**2)*e3; both
    methods should print the same value at (u, v) = (1, 0).
    """
    Print_Function()
    coords = (x, y, z) = symbols('x y z')
    basis = (e1, e2, e3, grad) = MV.setup('e_1 e_2 e_3', metric='[1,1,1]', coords=coords)
    # Define surface
    mfvar = (u, v) = symbols('u v')
    X = u*e1 + v*e2 + (u**2 + v**2)*e3
    print(X)
    MF = Manifold(X, mfvar)
    # Define field on the surface.
    g = (v + 1)*log(u)
    # Method 1: Using old Manifold routines -- assemble the gradient by
    # hand from the reciprocal basis and partial derivatives.
    VectorDerivative = (MF.rbasis[0]/MF.E_sq)*diff(g, u) + (MF.rbasis[1]/MF.E_sq)*diff(g, v)
    print('Vector derivative =', VectorDerivative.subs({u: 1, v: 0}))
    # Method 2: Using new Manifold routines.
    dg = MF.Grad(g)
    print('Vector derivative =', dg.subs({u: 1, v: 0}))
    return
def dummy():
    """Placeholder hook; intentionally does nothing and returns None."""
    pass
def main():
    """Run the (non-plotting) manifold examples under GA pretty-printing."""
    Get_Program(True)
    with GA_Printer():
        enhance_print()
        Test_Reciprocal_Frame()
        Distorted_manifold_with_scalar_function()
        Simple_manifold_with_scalar_function_derivative()
        # Plot_Mobius_Strip_Manifold()
    return
if __name__ == "__main__":
    main()
| bsd-3-clause |
das-labor/radare2 | libr/bin/format/xnu/scripts/machtraps.py | 16 | 2515 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Example usage to regenerate traps.json:
- open the dyld cache in r2 like this:
R_DYLDCACHE_FILTER=libsystem_kernel r2 -e bin.usextr=false ~/Library/Developer/Xcode/iOS\ DeviceSupport/12.1.2\ \(16C101\)\ arm64e/Symbols/System/Library/Caches/com.apple.dyld/dyld_shared_cache_arm64e
- run the script with this command:
#!pipe python2 /path/to/this/script.py > traps.json
"""
import r2pipe, json, re
r = r2pipe.open('#!pipe')
def walk_back_until (addr, pattern, min_addr):
    """Scan backwards from addr in 4-byte instruction steps until an
    opcode matches pattern, a 'ret', or a plain 'b ' branch; return the
    address just after that instruction, or min_addr if none is found
    before the lower bound.
    """
    stop_patterns = (pattern, r'^ret', r'^b ')
    for cursor in range(addr, min_addr - 1, -4):
        opcode = r.cmdj('aoj@' + str(cursor))[0]['opcode']
        # Any of the stop conditions yields the same result, so a single
        # combined test is equivalent to the three original checks.
        if any(re.search(rx, opcode) is not None for rx in stop_patterns):
            return cursor + 4
    return min_addr
def carve_trap_num (addr, flag):
    """Emulate the trap wrapper containing addr to recover the mach trap
    number it loads.  flag is the r2 symbol flag of the wrapper; the
    wrapper's start bounds the backward scan for the emulation entry.
    """
    # Remember the current seek so it can be restored afterwards.
    saved_seek = r.cmd('?v $$')
    # Enable cached/emulated writes and (re)initialize the ESIL VM.
    r.cmd('e io.cache=true')
    r.cmd('e emu.write=true')
    r.cmd('aei')
    r.cmd('aeim')
    min_addr = int(r.cmd('?v ' + flag), 0)
    # Find where straight-line execution leading into addr begins.
    emu_start = walk_back_until(addr - 4, r'^b|^ret|^invalid', min_addr)
    r.cmd('s ' + str(emu_start))
    # Emulate up to addr and grab the first function argument (the
    # message pointer passed to mach_msg).
    obj = r.cmd('aefa 0x%08x~[0]:0' % addr)
    r.cmd('s ' + saved_seek)
    # Read the 32-bit trap number out of the message structure; offset
    # 0x14 first, falling back to 0x18 when that slot is zero.
    # NOTE(review): offsets presumably match the mach_msg_header layout
    # used by these wrappers -- confirm against the kernel headers.
    val = r.cmdj('pv4j@%s+0x14' % obj)['value']
    if val == 0:
        val = r.cmdj('pv4j@%s+0x18' % obj)['value']
    return val
def beautify_name (name):
    """Strip one leading underscore from a symbol name, if present."""
    if name.startswith('_'):
        return name[1:]
    return name
def carve_traps ():
    """Find every trap wrapper that calls mach_msg and carve its trap
    number; return a list of {'name', 'num'} dicts sorted by number.
    """
    msgs = r.cmdj('axtj sym._mach_msg')
    if len(msgs) == 0:
        # No xrefs known yet: run xref analysis over the section that
        # contains mach_msg, then retry.
        r.cmd('s sym._mach_msg')
        r.cmd('aae $SS @ $S')
        r.cmd('s-')
        msgs = r.cmdj('axtj sym._mach_msg')
    if len(msgs) == 0:
        print 'Cannot find refs to mach_msg!'
        # NOTE(review): returns None here; __main__ would then
        # json.dumps(None) -- confirm that is acceptable.
        return
    traps = {}
    for ref in msgs:
        # Keep only named CALL references; skip mach_msg's own variants.
        if ref['type'] != 'CALL' or 'realname' not in ref:
            continue
        name = ref['realname']
        if re.search(r'^_mach_msg', name) != None:
            continue
        addr = ref['from']
        traps[addr] = {
            'name': name
        }
    result = []
    for addr in traps:
        trap = traps[addr]
        flag = 'sym.%s' % trap['name']
        trap['name'] = beautify_name(trap['name'])
        trap['num'] = carve_trap_num(addr, flag)
        if trap['num'] != None:
            result.append(trap)
    result.sort(key=lambda x: x['num'])
    return result
if __name__ == '__main__':
    # Entry point: carve all mach traps and emit them as JSON
    # (regenerates traps.json -- see the module docstring for usage).
    traps = carve_traps()
    print json.dumps(traps, indent=4)
| lgpl-3.0 |
connectIOT/MachineHypermediaToolkit | MachineHypermediaToolkit/server/http/HypermediaHttpServer.py | 3 | 9003 | """
HypermediaHttpServer extends the BaseHttpRequestHandler class in BaseHttpServer to create a dictionary driven interface
to http requesters and response handlers that allows the processing of requests in the order of path, content format, and method.
Requests and their associated responses are exposed in a dictionary interface for processing by proxies and resources
in a linked structure by passing references to the request and the associated response to resources at selected link
targets.
The server invokes a request handler callback when requests are received, and passes a reference to a structure
containing request and response elements used in processing the hypermedia.
REQUEST
URI
content-format
method
payload
RESPONSE
status (code and reason)
content-format
payload
Hypermedia handlers will process the URI and query parameters first in order to select a set of resources for processing,
then apply the content format and method along with any supplementary options to the resources.
The terms used in the request and response elements are semantically aligned with the vocabularies used in links and
forms to describe hypermedia state exchanges, and are abstracted across different protocols like http and CoAP.
Common abstractions are needed to support multiple protocols. Initially there are HTTP and CoAP bindings to a
common set of terms.
URI and Query parameters are the same in http and CoAP
methods use the following mappings
GET
PUT
POST
PATCH
DELETE
Mappings to CoAP content-format identifiers:
22001 application/collection+senml+json
22002 application/senml+json
22003 application/link-format+json
22004 application/forms+link-format+json
responseTypes are a common subset of http and CoAP response codes
Success 200, 202, 204 2.02, 2.03, 2.04, 2.05
Created 201 2.01
BadRequest 400 4.00, 4.02
Unauthorized 401 4.01
Forbidden 403 4.03
NotFound 404 4.04
MethodNotAllowed 405 4.05
NotAcceptable 406 4.06
Conflict 409 4.09
PrecondFailed 412 4.12
UnsupportedType 415 4.15
ServerError 500 5.00
JSON keys to the request-response interface are described in the example below.
This is the common CRUD interface between HTTP and CoAP, and can be used as a
generic REST proxy.
{
"uriPath": ["/","a", "b"],
"uriQuery": {"rt": "test", "obs": "", "if": "core.b"}
"contentFormat": "application/link-format+json",
"options": {}
"method": "GET",
"payload": null,
"response": {
"status": "Success",
"code": "204",
"reason": "No Content",
"contentFormat": "application/link-format+json",
"payload": "[{"href":"","rel":"self","rt":"index"}]"
}
}
Client fills request form and sends to server using selected protocol
Server processes request and fills in response and transmits back to client
Client processes the response and updates application state
"""
__version__ = "0.1"
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import socket, sys
import MachineHypermediaToolkit.terms as v
class HypermediaHTTPServer(HTTPServer):
    """HTTPServer subclass used with HypermediaHTTPRequestHandler.

    Currently adds no behavior; exists as a named extension point.
    """
    pass
class HypermediaHTTPRequestHandler(BaseHTTPRequestHandler):
    """Request handler that maps each HTTP request into a dictionary
    (keys from MachineHypermediaToolkit.terms), hands it to an
    application callback, and writes the callback's response map back
    out as the HTTP response.  Python 2 only (BaseHTTPServer).
    """
    server_version = "HttpHyperServer/" + __version__
    def __init__(self, appRequestHandler, *args, **kwargs):
        # appRequestHandler: callable taking the request/response dict;
        # it must fill in currentRequest[v.response] before returning.
        self.handleRequest = appRequestHandler
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
    def handle_one_request(self):
        """
        Handle a single HTTP request.
        Invokes self.handleRequest with the currentRequest object.
        """
        try:
            # Read one extra byte so an over-long request line can be
            # detected (65536 is the limit; 65537 proves overflow).
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                # 414: Request-URI Too Long
                self.send_error(414)
                return
            if not self.raw_requestline:
                # Empty read: peer closed the connection.
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            self.mapRequest() #map and call handler
            self.wfile.flush() #actually send the response if not already done.
        except socket.timeout, e:
            #a read or a write timed out.  Discard this connection
            self.log_error("Request timed out: %r", e)
            self.close_connection = 1
            return
    def mapRequest(self):
        """fill out currentRequest map and call handleRequest()"""
        self.currentRequest = {}
        # Path is split into segments; "/" is always the first element.
        self.currentRequest[v.uriPath] = ["/"]
        for self.pathElement in self.path.split("?")[0].split("/"):
            if len(self.pathElement) >0:
                self.currentRequest[v.uriPath].append(self.pathElement)
        # Query string becomes a dict; bare keys map to True.
        self.currentRequest[v.uriQuery] = {}
        if self.path.find("?") >0:
            for self.queryElement in self.path.split("?")[1].split("&"):
                if self.queryElement.find("=") >0:
                    (self.k, self.v) = self.queryElement.split("=")
                    self.currentRequest[v.uriQuery][self.k] = self.v
                else:
                    self.currentRequest[v.uriQuery][self.queryElement] = True
        """self.currentRequest[v.options] = {}
        for option in self.headers :
            self.currentRequest[v.options][option] = self.headers[option]
        """
        # Content format: Accept header for GET, else Content-Type.
        if self.command == v.get and ( "Accept" in self.headers) :
            self.currentRequest[v.contentFormat] = self.headers['Accept']
        elif "Content-Type" in self.headers:
            self.currentRequest[v.contentFormat] = self.headers['Content-Type']
        self.currentRequest[v.method] = self.command
        """check payload length and copy if there is a nonzero payload"""
        self.contentLength = 0
        if "Content-Length" in self.headers:
            self.contentLength = int(self.headers['Content-Length'])
            self.currentRequest[v.contentLength] = self.contentLength
            if (self.contentLength > 0):
                self.payload = self.rfile.read(self.contentLength)
                self.currentRequest[v.payload] = self.payload
        """set up response map"""
        # Default to ServerError so an application that forgets to set a
        # status produces a 500 rather than a missing key.
        self.currentRequest[v.response] = {v.status:v.ServerError}
        """call hypermedia application handler"""
        self.handleRequest(self.currentRequest)
        """process response and headers"""
        self.send_response(v.toCode[self.currentRequest[v.response][v.status]])
        self.contentLength = 0
        if v.payload in self.currentRequest[v.response]:
            self.contentLength = len(self.currentRequest[v.response][v.payload])
            self.payload = self.currentRequest[v.response][v.payload]
        # Echo the request's content format when the handler set none.
        if v.contentFormat not in self.currentRequest[v.response]:
            self.currentRequest[v.response][v.contentFormat] = self.currentRequest[v.contentFormat]
        self.send_header("Content-Length", str(self.contentLength))
        self.send_header("Content-Type", \
                         self.currentRequest[v.response][v.contentFormat])
        if v.location in self.currentRequest[v.response]:
            self.send_header("Location", \
                             self.currentRequest[v.response][v.location])
        self.end_headers()
        """if there is a payload, send it"""
        if self.contentLength > 0:
            self.wfile.write(self.payload)
        return
class TestAppHandler :
    """Minimal application handler used by test(): answers every request
    with a fixed Success response and dumps the request to stdout.
    """
    def processRequest(self, currentRequest):
        self.currentRequest = currentRequest
        self.currentRequest[v.response][v.status] = v.Success
        self.currentRequest[v.response][v.payload] = "1234"
        self.currentRequest[v.response][v.contentFormat] = "application/json"
        self.currentRequest[v.response][v.location] = "/test"
        print "\r\nRequest:\r\n"
        print self.currentRequest
        print "\r\n"
        return
def test(HandlerClass = HypermediaHTTPRequestHandler,
         ServerClass = HypermediaHTTPServer, protocol="HTTP/1.0"):
    """Test the HypermediaHTTP request handler class.
    This runs an HTTP server on port 8000 (or the first command line
    argument) and serves forever using TestAppHandler as the application.
    """
    from functools import partial
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)
    HandlerClass.protocol_version = protocol
    # partial binds the application callback as the handler's first
    # constructor argument (see HypermediaHTTPRequestHandler.__init__).
    httpd = ServerClass(server_address, \
            partial(HandlerClass, TestAppHandler().processRequest))
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
if __name__ == '__main__':
    test()
| apache-2.0 |
aarticianpc/greenpointtrees | src/oscar/apps/checkout/app.py | 2 | 2717 | from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from oscar.core.application import Application
from oscar.core.loading import get_class
class CheckoutApplication(Application):
    """Oscar application that wires up the checkout URL namespace.

    Views are resolved lazily through get_class so projects can
    override them by forking the checkout app.
    """
    name = 'checkout'
    index_view = get_class('checkout.views', 'IndexView')
    shipping_address_view = get_class('checkout.views', 'ShippingAddressView')
    user_address_update_view = get_class('checkout.views',
                                         'UserAddressUpdateView')
    user_address_delete_view = get_class('checkout.views',
                                         'UserAddressDeleteView')
    shipping_method_view = get_class('checkout.views', 'ShippingMethodView')
    shipping_date_view = get_class('checkout.views', 'ShippingDateView')
    payment_method_view = get_class('checkout.views', 'PaymentMethodView')
    payment_details_view = get_class('checkout.views', 'PaymentDetailsView')
    thankyou_view = get_class('checkout.views', 'ThankYouView')
    def get_urls(self):
        """Return the checkout urlpatterns, post-processed by Oscar."""
        urls = [
            url(r'^$', self.index_view.as_view(), name='index'),
            url(r'shipping-date/$',
                self.shipping_date_view.as_view(), name='shipping-date'),
            # Shipping/user address views
            url(r'shipping-address/$',
                self.shipping_address_view.as_view(), name='shipping-address'),
            url(r'user-address/edit/(?P<pk>\d+)/$',
                self.user_address_update_view.as_view(),
                name='user-address-update'),
            url(r'user-address/delete/(?P<pk>\d+)/$',
                self.user_address_delete_view.as_view(),
                name='user-address-delete'),
            # Shipping method views
            url(r'shipping-method/$',
                self.shipping_method_view.as_view(), name='shipping-method'),
            # Payment views
            url(r'payment-method/$',
                self.payment_method_view.as_view(), name='payment-method'),
            url(r'payment-details/$',
                self.payment_details_view.as_view(), name='payment-details'),
            # Preview and thankyou
            url(r'preview/$',
                self.payment_details_view.as_view(preview=True),
                name='preview'),
            url(r'thank-you/$', self.thankyou_view.as_view(),
                name='thank-you'),
        ]
        return self.post_process_urls(urls)
    def get_url_decorator(self, pattern):
        """Require login for everything unless anonymous checkout is
        enabled; address-book views always require login."""
        if not settings.OSCAR_ALLOW_ANON_CHECKOUT:
            return login_required
        if pattern.name.startswith('user-address'):
            return login_required
        return None
application = CheckoutApplication()
| mit |
damiencalloway/djtut | mysite/env/lib/python2.7/site-packages/django/contrib/localflavor/za/forms.py | 109 | 1977 | """
South Africa-specific Form helpers
"""
from __future__ import unicode_literals
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField
from django.utils.checksums import luhn
from django.utils.translation import gettext as _
import re
from datetime import date
id_re = re.compile(r'^(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<end>\d{3})')
class ZAIDField(CharField):
    """A form field for South African ID numbers -- the checksum is validated
    using the Luhn checksum, and uses a simplistic (read: not entirely
    accurate) check for the birthdate encoded in the first six digits.
    """
    default_error_messages = {
        'invalid': _('Enter a valid South African ID number'),
    }
    def clean(self, value):
        """Validate and return the normalized (digits-only) ID number."""
        super(ZAIDField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # strip spaces and dashes
        value = value.strip().replace(' ', '').replace('-', '')
        # id_re (module level) splits the 13 digits into yy/mm/dd/mid/end.
        match = re.match(id_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        g = match.groupdict()
        try:
            # The year 2000 is conveniently a leapyear.
            # This algorithm will break in xx00 years which aren't leap years
            # There is no way to guess the century of a ZA ID number
            d = date(int(g['yy']) + 2000, int(g['mm']), int(g['dd']))
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
        # Final digit is a Luhn check digit over the whole number.
        if not luhn(value):
            raise ValidationError(self.error_messages['invalid'])
        return value
class ZAPostCodeField(RegexField):
    """A form field that validates South African postal codes
    (exactly four digits)."""
    default_error_messages = {
        'invalid': _('Enter a valid South African postal code'),
    }
    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        super(ZAPostCodeField, self).__init__(r'^\d{4}$',
            max_length, min_length, *args, **kwargs)
| mit |
OpenPymeMx/OCB | addons/project_issue_sheet/project_issue_sheet.py | 52 | 2935 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv,orm
from openerp.tools.translate import _
class project_issue(osv.osv):
    """Extend project.issue with timesheet lines and an analytic account."""
    _inherit = 'project.issue'
    _description = 'project issue'
    _columns = {
        'timesheet_ids': fields.one2many('hr.analytic.timesheet', 'issue_id', 'Timesheets'),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
    }
    def on_change_project(self, cr, uid, ids, project_id, context=None):
        """Propagate the selected project's analytic account to the issue."""
        if not project_id:
            return {}
        result = super(project_issue, self).on_change_project(cr, uid, ids, project_id, context=context)
        project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
        if 'value' not in result:
            result['value'] = {}
        account = project.analytic_account_id
        if account:
            result['value']['analytic_account_id'] = account.id
        return result
    def on_change_account_id(self, cr, uid, ids, account_id, context=None):
        """Warn the user when the chosen analytic account is still pending."""
        if not account_id:
            return {}
        account = self.pool.get('account.analytic.account').browse(cr, uid, account_id, context=context)
        result = {}
        if account and account.state == 'pending':
            result = {'warning' : {'title' : _('Analytic Account'), 'message' : _('The Analytic Account is pending !')}}
        return result
project_issue()
class account_analytic_line(osv.osv):
    """Expose the (read-only) creation date on analytic lines."""
    _inherit = 'account.analytic.line'
    _description = 'account analytic line'
    _columns = {
        'create_date' : fields.datetime('Create Date', readonly=True),
    }
account_analytic_line()
class hr_analytic_issue(osv.osv):
    """Link timesheet lines back to the issue they were logged on."""
    _inherit = 'hr.analytic.timesheet'
    _description = 'hr analytic timesheet'
    _columns = {
        'issue_id' : fields.many2one('project.issue', 'Issue'),
    }
hr_analytic_issue()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
instrat-nigeria/django-instrat-oppia | oppia/templatetags/display_functions.py | 3 | 1440 | # oppia/templatetags/display_functions.py
import hashlib
import json
import math
import urllib
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='get_index')
def get_index(start, index):
    """Template filter: add *index* to *start* (e.g. offset a loop counter)."""
    offset_value = start + index
    return offset_value
@register.filter(name='secs_to_duration')
def secs_to_duration(secs):
    """Template filter: render a duration in seconds as a short label.

    0 -> "-", under one minute -> "< 1 min", under two minutes ->
    "1 min", otherwise the whole number of minutes, e.g. "5 mins".
    """
    if secs == 0:
        return "-"
    if secs < 60:
        return "< 1 min"
    if secs < 120:
        return "1 min"
    # Whole minutes only; seconds are intentionally discarded.
    return str(int(math.floor(secs/60))) + " mins"
@register.filter(name='title_lang')
@stringfilter
def title_lang(title, lang):
    """Template filter: pick the *lang* translation from a JSON-encoded
    title dict, falling back to any available translation, then to the
    raw string when the title is not valid JSON.
    """
    try:
        titles = json.loads(title)
        if lang in titles:
            return titles[lang]
        else:
            # No exact match: return the first available translation.
            for l in titles:
                return titles[l]
    except (ValueError, TypeError):
        # Not a JSON mapping of translations (plain-string title, or a
        # non-subscriptable JSON value) -- fall through to the raw title.
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    return title
@register.filter(name='gravatar')
def gravatar(user, size):
    """Template filter: render an <img> tag for the user's Gravatar.

    The Gravatar id is the MD5 hex digest of the user's email address
    (Python 2 `urllib.urlencode`); output is marked safe for templates.
    """
    gravatar_url = "https://www.gravatar.com/avatar.php?"
    gravatar_url += urllib.urlencode({
        'gravatar_id':hashlib.md5(user.email).hexdigest(),
        'size':str(size)
    })
    return mark_safe(
        '<img src="{0}" alt="gravatar for {1}" class="gravatar" width="{2}" height="{2}"/>'.format(gravatar_url, user, size)
    )
chokribr/PIST | modules/bibformat/lib/elements/bfe_authority_subject.py | 18 | 2733 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints subject data from an Authority Record.
"""
import re
__revision__ = "$Id$"
def format_element(bfo, detail='no'):
    """ Prints the data of a subject authority record in HTML. By default prints
    brief version.
    @param detail: whether the 'detailed' rather than the 'brief' format
    @type detail: 'yes' or 'no'
    """
    from invenio.messages import gettext_set_language
    _ = gettext_set_language(bfo.lang)    # load the right message language
    # return value
    out = ""
    # local function
    def stringify_dict(d):
        """ return string composed values in d """
        # Only subfield 'a' (the main term) is used.
        _str = ""
        if 'a' in d:
            _str += d['a']
        return _str or ''
    # brief: MARC 150 holds the main subject heading.
    main_dicts = bfo.fields('150%%')
    if len(main_dicts):
        main_dict = main_dicts[0]
        main = stringify_dict(main_dict)
        out += "<p>" + "<strong>" + _("Main %s name") % _("subject") + "</strong>" + ": " + main + "</p>"
    # detail: 450 = variants ("see from"), 550 = related ("see also").
    if detail.lower() == "yes":
        sees = [stringify_dict(see_dict) for see_dict in bfo.fields('450%%')]
        sees = filter(None, sees) # fastest way to remove empty ""s
        sees = [re.sub(",{2,}",",", x) for x in sees] # prevent ",,"
        if len(sees):
            out += "<p>" + "<strong>" + _("Variant(s)") + "</strong>" + ": " + ", ".join(sees) + "</p>"
        see_alsos = [stringify_dict(see_also_dict) for see_also_dict in bfo.fields('550%%')]
        see_alsos = filter(None, see_alsos) # fastest way to remove empty ""s
        see_alsos = [re.sub(",{2,}",",", x) for x in see_alsos] # prevent ",,"
        if len(see_alsos):
            out += "<p>" + "<strong>" + _("See also") + "</strong>" + ": " + ", ".join(see_alsos) + "</p>"
    # return
    return out
def escape_values(bfo):
    """Tell BibFormat whether this element's output must be HTML-escaped.

    Returns 0: the element emits its own (already safe) HTML markup.
    """
    return 0
| gpl-2.0 |
sarthakmeh03/django | tests/lookup/tests.py | 12 | 37727 | from __future__ import unicode_literals
import collections
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import (
TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature,
)
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author.objects.create(name='Author 1')
self.au2 = Author.objects.create(name='Author 2')
# Create a couple of Articles.
self.a1 = Article.objects.create(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a2 = Article.objects.create(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3 = Article.objects.create(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a4 = Article.objects.create(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a5 = Article.objects.create(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a6 = Article.objects.create(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a7 = Article.objects.create(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
# Create a few Tags.
self.t1 = Tag.objects.create(name='Tag 1')
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag.objects.create(name='Tag 2')
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag.objects.create(name='Tag 3')
self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
# We can use .exists() to check that there are some
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(
Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline')
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
}
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith='Blah')
    def test_values(self):
        """values() returns dictionaries (optionally restricted to selected
        fields), supports extra(select=...) aliases and relation traversal,
        and rejects unknown field names with FieldError."""
        # values() returns a list of dictionaries instead of object instances --
        # and you can specify which fields you want to retrieve.
        self.assertSequenceEqual(
            Article.objects.values('headline'),
            [
                {'headline': 'Article 5'},
                {'headline': 'Article 6'},
                {'headline': 'Article 4'},
                {'headline': 'Article 2'},
                {'headline': 'Article 3'},
                {'headline': 'Article 7'},
                {'headline': 'Article 1'},
            ],
        )
        self.assertSequenceEqual(
            Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
            [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
        )
        self.assertSequenceEqual(
            Article.objects.values('id', 'headline'),
            [
                {'id': self.a5.id, 'headline': 'Article 5'},
                {'id': self.a6.id, 'headline': 'Article 6'},
                {'id': self.a4.id, 'headline': 'Article 4'},
                {'id': self.a2.id, 'headline': 'Article 2'},
                {'id': self.a3.id, 'headline': 'Article 3'},
                {'id': self.a7.id, 'headline': 'Article 7'},
                {'id': self.a1.id, 'headline': 'Article 1'},
            ],
        )
        # You can use values() with iterator() for memory savings,
        # because iterator() uses database-level iteration.
        self.assertSequenceEqual(
            list(Article.objects.values('id', 'headline').iterator()),
            [
                {'headline': 'Article 5', 'id': self.a5.id},
                {'headline': 'Article 6', 'id': self.a6.id},
                {'headline': 'Article 4', 'id': self.a4.id},
                {'headline': 'Article 2', 'id': self.a2.id},
                {'headline': 'Article 3', 'id': self.a3.id},
                {'headline': 'Article 7', 'id': self.a7.id},
                {'headline': 'Article 1', 'id': self.a1.id},
            ],
        )
        # The values() method works with "extra" fields specified in extra(select).
        self.assertSequenceEqual(
            Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
            [
                {'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
                {'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
                {'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
                {'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
                {'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
                {'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
                {'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
            ],
        )
        # Several extra(select) aliases can be requested at once.
        data = {
            'id_plus_one': 'id+1',
            'id_plus_two': 'id+2',
            'id_plus_three': 'id+3',
            'id_plus_four': 'id+4',
            'id_plus_five': 'id+5',
            'id_plus_six': 'id+6',
            'id_plus_seven': 'id+7',
            'id_plus_eight': 'id+8',
        }
        self.assertSequenceEqual(
            Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
            [{
                'id_plus_one': self.a1.id + 1,
                'id_plus_two': self.a1.id + 2,
                'id_plus_three': self.a1.id + 3,
                'id_plus_four': self.a1.id + 4,
                'id_plus_five': self.a1.id + 5,
                'id_plus_six': self.a1.id + 6,
                'id_plus_seven': self.a1.id + 7,
                'id_plus_eight': self.a1.id + 8,
            }],
        )
        # You can specify fields from forward and reverse relations, just like filter().
        self.assertSequenceEqual(
            Article.objects.values('headline', 'author__name'),
            [
                {'headline': self.a5.headline, 'author__name': self.au2.name},
                {'headline': self.a6.headline, 'author__name': self.au2.name},
                {'headline': self.a4.headline, 'author__name': self.au1.name},
                {'headline': self.a2.headline, 'author__name': self.au1.name},
                {'headline': self.a3.headline, 'author__name': self.au1.name},
                {'headline': self.a7.headline, 'author__name': self.au2.name},
                {'headline': self.a1.headline, 'author__name': self.au1.name},
            ],
        )
        self.assertSequenceEqual(
            Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
            [
                {'name': self.au1.name, 'article__headline': self.a1.headline},
                {'name': self.au1.name, 'article__headline': self.a2.headline},
                {'name': self.au1.name, 'article__headline': self.a3.headline},
                {'name': self.au1.name, 'article__headline': self.a4.headline},
                {'name': self.au2.name, 'article__headline': self.a5.headline},
                {'name': self.au2.name, 'article__headline': self.a6.headline},
                {'name': self.au2.name, 'article__headline': self.a7.headline},
            ],
        )
        # Traversing a many-to-many relation multiplies rows: one row per
        # (author, article, tag) combination.
        self.assertSequenceEqual(
            (
                Author.objects
                .values('name', 'article__headline', 'article__tag__name')
                .order_by('name', 'article__headline', 'article__tag__name')
            ),
            [
                {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
                {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
                {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
                {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
                {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
                {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
                {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
                {'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
                {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
            ],
        )
        # A FieldError is raised if you specify a non-existent field name in
        # values() (a field that is neither in the model nor in extra(select)).
        with self.assertRaises(FieldError):
            Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
        # If you don't specify field names to values(), all are returned.
        self.assertSequenceEqual(
            Article.objects.filter(id=self.a5.id).values(),
            [{
                'id': self.a5.id,
                'author_id': self.au2.id,
                'headline': 'Article 5',
                'pub_date': datetime(2005, 8, 1, 9, 0)
            }],
        )
    def test_values_list(self):
        """values_list() returns tuples in the requested field order; flat=True
        flattens single-field results and is rejected for multiple fields."""
        # values_list() is similar to values(), except that the results are
        # returned as a list of tuples, rather than a list of dictionaries.
        # Within each tuple, the order of the elements is the same as the order
        # of fields in the values_list() call.
        self.assertSequenceEqual(
            Article.objects.values_list('headline'),
            [
                ('Article 5',),
                ('Article 6',),
                ('Article 4',),
                ('Article 2',),
                ('Article 3',),
                ('Article 7',),
                ('Article 1',),
            ],
        )
        self.assertSequenceEqual(
            Article.objects.values_list('id').order_by('id'),
            [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
        )
        # flat=True unwraps single-element tuples into bare values.
        self.assertSequenceEqual(
            Article.objects.values_list('id', flat=True).order_by('id'),
            [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
        )
        # extra(select) aliases are only included when explicitly requested.
        self.assertSequenceEqual(
            Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'),
            [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
        )
        self.assertSequenceEqual(
            Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'),
            [
                (self.a1.id + 1, self.a1.id),
                (self.a2.id + 1, self.a2.id),
                (self.a3.id + 1, self.a3.id),
                (self.a4.id + 1, self.a4.id),
                (self.a5.id + 1, self.a5.id),
                (self.a6.id + 1, self.a6.id),
                (self.a7.id + 1, self.a7.id)
            ],
        )
        self.assertSequenceEqual(
            Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'),
            [
                (self.a1.id, self.a1.id + 1),
                (self.a2.id, self.a2.id + 1),
                (self.a3.id, self.a3.id + 1),
                (self.a4.id, self.a4.id + 1),
                (self.a5.id, self.a5.id + 1),
                (self.a6.id, self.a6.id + 1),
                (self.a7.id, self.a7.id + 1)
            ],
        )
        # Relation traversal works the same as in values(); a row per
        # (author, article, tag) combination.
        args = ('name', 'article__headline', 'article__tag__name')
        self.assertSequenceEqual(
            Author.objects.values_list(*args).order_by(*args),
            [
                (self.au1.name, self.a1.headline, self.t1.name),
                (self.au1.name, self.a2.headline, self.t1.name),
                (self.au1.name, self.a3.headline, self.t1.name),
                (self.au1.name, self.a3.headline, self.t2.name),
                (self.au1.name, self.a4.headline, self.t2.name),
                (self.au2.name, self.a5.headline, self.t2.name),
                (self.au2.name, self.a5.headline, self.t3.name),
                (self.au2.name, self.a6.headline, self.t3.name),
                (self.au2.name, self.a7.headline, self.t3.name),
            ],
        )
        # flat=True is incompatible with requesting more than one field.
        with self.assertRaises(TypeError):
            Article.objects.values_list('id', 'headline', flat=True)
    def test_get_next_previous_by(self):
        """get_next_by_FOO()/get_previous_by_FOO() walk date order, breaking
        date ties by primary key, and accept extra filter kwargs."""
        # Every DateField and DateTimeField creates get_next_by_FOO() and
        # get_previous_by_FOO() methods. In the case of identical date values,
        # these methods will use the ID as a fallback check. This guarantees
        # that no records are skipped or duplicated.
        self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>')
        self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>')
        # Extra lookup kwargs further restrict the candidates.
        self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>')
        self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>')
        self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>')
        # Article 5 is the latest, so there is no "next" for it.
        with self.assertRaises(Article.DoesNotExist):
            self.a5.get_next_by_pub_date()
        self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>')
        self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>')
        self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>')
        self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>')
        self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>')
        self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>')
        self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>')
        self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>')
    def test_escaping(self):
        """SQL wildcard characters (_, %, \\) in lookup values are escaped
        automatically, so they match literally."""
        # Underscores, percent signs and backslashes have special meaning in the
        # underlying SQL code, but Django handles the quoting of them automatically.
        Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
        # 'Article' (no wildcard) matches everything created so far.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__startswith='Article'),
            [
                '<Article: Article_ with underscore>',
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ]
        )
        # The underscore must match literally, not as the SQL single-char wildcard.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__startswith='Article_'),
            ['<Article: Article_ with underscore>']
        )
        Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
        self.assertQuerysetEqual(
            Article.objects.filter(headline__startswith='Article'),
            [
                '<Article: Article% with percent sign>',
                '<Article: Article_ with underscore>',
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ]
        )
        # Percent must match literally, not as the SQL multi-char wildcard.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__startswith='Article%'),
            ['<Article: Article% with percent sign>']
        )
        Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
        # A literal backslash in the value must not be treated as an escape char.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__contains='\\'),
            [r'<Article: Article with \ backslash>']
        )
    def test_exclude(self):
        """exclude() is the logical inverse of filter() for the same lookups."""
        Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
        Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
        Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
        # exclude() is the opposite of filter() when doing lookups:
        self.assertQuerysetEqual(
            Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
            [
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ]
        )
        # Wildcard characters are escaped in exclude() too (only the literal
        # 'Article_' prefix is excluded).
        self.assertQuerysetEqual(
            Article.objects.exclude(headline__startswith="Article_"),
            [
                '<Article: Article with \\ backslash>',
                '<Article: Article% with percent sign>',
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ]
        )
        # A bare field name in exclude() means an exact-match exclusion.
        self.assertQuerysetEqual(
            Article.objects.exclude(headline="Article 7"),
            [
                '<Article: Article with \\ backslash>',
                '<Article: Article% with percent sign>',
                '<Article: Article_ with underscore>',
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 1>',
            ]
        )
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual([article for article in Article.objects.none().iterator()], [])
    def test_in(self):
        """__in with an empty list matches nothing; excluding an empty __in
        matches everything."""
        # using __in with an empty list should return an empty query set
        self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
        self.assertQuerysetEqual(
            Article.objects.exclude(id__in=[]),
            [
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ]
        )
    def test_in_different_database(self):
        """An __in subquery drawn from a different database alias raises
        ValueError with a message suggesting evaluating the inner query."""
        with self.assertRaisesMessage(
            ValueError,
            "Subqueries aren't allowed across different databases. Force the "
            "inner query to be evaluated using `list(inner_query)`."
        ):
            list(Article.objects.filter(id__in=Article.objects.using('other').all()))
    def test_error_messages(self):
        """Invalid field names and unsupported lookups raise FieldError with
        helpful messages listing the valid choices."""
        # Programming errors are pointed out with nice error messages
        with self.assertRaisesMessage(
            FieldError,
            "Cannot resolve keyword 'pub_date_year' into field. Choices are: "
            "author, author_id, headline, id, pub_date, tag"
        ):
            # 'pub_date_year' is missing the double underscore (pub_date__year).
            Article.objects.filter(pub_date_year='2005').count()
        with self.assertRaisesMessage(
            FieldError,
            "Unsupported lookup 'starts' for CharField or join on the field "
            "not permitted."
        ):
            # 'starts' is not a valid lookup name (should be 'startswith').
            Article.objects.filter(headline__starts='Article')
    def test_relation_nested_lookup_error(self):
        """An invalid nested lookup on a related field raises a useful error,
        for both forward and reverse relations."""
        # An invalid nested lookup on a related field raises a useful error.
        msg = 'Related Field got invalid lookup: editor'
        with self.assertRaisesMessage(FieldError, msg):
            Article.objects.filter(author__editor__name='James')
        msg = 'Related Field got invalid lookup: foo'
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.filter(articles__foo='bar')
    def test_regex(self):
        """__regex / __iregex support quantifiers, wildcards, anchors,
        character sets, alternation, and greedy matching; __iregex is
        case-insensitive."""
        # Create some articles with a bit more interesting headlines for testing field lookups:
        # Per-instance delete() is used deliberately (exercises the instance API
        # rather than the bulk queryset delete).
        for a in Article.objects.all():
            a.delete()
        now = datetime.now()
        Article.objects.create(pub_date=now, headline='f')
        Article.objects.create(pub_date=now, headline='fo')
        Article.objects.create(pub_date=now, headline='foo')
        Article.objects.create(pub_date=now, headline='fooo')
        Article.objects.create(pub_date=now, headline='hey-Foo')
        Article.objects.create(pub_date=now, headline='bar')
        Article.objects.create(pub_date=now, headline='AbBa')
        Article.objects.create(pub_date=now, headline='baz')
        Article.objects.create(pub_date=now, headline='baxZ')
        # zero-or-more
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'fo*'),
            ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>']
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline__iregex=r'fo*'),
            [
                '<Article: f>',
                '<Article: fo>',
                '<Article: foo>',
                '<Article: fooo>',
                '<Article: hey-Foo>',
            ]
        )
        # one-or-more
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'fo+'),
            ['<Article: fo>', '<Article: foo>', '<Article: fooo>']
        )
        # wildcard
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'fooo?'),
            ['<Article: foo>', '<Article: fooo>']
        )
        # leading anchor
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'^b'),
            ['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
        )
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>'])
        # trailing anchor
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>'])
        self.assertQuerysetEqual(
            Article.objects.filter(headline__iregex=r'z$'),
            ['<Article: baxZ>', '<Article: baz>']
        )
        # character sets
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'ba[rz]'),
            ['<Article: bar>', '<Article: baz>']
        )
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>'])
        self.assertQuerysetEqual(
            Article.objects.filter(headline__iregex=r'ba[RxZ]'),
            ['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
        )
        # and more articles:
        Article.objects.create(pub_date=now, headline='foobar')
        Article.objects.create(pub_date=now, headline='foobaz')
        Article.objects.create(pub_date=now, headline='ooF')
        Article.objects.create(pub_date=now, headline='foobarbaz')
        Article.objects.create(pub_date=now, headline='zoocarfaz')
        Article.objects.create(pub_date=now, headline='barfoobaz')
        Article.objects.create(pub_date=now, headline='bazbaRFOO')
        # alternation
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'oo(f|b)'),
            [
                '<Article: barfoobaz>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline__iregex=r'oo(f|b)'),
            [
                '<Article: barfoobaz>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
                '<Article: ooF>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'^foo(f|b)'),
            ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>']
        )
        # greedy matching
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'b.*az'),
            [
                '<Article: barfoobaz>',
                '<Article: baz>',
                '<Article: bazbaRFOO>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline__iregex=r'b.*ar'),
            [
                '<Article: bar>',
                '<Article: barfoobaz>',
                '<Article: bazbaRFOO>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
            ]
        )
    @skipUnlessDBFeature('supports_regex_backreferencing')
    def test_regex_backreferencing(self):
        """__regex supports grouping and backreferences (\\1) on backends
        that advertise supports_regex_backreferencing."""
        # grouping and backreferences
        now = datetime.now()
        Article.objects.create(pub_date=now, headline='foobar')
        Article.objects.create(pub_date=now, headline='foobaz')
        Article.objects.create(pub_date=now, headline='ooF')
        Article.objects.create(pub_date=now, headline='foobarbaz')
        Article.objects.create(pub_date=now, headline='zoocarfaz')
        Article.objects.create(pub_date=now, headline='barfoobaz')
        Article.objects.create(pub_date=now, headline='bazbaRFOO')
        # 'b(.)' captures one char; '\1' requires the same char after a later 'b'.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__regex=r'b(.).*b\1'),
            ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>']
        )
    def test_regex_null(self):
        """
        A regex lookup does not fail on null/None values
        """
        Season.objects.create(year=2012, gt=None)
        # NULL never matches a regex, even one that matches the empty string.
        self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
    def test_regex_non_string(self):
        """
        A regex lookup does not fail on non-string fields
        """
        # gt is an integer field; the regex matches its string representation.
        Season.objects.create(year=2013, gt=444)
        self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>'])
    def test_regex_non_ascii(self):
        """
        A regex lookup does not trip on non-ASCII characters.
        """
        # U+2660 (black spade suit) — get() raising would fail the test.
        Player.objects.create(name='\u2660')
        Player.objects.get(name__regex='\u2660')
    def test_nonfield_lookups(self):
        """
        A lookup query containing non-fields raises the proper exception.
        """
        # Unknown lookup name on a real field.
        with self.assertRaises(FieldError):
            Article.objects.filter(headline__blahblah=99)
        # Unknown intermediate piece followed by a valid lookup.
        with self.assertRaises(FieldError):
            Article.objects.filter(headline__blahblah__exact=99)
        # Entirely unknown field name.
        with self.assertRaises(FieldError):
            Article.objects.filter(blahblah=99)
    def test_lookup_collision(self):
        """
        Genuine field names don't collide with built-in lookup types
        ('year', 'gt', 'range', 'in' etc.) (#11670).
        """
        # 'gt' is used as a code number for the year, e.g. 111=>2009.
        season_2009 = Season.objects.create(year=2009, gt=111)
        season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2010 = Season.objects.create(year=2010, gt=222)
        season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
        season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
        season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2011 = Season.objects.create(year=2011, gt=333)
        season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
        hunter_pence = Player.objects.create(name="Hunter Pence")
        hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
        pudge = Player.objects.create(name="Ivan Rodriquez")
        pudge.games.set(Game.objects.filter(season__year=2009))
        pedro_feliz = Player.objects.create(name="Pedro Feliz")
        pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
        johnson = Player.objects.create(name="Johnson")
        johnson.games.set(Game.objects.filter(season__year__in=[2011]))
        # Games in 2010
        # season__gt resolves to the Season.gt FIELD (exact match), while
        # season__gt__gt applies the gt LOOKUP to the gt field.
        self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
        self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
        self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
        self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
        # Games in 2011
        self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
        self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
        self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
        # Games played in 2010 and 2011
        self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
        self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
        self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
        self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
        # Players who played in 2009
        self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
        # Players who played in 2010
        self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
        # Players who played in 2011
        self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
    def test_chain_date_time_lookups(self):
        """Date/time transforms (__month, __day, __hour, __minute) can be
        chained with comparison lookups (__gt, __gte, __lt, __lte)."""
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__month__gt=7),
            ['<Article: Article 5>', '<Article: Article 6>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__day__gte=27),
            ['<Article: Article 2>', '<Article: Article 3>',
             '<Article: Article 4>', '<Article: Article 7>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__hour__lt=8),
            ['<Article: Article 1>', '<Article: Article 2>',
             '<Article: Article 3>', '<Article: Article 4>',
             '<Article: Article 7>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__minute__lte=0),
            ['<Article: Article 1>', '<Article: Article 2>',
             '<Article: Article 3>', '<Article: Article 4>',
             '<Article: Article 5>', '<Article: Article 6>',
             '<Article: Article 7>'],
            ordered=False
        )
class LookupTransactionTests(TransactionTestCase):
    """Lookup tests that need real transactions / raw DDL (MySQL fulltext)."""
    available_apps = ['lookup']
    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnless(connection.vendor == 'mysql', 'requires MySQL')
    def test_mysql_lookup_search(self):
        # To use fulltext indexes on MySQL either version 5.6 is needed, or one must use
        # MyISAM tables. Neither of these combinations is currently available on CI, so
        # lets manually create a MyISAM table for Article model.
        with connection.cursor() as cursor:
            cursor.execute(
                "CREATE TEMPORARY TABLE myisam_article ("
                "    id INTEGER PRIMARY KEY AUTO_INCREMENT, "
                "    headline VARCHAR(100) NOT NULL "
                ") ENGINE MYISAM")
            dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
            MyISAMArticle.objects.create(headline='Ringo Star')
            # NOTE: Needs to be created after the article has been saved.
            cursor.execute(
                'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
            # Only the row containing the search term should match.
            self.assertSequenceEqual(MyISAMArticle.objects.filter(headline__search='Reinhardt'), [dr])
| bsd-3-clause |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/lib/site-packages/pip/commands/download.py | 340 | 7810 | from __future__ import absolute_import
import logging
import os
from pip.exceptions import CommandError
from pip.index import FormatControl
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip import cmdoptions
from pip.utils import ensure_dir, normalize_path
from pip.utils.build import BuildDirectory
from pip.utils.filesystem import check_path_owner
logger = logging.getLogger(__name__)
class DownloadCommand(RequirementCommand):
"""
Download packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports downloading from "requirements files", which provide
an easy way to specify a whole environment to be downloaded.
"""
name = 'download'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Download packages.'
    def __init__(self, *args, **kw):
        """Register the `pip download` command-line options.

        Registration order matters: it determines the order options appear
        in `pip download --help`, and option groups inserted at index 0 are
        listed before the command options.
        """
        super(DownloadCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        # Shared options reused from pip's central cmdoptions registry.
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.global_options())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(cmdoptions.pre())
        cmd_opts.add_option(cmdoptions.no_clean())
        cmd_opts.add_option(cmdoptions.require_hashes())
        # Destination directory; defaults to the current working directory.
        cmd_opts.add_option(
            '-d', '--dest', '--destination-dir', '--destination-directory',
            dest='download_dir',
            metavar='dir',
            default=os.curdir,
            help=("Download packages into <dir>."),
        )
        # The four options below restrict which wheels are considered
        # compatible; run() requires --only-binary=:all: when any is set.
        cmd_opts.add_option(
            '--platform',
            dest='platform',
            metavar='platform',
            default=None,
            help=("Only download wheels compatible with <platform>. "
                  "Defaults to the platform of the running system."),
        )
        cmd_opts.add_option(
            '--python-version',
            dest='python_version',
            metavar='python_version',
            default=None,
            help=("Only download wheels compatible with Python "
                  "interpreter version <version>. If not specified, then the "
                  "current system interpreter minor version is used. A major "
                  "version (e.g. '2') can be specified to match all "
                  "minor revs of that major version. A minor version "
                  "(e.g. '34') can also be specified."),
        )
        cmd_opts.add_option(
            '--implementation',
            dest='implementation',
            metavar='implementation',
            default=None,
            help=("Only download wheels compatible with Python "
                  "implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
                  " or 'ip'. If not specified, then the current "
                  "interpreter implementation is used. Use 'py' to force "
                  "implementation-agnostic wheels."),
        )
        cmd_opts.add_option(
            '--abi',
            dest='abi',
            metavar='abi',
            default=None,
            help=("Only download wheels compatible with Python "
                  "abi <abi>, e.g. 'pypy_41'. If not specified, then the "
                  "current interpreter abi tag is used. Generally "
                  "you will need to specify --implementation, "
                  "--platform, and --python-version when using "
                  "this option."),
        )
        # Package-index options (index URL, etc.) shared with other commands.
        index_opts = cmdoptions.make_option_group(
            cmdoptions.non_deprecated_index_group,
            self.parser,
        )
        # Insert at position 0 so index options precede command options in help.
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
options.ignore_installed = True
if options.python_version:
python_versions = [options.python_version]
else:
python_versions = None
dist_restriction_set = any([
options.python_version,
options.platform,
options.abi,
options.implementation,
])
binary_only = FormatControl(set(), set([':all:']))
if dist_restriction_set and options.format_control != binary_only:
raise CommandError(
"--only-binary=:all: must be set and --no-binary must not "
"be set (or must be set to :none:) when restricting platform "
"and interpreter constraints using --python-version, "
"--platform, --abi, or --implementation."
)
options.src_dir = os.path.abspath(options.src_dir)
options.download_dir = normalize_path(options.download_dir)
ensure_dir(options.download_dir)
with self._build_session(options) as session:
finder = self._build_package_finder(
options=options,
session=session,
platform=options.platform,
python_versions=python_versions,
abi=options.abi,
implementation=options.implementation,
)
build_delete = (not (options.no_clean or options.build_dir))
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
ignore_installed=True,
ignore_dependencies=options.ignore_dependencies,
session=session,
isolated=options.isolated_mode,
require_hashes=options.require_hashes
)
self.populate_requirement_set(
requirement_set,
args,
options,
finder,
session,
self.name,
None
)
if not requirement_set.has_requirements:
return
requirement_set.prepare_files(finder)
downloaded = ' '.join([
req.name for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
return requirement_set
| gpl-3.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/scipy/signal/__init__.py | 2 | 11493 | """
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order-sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response.
freqz -- Digital filter frequency response.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
   bessel -- Bessel (no order selection available -- try buttord)
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
freqresp -- frequency response of a continuous-time LTI system.
lti -- Linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
   step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
bode -- Calculate Bode magnitude and phase data.
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
   ss2tf -- state-space to transfer function.
ss2zpk -- state-space to pole-zero-gain.
sos2zpk -- second-order-sections to zero-pole-gain.
sos2tf -- second-order-sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
"""
from __future__ import division, print_function, absolute_import
from . import sigtools
from .waveforms import *
from ._max_len_seq import max_len_seq
from ._upfirdn import upfirdn
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2
from .spline import *
from .bsplines import *
from .cont2discrete import *
from .dltisys import *
from .filter_design import *
from .fir_filter_design import *
from .ltisys import *
from .windows import *
from .signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from .spectral import *
from .wavelets import *
from ._peak_finding import *
# Export every public (non-underscore) name pulled in by the star imports.
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
# Hook up numpy's test runner so scipy.signal.test() / .bench() work.
test = Tester().test
bench = Tester().bench
| apache-2.0 |
hsu1994/Terminator | Server/RelyON/boost_1_61_0/tools/build/src/tools/rc.py | 11 | 7287 | # Status: being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (C) Andre Hentz 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
#
# Copyright (c) 2006 Rene Rivera.
#
# Copyright (c) 2008 Steven Watanabe
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##import type ;
##import generators ;
##import feature ;
##import errors ;
##import scanner ;
##import toolset : flags ;
import os.path
import re
import bjam
from b2.build import type, toolset, generators, scanner, feature
from b2.exceptions import AlreadyDefined
from b2.tools import builtin
from b2.util import regex
from b2.build.toolset import flags
from b2.manager import get_manager
from b2.util import utility
# Cache for debug(); None means "not yet computed".
__debug = None

def debug():
    """Return True when bjam was invoked with --debug-configuration.

    The flag is read from ARGV once and memoized in the module-level
    ``__debug`` sentinel.
    """
    global __debug
    if __debug is not None:
        return __debug
    __debug = "--debug-configuration" in bjam.variable("ARGV")
    return __debug
# Register the 'RC' target type, matching source files with the .rc suffix.
type.register('RC', ['rc'])
def init():
    """Module initialization hook; no setup is required for this toolset."""
    pass
def configure (command = None, condition = None, options = None):
    """Configure a resource compilation command for a condition
    (usually a toolset selection condition).

    The recognized option is:

      * <rc-type>(rc|windres) - indicates the flavour of command-line
        options the compiler accepts.

    Even though the arguments are all optional, only when a command,
    condition, and at minimum the rc-type option are given will the command
    be configured.  This is so that callers don't have to check
    auto-configuration values before calling this, and still get build
    failures when the resource compiler can't be found.
    """
    rc_type = feature.get_values('<rc-type>', options)
    if rc_type:
        # feature.get_values returns a list; at most one <rc-type> may be set.
        assert(len(rc_type) == 1)
        rc_type = rc_type[0]
    # Only configure when everything needed is present (see docstring).
    if command and condition and rc_type:
        # Record the compiler command and its flavour as toolset flags,
        # scoped to the given condition.
        flags('rc.compile.resource', '.RC', condition, command)
        flags('rc.compile.resource', '.RC_TYPE', condition, rc_type.lower())
        # Pass <define>/<include> properties through to the command line.
        flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
        flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
        if debug():
            print 'notice: using rc compiler ::', condition, '::', command
# Module-level handle to the build engine; used by the action helpers below.
engine = get_manager().engine()
class RCAction:
    """A bjam action implemented by a Python callable.

    Invoking an instance forwards (targets, sources, property_set) to the
    wrapped callable; when no callable was supplied, the call is a no-op.
    """

    def __init__(self, action_name, function):
        # Other code reads these attributes, so their names must not change.
        self.action_name = action_name
        self.function = function

    def __call__(self, targets, sources, property_set):
        # A missing/empty function means the action is declared but has no
        # Python-side behavior.
        if not self.function:
            return
        self.function(targets, sources, property_set)
# FIXME: What is the proper way to dispatch actions?
def rc_register_action(action_name, function = None):
    """Register *function* under *action_name* in the engine's action table.

    :param action_name: bjam action name, e.g. 'rc.compile.resource'.
    :param function: optional callable invoked with
        (targets, sources, property_set) when the action runs; None
        registers a do-nothing action.
    :raises AlreadyDefined: if *action_name* is already registered.
    """
    global engine
    # dict.has_key() is deprecated in Python 2.7 and removed in Python 3;
    # the 'in' operator is equivalent and works on both.
    if action_name in engine.actions:
        raise AlreadyDefined("Bjam action %s is already defined" % action_name)
    engine.actions[action_name] = RCAction(action_name, function)
def rc_compile_resource(targets, sources, properties):
    # Dispatch to the flavour-specific action ('...rc' or '...windres'),
    # chosen via the .RC_TYPE flag recorded by configure().
    rc_type = bjam.call('get-target-variable', targets, '.RC_TYPE')
    global engine
    # NOTE(review): bjam.call typically returns a list of values; this
    # string concatenation assumes a plain string -- confirm the return
    # shape before relying on it.
    engine.set_update_action('rc.compile.resource.' + rc_type, targets, sources, properties)
# Route the generic 'rc.compile.resource' action through the Python
# dispatcher above, which selects a flavour-specific action by suffix.
rc_register_action('rc.compile.resource', rc_compile_resource)
# Concrete command line for Microsoft-style rc.exe.
engine.register_action(
    'rc.compile.resource.rc',
    '"$(.RC)" -l 0x409 "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -fo "$(<)" "$(>)"')
# Concrete command line for GNU windres.
engine.register_action(
    'rc.compile.resource.windres',
    '"$(.RC)" "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -o "$(<)" -i "$(>)"')
# FIXME: this was originally declared quietly
engine.register_action(
    'compile.resource.null',
    'as /dev/null -o "$(<)"')
# Since it's a common practice to write
# exe hello : hello.cpp hello.rc
# we change the name of object created from RC file, to
# avoid conflict with hello.cpp.
# The reason we generate OBJ and not RES, is that gcc does not
# seem to like RES files, but works OK with OBJ.
# See http://article.gmane.org/gmane.comp.lib.boost.build/5643/
#
# Using 'register-c-compiler' adds the build directory to INCLUDES
# FIXME: switch to generators
builtin.register_c_compiler('rc.compile.resource', ['RC'], ['OBJ(%_res)'], [])
# Regex for angle-bracket #include directives.
# NOTE(review): this constant appears unused in this module -- the same
# pattern is inlined in ResScanner; confirm no external user before removing.
__angle_include_re = "#include[ ]*<([^<]+)>"
# Register scanner for resources
class ResScanner(scanner.Scanner):
    """Dependency scanner for Windows resource (.rc) files.

    Finds #include'd headers (angle and quoted forms) plus files referenced
    by BITMAP/CURSOR/FONT/ICON/MESSAGETABLE/RT_MANIFEST statements, and
    registers them as dependencies with the build engine.
    """
    def __init__(self, includes):
        # NOTE(review): this statement only *references* the module's
        # __init__ attribute and discards it -- the base Scanner class is
        # never actually initialized.  Presumably it was meant to call
        # scanner.Scanner.__init__; confirm before changing, since the code
        # has shipped this way.
        scanner.__init__ ;
        # <include> search-path properties supplied by the build.
        self.includes = includes
    def pattern(self):
        # One alternation matching either a resource statement
        # ("<name> BITMAP <file>" etc.) or a #include directive.
        return "(([^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\
               "[ ]+([^ \"]+|\"[^\"]+\"))|(#include[ ]*(<[^<]+>|\"[^\"]+\")))" ;
    def process(self, target, matches, binding):
        """Turn regex *matches* found in *binding* into include dependencies
        of *target* and set up search paths for resolving them."""
        binding = binding[0]
        # Split the raw matches into the three reference kinds.
        angle = regex.transform(matches, "#include[ ]*<([^<]+)>")
        quoted = regex.transform(matches, "#include[ ]*\"([^\"]+)\"")
        res = regex.transform(matches,
                              "[^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\
                              "[ ]+(([^ \"]+)|\"([^\"]+)\")", [3, 4])
        # Icons and other includes may be referenced as
        #
        #     IDR_MAINFRAME ICON "res\\icon.ico"
        #
        # so we have to replace double backslashes with single ones.
        res = [ re.sub(r'\\\\', '/', match) for match in res if match is not None ]
        # CONSIDER: the new scoping rule seems to defeat "on target" variables.
        g = bjam.call('get-target-variable', target, 'HDRGRIST')[0]
        b = os.path.normpath(os.path.dirname(binding))
        # Attach binding of including file to included targets.
        # When target is directly created from virtual target
        # this extra information is unnecessary. But in other
        # cases, it allows to distinguish between two headers of the
        # same name included from different places.
        # We don't need this extra information for angle includes,
        # since they should not depend on including file (we can't
        # get literal "." in include path).
        g2 = g + "#" + b
        g = "<" + g + ">"
        g2 = "<" + g2 + ">"
        angle = [g + x for x in angle]
        quoted = [g2 + x for x in quoted]
        res = [g2 + x for x in res]
        all = angle + quoted
        # Record the includes with bjam and make the resource references
        # dependencies of the target.
        bjam.call('mark-included', target, all)
        engine = get_manager().engine()
        engine.add_dependency(target, res)
        # Missing included files must not abort the build.
        bjam.call('NOCARE', all + res)
        # Angle includes search only the <include> paths; quoted and resource
        # references get the paths prefixed with the including file's
        # directory (b).
        engine.set_target_variable(angle, 'SEARCH', [utility.get_value(inc) for inc in self.includes])
        engine.set_target_variable(quoted, 'SEARCH', [b + utility.get_value(inc) for inc in self.includes])
        engine.set_target_variable(res, 'SEARCH', [b + utility.get_value(inc) for inc in self.includes])
        # Just propagate current scanner to includes, in a hope
        # that includes do not change scanners.
        get_manager().scanners().propagate(self, angle + quoted)
# Register the scanner (with 'include' as its relevant property) and make it
# the scanner used for all RC-type sources.
scanner.register(ResScanner, 'include')
type.set_scanner('RC', ResScanner)
| apache-2.0 |
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/setuptools/command/build_py.py | 207 | 8440 | import os
import sys
import fnmatch
import textwrap
from distutils.command.build_py import build_py as _build_py
from distutils.util import convert_path
from glob import glob
# Use the real 2to3 mixin when lib2to3 support is available; otherwise fall
# back to a no-op stub so build_py still works without conversion support.
try:
    from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
    class Mixin2to3:
        def run_2to3(self, files, doctests=True):
            "do nothing"
class build_py(_build_py, Mixin2to3):
    """Enhanced 'build_py' command that includes data files with packages.

    The data files are specified via a 'package_data' argument to 'setup()'.
    See 'setuptools.dist.Distribution' for more details.

    Also, this version of the 'build_py' command allows you to specify both
    'py_modules' and 'packages' in the same setup operation.
    """

    def finalize_options(self):
        # Let distutils compute the standard options first.
        _build_py.finalize_options(self)
        # package_data / exclude_package_data come from the setup() call.
        self.package_data = self.distribution.package_data
        self.exclude_package_data = self.distribution.exclude_package_data or {}
        # Drop any cached 'data_files' so __getattr__ recomputes it lazily.
        if 'data_files' in self.__dict__: del self.__dict__['data_files']
        # Files actually (re)copied during the build; fed to run_2to3 later.
        self.__updated_files = []
        # Copied data files registered for doctest 2to3 conversion.
        self.__doctests_2to3 = []

    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            return
        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
            self.build_package_data()
        # Run 2to3 over the freshly copied sources (code, then doctests).
        self.run_2to3(self.__updated_files, False)
        self.run_2to3(self.__updated_files, True)
        self.run_2to3(self.__doctests_2to3, True)
        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(_build_py.get_outputs(self, include_bytecode=0))

    def __getattr__(self, attr):
        if attr=='data_files':  # lazily compute data files
            self.data_files = files = self._get_data_files()
            return files
        return _build_py.__getattr__(self,attr)

    def build_module(self, module, module_file, package):
        # Track which modules were actually copied so only those go
        # through 2to3.
        outfile, copied = _build_py.build_module(self, module, module_file, package)
        if copied:
            self.__updated_files.append(outfile)
        return outfile, copied

    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        self.analyze_manifest()
        data = []
        for package in self.packages or ():
            # Locate package source directory
            src_dir = self.get_package_dir(package)
            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))
            # Length of path to strip from found files
            plen = len(src_dir)+1
            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
            ]
            data.append((package, src_dir, build_dir, filenames))
        return data

    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        # Patterns that apply to every package ('') plus package-specific ones.
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        # Start from files recorded in the manifest ([:] copies the list).
        files = self.manifest_files.get(package, [])[:]
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
        return self.exclude_data_files(package, src_dir, files)

    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
                # Remember copied files whose doctests must be converted.
                if copied and srcfile in self.distribution.convert_2to3_doctests:
                    self.__doctests_2to3.append(outf)

    def analyze_manifest(self):
        """Populate self.manifest_files: package name -> data files that the
        egg-info manifest lists inside that package's source directory."""
        self.manifest_files = mf = {}
        if not self.distribution.include_package_data:
            return
        src_dirs = {}
        for package in self.packages or ():
            # Locate package source directory
            src_dirs[assert_relative(self.get_package_dir(package))] = package
        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        for path in ei_cmd.filelist.files:
            d,f = os.path.split(assert_relative(path))
            prev = None
            oldf = f
            # Walk upward until we reach a known package source directory.
            while d and d!=prev and d not in src_dirs:
                prev = d
                d, df = os.path.split(d)
                f = os.path.join(df, f)
            if d in src_dirs:
                if path.endswith('.py') and f==oldf:
                    continue  # it's a module, not data
                mf.setdefault(src_dirs[d],[]).append(path)

    def get_data_files(self): pass  # kludge 2.4 for lazy computation

    if sys.version<"2.4":  # Python 2.4 already has this code
        def get_outputs(self, include_bytecode=1):
            """Return complete list of files copied to the build directory.

            This includes both '.py' files and data files, as well as '.pyc'
            and '.pyo' files if 'include_bytecode' is true.  (This method is
            needed for the 'install_lib' command to do its job properly, and
            to generate a correct installation manifest.)
            """
            return _build_py.get_outputs(self, include_bytecode) + [
                os.path.join(build_dir, filename)
                for package, src_dir, build_dir,filenames in self.data_files
                for filename in filenames
            ]

    def check_package(self, package, package_dir):
        """Check namespace packages' __init__ for declare_namespace"""
        try:
            # Cached result from a previous check?
            return self.packages_checked[package]
        except KeyError:
            pass
        init_py = _build_py.check_package(self, package, package_dir)
        self.packages_checked[package] = init_py
        if not init_py or not self.distribution.namespace_packages:
            return init_py
        # Only warn for declared namespace packages (or their parents).
        for pkg in self.distribution.namespace_packages:
            if pkg==package or pkg.startswith(package+'.'):
                break
        else:
            return init_py
        f = open(init_py,'rbU')
        if 'declare_namespace'.encode() not in f.read():
            from distutils import log
            log.warn(
                "WARNING: %s is a namespace package, but its __init__.py does\n"
                "not declare_namespace(); setuptools 0.7 will REQUIRE this!\n"
                '(See the setuptools manual under "Namespace Packages" for '
                "details.)\n", package
            )
        f.close()
        return init_py

    def initialize_options(self):
        # Cache of check_package() results, keyed by package name.
        self.packages_checked={}
        _build_py.initialize_options(self)

    def get_package_dir(self, package):
        res = _build_py.get_package_dir(self, package)
        # Honor an alternative source root when the distribution defines one.
        if self.distribution.src_root is not None:
            return os.path.join(self.distribution.src_root, res)
        return res

    def exclude_data_files(self, package, src_dir, files):
        """Filter filenames for package's data files in 'src_dir'"""
        globs = (self.exclude_package_data.get('', [])
                 + self.exclude_package_data.get(package, []))
        bad = []
        for pattern in globs:
            bad.extend(
                fnmatch.filter(
                    files, os.path.join(src_dir, convert_path(pattern))
                )
            )
        bad = dict.fromkeys(bad)
        seen = {}
        return [
            f for f in files if f not in bad
            and f not in seen and seen.setdefault(f,1)  # ditch dupes
        ]
def assert_relative(path):
    """Return *path* unchanged if it is relative.

    Absolute paths are rejected with a DistutilsSetupError explaining that
    setup() arguments must be /-separated paths relative to setup.py.
    """
    if os.path.isabs(path):
        from distutils.errors import DistutilsSetupError
        template = textwrap.dedent("""
            Error: setup script specifies an absolute path:
                %s
            setup() arguments must *always* be /-separated paths relative to the
            setup.py directory, *never* absolute paths.
            """).lstrip()
        raise DistutilsSetupError(template % path)
    return path
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.